index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler/kafka/types.py | import asyncio
import concurrent.futures
from typing import Any, NamedTuple, Optional, Protocol, Sequence, TypedDict, Union
from langchain_core.runnables import RunnableConfig
class Topics(NamedTuple):
    """Names of the Kafka topics the scheduler operates on.

    Each field is a plain topic-name string: one topic for messages to the
    orchestrator, one for messages to the executor, and one for error reports.
    """

    orchestrator: str
    executor: str
    error: str
class Sendable(TypedDict):
    """A message to be produced to Kafka: a target topic plus optional payload."""

    topic: str
    # Message payload; may be None.
    value: Optional[Any]
    # Partitioning key; None means no key.
    key: Optional[Any]
class MessageToOrchestrator(TypedDict):
    """Payload of a message on the orchestrator topic."""

    # Graph input dict, or None — presumably None means resume/continue; confirm
    # against the orchestrator's handling of this field.
    input: Optional[dict[str, Any]]
    config: RunnableConfig
    # Extra messages to produce afterwards (name suggests "finally" semantics,
    # i.e. sent once handling completes) — TODO confirm at the consumer site.
    finally_send: Optional[Sequence[Sendable]]
class ExecutorTask(TypedDict):
    """Identifies a single task for the executor to run."""

    id: Optional[str]
    # Task path: a sequence of string/int segments addressing the task.
    path: tuple[Union[str, int], ...]
class MessageToExecutor(TypedDict):
    """Payload of a message on the executor topic."""

    config: RunnableConfig
    # The task the executor should run.
    task: ExecutorTask
    # Extra messages to produce afterwards (name suggests "finally" semantics) —
    # TODO confirm at the consumer site.
    finally_send: Optional[Sequence[Sendable]]
class ErrorMessage(TypedDict):
    """Record published to the error topic when handling a message fails."""

    # Topic name — presumably the topic the failing message came from; confirm
    # at the publish site.
    topic: str
    # String form of the error.
    error: str
    # The original message whose handling failed.
    msg: Union[MessageToExecutor, MessageToOrchestrator]
class TopicPartition(Protocol):
    """Structural stand-in for a Kafka topic/partition pair (duck-typed)."""

    topic: str
    partition: int
class ConsumerRecord(Protocol):
    """Structural type for a single record fetched from Kafka.

    Duck-types the record objects returned by consumer implementations —
    presumably both kafka-python and aiokafka records match; confirm against
    the concrete consumers used with this protocol.
    """

    topic: str
    "The topic this record is received from"
    partition: int
    "The partition from which this record is received"
    offset: int
    "The position of this record in the corresponding Kafka partition."
    timestamp: int
    "The timestamp of this record"
    timestamp_type: int
    "The timestamp type of this record"
    key: Optional[bytes]
    "The key (or `None` if no key is specified)"
    value: Optional[bytes]
    "The value"
class Consumer(Protocol):
    """Minimal synchronous consumer interface required by the scheduler."""

    # Fetch a batch of records, grouped by topic-partition.
    def getmany(
        self, timeout_ms: int, max_records: int
    ) -> dict[TopicPartition, Sequence[ConsumerRecord]]: ...

    # Commit the consumer's current offsets.
    def commit(self) -> None: ...
class AsyncConsumer(Protocol):
    """Async counterpart of Consumer: same methods, awaitable."""

    # Fetch a batch of records, grouped by topic-partition.
    async def getmany(
        self, timeout_ms: int, max_records: int
    ) -> dict[TopicPartition, Sequence[ConsumerRecord]]: ...

    # Commit the consumer's current offsets.
    async def commit(self) -> None: ...
class Producer(Protocol):
    """Minimal synchronous producer interface required by the scheduler."""

    # Enqueue a message; returns a stdlib Future tracking the send.
    def send(
        self,
        topic: str,
        *,
        key: Optional[bytes] = None,
        value: Optional[bytes] = None,
    ) -> concurrent.futures.Future: ...
class AsyncProducer(Protocol):
    """Async counterpart of Producer: send returns an asyncio Future."""

    # Enqueue a message; returns an asyncio.Future tracking the send.
    async def send(
        self,
        topic: str,
        *,
        key: Optional[bytes] = None,
        value: Optional[bytes] = None,
    ) -> asyncio.Future: ...
|
0 | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler/kafka/default_async.py | import aiokafka
class DefaultAsyncConsumer(aiokafka.AIOKafkaConsumer):
    """Default async consumer: aiokafka's AIOKafkaConsumer used as-is.

    No overrides — presumably AIOKafkaConsumer already satisfies the
    scheduler's AsyncConsumer protocol; confirm against types.AsyncConsumer.
    """

    pass
class DefaultAsyncProducer(aiokafka.AIOKafkaProducer):
    """Default async producer: aiokafka's AIOKafkaProducer used as-is.

    No overrides — presumably AIOKafkaProducer already satisfies the
    scheduler's AsyncProducer protocol; confirm against types.AsyncProducer.
    """

    pass
|
0 | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler/kafka/default_sync.py | import concurrent.futures
from typing import Optional, Sequence
from kafka import KafkaConsumer, KafkaProducer
from langgraph.scheduler.kafka.types import ConsumerRecord, TopicPartition
class DefaultConsumer(KafkaConsumer):
    """kafka-python consumer adapted to the scheduler's Consumer protocol.

    Adds a ``getmany`` batch-fetch method that delegates to ``poll``, plus
    context-manager support that closes the consumer on exit.
    """

    def getmany(
        self, timeout_ms: int, max_records: int
    ) -> dict[TopicPartition, Sequence[ConsumerRecord]]:
        # KafkaConsumer.poll already returns records grouped by
        # topic-partition, which is exactly the shape getmany promises.
        batch = self.poll(timeout_ms=timeout_ms, max_records=max_records)
        return batch

    def __enter__(self):
        # Entering the context yields the consumer itself.
        return self

    def __exit__(self, *exc_info):
        # Always close the underlying consumer, exception or not.
        self.close()
class DefaultProducer(KafkaProducer):
    """kafka-python producer adapted to the scheduler's Producer protocol.

    Wraps ``KafkaProducer.send`` so callers receive a standard
    ``concurrent.futures.Future`` instead of kafka-python's own future type.
    """

    def send(
        self,
        topic: str,
        *,
        key: Optional[bytes] = None,
        value: Optional[bytes] = None,
    ) -> concurrent.futures.Future:
        # Bridge kafka-python's future onto a stdlib Future so callers can
        # use the concurrent.futures API uniformly: success propagates via
        # set_result, failure via set_exception.
        kafka_future = super().send(topic, key=key, value=value)
        bridged: concurrent.futures.Future = concurrent.futures.Future()
        kafka_future.add_callback(bridged.set_result)
        kafka_future.add_errback(bridged.set_exception)
        return bridged

    def __enter__(self):
        # Entering the context yields the producer itself.
        return self

    def __exit__(self, *exc_info):
        # Close the producer when leaving the context.
        self.close()
|
0 | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler | lc_public_repos/langgraph/libs/scheduler-kafka/langgraph/scheduler/kafka/serde.py | from typing import Any
import orjson
from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
SERIALIZER = JsonPlusSerializer()
def loads(v: bytes) -> Any:
    """Deserialize message bytes via the module-level JsonPlusSerializer."""
    return SERIALIZER.loads(v)
def dumps(v: Any) -> bytes:
    """Serialize ``v`` to JSON bytes with orjson.

    Values orjson cannot serialize natively are routed through ``_default``,
    which maps them to None (JSON null) instead of raising.
    """
    return orjson.dumps(v, default=_default)
def _default(v: Any) -> Any:
    """orjson ``default`` hook: collapse any unserializable value to None."""
    # things we don't know how to serialize (eg. functions) ignore
    return None
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/checkpoint-duckdb/Makefile | .PHONY: test test_watch lint format
######################
# TESTING AND COVERAGE
######################
test:
poetry run pytest tests
test_watch:
poetry run ptw .
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --relative --diff-filter=d main . | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langgraph
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
poetry run ruff check .
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE)
[ "$(PYTHON_FILES)" = "" ] || poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
format format_diff:
poetry run ruff format $(PYTHON_FILES)
poetry run ruff check --select I --fix $(PYTHON_FILES)
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/checkpoint-duckdb/poetry.lock | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]
[[package]]
name = "certifi"
version = "2024.7.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
{file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
]
[[package]]
name = "charset-normalizer"
version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
{file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
{file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
{file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
{file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
{file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
{file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
{file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
{file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
{file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
{file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
{file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
{file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
{file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
]
[[package]]
name = "codespell"
version = "2.3.0"
description = "Codespell"
optional = false
python-versions = ">=3.8"
files = [
{file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
{file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
]
[package.extras]
dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"]
hard-encoding-detection = ["chardet"]
toml = ["tomli"]
types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"]
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "docopt"
version = "0.6.2"
description = "Pythonic argument parser, that will make you smile"
optional = false
python-versions = "*"
files = [
{file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"},
]
[[package]]
name = "duckdb"
version = "1.1.2"
description = "DuckDB in-process database"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:91e7f99cf5cab1d26f92cb014429153497d805e79689baa44f4c4585a8cb243f"},
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:0107de622fe208142a1108263a03c43956048dcc99be3702d8e5d2aeaf99554c"},
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:8a09610f780857677725897856f8cdf3cafd8a991f871e6cb8ba88b2dbc8d737"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0f0ddac0482f0f3fece54d720d13819e82ae26c01a939ffa66a87be53f7f665"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84103373e818758dfa361d27781d0f096553843c5ffb9193260a0786c5248270"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bfdfd23e2bf58014ad0673973bd0ed88cd048dfe8e82420814a71d7d52ef2288"},
{file = "duckdb-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:25889e6e29b87047b1dd56385ac08156e4713c59326cc6fff89657d01b2c417b"},
{file = "duckdb-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:312570fa5277c3079de18388b86c2d87cbe1044838bb152b235c0227581d5d42"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:568439ea4fce8cb72ec1f767cd510686a9e7e29a011fc7c56d990059a6e94e48"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:74974f2d7210623a5d61b1fb0cb589c6e5ffcbf7dbb757a04c5ba24adcfc8cac"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:e26422a3358c816d764639070945b73eef55d1b4df990989e3492c85ef725c21"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87e972bd452eeeab197fe39dcaeecdb7c264b1f75a0ee67e532e235fe45b84df"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a6b73e70b73c8df85da383f6e557c03cad5c877868b9a7e41715761e8166c1e"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:623cb1952466aae5907af84107bcdec25a5ca021a8b6441e961f41edc724f6f2"},
{file = "duckdb-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9fc0b550f96901fa7e76dc70a13f6477ad3e18ef1cb21d414c3a5569de3f27e"},
{file = "duckdb-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:181edb1973bd8f493bcb6ecfa035f1a592dff4667758592f300619012ba251c0"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:83372b1b411086cac01ab2071122772fa66170b1b41ddbc37527464066083668"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:db37441deddfee6ac35a0c742d2f9e90e4e50b9e76d586a060d122b8fc56dada"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:19142a77e72874aeaa6fda30aeb13612c6de5e8c60fbcc3392cea6ef0694eeaf"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:099d99dd48d6e4682a3dd6233ceab73d977ebe1a87afaac54cf77c844e24514a"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be86e586ca7af7e807f72479a2b8d0983565360b19dbda4ef8a9d7b3909b8e2c"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:578e0953e4d8ba8da0cd69fb2930c45f51ce47d213b77d8a4cd461f9c0960b87"},
{file = "duckdb-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:72b5eb5762c1a5e68849c7143f3b3747a9f15c040e34e41559f233a1569ad16f"},
{file = "duckdb-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:9b4c6b6a08180261d98330d97355503961a25ca31cd9ef296e0681f7895b4a2c"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:695dcbc561374b126e86659709feadf883c9969ed718e94713edd4ba15d16619"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_universal2.whl", hash = "sha256:ada29be1e889f486c6cf1f6dffd15463e748faf361f33996f2e862779edc24a9"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:6ca722738fa9eb6218619740631de29acfdd132de6f6a6350fee5e291c2f6117"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c796d33f1e5a0c8c570d22da0c0b1db8578687e427029e1ce2c8ce3f9fffa6a3"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5c0996988a70dd3bc8111d9b9aeab7e38ed1999a52607c5f1b528e362b4dd1c"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c37b039f6d6fed14d89450f5ccf54922b3304192d7412e12d6cc8d9e757f7a2"},
{file = "duckdb-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8c766b87f675c76d6d17103bf6fb9fb1a9e2fcb3d9b25c28bbc634bde31223e"},
{file = "duckdb-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:e3e6300b7ccaf64b609f4f0780a6e1d25ab8cf34cceed46e62c35b6c4c5cb63b"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a01fae9604a54ecbc26e7503c522311f15afbd2870e6d8f6fbef4545dfae550"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:492b1d86a696428bd3f14dc1c7c3230e2dbca8978f288be64b04a26e0e00fad5"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bba58459ad897a78c4e478a097626fc266459a40338cecc68a49a8d5dc72fb7"},
{file = "duckdb-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d395a3bf510bf24686821eec15802624797dcb33e8f14f8a7cc8e17d909474af"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:fd800f75728727fe699ed1eb22b636867cf48c9dd105ee88b977e20c89df4509"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:d8caaf43909e49537e26df51d80d075ae2b25a610d28ed8bd31d6ccebeaf3c65"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:564166811c68d9c7f9911eb707ad32ec9c2507b98336d894fbe658b85bf1c697"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19386aa09f0d6f97634ba2972096d1c80d880176dfb0e949eadc91c98262a663"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9e8387bcc9a591ad14011ddfec0d408d1d9b1889c6c9b495a04c7016a24b9b3"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f8c5ff4970403ed3ff0ac71fe0ce1e6be3199df9d542afc84c424b444ba4ffe8"},
{file = "duckdb-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:9283dcca87c3260eb631a99d738fa72b8545ed45b475bc72ad254f7310e14284"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f87edaf20001530e63a4f7bda13b55dc3152d7171226915f2bf34e0813c8759e"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:efec169b3fe0b821e3207ba3e445f227d42dd62b4440ff79c37fa168a4fc5a71"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:89164a2d29d56605a95ee5032aa415dd487028c4fd3e06d971497840e74c56e7"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6858e10c60ff7e70e61d3dd53d2545c8b2609942e45fd6de38cd0dee52932de3"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca967c5a57b1d0cb0fd5e539ab24110e5a59dcbedd365bb2dc80533d6e44a8d"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ce949f1d7999aa6a046eb64067eee41d4c5c2872ba4fa408c9947742d0c7231"},
{file = "duckdb-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ba6d1f918e6ca47a368a0c32806016405cb9beb2c245806b0ca998f569d2bdf"},
{file = "duckdb-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:7111fd3e7b334a7be383313ce29918b7c643e4f6ef44d6d63c3ab3fa6716c114"},
{file = "duckdb-1.1.2.tar.gz", hash = "sha256:c8232861dc8ec6daa29067056d5a0e5789919f2ab22ab792787616d7cd52f02a"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httpx"
version = "0.27.2"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
{file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
{file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
]
[package.dependencies]
jsonpointer = ">=1.9"
[[package]]
name = "jsonpointer"
version = "3.0.0"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = false
python-versions = ">=3.7"
files = [
{file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
{file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
]
[[package]]
name = "langchain-core"
version = "0.3.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "langchain_core-0.3.0-py3-none-any.whl", hash = "sha256:bee6dae2366d037ef0c5b87401fed14b5497cad26f97724e8c9ca7bc9239e847"},
{file = "langchain_core-0.3.0.tar.gz", hash = "sha256:1249149ea3ba24c9c761011483c14091573a5eb1a773aa0db9c8ad155dd4a69d"},
]
[package.dependencies]
jsonpatch = ">=1.33,<2.0"
langsmith = ">=0.1.117,<0.2.0"
packaging = ">=23.2,<25"
pydantic = [
{version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0"
typing-extensions = ">=4.7"
[[package]]
name = "langgraph-checkpoint"
version = "2.0.2"
description = "Library with base interfaces for LangGraph checkpoint savers."
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
langchain-core = ">=0.2.38,<0.4"
msgpack = "^1.1.0"
[package.source]
type = "directory"
url = "../checkpoint"
[[package]]
name = "langsmith"
version = "0.1.120"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"},
{file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"},
]
[package.dependencies]
httpx = ">=0.23.0,<1"
orjson = ">=3.9.14,<4.0.0"
pydantic = [
{version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
requests = ">=2,<3"
[[package]]
name = "msgpack"
version = "1.1.0"
description = "MessagePack serializer"
optional = false
python-versions = ">=3.8"
files = [
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"},
{file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"},
{file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"},
{file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"},
{file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"},
{file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"},
{file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"},
{file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"},
{file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"},
{file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"},
{file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"},
{file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"},
{file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"},
{file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"},
]
[[package]]
name = "mypy"
version = "1.11.2"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"},
{file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"},
{file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"},
{file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"},
{file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"},
{file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"},
{file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"},
{file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"},
{file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"},
{file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"},
{file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"},
{file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"},
{file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"},
{file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"},
{file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"},
{file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"},
{file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"},
{file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"},
{file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"},
{file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"},
{file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"},
{file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"},
{file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"},
{file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"},
{file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"},
{file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"},
{file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "orjson"
version = "3.10.6"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:fb0ee33124db6eaa517d00890fc1a55c3bfe1cf78ba4a8899d71a06f2d6ff5c7"},
{file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1c4b53b24a4c06547ce43e5fee6ec4e0d8fe2d597f4647fc033fd205707365"},
{file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eadc8fd310edb4bdbd333374f2c8fec6794bbbae99b592f448d8214a5e4050c0"},
{file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61272a5aec2b2661f4fa2b37c907ce9701e821b2c1285d5c3ab0207ebd358d38"},
{file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57985ee7e91d6214c837936dc1608f40f330a6b88bb13f5a57ce5257807da143"},
{file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:633a3b31d9d7c9f02d49c4ab4d0a86065c4a6f6adc297d63d272e043472acab5"},
{file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1c680b269d33ec444afe2bdc647c9eb73166fa47a16d9a75ee56a374f4a45f43"},
{file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f759503a97a6ace19e55461395ab0d618b5a117e8d0fbb20e70cfd68a47327f2"},
{file = "orjson-3.10.6-cp310-none-win32.whl", hash = "sha256:95a0cce17f969fb5391762e5719575217bd10ac5a189d1979442ee54456393f3"},
{file = "orjson-3.10.6-cp310-none-win_amd64.whl", hash = "sha256:df25d9271270ba2133cc88ee83c318372bdc0f2cd6f32e7a450809a111efc45c"},
{file = "orjson-3.10.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b1ec490e10d2a77c345def52599311849fc063ae0e67cf4f84528073152bb2ba"},
{file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d43d3feb8f19d07e9f01e5b9be4f28801cf7c60d0fa0d279951b18fae1932b"},
{file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3045267e98fe749408eee1593a142e02357c5c99be0802185ef2170086a863"},
{file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c27bc6a28ae95923350ab382c57113abd38f3928af3c80be6f2ba7eb8d8db0b0"},
{file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d27456491ca79532d11e507cadca37fb8c9324a3976294f68fb1eff2dc6ced5a"},
{file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05ac3d3916023745aa3b3b388e91b9166be1ca02b7c7e41045da6d12985685f0"},
{file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1335d4ef59ab85cab66fe73fd7a4e881c298ee7f63ede918b7faa1b27cbe5212"},
{file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4bbc6d0af24c1575edc79994c20e1b29e6fb3c6a570371306db0993ecf144dc5"},
{file = "orjson-3.10.6-cp311-none-win32.whl", hash = "sha256:450e39ab1f7694465060a0550b3f6d328d20297bf2e06aa947b97c21e5241fbd"},
{file = "orjson-3.10.6-cp311-none-win_amd64.whl", hash = "sha256:227df19441372610b20e05bdb906e1742ec2ad7a66ac8350dcfd29a63014a83b"},
{file = "orjson-3.10.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ea2977b21f8d5d9b758bb3f344a75e55ca78e3ff85595d248eee813ae23ecdfb"},
{file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6f3d167d13a16ed263b52dbfedff52c962bfd3d270b46b7518365bcc2121eed"},
{file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f710f346e4c44a4e8bdf23daa974faede58f83334289df80bc9cd12fe82573c7"},
{file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7275664f84e027dcb1ad5200b8b18373e9c669b2a9ec33d410c40f5ccf4b257e"},
{file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0943e4c701196b23c240b3d10ed8ecd674f03089198cf503105b474a4f77f21f"},
{file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:446dee5a491b5bc7d8f825d80d9637e7af43f86a331207b9c9610e2f93fee22a"},
{file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:64c81456d2a050d380786413786b057983892db105516639cb5d3ee3c7fd5148"},
{file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"},
{file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"},
{file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"},
{file = "orjson-3.10.6-cp313-none-win32.whl", hash = "sha256:efdf2c5cde290ae6b83095f03119bdc00303d7a03b42b16c54517baa3c4ca3d0"},
{file = "orjson-3.10.6-cp313-none-win_amd64.whl", hash = "sha256:8e190fe7888e2e4392f52cafb9626113ba135ef53aacc65cd13109eb9746c43e"},
{file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"},
{file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"},
{file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"},
{file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2c116072a8533f2fec435fde4d134610f806bdac20188c7bd2081f3e9e0133f"},
{file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6eeb13218c8cf34c61912e9df2de2853f1d009de0e46ea09ccdf3d757896af0a"},
{file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965a916373382674e323c957d560b953d81d7a8603fbeee26f7b8248638bd48b"},
{file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03c95484d53ed8e479cade8628c9cea00fd9d67f5554764a1110e0d5aa2de96e"},
{file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e060748a04cccf1e0a6f2358dffea9c080b849a4a68c28b1b907f272b5127e9b"},
{file = "orjson-3.10.6-cp38-none-win32.whl", hash = "sha256:738dbe3ef909c4b019d69afc19caf6b5ed0e2f1c786b5d6215fbb7539246e4c6"},
{file = "orjson-3.10.6-cp38-none-win_amd64.whl", hash = "sha256:d40f839dddf6a7d77114fe6b8a70218556408c71d4d6e29413bb5f150a692ff7"},
{file = "orjson-3.10.6-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:697a35a083c4f834807a6232b3e62c8b280f7a44ad0b759fd4dce748951e70db"},
{file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd502f96bf5ea9a61cbc0b2b5900d0dd68aa0da197179042bdd2be67e51a1e4b"},
{file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f215789fb1667cdc874c1b8af6a84dc939fd802bf293a8334fce185c79cd359b"},
{file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2debd8ddce948a8c0938c8c93ade191d2f4ba4649a54302a7da905a81f00b56"},
{file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5410111d7b6681d4b0d65e0f58a13be588d01b473822483f77f513c7f93bd3b2"},
{file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb1f28a137337fdc18384079fa5726810681055b32b92253fa15ae5656e1dddb"},
{file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf2fbbce5fe7cd1aa177ea3eab2b8e6a6bc6e8592e4279ed3db2d62e57c0e1b2"},
{file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:79b9b9e33bd4c517445a62b90ca0cc279b0f1f3970655c3df9e608bc3f91741a"},
{file = "orjson-3.10.6-cp39-none-win32.whl", hash = "sha256:30b0a09a2014e621b1adf66a4f705f0809358350a757508ee80209b2d8dae219"},
{file = "orjson-3.10.6-cp39-none-win_amd64.whl", hash = "sha256:49e3bc615652617d463069f91b867a4458114c5b104e13b7ae6872e5f79d0844"},
{file = "orjson-3.10.6.tar.gz", hash = "sha256:e54b63d0a7c6c54a5f5f726bc93a2078111ef060fec4ecbf34c5db800ca3b3a7"},
]
[[package]]
name = "packaging"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pydantic"
version = "2.8.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
{file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
]
[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.20.1"
typing-extensions = [
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
]
[package.extras]
email = ["email-validator (>=2.0.0)"]
[[package]]
name = "pydantic-core"
version = "2.20.1"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
{file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
{file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
{file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
{file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
{file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
{file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
{file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
{file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
{file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
{file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
{file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
{file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
{file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
{file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
{file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
{file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
{file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
{file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
{file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
{file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
{file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
{file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
{file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
{file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
{file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
{file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
{file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
{file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
{file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
{file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
{file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
{file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
{file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
{file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
{file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
{file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
]
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pytest"
version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.21.2"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"},
{file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]
[package.dependencies]
pytest = ">=6.2.5"
[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]
[[package]]
name = "pytest-watch"
version = "4.2.0"
description = "Local continuous test runner with pytest and watchdog."
optional = false
python-versions = "*"
files = [
{file = "pytest-watch-4.2.0.tar.gz", hash = "sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9"},
]
[package.dependencies]
colorama = ">=0.3.3"
docopt = ">=0.4.0"
pytest = ">=2.6.4"
watchdog = ">=0.6.0"
[[package]]
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.6"
files = [
{file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
{file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
{file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
{file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
{file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
{file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
{file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
{file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "ruff"
version = "0.6.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.6.2-py3-none-linux_armv6l.whl", hash = "sha256:5c8cbc6252deb3ea840ad6a20b0f8583caab0c5ef4f9cca21adc5a92b8f79f3c"},
{file = "ruff-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:17002fe241e76544448a8e1e6118abecbe8cd10cf68fde635dad480dba594570"},
{file = "ruff-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3dbeac76ed13456f8158b8f4fe087bf87882e645c8e8b606dd17b0b66c2c1158"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:094600ee88cda325988d3f54e3588c46de5c18dae09d683ace278b11f9d4d534"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:316d418fe258c036ba05fbf7dfc1f7d3d4096db63431546163b472285668132b"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d72b8b3abf8a2d51b7b9944a41307d2f442558ccb3859bbd87e6ae9be1694a5d"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2aed7e243be68487aa8982e91c6e260982d00da3f38955873aecd5a9204b1d66"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d371f7fc9cec83497fe7cf5eaf5b76e22a8efce463de5f775a1826197feb9df8"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f310d63af08f583363dfb844ba8f9417b558199c58a5999215082036d795a1"},
{file = "ruff-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db6880c53c56addb8638fe444818183385ec85eeada1d48fc5abe045301b2f1"},
{file = "ruff-0.6.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1175d39faadd9a50718f478d23bfc1d4da5743f1ab56af81a2b6caf0a2394f23"},
{file = "ruff-0.6.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939f9c86d51635fe486585389f54582f0d65b8238e08c327c1534844b3bb9a"},
{file = "ruff-0.6.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0d62ca91219f906caf9b187dea50d17353f15ec9bb15aae4a606cd697b49b4c"},
{file = "ruff-0.6.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7438a7288f9d67ed3c8ce4d059e67f7ed65e9fe3aa2ab6f5b4b3610e57e3cb56"},
{file = "ruff-0.6.2-py3-none-win32.whl", hash = "sha256:279d5f7d86696df5f9549b56b9b6a7f6c72961b619022b5b7999b15db392a4da"},
{file = "ruff-0.6.2-py3-none-win_amd64.whl", hash = "sha256:d9f3469c7dd43cd22eb1c3fc16926fb8258d50cb1b216658a07be95dd117b0f2"},
{file = "ruff-0.6.2-py3-none-win_arm64.whl", hash = "sha256:f28fcd2cd0e02bdf739297516d5643a945cc7caf09bd9bcb4d932540a5ea4fa9"},
{file = "ruff-0.6.2.tar.gz", hash = "sha256:239ee6beb9e91feb8e0ec384204a763f36cb53fb895a1a364618c6abb076b3be"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "tenacity"
version = "8.5.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
files = [
{file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"},
{file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"},
]
[package.extras]
doc = ["reno", "sphinx"]
test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.7"
files = [
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "2.2.2"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
{file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "watchdog"
version = "4.0.1"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.8"
files = [
{file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"},
{file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"},
{file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"},
{file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"},
{file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"},
{file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"},
{file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"},
{file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"},
{file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"},
{file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"},
{file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"},
{file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"},
{file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"},
{file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"},
{file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"},
{file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"},
{file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"},
{file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9.0,<4.0"
content-hash = "c319b072af396b6f10fd6f75544816ea717741ed8b35ce675df65506c585f67e"
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/checkpoint-duckdb/README.md | # LangGraph Checkpoint DuckDB
Implementation of LangGraph CheckpointSaver that uses DuckDB.
## Usage
> [!IMPORTANT]
> When using DuckDB checkpointers for the first time, make sure to call the `.setup()` method on them to create required tables. See example below.
```python
from langgraph.checkpoint.duckdb import DuckDBSaver
write_config = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}}
read_config = {"configurable": {"thread_id": "1"}}
with DuckDBSaver.from_conn_string(":memory:") as checkpointer:
# call .setup() the first time you're using the checkpointer
checkpointer.setup()
checkpoint = {
"v": 1,
"ts": "2024-07-31T20:14:19.804150+00:00",
"id": "1ef4f797-8335-6428-8001-8a1503f9b875",
"channel_values": {
"my_key": "meow",
"node": "node"
},
"channel_versions": {
"__start__": 2,
"my_key": 3,
"start:node": 3,
"node": 3
},
"versions_seen": {
"__input__": {},
"__start__": {
"__start__": 1
},
"node": {
"start:node": 2
}
},
"pending_sends": [],
}
# store checkpoint
checkpointer.put(write_config, checkpoint, {}, {})
# load checkpoint
checkpointer.get(read_config)
# list checkpoints
list(checkpointer.list(read_config))
```
### Async
```python
from langgraph.checkpoint.duckdb.aio import AsyncDuckDBSaver
write_config = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}}
read_config = {"configurable": {"thread_id": "1"}}
async with AsyncDuckDBSaver.from_conn_string(":memory:") as checkpointer:
    # call .setup() the first time you're using the checkpointer
    await checkpointer.setup()
    checkpoint = {
"v": 1,
"ts": "2024-07-31T20:14:19.804150+00:00",
"id": "1ef4f797-8335-6428-8001-8a1503f9b875",
"channel_values": {
"my_key": "meow",
"node": "node"
},
"channel_versions": {
"__start__": 2,
"my_key": 3,
"start:node": 3,
"node": 3
},
"versions_seen": {
"__input__": {},
"__start__": {
"__start__": 1
},
"node": {
"start:node": 2
}
},
"pending_sends": [],
}
# store checkpoint
await checkpointer.aput(write_config, checkpoint, {}, {})
# load checkpoint
await checkpointer.aget(read_config)
# list checkpoints
[c async for c in checkpointer.alist(read_config)]
```
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/checkpoint-duckdb/pyproject.toml | [tool.poetry]
name = "langgraph-checkpoint-duckdb"
version = "2.0.1"
description = "Library with a DuckDB implementation of LangGraph checkpoint saver."
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/langchain-ai/langgraph"
packages = [{ include = "langgraph" }]
[tool.poetry.dependencies]
python = "^3.9.0,<4.0"
langgraph-checkpoint = "^2.0.2"
duckdb = ">=1.1.2"
[tool.poetry.group.dev.dependencies]
ruff = "^0.6.2"
codespell = "^2.2.0"
pytest = "^7.2.1"
anyio = "^4.4.0"
pytest-asyncio = "^0.21.1"
pytest-mock = "^3.11.1"
pytest-watch = "^4.2.0"
mypy = "^1.10.0"
langgraph-checkpoint = {path = "../checkpoint", develop = true}
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
addopts = "--strict-markers --strict-config --durations=5 -vv"
asyncio_mode = "auto"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.ruff]
lint.select = [
"E", # pycodestyle
"F", # Pyflakes
"UP", # pyupgrade
"B", # flake8-bugbear
"I", # isort
]
lint.ignore = ["E501", "B008", "UP007", "UP006"]
[tool.mypy]
# https://mypy.readthedocs.io/en/stable/config_file.html
disallow_untyped_defs = "True"
explicit_package_bases = "True"
warn_no_return = "False"
warn_unused_ignores = "True"
warn_redundant_casts = "True"
allow_redefinition = "True"
disable_error_code = "typeddict-item, return-value"
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb | lc_public_repos/langgraph/libs/checkpoint-duckdb/tests/test_async_store.py | # type: ignore
import uuid
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock
import pytest
from langgraph.store.base import GetOp, Item, ListNamespacesOp, PutOp, SearchOp
from langgraph.store.duckdb import AsyncDuckDBStore
class MockCursor:
    """Fake DB-API cursor: records ``execute`` calls and replays canned rows."""

    def __init__(self, fetch_result: Any) -> None:
        # Keep the canned rows reachable so tests can inspect or swap them.
        self.fetch_result = fetch_result
        # ``execute`` is a pure call recorder; ``fetchall`` always replays
        # the canned result list handed to the constructor.
        self.execute = MagicMock()
        self.fetchall = MagicMock(return_value=fetch_result)
class MockConnection:
    """Fake DuckDB connection whose ``cursor`` attribute is a bare MagicMock.

    Tests drive behavior by setting ``cursor.return_value`` or
    ``cursor.side_effect``.
    """

    def __init__(self) -> None:
        self.cursor = MagicMock()
@pytest.fixture
def mock_connection() -> MockConnection:
    """Provide a fresh mocked DuckDB connection for each test."""
    connection = MockConnection()
    return connection
@pytest.fixture
async def store(mock_connection: MockConnection) -> AsyncDuckDBStore:
    """Async store wired to the mocked connection, with schema set up."""
    instance = AsyncDuckDBStore(mock_connection)
    await instance.setup()
    return instance
async def test_abatch_order(store: AsyncDuckDBStore) -> None:
    """abatch() must map results back to ops in submission order, no matter
    how ops of different kinds (get/put/search/list) are interleaved."""
    mock_connection = store.conn
    mock_get_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_search_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_list_namespaces_cursor = MockCursor(
        [
            ("test",),
        ]
    )
    failures: list[ValueError] = []
    def cursor_side_effect() -> Any:
        cursor = MagicMock()
        def execute_side_effect(query: str, *params: Any) -> None:
            # Route canned rows by matching substrings of the SQL text;
            # queries that match nothing are collected so the test fails loudly.
            if "WHERE prefix = ? AND key" in query:
                cursor.fetchall = mock_get_cursor.fetchall
            elif "SELECT prefix, key, value" in query:
                cursor.fetchall = mock_search_cursor.fetchall
            elif "SELECT DISTINCT ON (truncated_prefix)" in query:
                cursor.fetchall = mock_list_namespaces_cursor.fetchall
            elif "INSERT INTO " in query:
                pass
            else:
                e = ValueError(f"Unmatched query: {query}")
                failures.append(e)
                raise e
        cursor.execute = MagicMock(side_effect=execute_side_effect)
        return cursor
    mock_connection.cursor.side_effect = cursor_side_effect  # type: ignore
    ops = [
        GetOp(namespace=("test",), key="key1"),
        PutOp(namespace=("test",), key="key2", value={"data": "value2"}),
        SearchOp(
            namespace_prefix=("test",), filter={"data": "value1"}, limit=10, offset=0
        ),
        ListNamespacesOp(match_conditions=None, max_depth=None, limit=10, offset=0),
        GetOp(namespace=("test",), key="key3"),
    ]
    results = await store.abatch(ops)
    assert not failures
    assert len(results) == 5
    assert isinstance(results[0], Item)
    assert isinstance(results[0].value, dict)
    assert results[0].value == {"data": "value1"}
    assert results[0].key == "key1"
    assert results[1] is None
    assert isinstance(results[2], list)
    assert len(results[2]) == 1
    assert isinstance(results[3], list)
    assert results[3] == [("test",)]
    assert results[4] is None
    # Same op kinds in a different order: results must still line up 1:1.
    ops_reordered = [
        SearchOp(namespace_prefix=("test",), filter=None, limit=5, offset=0),
        GetOp(namespace=("test",), key="key2"),
        ListNamespacesOp(match_conditions=None, max_depth=None, limit=5, offset=0),
        PutOp(namespace=("test",), key="key3", value={"data": "value3"}),
        GetOp(namespace=("test",), key="key1"),
    ]
    results_reordered = await store.abatch(ops_reordered)
    assert not failures
    assert len(results_reordered) == 5
    assert isinstance(results_reordered[0], list)
    assert len(results_reordered[0]) == 1
    assert isinstance(results_reordered[1], Item)
    assert results_reordered[1].value == {"data": "value2"}
    assert results_reordered[1].key == "key2"
    assert isinstance(results_reordered[2], list)
    assert results_reordered[2] == [("test",)]
    assert results_reordered[3] is None
    assert isinstance(results_reordered[4], Item)
    assert results_reordered[4].value == {"data": "value1"}
    assert results_reordered[4].key == "key1"
async def test_batch_get_ops(store: AsyncDuckDBStore) -> None:
    """GetOps resolve found keys to Items and missing keys to None."""
    mock_connection = store.conn
    mock_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        GetOp(namespace=("test",), key="key1"),
        GetOp(namespace=("test",), key="key2"),
        GetOp(namespace=("test",), key="key3"),
    ]
    results = await store.abatch(ops)
    assert len(results) == 3
    assert results[0] is not None
    assert results[1] is not None
    # key3 is absent from the canned rows, so its slot must be None.
    assert results[2] is None
    assert results[0].key == "key1"
    assert results[1].key == "key2"
async def test_batch_put_ops(store: AsyncDuckDBStore) -> None:
    """PutOps (including value=None, i.e. delete) all return None."""
    mock_connection = store.conn
    mock_cursor = MockCursor([])
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        PutOp(namespace=("test",), key="key1", value={"data": "value1"}),
        PutOp(namespace=("test",), key="key2", value={"data": "value2"}),
        PutOp(namespace=("test",), key="key3", value=None),
    ]
    results = await store.abatch(ops)
    assert len(results) == 3
    assert all(result is None for result in results)
    # Exactly two execute calls for three ops — presumably one batched
    # upsert plus one delete; the grouping is only asserted via call_count.
    assert mock_cursor.execute.call_count == 2
async def test_batch_search_ops(store: AsyncDuckDBStore) -> None:
    """Each SearchOp slot receives the list of rows the cursor returns."""
    mock_connection = store.conn
    mock_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        SearchOp(
            namespace_prefix=("test",), filter={"data": "value1"}, limit=10, offset=0
        ),
        SearchOp(namespace_prefix=("test",), filter=None, limit=5, offset=0),
    ]
    results = await store.abatch(ops)
    assert len(results) == 2
    # Both searches see the same canned cursor, hence two rows each.
    assert len(results[0]) == 2
    assert len(results[1]) == 2
async def test_batch_list_namespaces_ops(store: AsyncDuckDBStore) -> None:
    """Dotted prefix strings from the DB are split back into tuples."""
    mock_connection = store.conn
    mock_cursor = MockCursor([("test.namespace1",), ("test.namespace2",)])
    mock_connection.cursor.return_value = mock_cursor
    ops = [ListNamespacesOp(match_conditions=None, max_depth=None, limit=10, offset=0)]
    results = await store.abatch(ops)
    assert len(results) == 1
    assert results[0] == [("test", "namespace1"), ("test", "namespace2")]
# The following use the actual DB connection
async def test_basic_store_ops() -> None:
    """End-to-end put/get/search/list/delete against a real in-memory DuckDB."""
    async with AsyncDuckDBStore.from_conn_string(":memory:") as store:
        await store.setup()
        namespace = ("test", "documents")
        item_id = "doc1"
        item_value = {"title": "Test Document", "content": "Hello, World!"}
        await store.aput(namespace, item_id, item_value)
        item = await store.aget(namespace, item_id)
        assert item
        assert item.namespace == namespace
        assert item.key == item_id
        assert item.value == item_value
        # Re-putting the same key overwrites the value and bumps updated_at.
        updated_value = {
            "title": "Updated Test Document",
            "content": "Hello, LangGraph!",
        }
        await store.aput(namespace, item_id, updated_value)
        updated_item = await store.aget(namespace, item_id)
        assert updated_item.value == updated_value
        assert updated_item.updated_at > item.updated_at
        # Lookups are namespace-scoped: same key elsewhere must miss.
        different_namespace = ("test", "other_documents")
        item_in_different_namespace = await store.aget(different_namespace, item_id)
        assert item_in_different_namespace is None
        new_item_id = "doc2"
        new_item_value = {"title": "Another Document", "content": "Greetings!"}
        await store.aput(namespace, new_item_id, new_item_value)
        search_results = await store.asearch(["test"], limit=10)
        items = search_results
        assert len(items) == 2
        assert any(item.key == item_id for item in items)
        assert any(item.key == new_item_id for item in items)
        namespaces = await store.alist_namespaces(prefix=["test"])
        assert ("test", "documents") in namespaces
        # Deletes make items unreachable by get and by search.
        await store.adelete(namespace, item_id)
        await store.adelete(namespace, new_item_id)
        deleted_item = await store.aget(namespace, item_id)
        assert deleted_item is None
        deleted_item = await store.aget(namespace, new_item_id)
        assert deleted_item is None
        empty_search_results = await store.asearch(["test"], limit=10)
        assert len(empty_search_results) == 0
async def test_list_namespaces() -> None:
    """Exercise alist_namespaces: prefix/suffix filters, '*' wildcards,
    max_depth truncation, and limit/offset paging, on a real DuckDB."""
    async with AsyncDuckDBStore.from_conn_string(":memory:") as store:
        await store.setup()
        # Unique prefix isolates this test's rows from any other data.
        test_pref = str(uuid.uuid4())
        test_namespaces = [
            (test_pref, "test", "documents", "public", test_pref),
            (test_pref, "test", "documents", "private", test_pref),
            (test_pref, "test", "images", "public", test_pref),
            (test_pref, "test", "images", "private", test_pref),
            (test_pref, "prod", "documents", "public", test_pref),
            (
                test_pref,
                "prod",
                "documents",
                "some",
                "nesting",
                "public",
                test_pref,
            ),
            (test_pref, "prod", "documents", "private", test_pref),
        ]
        for namespace in test_namespaces:
            await store.aput(namespace, "dummy", {"content": "dummy"})
        prefix_result = await store.alist_namespaces(prefix=[test_pref, "test"])
        assert len(prefix_result) == 4
        assert all([ns[1] == "test" for ns in prefix_result])
        specific_prefix_result = await store.alist_namespaces(
            prefix=[test_pref, "test", "documents"]
        )
        assert len(specific_prefix_result) == 2
        assert all([ns[1:3] == ("test", "documents") for ns in specific_prefix_result])
        suffix_result = await store.alist_namespaces(suffix=["public", test_pref])
        assert len(suffix_result) == 4
        assert all(ns[-2] == "public" for ns in suffix_result)
        prefix_suffix_result = await store.alist_namespaces(
            prefix=[test_pref, "test"], suffix=["public", test_pref]
        )
        assert len(prefix_suffix_result) == 2
        assert all(
            ns[1] == "test" and ns[-2] == "public" for ns in prefix_suffix_result
        )
        # "*" matches exactly one segment at that position.
        wildcard_prefix_result = await store.alist_namespaces(
            prefix=[test_pref, "*", "documents"]
        )
        assert len(wildcard_prefix_result) == 5
        assert all(ns[2] == "documents" for ns in wildcard_prefix_result)
        wildcard_suffix_result = await store.alist_namespaces(
            suffix=["*", "public", test_pref]
        )
        assert len(wildcard_suffix_result) == 4
        assert all(ns[-2] == "public" for ns in wildcard_suffix_result)
        wildcard_single = await store.alist_namespaces(
            suffix=["some", "*", "public", test_pref]
        )
        assert len(wildcard_single) == 1
        assert wildcard_single[0] == (
            test_pref,
            "prod",
            "documents",
            "some",
            "nesting",
            "public",
            test_pref,
        )
        max_depth_result = await store.alist_namespaces(max_depth=3)
        assert all([len(ns) <= 3 for ns in max_depth_result])
        # Truncation at max_depth must not produce duplicate tuples.
        max_depth_result = await store.alist_namespaces(
            max_depth=4, prefix=[test_pref, "*", "documents"]
        )
        assert (
            len(set(tuple(res) for res in max_depth_result))
            == len(max_depth_result)
            == 5
        )
        limit_result = await store.alist_namespaces(prefix=[test_pref], limit=3)
        assert len(limit_result) == 3
        offset_result = await store.alist_namespaces(prefix=[test_pref], offset=3)
        assert len(offset_result) == len(test_namespaces) - 3
        empty_prefix_result = await store.alist_namespaces(prefix=[test_pref])
        assert len(empty_prefix_result) == len(test_namespaces)
        assert set(tuple(ns) for ns in empty_prefix_result) == set(
            tuple(ns) for ns in test_namespaces
        )
        for namespace in test_namespaces:
            await store.adelete(namespace, "dummy")
async def test_search() -> None:
    """Exercise asearch: namespace-prefix scoping, value filters,
    limit/offset paging, and namespaces whose first segment is a UUID."""
    async with AsyncDuckDBStore.from_conn_string(":memory:") as store:
        await store.setup()
        test_namespaces = [
            ("test_search", "documents", "user1"),
            ("test_search", "documents", "user2"),
            ("test_search", "reports", "department1"),
            ("test_search", "reports", "department2"),
        ]
        test_items = [
            {"title": "Doc 1", "author": "John Doe", "tags": ["important"]},
            {"title": "Doc 2", "author": "Jane Smith", "tags": ["draft"]},
            {"title": "Report A", "author": "John Doe", "tags": ["final"]},
            {"title": "Report B", "author": "Alice Johnson", "tags": ["draft"]},
        ]
        # Searching a never-populated namespace must return nothing.
        empty = await store.asearch(
            (
                "scoped",
                "assistant_id",
                "shared",
                "6c5356f6-63ab-4158-868d-cd9fd14c736e",
            ),
            limit=10,
            offset=0,
        )
        assert len(empty) == 0
        for namespace, item in zip(test_namespaces, test_items):
            await store.aput(namespace, f"item_{namespace[-1]}", item)
        docs_result = await store.asearch(["test_search", "documents"])
        assert len(docs_result) == 2
        assert all([item.namespace[1] == "documents" for item in docs_result]), [
            item.namespace for item in docs_result
        ]
        reports_result = await store.asearch(["test_search", "reports"])
        assert len(reports_result) == 2
        assert all(item.namespace[1] == "reports" for item in reports_result)
        limited_result = await store.asearch(["test_search"], limit=2)
        assert len(limited_result) == 2
        offset_result = await store.asearch(["test_search"])
        assert len(offset_result) == 4
        # limit/offset pages must not overlap.
        offset_result = await store.asearch(["test_search"], offset=2)
        assert len(offset_result) == 2
        assert all(item not in limited_result for item in offset_result)
        john_doe_result = await store.asearch(
            ["test_search"], filter={"author": "John Doe"}
        )
        assert len(john_doe_result) == 2
        assert all(item.value["author"] == "John Doe" for item in john_doe_result)
        draft_result = await store.asearch(["test_search"], filter={"tags": ["draft"]})
        assert len(draft_result) == 2
        assert all("draft" in item.value["tags"] for item in draft_result)
        page1 = await store.asearch(["test_search"], limit=2, offset=0)
        page2 = await store.asearch(["test_search"], limit=2, offset=2)
        all_items = page1 + page2
        assert len(all_items) == 4
        assert len(set(item.key for item in all_items)) == 4
        empty = await store.asearch(
            (
                "scoped",
                "assistant_id",
                "shared",
                "again",
                "maybe",
                "some-long",
                "6be5cb0e-2eb4-42e6-bb6b-fba3c269db25",
            ),
            limit=10,
            offset=0,
        )
        assert len(empty) == 0
        # Test with a namespace beginning with a number (like a UUID)
        uuid_namespace = (str(uuid.uuid4()), "documents")
        uuid_item_id = "uuid_doc"
        uuid_item_value = {
            "title": "UUID Document",
            "content": "This document has a UUID namespace.",
        }
        # Insert the item with the UUID namespace
        await store.aput(uuid_namespace, uuid_item_id, uuid_item_value)
        # Retrieve the item to verify it was stored correctly
        retrieved_item = await store.aget(uuid_namespace, uuid_item_id)
        assert retrieved_item is not None
        assert retrieved_item.namespace == uuid_namespace
        assert retrieved_item.key == uuid_item_id
        assert retrieved_item.value == uuid_item_value
        # Search for the item using the UUID namespace
        search_result = await store.asearch([uuid_namespace[0]])
        assert len(search_result) == 1
        assert search_result[0].key == uuid_item_id
        assert search_result[0].value == uuid_item_value
        # Clean up: delete the item with the UUID namespace
        await store.adelete(uuid_namespace, uuid_item_id)
        # Verify the item was deleted
        deleted_item = await store.aget(uuid_namespace, uuid_item_id)
        assert deleted_item is None
        for namespace in test_namespaces:
            await store.adelete(namespace, f"item_{namespace[-1]}")
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb | lc_public_repos/langgraph/libs/checkpoint-duckdb/tests/test_sync.py | from typing import Any
import pytest
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
Checkpoint,
CheckpointMetadata,
create_checkpoint,
empty_checkpoint,
)
from langgraph.checkpoint.duckdb import DuckDBSaver
class TestDuckDBSaver:
    """Tests for the synchronous DuckDB checkpoint saver."""

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        # objects for test setup: three configs/checkpoints/metadata triples
        # saved by each test before asserting on list()/get_tuple().
        self.config_1: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-1",
                # for backwards compatibility testing
                "thread_ts": "1",
                "checkpoint_ns": "",
            }
        }
        self.config_2: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-2",
                "checkpoint_id": "2",
                "checkpoint_ns": "",
            }
        }
        self.config_3: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-2",
                "checkpoint_id": "2-inner",
                "checkpoint_ns": "inner",
            }
        }
        self.chkpnt_1: Checkpoint = empty_checkpoint()
        self.chkpnt_2: Checkpoint = create_checkpoint(self.chkpnt_1, {}, 1)
        self.chkpnt_3: Checkpoint = empty_checkpoint()
        self.metadata_1: CheckpointMetadata = {
            "source": "input",
            "step": 2,
            "writes": {},
            "score": 1,
        }
        self.metadata_2: CheckpointMetadata = {
            "source": "loop",
            "step": 1,
            "writes": {"foo": "bar"},
            "score": None,
        }
        self.metadata_3: CheckpointMetadata = {}
    def test_search(self) -> None:
        """list() filters checkpoints by metadata and by thread config."""
        with DuckDBSaver.from_conn_string(":memory:") as saver:
            saver.setup()
            # save checkpoints
            saver.put(self.config_1, self.chkpnt_1, self.metadata_1, {})
            saver.put(self.config_2, self.chkpnt_2, self.metadata_2, {})
            saver.put(self.config_3, self.chkpnt_3, self.metadata_3, {})
            # call method / assertions
            query_1 = {"source": "input"}  # search by 1 key
            query_2 = {
                "step": 1,
                "writes": {"foo": "bar"},
            }  # search by multiple keys
            query_3: dict[str, Any] = {}  # search by no keys, return all checkpoints
            query_4 = {"source": "update", "step": 1}  # no match
            search_results_1 = list(saver.list(None, filter=query_1))
            assert len(search_results_1) == 1
            assert search_results_1[0].metadata == self.metadata_1
            search_results_2 = list(saver.list(None, filter=query_2))
            assert len(search_results_2) == 1
            assert search_results_2[0].metadata == self.metadata_2
            search_results_3 = list(saver.list(None, filter=query_3))
            assert len(search_results_3) == 3
            search_results_4 = list(saver.list(None, filter=query_4))
            assert len(search_results_4) == 0
            # search by config (defaults to checkpoints across all namespaces)
            search_results_5 = list(
                saver.list({"configurable": {"thread_id": "thread-2"}})
            )
            assert len(search_results_5) == 2
            assert {
                search_results_5[0].config["configurable"]["checkpoint_ns"],
                search_results_5[1].config["configurable"]["checkpoint_ns"],
            } == {"", "inner"}
            # TODO: test before and limit params
    def test_null_chars(self) -> None:
        """NUL bytes in metadata values are stripped on save."""
        with DuckDBSaver.from_conn_string(":memory:") as saver:
            saver.setup()
            config = saver.put(self.config_1, self.chkpnt_1, {"my_key": "\x00abc"}, {})
            assert saver.get_tuple(config).metadata["my_key"] == "abc"  # type: ignore
            assert (
                list(saver.list(None, filter={"my_key": "abc"}))[0].metadata["my_key"]  # type: ignore
                == "abc"
            )
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb | lc_public_repos/langgraph/libs/checkpoint-duckdb/tests/test_store.py | # type: ignore
import uuid
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock
import pytest
from langgraph.store.base import GetOp, Item, ListNamespacesOp, PutOp, SearchOp
from langgraph.store.duckdb import DuckDBStore
class MockCursor:
    """Fake DB-API cursor: records ``execute`` calls and replays canned rows."""

    def __init__(self, fetch_result: Any) -> None:
        # Keep the canned rows reachable so tests can inspect or swap them.
        self.fetch_result = fetch_result
        # ``execute`` is a pure call recorder; ``fetchall`` always replays
        # the canned result list handed to the constructor.
        self.execute = MagicMock()
        self.fetchall = MagicMock(return_value=fetch_result)
class MockConnection:
    """Fake DuckDB connection whose ``cursor`` attribute is a bare MagicMock.

    Tests drive behavior by setting ``cursor.return_value`` or
    ``cursor.side_effect``.
    """

    def __init__(self) -> None:
        self.cursor = MagicMock()
@pytest.fixture
def mock_connection() -> MockConnection:
    """Provide a fresh mocked DuckDB connection for each test."""
    connection = MockConnection()
    return connection
@pytest.fixture
def store(mock_connection: MockConnection) -> DuckDBStore:
    """Store wired to the mocked connection, with schema set up."""
    instance = DuckDBStore(mock_connection)
    instance.setup()
    return instance
def test_batch_order(store: DuckDBStore) -> None:
    """batch() must map results back to ops in submission order, no matter
    how ops of different kinds (get/put/search/list) are interleaved."""
    mock_connection = store.conn
    mock_get_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_search_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_list_namespaces_cursor = MockCursor(
        [
            ("test",),
        ]
    )
    failures: list[ValueError] = []
    def cursor_side_effect() -> Any:
        cursor = MagicMock()
        def execute_side_effect(query: str, *params: Any) -> None:
            # Route canned rows by matching substrings of the SQL text;
            # queries that match nothing are collected so the test fails loudly.
            if "WHERE prefix = ? AND key" in query:
                cursor.fetchall = mock_get_cursor.fetchall
            elif "SELECT prefix, key, value" in query:
                cursor.fetchall = mock_search_cursor.fetchall
            elif "SELECT DISTINCT ON (truncated_prefix)" in query:
                cursor.fetchall = mock_list_namespaces_cursor.fetchall
            elif "INSERT INTO " in query:
                pass
            else:
                e = ValueError(f"Unmatched query: {query}")
                failures.append(e)
                raise e
        cursor.execute = MagicMock(side_effect=execute_side_effect)
        return cursor
    mock_connection.cursor.side_effect = cursor_side_effect
    ops = [
        GetOp(namespace=("test",), key="key1"),
        PutOp(namespace=("test",), key="key2", value={"data": "value2"}),
        SearchOp(
            namespace_prefix=("test",), filter={"data": "value1"}, limit=10, offset=0
        ),
        ListNamespacesOp(match_conditions=None, max_depth=None, limit=10, offset=0),
        GetOp(namespace=("test",), key="key3"),
    ]
    results = store.batch(ops)
    assert not failures
    assert len(results) == 5
    assert isinstance(results[0], Item)
    assert isinstance(results[0].value, dict)
    assert results[0].value == {"data": "value1"}
    assert results[0].key == "key1"
    assert results[1] is None
    assert isinstance(results[2], list)
    assert len(results[2]) == 1
    assert isinstance(results[3], list)
    assert results[3] == [("test",)]
    assert results[4] is None
    # Same op kinds in a different order: results must still line up 1:1.
    ops_reordered = [
        SearchOp(namespace_prefix=("test",), filter=None, limit=5, offset=0),
        GetOp(namespace=("test",), key="key2"),
        ListNamespacesOp(match_conditions=None, max_depth=None, limit=5, offset=0),
        PutOp(namespace=("test",), key="key3", value={"data": "value3"}),
        GetOp(namespace=("test",), key="key1"),
    ]
    results_reordered = store.batch(ops_reordered)
    assert not failures
    assert len(results_reordered) == 5
    assert isinstance(results_reordered[0], list)
    assert len(results_reordered[0]) == 1
    assert isinstance(results_reordered[1], Item)
    assert results_reordered[1].value == {"data": "value2"}
    assert results_reordered[1].key == "key2"
    assert isinstance(results_reordered[2], list)
    assert results_reordered[2] == [("test",)]
    assert results_reordered[3] is None
    assert isinstance(results_reordered[4], Item)
    assert results_reordered[4].value == {"data": "value1"}
    assert results_reordered[4].key == "key1"
def test_batch_get_ops(store: DuckDBStore) -> None:
    """GetOps resolve found keys to Items and missing keys to None."""
    mock_connection = store.conn
    mock_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        GetOp(namespace=("test",), key="key1"),
        GetOp(namespace=("test",), key="key2"),
        GetOp(namespace=("test",), key="key3"),
    ]
    results = store.batch(ops)
    assert len(results) == 3
    assert results[0] is not None
    assert results[1] is not None
    # key3 is absent from the canned rows, so its slot must be None.
    assert results[2] is None
    assert results[0].key == "key1"
    assert results[1].key == "key2"
def test_batch_put_ops(store: DuckDBStore) -> None:
    """PutOps (including value=None, i.e. delete) all return None."""
    mock_connection = store.conn
    mock_cursor = MockCursor([])
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        PutOp(namespace=("test",), key="key1", value={"data": "value1"}),
        PutOp(namespace=("test",), key="key2", value={"data": "value2"}),
        PutOp(namespace=("test",), key="key3", value=None),
    ]
    results = store.batch(ops)
    assert len(results) == 3
    assert all(result is None for result in results)
    # Exactly two execute calls for three ops — presumably one batched
    # upsert plus one delete; the grouping is only asserted via call_count.
    assert mock_cursor.execute.call_count == 2
def test_batch_search_ops(store: DuckDBStore) -> None:
    """Each SearchOp slot receives the list of rows the cursor returns."""
    mock_connection = store.conn
    mock_cursor = MockCursor(
        [
            (
                "test.foo",
                "key1",
                '{"data": "value1"}',
                datetime.now(),
                datetime.now(),
            ),
            (
                "test.bar",
                "key2",
                '{"data": "value2"}',
                datetime.now(),
                datetime.now(),
            ),
        ]
    )
    mock_connection.cursor.return_value = mock_cursor
    ops = [
        SearchOp(
            namespace_prefix=("test",), filter={"data": "value1"}, limit=10, offset=0
        ),
        SearchOp(namespace_prefix=("test",), filter=None, limit=5, offset=0),
    ]
    results = store.batch(ops)
    assert len(results) == 2
    # Both searches see the same canned cursor, hence two rows each.
    assert len(results[0]) == 2
    assert len(results[1]) == 2
def test_batch_list_namespaces_ops(store: DuckDBStore) -> None:
    """Dotted prefix strings from the DB are split back into tuples."""
    mock_connection = store.conn
    mock_cursor = MockCursor([("test.namespace1",), ("test.namespace2",)])
    mock_connection.cursor.return_value = mock_cursor
    ops = [ListNamespacesOp(match_conditions=None, max_depth=None, limit=10, offset=0)]
    results = store.batch(ops)
    assert len(results) == 1
    assert results[0] == [("test", "namespace1"), ("test", "namespace2")]
def test_basic_store_ops() -> None:
    """End-to-end put/get/search/list/delete against a real in-memory DuckDB."""
    with DuckDBStore.from_conn_string(":memory:") as store:
        store.setup()
        namespace = ("test", "documents")
        item_id = "doc1"
        item_value = {"title": "Test Document", "content": "Hello, World!"}
        store.put(namespace, item_id, item_value)
        item = store.get(namespace, item_id)
        assert item
        assert item.namespace == namespace
        assert item.key == item_id
        assert item.value == item_value
        # Re-putting the same key overwrites the value and bumps updated_at.
        updated_value = {
            "title": "Updated Test Document",
            "content": "Hello, LangGraph!",
        }
        store.put(namespace, item_id, updated_value)
        updated_item = store.get(namespace, item_id)
        assert updated_item.value == updated_value
        assert updated_item.updated_at > item.updated_at
        # Lookups are namespace-scoped: same key elsewhere must miss.
        different_namespace = ("test", "other_documents")
        item_in_different_namespace = store.get(different_namespace, item_id)
        assert item_in_different_namespace is None
        new_item_id = "doc2"
        new_item_value = {"title": "Another Document", "content": "Greetings!"}
        store.put(namespace, new_item_id, new_item_value)
        search_results = store.search(["test"], limit=10)
        items = search_results
        assert len(items) == 2
        assert any(item.key == item_id for item in items)
        assert any(item.key == new_item_id for item in items)
        namespaces = store.list_namespaces(prefix=["test"])
        assert ("test", "documents") in namespaces
        # Deletes make items unreachable by get and by search.
        store.delete(namespace, item_id)
        store.delete(namespace, new_item_id)
        deleted_item = store.get(namespace, item_id)
        assert deleted_item is None
        deleted_item = store.get(namespace, new_item_id)
        assert deleted_item is None
        empty_search_results = store.search(["test"], limit=10)
        assert len(empty_search_results) == 0
def test_list_namespaces() -> None:
    """Exercise list_namespaces: prefix/suffix filters, '*' wildcards,
    max_depth truncation, and limit/offset paging, on a real DuckDB."""
    with DuckDBStore.from_conn_string(":memory:") as store:
        store.setup()
        # Unique prefix isolates this test's rows from any other data.
        test_pref = str(uuid.uuid4())
        test_namespaces = [
            (test_pref, "test", "documents", "public", test_pref),
            (test_pref, "test", "documents", "private", test_pref),
            (test_pref, "test", "images", "public", test_pref),
            (test_pref, "test", "images", "private", test_pref),
            (test_pref, "prod", "documents", "public", test_pref),
            (
                test_pref,
                "prod",
                "documents",
                "some",
                "nesting",
                "public",
                test_pref,
            ),
            (test_pref, "prod", "documents", "private", test_pref),
        ]
        for namespace in test_namespaces:
            store.put(namespace, "dummy", {"content": "dummy"})
        prefix_result = store.list_namespaces(prefix=[test_pref, "test"])
        assert len(prefix_result) == 4
        assert all([ns[1] == "test" for ns in prefix_result])
        specific_prefix_result = store.list_namespaces(
            prefix=[test_pref, "test", "documents"]
        )
        assert len(specific_prefix_result) == 2
        assert all([ns[1:3] == ("test", "documents") for ns in specific_prefix_result])
        suffix_result = store.list_namespaces(suffix=["public", test_pref])
        assert len(suffix_result) == 4
        assert all(ns[-2] == "public" for ns in suffix_result)
        prefix_suffix_result = store.list_namespaces(
            prefix=[test_pref, "test"], suffix=["public", test_pref]
        )
        assert len(prefix_suffix_result) == 2
        assert all(
            ns[1] == "test" and ns[-2] == "public" for ns in prefix_suffix_result
        )
        # "*" matches exactly one segment at that position.
        wildcard_prefix_result = store.list_namespaces(
            prefix=[test_pref, "*", "documents"]
        )
        assert len(wildcard_prefix_result) == 5
        assert all(ns[2] == "documents" for ns in wildcard_prefix_result)
        wildcard_suffix_result = store.list_namespaces(
            suffix=["*", "public", test_pref]
        )
        assert len(wildcard_suffix_result) == 4
        assert all(ns[-2] == "public" for ns in wildcard_suffix_result)
        wildcard_single = store.list_namespaces(
            suffix=["some", "*", "public", test_pref]
        )
        assert len(wildcard_single) == 1
        assert wildcard_single[0] == (
            test_pref,
            "prod",
            "documents",
            "some",
            "nesting",
            "public",
            test_pref,
        )
        max_depth_result = store.list_namespaces(max_depth=3)
        assert all([len(ns) <= 3 for ns in max_depth_result])
        # Truncation at max_depth must not produce duplicate tuples.
        max_depth_result = store.list_namespaces(
            max_depth=4, prefix=[test_pref, "*", "documents"]
        )
        assert (
            len(set(tuple(res) for res in max_depth_result))
            == len(max_depth_result)
            == 5
        )
        limit_result = store.list_namespaces(prefix=[test_pref], limit=3)
        assert len(limit_result) == 3
        offset_result = store.list_namespaces(prefix=[test_pref], offset=3)
        assert len(offset_result) == len(test_namespaces) - 3
        empty_prefix_result = store.list_namespaces(prefix=[test_pref])
        assert len(empty_prefix_result) == len(test_namespaces)
        assert set(tuple(ns) for ns in empty_prefix_result) == set(
            tuple(ns) for ns in test_namespaces
        )
        for namespace in test_namespaces:
            store.delete(namespace, "dummy")
def test_search() -> None:
    """Exercise search: namespace-prefix scoping, value filters,
    and limit/offset paging, on a real in-memory DuckDB."""
    with DuckDBStore.from_conn_string(":memory:") as store:
        store.setup()
        test_namespaces = [
            ("test_search", "documents", "user1"),
            ("test_search", "documents", "user2"),
            ("test_search", "reports", "department1"),
            ("test_search", "reports", "department2"),
        ]
        test_items = [
            {"title": "Doc 1", "author": "John Doe", "tags": ["important"]},
            {"title": "Doc 2", "author": "Jane Smith", "tags": ["draft"]},
            {"title": "Report A", "author": "John Doe", "tags": ["final"]},
            {"title": "Report B", "author": "Alice Johnson", "tags": ["draft"]},
        ]
        for namespace, item in zip(test_namespaces, test_items):
            store.put(namespace, f"item_{namespace[-1]}", item)
        docs_result = store.search(["test_search", "documents"])
        assert len(docs_result) == 2
        assert all(
            [item.namespace[1] == "documents" for item in docs_result]
        ), docs_result
        reports_result = store.search(["test_search", "reports"])
        assert len(reports_result) == 2
        assert all(item.namespace[1] == "reports" for item in reports_result)
        limited_result = store.search(["test_search"], limit=2)
        assert len(limited_result) == 2
        offset_result = store.search(["test_search"])
        assert len(offset_result) == 4
        # limit/offset pages must not overlap.
        offset_result = store.search(["test_search"], offset=2)
        assert len(offset_result) == 2
        assert all(item not in limited_result for item in offset_result)
        john_doe_result = store.search(["test_search"], filter={"author": "John Doe"})
        assert len(john_doe_result) == 2
        assert all(item.value["author"] == "John Doe" for item in john_doe_result)
        draft_result = store.search(["test_search"], filter={"tags": ["draft"]})
        assert len(draft_result) == 2
        assert all("draft" in item.value["tags"] for item in draft_result)
        page1 = store.search(["test_search"], limit=2, offset=0)
        page2 = store.search(["test_search"], limit=2, offset=2)
        all_items = page1 + page2
        assert len(all_items) == 4
        assert len(set(item.key for item in all_items)) == 4
        for namespace in test_namespaces:
            store.delete(namespace, f"item_{namespace[-1]}")
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb | lc_public_repos/langgraph/libs/checkpoint-duckdb/tests/test_async.py | from typing import Any
import pytest
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
Checkpoint,
CheckpointMetadata,
create_checkpoint,
empty_checkpoint,
)
from langgraph.checkpoint.duckdb.aio import AsyncDuckDBSaver
class TestAsyncDuckDBSaver:
    """Tests for ``AsyncDuckDBSaver`` run against an in-memory DuckDB.

    Every test opens its own database via ``from_conn_string(":memory:")``,
    so no state is shared between tests.
    """

    @pytest.fixture(autouse=True)
    async def setup(self) -> None:
        """Populate shared config / checkpoint / metadata fixtures on ``self``."""
        # objects for test setup
        self.config_1: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-1",
                # for backwards compatibility testing
                "thread_ts": "1",
                "checkpoint_ns": "",
            }
        }
        self.config_2: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-2",
                "checkpoint_id": "2",
                "checkpoint_ns": "",
            }
        }
        # same thread as config_2, but under a nested checkpoint namespace
        self.config_3: RunnableConfig = {
            "configurable": {
                "thread_id": "thread-2",
                "checkpoint_id": "2-inner",
                "checkpoint_ns": "inner",
            }
        }
        self.chkpnt_1: Checkpoint = empty_checkpoint()
        self.chkpnt_2: Checkpoint = create_checkpoint(self.chkpnt_1, {}, 1)
        self.chkpnt_3: Checkpoint = empty_checkpoint()
        self.metadata_1: CheckpointMetadata = {
            "source": "input",
            "step": 2,
            "writes": {},
            "score": 1,
        }
        self.metadata_2: CheckpointMetadata = {
            "source": "loop",
            "step": 1,
            "writes": {"foo": "bar"},
            "score": None,
        }
        # empty metadata: exercised by the filter-less search below
        self.metadata_3: CheckpointMetadata = {}

    async def test_asearch(self) -> None:
        """``alist`` filters checkpoints by metadata keys and by config."""
        async with AsyncDuckDBSaver.from_conn_string(":memory:") as saver:
            await saver.setup()
            await saver.aput(self.config_1, self.chkpnt_1, self.metadata_1, {})
            await saver.aput(self.config_2, self.chkpnt_2, self.metadata_2, {})
            await saver.aput(self.config_3, self.chkpnt_3, self.metadata_3, {})
            # call method / assertions
            query_1 = {"source": "input"}  # search by 1 key
            query_2 = {
                "step": 1,
                "writes": {"foo": "bar"},
            }  # search by multiple keys
            query_3: dict[str, Any] = {}  # search by no keys, return all checkpoints
            query_4 = {"source": "update", "step": 1}  # no match
            search_results_1 = [c async for c in saver.alist(None, filter=query_1)]
            assert len(search_results_1) == 1
            assert search_results_1[0].metadata == self.metadata_1
            search_results_2 = [c async for c in saver.alist(None, filter=query_2)]
            assert len(search_results_2) == 1
            assert search_results_2[0].metadata == self.metadata_2
            search_results_3 = [c async for c in saver.alist(None, filter=query_3)]
            assert len(search_results_3) == 3
            search_results_4 = [c async for c in saver.alist(None, filter=query_4)]
            assert len(search_results_4) == 0
            # search by config (defaults to checkpoints across all namespaces)
            search_results_5 = [
                c
                async for c in saver.alist({"configurable": {"thread_id": "thread-2"}})
            ]
            assert len(search_results_5) == 2
            assert {
                search_results_5[0].config["configurable"]["checkpoint_ns"],
                search_results_5[1].config["configurable"]["checkpoint_ns"],
            } == {"", "inner"}
            # TODO: test before and limit params

    async def test_null_chars(self) -> None:
        """Null characters in metadata are stripped before storage and on read."""
        async with AsyncDuckDBSaver.from_conn_string(":memory:") as saver:
            await saver.setup()
            config = await saver.aput(
                self.config_1, self.chkpnt_1, {"my_key": "\x00abc"}, {}
            )
            assert (await saver.aget_tuple(config)).metadata["my_key"] == "abc"  # type: ignore
            assert [c async for c in saver.alist(None, filter={"my_key": "abc"})][
                0
            ].metadata["my_key"] == "abc"
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint/duckdb/aio.py | import asyncio
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Iterator, Optional, Sequence
from langchain_core.runnables import RunnableConfig
import duckdb
from langgraph.checkpoint.base import (
WRITES_IDX_MAP,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
get_checkpoint_id,
)
from langgraph.checkpoint.duckdb.base import BaseDuckDBSaver
from langgraph.checkpoint.serde.base import SerializerProtocol
class AsyncDuckDBSaver(BaseDuckDBSaver):
    """Asynchronous checkpoint saver backed by a DuckDB database.

    DuckDB's Python driver is synchronous, so every database call is run via
    ``asyncio.to_thread`` and serialized behind an ``asyncio.Lock`` (a single
    connection is shared across all operations). The synchronous wrapper
    methods (``list``, ``get_tuple``, ``put``, ``put_writes``) schedule the
    corresponding coroutine on the event loop captured at construction time
    and therefore must be called from a different thread.
    """

    lock: asyncio.Lock

    def __init__(
        self,
        conn: duckdb.DuckDBPyConnection,
        serde: Optional[SerializerProtocol] = None,
    ) -> None:
        super().__init__(serde=serde)
        self.conn = conn
        self.lock = asyncio.Lock()
        # Captured so the sync wrappers can submit coroutines from other
        # threads via asyncio.run_coroutine_threadsafe.
        self.loop = asyncio.get_running_loop()

    @classmethod
    @asynccontextmanager
    async def from_conn_string(
        cls,
        conn_string: str,
    ) -> AsyncIterator["AsyncDuckDBSaver"]:
        """Create a new AsyncDuckDBSaver instance from a connection string.

        Args:
            conn_string (str): The DuckDB connection info string.

        Returns:
            AsyncDuckDBSaver: A new AsyncDuckDBSaver instance.
        """
        with duckdb.connect(conn_string) as conn:
            yield cls(conn)

    async def setup(self) -> None:
        """Set up the checkpoint database asynchronously.

        This method creates the necessary tables in the DuckDB database if they don't
        already exist and runs database migrations. It MUST be called directly by the user
        the first time checkpointer is used.
        """
        async with self.lock:
            with self.conn.cursor() as cur:
                try:
                    await asyncio.to_thread(
                        cur.execute,
                        "SELECT v FROM checkpoint_migrations ORDER BY v DESC LIMIT 1",
                    )
                    row = await asyncio.to_thread(cur.fetchone)
                    if row is None:
                        version = -1
                    else:
                        version = row[0]
                except duckdb.CatalogException:
                    # migrations table doesn't exist yet: start from scratch
                    version = -1
                for v, migration in zip(
                    range(version + 1, len(self.MIGRATIONS)),
                    self.MIGRATIONS[version + 1 :],
                ):
                    await asyncio.to_thread(cur.execute, migration)
                    await asyncio.to_thread(
                        cur.execute,
                        "INSERT INTO checkpoint_migrations (v) VALUES (?)",
                        [v],
                    )

    async def alist(
        self,
        config: Optional[RunnableConfig],
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> AsyncIterator[CheckpointTuple]:
        """List checkpoints from the database asynchronously.

        This method retrieves a list of checkpoint tuples from the DuckDB database based
        on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first).

        Args:
            config (Optional[RunnableConfig]): Base configuration for filtering checkpoints.
            filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata.
            before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
            limit (Optional[int]): Maximum number of checkpoints to return.

        Yields:
            AsyncIterator[CheckpointTuple]: An asynchronous iterator of matching checkpoint tuples.
        """
        where, args = self._search_where(config, filter, before)
        query = self.SELECT_SQL + where + " ORDER BY checkpoint_id DESC"
        if limit:
            query += f" LIMIT {limit}"
        # if we change this to use .stream() we need to make sure to close the cursor
        async with self._cursor() as cur:
            await asyncio.to_thread(cur.execute, query, args)
            results = await asyncio.to_thread(cur.fetchall)
            for value in results:
                (
                    thread_id,
                    checkpoint,
                    checkpoint_ns,
                    checkpoint_id,
                    parent_checkpoint_id,
                    metadata,
                    channel_values,
                    pending_writes,
                    pending_sends,
                ) = value
                yield CheckpointTuple(
                    {
                        "configurable": {
                            "thread_id": thread_id,
                            "checkpoint_ns": checkpoint_ns,
                            "checkpoint_id": checkpoint_id,
                        }
                    },
                    await asyncio.to_thread(
                        self._load_checkpoint,
                        checkpoint,
                        channel_values,
                        pending_sends,
                    ),
                    self._load_metadata(metadata),
                    (
                        {
                            "configurable": {
                                "thread_id": thread_id,
                                "checkpoint_ns": checkpoint_ns,
                                "checkpoint_id": parent_checkpoint_id,
                            }
                        }
                        if parent_checkpoint_id
                        else None
                    ),
                    await asyncio.to_thread(self._load_writes, pending_writes),
                )

    async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
        """Get a checkpoint tuple from the database asynchronously.

        This method retrieves a checkpoint tuple from the DuckDB database based on the
        provided config. If the config contains a "checkpoint_id" key, the checkpoint with
        the matching thread ID and "checkpoint_id" is retrieved. Otherwise, the latest checkpoint
        for the given thread ID is retrieved.

        Args:
            config (RunnableConfig): The config to use for retrieving the checkpoint.

        Returns:
            Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_id = get_checkpoint_id(config)
        checkpoint_ns = config["configurable"].get("checkpoint_ns", "")
        if checkpoint_id:
            args: tuple[Any, ...] = (thread_id, checkpoint_ns, checkpoint_id)
            where = "WHERE thread_id = ? AND checkpoint_ns = ? AND checkpoint_id = ?"
        else:
            args = (thread_id, checkpoint_ns)
            where = "WHERE thread_id = ? AND checkpoint_ns = ? ORDER BY checkpoint_id DESC LIMIT 1"
        async with self._cursor() as cur:
            await asyncio.to_thread(
                cur.execute,
                self.SELECT_SQL + where,
                args,
            )
            value = await asyncio.to_thread(cur.fetchone)
            if value:
                (
                    thread_id,
                    checkpoint,
                    checkpoint_ns,
                    checkpoint_id,
                    parent_checkpoint_id,
                    metadata,
                    channel_values,
                    pending_writes,
                    pending_sends,
                ) = value
                return CheckpointTuple(
                    {
                        "configurable": {
                            "thread_id": thread_id,
                            "checkpoint_ns": checkpoint_ns,
                            "checkpoint_id": checkpoint_id,
                        }
                    },
                    await asyncio.to_thread(
                        self._load_checkpoint,
                        checkpoint,
                        channel_values,
                        pending_sends,
                    ),
                    self._load_metadata(metadata),
                    (
                        {
                            "configurable": {
                                "thread_id": thread_id,
                                "checkpoint_ns": checkpoint_ns,
                                "checkpoint_id": parent_checkpoint_id,
                            }
                        }
                        if parent_checkpoint_id
                        else None
                    ),
                    await asyncio.to_thread(self._load_writes, pending_writes),
                )

    async def aput(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Save a checkpoint to the database asynchronously.

        This method saves a checkpoint to the DuckDB database. The checkpoint is associated
        with the provided config and its parent config (if any).

        Args:
            config (RunnableConfig): The config to associate with the checkpoint.
            checkpoint (Checkpoint): The checkpoint to save.
            metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
            new_versions (ChannelVersions): New channel versions as of this write.

        Returns:
            RunnableConfig: Updated configuration after storing the checkpoint.
        """
        configurable = config["configurable"].copy()
        thread_id = configurable.pop("thread_id")
        checkpoint_ns = configurable.pop("checkpoint_ns")
        # prefer "checkpoint_id"; "thread_ts" is the legacy key for the same value
        checkpoint_id = configurable.pop(
            "checkpoint_id", configurable.pop("thread_ts", None)
        )
        copy = checkpoint.copy()
        next_config = {
            "configurable": {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": checkpoint["id"],
            }
        }
        # channel values are stored separately as versioned blobs
        checkpoint_blobs = await asyncio.to_thread(
            self._dump_blobs,
            thread_id,
            checkpoint_ns,
            copy.pop("channel_values"),  # type: ignore[misc]
            new_versions,
        )
        async with self._cursor() as cur:
            if checkpoint_blobs:
                await asyncio.to_thread(
                    cur.executemany, self.UPSERT_CHECKPOINT_BLOBS_SQL, checkpoint_blobs
                )
            await asyncio.to_thread(
                cur.execute,
                self.UPSERT_CHECKPOINTS_SQL,
                (
                    thread_id,
                    checkpoint_ns,
                    checkpoint["id"],
                    checkpoint_id,
                    self._dump_checkpoint(copy),
                    self._dump_metadata(metadata),
                ),
            )
        return next_config

    async def aput_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[tuple[str, Any]],
        task_id: str,
    ) -> None:
        """Store intermediate writes linked to a checkpoint asynchronously.

        This method saves intermediate writes associated with a checkpoint to the database.

        Args:
            config (RunnableConfig): Configuration of the related checkpoint.
            writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair.
            task_id (str): Identifier for the task creating the writes.
        """
        # special channels (WRITES_IDX_MAP) may legitimately be overwritten;
        # everything else is insert-only
        query = (
            self.UPSERT_CHECKPOINT_WRITES_SQL
            if all(w[0] in WRITES_IDX_MAP for w in writes)
            else self.INSERT_CHECKPOINT_WRITES_SQL
        )
        params = await asyncio.to_thread(
            self._dump_writes,
            config["configurable"]["thread_id"],
            config["configurable"]["checkpoint_ns"],
            config["configurable"]["checkpoint_id"],
            task_id,
            writes,
        )
        async with self._cursor() as cur:
            await asyncio.to_thread(cur.executemany, query, params)

    @asynccontextmanager
    async def _cursor(self) -> AsyncIterator[duckdb.DuckDBPyConnection]:
        """Yield a cursor on the shared connection, serialized by the lock."""
        async with self.lock:
            with self.conn.cursor() as cur:
                yield cur

    def list(
        self,
        config: Optional[RunnableConfig],
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> Iterator[CheckpointTuple]:
        """List checkpoints from the database.

        This method retrieves a list of checkpoint tuples from the DuckDB database based
        on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first).

        Args:
            config (Optional[RunnableConfig]): Base configuration for filtering checkpoints.
            filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata.
            before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
            limit (Optional[int]): Maximum number of checkpoints to return.

        Yields:
            Iterator[CheckpointTuple]: An iterator of matching checkpoint tuples.
        """
        aiter_ = self.alist(config, filter=filter, before=before, limit=limit)
        while True:
            try:
                # drive the async iterator one item at a time from this thread
                yield asyncio.run_coroutine_threadsafe(
                    anext(aiter_),
                    self.loop,
                ).result()
            except StopAsyncIteration:
                break

    def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
        """Get a checkpoint tuple from the database.

        This method retrieves a checkpoint tuple from the DuckDB database based on the
        provided config. If the config contains a "checkpoint_id" key, the checkpoint with
        the matching thread ID and "checkpoint_id" is retrieved. Otherwise, the latest checkpoint
        for the given thread ID is retrieved.

        Args:
            config (RunnableConfig): The config to use for retrieving the checkpoint.

        Returns:
            Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found.
        """
        try:
            # check if we are in the main thread, only bg threads can block
            # we don't check in other methods to avoid the overhead
            if asyncio.get_running_loop() is self.loop:
                raise asyncio.InvalidStateError(
                    "Synchronous calls to AsyncDuckDBSaver are only allowed from a "
                    "different thread. From the main thread, use the async interface. "
                    "For example, use `await checkpointer.aget_tuple(...)` or `await "
                    "graph.ainvoke(...)`."
                )
        except RuntimeError:
            # no running loop in this thread: safe to block below
            pass
        return asyncio.run_coroutine_threadsafe(
            self.aget_tuple(config), self.loop
        ).result()

    def put(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Save a checkpoint to the database.

        This method saves a checkpoint to the DuckDB database. The checkpoint is associated
        with the provided config and its parent config (if any).

        Args:
            config (RunnableConfig): The config to associate with the checkpoint.
            checkpoint (Checkpoint): The checkpoint to save.
            metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
            new_versions (ChannelVersions): New channel versions as of this write.

        Returns:
            RunnableConfig: Updated configuration after storing the checkpoint.
        """
        return asyncio.run_coroutine_threadsafe(
            self.aput(config, checkpoint, metadata, new_versions), self.loop
        ).result()

    def put_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[tuple[str, Any]],
        task_id: str,
    ) -> None:
        """Store intermediate writes linked to a checkpoint.

        This method saves intermediate writes associated with a checkpoint to the database.

        Args:
            config (RunnableConfig): Configuration of the related checkpoint.
            writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair.
            task_id (str): Identifier for the task creating the writes.
        """
        return asyncio.run_coroutine_threadsafe(
            self.aput_writes(config, writes, task_id), self.loop
        ).result()
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint/duckdb/base.py | import json
import random
from typing import Any, List, Optional, Sequence, Tuple, cast
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
WRITES_IDX_MAP,
BaseCheckpointSaver,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
get_checkpoint_id,
)
from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
from langgraph.checkpoint.serde.types import TASKS, ChannelProtocol
MetadataInput = Optional[dict[str, Any]]
"""
To add a new migration, add a new string to the MIGRATIONS list.
The position of the migration in the list is the version number.
"""
MIGRATIONS = [
"""CREATE TABLE IF NOT EXISTS checkpoint_migrations (
v INTEGER PRIMARY KEY
);""",
"""CREATE TABLE IF NOT EXISTS checkpoints (
thread_id TEXT NOT NULL,
checkpoint_ns TEXT NOT NULL DEFAULT '',
checkpoint_id TEXT NOT NULL,
parent_checkpoint_id TEXT,
type TEXT,
checkpoint JSON NOT NULL,
metadata JSON NOT NULL DEFAULT '{}',
PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id)
);""",
"""CREATE TABLE IF NOT EXISTS checkpoint_blobs (
thread_id TEXT NOT NULL,
checkpoint_ns TEXT NOT NULL DEFAULT '',
channel TEXT NOT NULL,
version TEXT NOT NULL,
type TEXT NOT NULL,
blob BLOB,
PRIMARY KEY (thread_id, checkpoint_ns, channel, version)
);""",
"""CREATE TABLE IF NOT EXISTS checkpoint_writes (
thread_id TEXT NOT NULL,
checkpoint_ns TEXT NOT NULL DEFAULT '',
checkpoint_id TEXT NOT NULL,
task_id TEXT NOT NULL,
idx INTEGER NOT NULL,
channel TEXT NOT NULL,
type TEXT,
blob BLOB NOT NULL,
PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id, task_id, idx)
);""",
]
SELECT_SQL = f"""
select
thread_id,
checkpoint,
checkpoint_ns,
checkpoint_id,
parent_checkpoint_id,
metadata,
(
select array_agg(array[bl.channel::bytea, bl.type::bytea, bl.blob])
from (
SELECT unnest(json_keys(json_extract(checkpoint, '$.channel_versions'))) as key
) cv
inner join checkpoint_blobs bl
on bl.thread_id = checkpoints.thread_id
and bl.checkpoint_ns = checkpoints.checkpoint_ns
and bl.channel = cv.key
and bl.version = json_extract_string(checkpoint, '$.channel_versions.' || cv.key)
) as channel_values,
(
select
array_agg(array[cw.task_id::blob, cw.channel::blob, cw.type::blob, cw.blob])
from checkpoint_writes cw
where cw.thread_id = checkpoints.thread_id
and cw.checkpoint_ns = checkpoints.checkpoint_ns
and cw.checkpoint_id = checkpoints.checkpoint_id
) as pending_writes,
(
select array_agg(array[cw.type::blob, cw.blob])
from checkpoint_writes cw
where cw.thread_id = checkpoints.thread_id
and cw.checkpoint_ns = checkpoints.checkpoint_ns
and cw.checkpoint_id = checkpoints.parent_checkpoint_id
and cw.channel = '{TASKS}'
) as pending_sends
from checkpoints """
UPSERT_CHECKPOINT_BLOBS_SQL = """
INSERT INTO checkpoint_blobs (thread_id, checkpoint_ns, channel, version, type, blob)
VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT (thread_id, checkpoint_ns, channel, version) DO NOTHING
"""
UPSERT_CHECKPOINTS_SQL = """
INSERT INTO checkpoints (thread_id, checkpoint_ns, checkpoint_id, parent_checkpoint_id, checkpoint, metadata)
VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id)
DO UPDATE SET
checkpoint = EXCLUDED.checkpoint,
metadata = EXCLUDED.metadata;
"""
UPSERT_CHECKPOINT_WRITES_SQL = """
INSERT INTO checkpoint_writes (thread_id, checkpoint_ns, checkpoint_id, task_id, idx, channel, type, blob)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id, task_id, idx) DO UPDATE SET
channel = EXCLUDED.channel,
type = EXCLUDED.type,
blob = EXCLUDED.blob;
"""
INSERT_CHECKPOINT_WRITES_SQL = """
INSERT INTO checkpoint_writes (thread_id, checkpoint_ns, checkpoint_id, task_id, idx, channel, type, blob)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id, task_id, idx) DO NOTHING
"""
class BaseDuckDBSaver(BaseCheckpointSaver[str]):
    """Shared SQL statements and (de)serialization helpers used by both the
    synchronous and asynchronous DuckDB checkpoint savers."""

    SELECT_SQL = SELECT_SQL
    MIGRATIONS = MIGRATIONS
    UPSERT_CHECKPOINT_BLOBS_SQL = UPSERT_CHECKPOINT_BLOBS_SQL
    UPSERT_CHECKPOINTS_SQL = UPSERT_CHECKPOINTS_SQL
    UPSERT_CHECKPOINT_WRITES_SQL = UPSERT_CHECKPOINT_WRITES_SQL
    INSERT_CHECKPOINT_WRITES_SQL = INSERT_CHECKPOINT_WRITES_SQL
    # separate JSON serializer used only for metadata (stored as JSON text)
    jsonplus_serde = JsonPlusSerializer()

    def _load_checkpoint(
        self,
        checkpoint_json_str: str,
        channel_values: list[tuple[bytes, bytes, bytes]],
        pending_sends: list[tuple[bytes, bytes]],
    ) -> Checkpoint:
        """Reassemble a Checkpoint from its stored JSON plus the blob rows
        returned by SELECT_SQL (channel_values, pending_sends)."""
        checkpoint = json.loads(checkpoint_json_str)
        return {
            **checkpoint,
            "pending_sends": [
                self.serde.loads_typed((c.decode(), b)) for c, b in pending_sends or []
            ],
            "channel_values": self._load_blobs(channel_values),
        }

    def _dump_checkpoint(self, checkpoint: Checkpoint) -> dict[str, Any]:
        """Prepare a checkpoint dict for storage; pending_sends are not stored
        on the checkpoint itself (they are rebuilt from checkpoint_writes by
        SELECT_SQL on load)."""
        return {**checkpoint, "pending_sends": []}

    def _load_blobs(
        self, blob_values: list[tuple[bytes, bytes, bytes]]
    ) -> dict[str, Any]:
        """Deserialize (channel, type, blob) rows into a channel-values dict,
        skipping "empty" placeholder rows written by _dump_blobs."""
        if not blob_values:
            return {}
        return {
            k.decode(): self.serde.loads_typed((t.decode(), v))
            for k, t, v in blob_values
            if t.decode() != "empty"
        }

    def _dump_blobs(
        self,
        thread_id: str,
        checkpoint_ns: str,
        values: dict[str, Any],
        versions: ChannelVersions,
    ) -> list[tuple[str, str, str, str, str, Optional[bytes]]]:
        """Serialize channel values into rows for checkpoint_blobs; channels
        that have a version but no value get an ("empty", None) placeholder."""
        if not versions:
            return []
        return [
            (
                thread_id,
                checkpoint_ns,
                k,
                cast(str, ver),
                *(
                    self.serde.dumps_typed(values[k])
                    if k in values
                    else ("empty", None)
                ),
            )
            for k, ver in versions.items()
        ]

    def _load_writes(
        self, writes: list[tuple[bytes, bytes, bytes, bytes]]
    ) -> list[tuple[str, str, Any]]:
        """Deserialize (task_id, channel, type, blob) rows into
        (task_id, channel, value) triples."""
        return (
            [
                (
                    tid.decode(),
                    channel.decode(),
                    self.serde.loads_typed((t.decode(), v)),
                )
                for tid, channel, t, v in writes
            ]
            if writes
            else []
        )

    def _dump_writes(
        self,
        thread_id: str,
        checkpoint_ns: str,
        checkpoint_id: str,
        task_id: str,
        writes: Sequence[tuple[str, Any]],
    ) -> list[tuple[str, str, str, str, int, str, str, bytes]]:
        """Serialize (channel, value) writes into checkpoint_writes rows;
        special channels get a fixed idx from WRITES_IDX_MAP, others use
        their position in the sequence."""
        return [
            (
                thread_id,
                checkpoint_ns,
                checkpoint_id,
                task_id,
                WRITES_IDX_MAP.get(channel, idx),
                channel,
                *self.serde.dumps_typed(value),
            )
            for idx, (channel, value) in enumerate(writes)
        ]

    def _load_metadata(self, metadata_json_str: str) -> CheckpointMetadata:
        """Deserialize metadata stored as JSON text."""
        return self.jsonplus_serde.loads(metadata_json_str.encode())

    def _dump_metadata(self, metadata: CheckpointMetadata) -> str:
        """Serialize metadata to JSON text suitable for a DuckDB JSON column."""
        serialized_metadata = self.jsonplus_serde.dumps(metadata)
        # NOTE: we're using JSON serializer (not msgpack), so we need to remove null characters before writing
        return serialized_metadata.decode().replace("\\u0000", "")

    def get_next_version(self, current: Optional[str], channel: ChannelProtocol) -> str:
        """Produce the next monotonically increasing channel version string:
        a zero-padded counter followed by a random float tiebreaker."""
        if current is None:
            current_v = 0
        elif isinstance(current, int):
            # tolerate legacy integer versions
            current_v = current
        else:
            current_v = int(current.split(".")[0])
        next_v = current_v + 1
        next_h = random.random()
        return f"{next_v:032}.{next_h:016}"

    def _search_where(
        self,
        config: Optional[RunnableConfig],
        filter: MetadataInput,
        before: Optional[RunnableConfig] = None,
    ) -> Tuple[str, List[Any]]:
        """Return WHERE clause predicates for alist() given config, filter, before.

        This method returns a tuple of a string and a list of values. The string
        is the parametered WHERE clause predicate (including the WHERE keyword):
        "WHERE column1 = ? AND column2 = ?". The list of values contains the
        values for each of the corresponding parameters.
        """
        wheres = []
        param_values = []
        # construct predicate for config filter
        if config:
            wheres.append("thread_id = ?")
            param_values.append(config["configurable"]["thread_id"])
            checkpoint_ns = config["configurable"].get("checkpoint_ns")
            if checkpoint_ns is not None:
                wheres.append("checkpoint_ns = ?")
                param_values.append(checkpoint_ns)
            if checkpoint_id := get_checkpoint_id(config):
                wheres.append("checkpoint_id = ?")
                param_values.append(checkpoint_id)
        # construct predicate for metadata filter
        if filter:
            wheres.append("json_contains(metadata, ?)")
            param_values.append(json.dumps(filter))
        # construct predicate for `before`
        if before is not None:
            wheres.append("checkpoint_id < ?")
            param_values.append(get_checkpoint_id(before))
        return (
            "WHERE " + " AND ".join(wheres) if wheres else "",
            param_values,
        )
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/checkpoint/duckdb/__init__.py | import threading
from contextlib import contextmanager
from typing import Any, Iterator, Optional, Sequence
from langchain_core.runnables import RunnableConfig
import duckdb
from langgraph.checkpoint.base import (
WRITES_IDX_MAP,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
get_checkpoint_id,
)
from langgraph.checkpoint.duckdb.base import BaseDuckDBSaver
from langgraph.checkpoint.serde.base import SerializerProtocol
class DuckDBSaver(BaseDuckDBSaver):
lock: threading.Lock
def __init__(
self,
conn: duckdb.DuckDBPyConnection,
serde: Optional[SerializerProtocol] = None,
) -> None:
super().__init__(serde=serde)
self.conn = conn
self.lock = threading.Lock()
@classmethod
@contextmanager
def from_conn_string(cls, conn_string: str) -> Iterator["DuckDBSaver"]:
"""Create a new DuckDBSaver instance from a connection string.
Args:
conn_string (str): The DuckDB connection info string.
Returns:
DuckDBSaver: A new DuckDBSaver instance.
"""
with duckdb.connect(conn_string) as conn:
yield cls(conn)
def setup(self) -> None:
"""Set up the checkpoint database asynchronously.
This method creates the necessary tables in the DuckDB database if they don't
already exist and runs database migrations. It MUST be called directly by the user
the first time checkpointer is used.
"""
with self.lock, self.conn.cursor() as cur:
try:
row = cur.execute(
"SELECT v FROM checkpoint_migrations ORDER BY v DESC LIMIT 1"
).fetchone()
if row is None:
version = -1
else:
version = row[0]
except duckdb.CatalogException:
version = -1
for v, migration in zip(
range(version + 1, len(self.MIGRATIONS)),
self.MIGRATIONS[version + 1 :],
):
cur.execute(migration)
cur.execute("INSERT INTO checkpoint_migrations (v) VALUES (?)", [v])
def list(
self,
config: Optional[RunnableConfig],
*,
filter: Optional[dict[str, Any]] = None,
before: Optional[RunnableConfig] = None,
limit: Optional[int] = None,
) -> Iterator[CheckpointTuple]:
"""List checkpoints from the database.
This method retrieves a list of checkpoint tuples from the DuckDB database based
on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first).
Args:
config (RunnableConfig): The config to use for listing the checkpoints.
filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. Defaults to None.
before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
limit (Optional[int]): The maximum number of checkpoints to return. Defaults to None.
Yields:
Iterator[CheckpointTuple]: An iterator of checkpoint tuples.
Examples:
>>> from langgraph.checkpoint.duckdb import DuckDBSaver
>>> with DuckDBSaver.from_conn_string(":memory:") as memory:
... # Run a graph, then list the checkpoints
>>> config = {"configurable": {"thread_id": "1"}}
>>> checkpoints = list(memory.list(config, limit=2))
>>> print(checkpoints)
[CheckpointTuple(...), CheckpointTuple(...)]
>>> config = {"configurable": {"thread_id": "1"}}
>>> before = {"configurable": {"checkpoint_id": "1ef4f797-8335-6428-8001-8a1503f9b875"}}
>>> with DuckDBSaver.from_conn_string(":memory:") as memory:
... # Run a graph, then list the checkpoints
>>> checkpoints = list(memory.list(config, before=before))
>>> print(checkpoints)
[CheckpointTuple(...), ...]
"""
where, args = self._search_where(config, filter, before)
query = self.SELECT_SQL + where + " ORDER BY checkpoint_id DESC"
if limit:
query += f" LIMIT {limit}"
# if we change this to use .stream() we need to make sure to close the cursor
with self._cursor() as cur:
cur.execute(query, args)
for value in cur.fetchall():
(
thread_id,
checkpoint,
checkpoint_ns,
checkpoint_id,
parent_checkpoint_id,
metadata,
channel_values,
pending_writes,
pending_sends,
) = value
yield CheckpointTuple(
{
"configurable": {
"thread_id": thread_id,
"checkpoint_ns": checkpoint_ns,
"checkpoint_id": checkpoint_id,
}
},
self._load_checkpoint(
checkpoint,
channel_values,
pending_sends,
),
self._load_metadata(metadata),
(
{
"configurable": {
"thread_id": thread_id,
"checkpoint_ns": checkpoint_ns,
"checkpoint_id": parent_checkpoint_id,
}
}
if parent_checkpoint_id
else None
),
self._load_writes(pending_writes),
)
def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
"""Get a checkpoint tuple from the database.
This method retrieves a checkpoint tuple from the DuckDB database based on the
provided config. If the config contains a "checkpoint_id" key, the checkpoint with
the matching thread ID and timestamp is retrieved. Otherwise, the latest checkpoint
for the given thread ID is retrieved.
Args:
config (RunnableConfig): The config to use for retrieving the checkpoint.
Returns:
Optional[CheckpointTuple]: The retrieved checkpoint tuple, or None if no matching checkpoint was found.
Examples:
Basic:
>>> config = {"configurable": {"thread_id": "1"}}
>>> checkpoint_tuple = memory.get_tuple(config)
>>> print(checkpoint_tuple)
CheckpointTuple(...)
With timestamp:
>>> config = {
... "configurable": {
... "thread_id": "1",
... "checkpoint_ns": "",
... "checkpoint_id": "1ef4f797-8335-6428-8001-8a1503f9b875",
... }
... }
>>> checkpoint_tuple = memory.get_tuple(config)
>>> print(checkpoint_tuple)
CheckpointTuple(...)
""" # noqa
thread_id = config["configurable"]["thread_id"]
checkpoint_id = get_checkpoint_id(config)
checkpoint_ns = config["configurable"].get("checkpoint_ns", "")
if checkpoint_id:
args: tuple[Any, ...] = (thread_id, checkpoint_ns, checkpoint_id)
where = "WHERE thread_id = ? AND checkpoint_ns = ? AND checkpoint_id = ?"
else:
args = (thread_id, checkpoint_ns)
where = "WHERE thread_id = ? AND checkpoint_ns = ? ORDER BY checkpoint_id DESC LIMIT 1"
with self._cursor() as cur:
cur.execute(
self.SELECT_SQL + where,
args,
)
value = cur.fetchone()
if value:
(
thread_id,
checkpoint,
checkpoint_ns,
checkpoint_id,
parent_checkpoint_id,
metadata,
channel_values,
pending_writes,
pending_sends,
) = value
return CheckpointTuple(
{
"configurable": {
"thread_id": thread_id,
"checkpoint_ns": checkpoint_ns,
"checkpoint_id": checkpoint_id,
}
},
self._load_checkpoint(
checkpoint,
channel_values,
pending_sends,
),
self._load_metadata(metadata),
(
{
"configurable": {
"thread_id": thread_id,
"checkpoint_ns": checkpoint_ns,
"checkpoint_id": parent_checkpoint_id,
}
}
if parent_checkpoint_id
else None
),
self._load_writes(pending_writes),
)
def put(
self,
config: RunnableConfig,
checkpoint: Checkpoint,
metadata: CheckpointMetadata,
new_versions: ChannelVersions,
) -> RunnableConfig:
"""Save a checkpoint to the database.
This method saves a checkpoint to the DuckDB database. The checkpoint is associated
with the provided config and its parent config (if any).
Args:
config (RunnableConfig): The config to associate with the checkpoint.
checkpoint (Checkpoint): The checkpoint to save.
metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
new_versions (ChannelVersions): New channel versions as of this write.
Returns:
RunnableConfig: Updated configuration after storing the checkpoint.
Examples:
>>> from langgraph.checkpoint.duckdb import DuckDBSaver
>>> with DuckDBSaver.from_conn_string(":memory:") as memory:
>>> config = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}}
>>> checkpoint = {"ts": "2024-05-04T06:32:42.235444+00:00", "id": "1ef4f797-8335-6428-8001-8a1503f9b875", "channel_values": {"key": "value"}}
>>> saved_config = memory.put(config, checkpoint, {"source": "input", "step": 1, "writes": {"key": "value"}}, {})
>>> print(saved_config)
{'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef4f797-8335-6428-8001-8a1503f9b875'}}
"""
configurable = config["configurable"].copy()
thread_id = configurable.pop("thread_id")
checkpoint_ns = configurable.pop("checkpoint_ns")
checkpoint_id = configurable.pop(
"checkpoint_id", configurable.pop("thread_ts", None)
)
copy = checkpoint.copy()
next_config = {
"configurable": {
"thread_id": thread_id,
"checkpoint_ns": checkpoint_ns,
"checkpoint_id": checkpoint["id"],
}
}
checkpoint_blobs = self._dump_blobs(
thread_id,
checkpoint_ns,
copy.pop("channel_values"), # type: ignore[misc]
new_versions,
)
with self._cursor() as cur:
if checkpoint_blobs:
cur.executemany(self.UPSERT_CHECKPOINT_BLOBS_SQL, checkpoint_blobs)
cur.execute(
self.UPSERT_CHECKPOINTS_SQL,
(
thread_id,
checkpoint_ns,
checkpoint["id"],
checkpoint_id,
self._dump_checkpoint(copy),
self._dump_metadata(metadata),
),
)
return next_config
def put_writes(
    self,
    config: RunnableConfig,
    writes: Sequence[tuple[str, Any]],
    task_id: str,
) -> None:
    """Store intermediate writes linked to a checkpoint.

    Persists the given task writes to the DuckDB database, associated with
    the checkpoint identified by the config.

    Args:
        config (RunnableConfig): Configuration of the related checkpoint.
        writes (List[Tuple[str, Any]]): List of writes to store.
        task_id (str): Identifier for the task creating the writes.
    """
    # Channels present in WRITES_IDX_MAP take the upsert statement; any
    # other channel falls back to the plain insert statement.
    if all(w[0] in WRITES_IDX_MAP for w in writes):
        query = self.UPSERT_CHECKPOINT_WRITES_SQL
    else:
        query = self.INSERT_CHECKPOINT_WRITES_SQL
    configurable = config["configurable"]
    with self._cursor() as cur:
        cur.executemany(
            query,
            self._dump_writes(
                configurable["thread_id"],
                configurable["checkpoint_ns"],
                configurable["checkpoint_id"],
                task_id,
                writes,
            ),
        )
@contextmanager
def _cursor(self) -> Iterator[duckdb.DuckDBPyConnection]:
    """Yield a DuckDB cursor while holding the saver's lock."""
    with self.lock:
        with self.conn.cursor() as cur:
            yield cur
__all__ = ["DuckDBSaver", "Conn"]
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store/duckdb/aio.py | import asyncio
import logging
from contextlib import asynccontextmanager
from typing import (
AsyncIterator,
Iterable,
Sequence,
cast,
)
import duckdb
from langgraph.store.base import GetOp, ListNamespacesOp, Op, PutOp, Result, SearchOp
from langgraph.store.base.batch import AsyncBatchedBaseStore
from langgraph.store.duckdb.base import (
    BaseDuckDBStore,
    _convert_ns,
    _group_ops,
    _row_to_item,
    _row_to_search_item,
)
logger = logging.getLogger(__name__)
class AsyncDuckDBStore(AsyncBatchedBaseStore, BaseDuckDBStore):
    """Asynchronous DuckDB-backed store.

    Builds on the shared query builders in ``BaseDuckDBStore`` and runs the
    blocking DuckDB driver calls in worker threads via ``asyncio.to_thread``
    so the event loop is never blocked.
    """

    def __init__(
        self,
        conn: duckdb.DuckDBPyConnection,
    ) -> None:
        super().__init__()
        self.conn = conn
        # Capture the loop this store was created on so the sync `batch`
        # entry point can schedule work back onto it from other threads.
        self.loop = asyncio.get_running_loop()

    async def abatch(self, ops: Iterable[Op]) -> list[Result]:
        """Execute a heterogeneous batch of ops, one sub-batch per op type.

        Results are written into a single list positioned by each op's
        original index, so callers receive answers in submission order.
        """
        grouped_ops, num_ops = _group_ops(ops)
        results: list[Result] = [None] * num_ops
        tasks = []
        if GetOp in grouped_ops:
            tasks.append(
                self._batch_get_ops(
                    cast(Sequence[tuple[int, GetOp]], grouped_ops[GetOp]), results
                )
            )
        if PutOp in grouped_ops:
            tasks.append(
                self._batch_put_ops(
                    cast(Sequence[tuple[int, PutOp]], grouped_ops[PutOp])
                )
            )
        if SearchOp in grouped_ops:
            tasks.append(
                self._batch_search_ops(
                    cast(Sequence[tuple[int, SearchOp]], grouped_ops[SearchOp]),
                    results,
                )
            )
        if ListNamespacesOp in grouped_ops:
            tasks.append(
                self._batch_list_namespaces_ops(
                    cast(
                        Sequence[tuple[int, ListNamespacesOp]],
                        grouped_ops[ListNamespacesOp],
                    ),
                    results,
                )
            )
        await asyncio.gather(*tasks)
        return results

    def batch(self, ops: Iterable[Op]) -> list[Result]:
        """Synchronous facade over `abatch`.

        Must be invoked from a thread other than the one running `self.loop`,
        since it blocks on the scheduled coroutine's result.
        """
        return asyncio.run_coroutine_threadsafe(self.abatch(ops), self.loop).result()

    async def _batch_get_ops(
        self,
        get_ops: Sequence[tuple[int, GetOp]],
        results: list[Result],
    ) -> None:
        """Run the batched GET queries and place Items (or None) in `results`."""
        cursors = []
        for query, params, namespace, items in self._get_batch_GET_ops_queries(get_ops):
            cur = self.conn.cursor()
            await asyncio.to_thread(cur.execute, query, params)
            cursors.append((cur, namespace, items))
        for cur, namespace, items in cursors:
            rows = await asyncio.to_thread(cur.fetchall)
            # Row layout is (prefix, key, value, created_at, updated_at).
            key_to_row = {row[1]: row for row in rows}
            for idx, key in items:
                row = key_to_row.get(key)
                if row:
                    results[idx] = _row_to_item(namespace, row)
                else:
                    results[idx] = None

    async def _batch_put_ops(
        self,
        put_ops: Sequence[tuple[int, PutOp]],
    ) -> None:
        """Run the batched PUT/DELETE statements; put ops produce no results."""
        queries = self._get_batch_PUT_queries(put_ops)
        for query, params in queries:
            cur = self.conn.cursor()
            await asyncio.to_thread(cur.execute, query, params)

    async def _batch_search_ops(
        self,
        search_ops: Sequence[tuple[int, SearchOp]],
        results: list[Result],
    ) -> None:
        """Run the batched SEARCH queries and place result lists in `results`."""
        queries = self._get_batch_search_queries(search_ops)
        cursors: list[tuple[duckdb.DuckDBPyConnection, int]] = []
        for (query, params), (idx, _) in zip(queries, search_ops):
            cur = self.conn.cursor()
            await asyncio.to_thread(cur.execute, query, params)
            cursors.append((cur, idx))
        for cur, idx in cursors:
            rows = await asyncio.to_thread(cur.fetchall)
            # FIX: use _row_to_search_item (not _row_to_item) so search
            # results are SearchItem instances, consistent with the sync
            # DuckDBStore._batch_search_ops.
            items = [_row_to_search_item(_convert_ns(row[0]), row) for row in rows]
            results[idx] = items

    async def _batch_list_namespaces_ops(
        self,
        list_ops: Sequence[tuple[int, ListNamespacesOp]],
        results: list[Result],
    ) -> None:
        """Run the batched namespace-listing queries; results are tuples."""
        queries = self._get_batch_list_namespaces_queries(list_ops)
        cursors: list[tuple[duckdb.DuckDBPyConnection, int]] = []
        for (query, params), (idx, _) in zip(queries, list_ops):
            cur = self.conn.cursor()
            await asyncio.to_thread(cur.execute, query, params)
            cursors.append((cur, idx))
        for cur, idx in cursors:
            rows = cast(list[tuple], await asyncio.to_thread(cur.fetchall))
            namespaces = [_convert_ns(row[0]) for row in rows]
            results[idx] = namespaces

    @classmethod
    @asynccontextmanager
    async def from_conn_string(
        cls,
        conn_string: str,
    ) -> AsyncIterator["AsyncDuckDBStore"]:
        """Create a new AsyncDuckDBStore instance from a connection string.

        Args:
            conn_string (str): The DuckDB connection info string.

        Returns:
            AsyncDuckDBStore: A new AsyncDuckDBStore instance.
        """
        with duckdb.connect(conn_string) as conn:
            yield cls(conn)

    async def setup(self) -> None:
        """Set up the store database asynchronously.

        This method creates the necessary tables in the DuckDB database if
        they don't already exist and runs database migrations. It is called
        automatically when needed and should not be called directly by the
        user.
        """
        cur = self.conn.cursor()
        try:
            try:
                await asyncio.to_thread(
                    cur.execute,
                    "SELECT v FROM store_migrations ORDER BY v DESC LIMIT 1",
                )
                row = await asyncio.to_thread(cur.fetchone)
                version = -1 if row is None else row[0]
            except duckdb.CatalogException:
                # Migrations table missing: treat as a fresh database.
                version = -1
            # Create store_migrations table if it doesn't exist
            await asyncio.to_thread(
                cur.execute,
                """
                CREATE TABLE IF NOT EXISTS store_migrations (
                    v INTEGER PRIMARY KEY
                )
                """,
            )
            for v, migration in enumerate(
                self.MIGRATIONS[version + 1 :], start=version + 1
            ):
                await asyncio.to_thread(cur.execute, migration)
                await asyncio.to_thread(
                    cur.execute, "INSERT INTO store_migrations (v) VALUES (?)", (v,)
                )
        finally:
            # Close the cursor even when a migration fails (previously leaked).
            cur.close()
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store/duckdb/base.py | import asyncio
import json
import logging
from collections import defaultdict
from contextlib import contextmanager
from typing import (
Any,
Generic,
Iterable,
Iterator,
Sequence,
TypeVar,
Union,
cast,
)
import duckdb
from langgraph.store.base import (
BaseStore,
GetOp,
Item,
ListNamespacesOp,
Op,
PutOp,
Result,
SearchItem,
SearchOp,
)
logger = logging.getLogger(__name__)
MIGRATIONS = [
"""
CREATE TABLE IF NOT EXISTS store (
prefix TEXT NOT NULL,
key TEXT NOT NULL,
value JSON NOT NULL,
created_at TIMESTAMP DEFAULT now(),
updated_at TIMESTAMP DEFAULT now(),
PRIMARY KEY (prefix, key)
);
""",
"""
CREATE INDEX IF NOT EXISTS store_prefix_idx ON store (prefix);
""",
]
C = TypeVar("C", bound=duckdb.DuckDBPyConnection)
class BaseDuckDBStore(Generic[C]):
    """Shared, connection-agnostic query builders for the DuckDB stores.

    The `_get_batch_*` methods translate grouped store operations into
    parameterized SQL; the concrete sync/async subclasses execute the
    statements. NOTE(review): the order of values in each returned `params`
    sequence must match the order of `?` placeholders in the generated SQL
    exactly — keep them in lockstep when editing.
    """

    # Schema migrations shared by all subclasses (see module-level MIGRATIONS).
    MIGRATIONS = MIGRATIONS
    # The DuckDB connection; supplied by the concrete subclass.
    conn: C

    def _get_batch_GET_ops_queries(
        self,
        get_ops: Sequence[tuple[int, GetOp]],
    ) -> list[tuple[str, tuple, tuple[str, ...], list]]:
        """Build one SELECT per distinct namespace covering all its keys.

        Returns:
            Tuples of (query, params, namespace, [(result_idx, key), ...]);
            the trailing items list lets the caller map rows back to result
            slots.
        """
        namespace_groups = defaultdict(list)
        for idx, op in get_ops:
            namespace_groups[op.namespace].append((idx, op.key))
        results = []
        for namespace, items in namespace_groups.items():
            _, keys = zip(*items)
            # One `?` placeholder per requested key.
            keys_to_query = ",".join(["?"] * len(keys))
            query = f"""
                SELECT prefix, key, value, created_at, updated_at
                FROM store
                WHERE prefix = ? AND key IN ({keys_to_query})
            """
            params = (_namespace_to_text(namespace), *keys)
            results.append((query, params, namespace, items))
        return results

    def _get_batch_PUT_queries(
        self,
        put_ops: Sequence[tuple[int, PutOp]],
    ) -> list[tuple[str, Sequence]]:
        """Build DELETEs (for ops with value=None) plus one multi-row upsert."""
        inserts: list[PutOp] = []
        deletes: list[PutOp] = []
        for _, op in put_ops:
            # A PutOp with value=None is a delete request.
            if op.value is None:
                deletes.append(op)
            else:
                inserts.append(op)
        queries: list[tuple[str, Sequence]] = []
        if deletes:
            namespace_groups: dict[tuple[str, ...], list[str]] = defaultdict(list)
            for op in deletes:
                namespace_groups[op.namespace].append(op.key)
            for namespace, keys in namespace_groups.items():
                placeholders = ",".join(["?"] * len(keys))
                query = (
                    f"DELETE FROM store WHERE prefix = ? AND key IN ({placeholders})"
                )
                params = (_namespace_to_text(namespace), *keys)
                queries.append((query, params))
        if inserts:
            values = []
            insertion_params = []
            for op in inserts:
                # Three bound values per row; timestamps come from now().
                values.append("(?, ?, ?, now(), now())")
                insertion_params.extend(
                    [
                        _namespace_to_text(op.namespace),
                        op.key,
                        json.dumps(op.value),
                    ]
                )
            values_str = ",".join(values)
            query = f"""
                INSERT INTO store (prefix, key, value, created_at, updated_at)
                VALUES {values_str}
                ON CONFLICT (prefix, key) DO UPDATE
                SET value = EXCLUDED.value, updated_at = now()
            """
            queries.append((query, insertion_params))
        return queries

    def _get_batch_search_queries(
        self,
        search_ops: Sequence[tuple[int, SearchOp]],
    ) -> list[tuple[str, Sequence]]:
        """Build one prefix-LIKE SELECT per search op, with optional JSON filters."""
        queries: list[tuple[str, Sequence]] = []
        for _, op in search_ops:
            query = """
                SELECT prefix, key, value, created_at, updated_at
                FROM store
                WHERE prefix LIKE ?
            """
            # Trailing % makes this a namespace-prefix match.
            params: list = [f"{_namespace_to_text(op.namespace_prefix)}%"]
            if op.filter:
                filter_conditions = []
                for key, value in op.filter.items():
                    # Compare the JSON value at $.<key> against the
                    # JSON-serialized filter value.
                    filter_conditions.append(f"json_extract(value, '$.{key}') = ?")
                    params.append(json.dumps(value))
                query += " AND " + " AND ".join(filter_conditions)
            query += " ORDER BY updated_at DESC LIMIT ? OFFSET ?"
            params.extend([op.limit, op.offset])
            queries.append((query, params))
        return queries

    def _get_batch_list_namespaces_queries(
        self,
        list_ops: Sequence[tuple[int, ListNamespacesOp]],
    ) -> list[tuple[str, Sequence]]:
        """Build one namespace-listing query per op.

        Each query optionally truncates prefixes to `max_depth` dot-separated
        segments and filters by prefix/suffix match conditions.
        """
        queries: list[tuple[str, Sequence]] = []
        for _, op in list_ops:
            query = """
                WITH split_prefix AS (
                    SELECT
                        prefix,
                        string_split(prefix, '.') AS parts
                    FROM store
                )
                SELECT DISTINCT ON (truncated_prefix)
                    CASE
                        WHEN ? IS NOT NULL THEN
                            array_to_string(array_slice(parts, 1, ?), '.')
                        ELSE prefix
                    END AS truncated_prefix,
                    prefix
                FROM split_prefix
            """
            # max_depth feeds both the NULL check and the slice bound above.
            params: list[Any] = [op.max_depth, op.max_depth]
            conditions = []
            if op.match_conditions:
                for condition in op.match_conditions:
                    if condition.match_type == "prefix":
                        conditions.append("prefix LIKE ?")
                        params.append(
                            f"{_namespace_to_text(condition.path, handle_wildcards=True)}%"
                        )
                    elif condition.match_type == "suffix":
                        conditions.append("prefix LIKE ?")
                        params.append(
                            f"%{_namespace_to_text(condition.path, handle_wildcards=True)}"
                        )
                    else:
                        # Unknown condition types are ignored (logged only).
                        logger.warning(
                            f"Unknown match_type in list_namespaces: {condition.match_type}"
                        )
            if conditions:
                query += " WHERE " + " AND ".join(conditions)
            query += " ORDER BY prefix LIMIT ? OFFSET ?"
            params.extend([op.limit, op.offset])
            queries.append((query, params))
        return queries
class DuckDBStore(BaseStore, BaseDuckDBStore[duckdb.DuckDBPyConnection]):
    """Synchronous DuckDB-backed store built on the shared query builders."""

    def __init__(
        self,
        conn: duckdb.DuckDBPyConnection,
    ) -> None:
        super().__init__()
        self.conn = conn

    def batch(self, ops: Iterable[Op]) -> list[Result]:
        """Execute a heterogeneous batch of ops, one sub-batch per op type.

        Results are written into a single list positioned by each op's
        original index, so callers receive answers in submission order.
        """
        grouped_ops, num_ops = _group_ops(ops)
        results: list[Result] = [None] * num_ops
        if GetOp in grouped_ops:
            self._batch_get_ops(
                cast(Sequence[tuple[int, GetOp]], grouped_ops[GetOp]), results
            )
        if PutOp in grouped_ops:
            self._batch_put_ops(cast(Sequence[tuple[int, PutOp]], grouped_ops[PutOp]))
        if SearchOp in grouped_ops:
            self._batch_search_ops(
                cast(Sequence[tuple[int, SearchOp]], grouped_ops[SearchOp]),
                results,
            )
        if ListNamespacesOp in grouped_ops:
            self._batch_list_namespaces_ops(
                cast(
                    Sequence[tuple[int, ListNamespacesOp]],
                    grouped_ops[ListNamespacesOp],
                ),
                results,
            )
        return results

    async def abatch(self, ops: Iterable[Op]) -> list[Result]:
        """Async facade: run the sync `batch` on the default executor."""
        return await asyncio.get_running_loop().run_in_executor(None, self.batch, ops)

    def _batch_get_ops(
        self,
        get_ops: Sequence[tuple[int, GetOp]],
        results: list[Result],
    ) -> None:
        """Run the batched GET queries and place Items (or None) in `results`."""
        cursors = []
        for query, params, namespace, items in self._get_batch_GET_ops_queries(get_ops):
            cur = self.conn.cursor()
            cur.execute(query, params)
            cursors.append((cur, namespace, items))
        for cur, namespace, items in cursors:
            rows = cur.fetchall()
            # Row layout is (prefix, key, value, created_at, updated_at).
            key_to_row = {row[1]: row for row in rows}
            for idx, key in items:
                row = key_to_row.get(key)
                if row:
                    results[idx] = _row_to_item(namespace, row)
                else:
                    results[idx] = None

    def _batch_put_ops(
        self,
        put_ops: Sequence[tuple[int, PutOp]],
    ) -> None:
        """Run the batched PUT/DELETE statements; put ops produce no results."""
        queries = self._get_batch_PUT_queries(put_ops)
        for query, params in queries:
            cur = self.conn.cursor()
            cur.execute(query, params)

    def _batch_search_ops(
        self,
        search_ops: Sequence[tuple[int, SearchOp]],
        results: list[Result],
    ) -> None:
        """Run the batched SEARCH queries and place SearchItem lists in `results`."""
        queries = self._get_batch_search_queries(search_ops)
        cursors: list[tuple[duckdb.DuckDBPyConnection, int]] = []
        for (query, params), (idx, _) in zip(queries, search_ops):
            cur = self.conn.cursor()
            cur.execute(query, params)
            cursors.append((cur, idx))
        for cur, idx in cursors:
            rows = cur.fetchall()
            items = [_row_to_search_item(_convert_ns(row[0]), row) for row in rows]
            results[idx] = items

    def _batch_list_namespaces_ops(
        self,
        list_ops: Sequence[tuple[int, ListNamespacesOp]],
        results: list[Result],
    ) -> None:
        """Run the batched namespace-listing queries; results are tuples."""
        queries = self._get_batch_list_namespaces_queries(list_ops)
        cursors: list[tuple[duckdb.DuckDBPyConnection, int]] = []
        for (query, params), (idx, _) in zip(queries, list_ops):
            cur = self.conn.cursor()
            cur.execute(query, params)
            cursors.append((cur, idx))
        for cur, idx in cursors:
            # FIX: rows are tuples, not dicts (matches the async store's cast).
            rows = cast(list[tuple], cur.fetchall())
            namespaces = [_convert_ns(row[0]) for row in rows]
            results[idx] = namespaces

    @classmethod
    @contextmanager
    def from_conn_string(
        cls,
        conn_string: str,
    ) -> Iterator["DuckDBStore"]:
        """Create a new BaseDuckDBStore instance from a connection string.

        Args:
            conn_string (str): The DuckDB connection info string.

        Returns:
            DuckDBStore: A new DuckDBStore instance.
        """
        with duckdb.connect(conn_string) as conn:
            yield cls(conn=conn)

    def setup(self) -> None:
        """Set up the store database.

        This method creates the necessary tables in the DuckDB database if
        they don't already exist and runs database migrations. It is called
        automatically when needed and should not be called directly by the
        user.
        """
        with self.conn.cursor() as cur:
            try:
                cur.execute("SELECT v FROM store_migrations ORDER BY v DESC LIMIT 1")
                row = cur.fetchone()
                # FIX: DuckDB cursors return plain tuples (or None), not
                # mappings — the previous `cast(dict, ...)` + `row["v"]`
                # raised TypeError whenever the migrations table already
                # existed. Index positionally, as the async store does.
                if row is None:
                    version = -1
                else:
                    version = row[0]
            except duckdb.CatalogException:
                # Migrations table missing: treat as a fresh database.
                version = -1
            # Create store_migrations table if it doesn't exist
            cur.execute(
                """
                CREATE TABLE IF NOT EXISTS store_migrations (
                    v INTEGER PRIMARY KEY
                )
                """
            )
            for v, migration in enumerate(
                self.MIGRATIONS[version + 1 :], start=version + 1
            ):
                cur.execute(migration)
                cur.execute("INSERT INTO store_migrations (v) VALUES (?)", (v,))
def _namespace_to_text(
namespace: tuple[str, ...], handle_wildcards: bool = False
) -> str:
"""Convert namespace tuple to text string."""
if handle_wildcards:
namespace = tuple("%" if val == "*" else val for val in namespace)
return ".".join(namespace)
def _row_to_item(
    namespace: tuple[str, ...],
    row: tuple,
) -> Item:
    """Build an Item from a (prefix, key, value, created_at, updated_at) row."""
    _prefix, key, raw_value, created_at, updated_at = row
    # The value column may come back already-deserialized or as a JSON string.
    value = raw_value if isinstance(raw_value, dict) else json.loads(raw_value)
    return Item(
        value=value,
        key=key,
        namespace=namespace,
        created_at=created_at,
        updated_at=updated_at,
    )
def _row_to_search_item(
    namespace: tuple[str, ...],
    row: tuple,
) -> SearchItem:
    """Build a SearchItem from a (prefix, key, value, created_at, updated_at) row."""
    # TODO: Add support for search
    _prefix, key, raw_value, created_at, updated_at = row
    # The value column may come back already-deserialized or as a JSON string.
    value = raw_value if isinstance(raw_value, dict) else json.loads(raw_value)
    return SearchItem(
        value=value,
        key=key,
        namespace=namespace,
        created_at=created_at,
        updated_at=updated_at,
    )
def _group_ops(ops: Iterable[Op]) -> tuple[dict[type, list[tuple[int, Op]]], int]:
    """Bucket ops by concrete type, preserving each op's original index.

    Returns the type->[(index, op), ...] mapping plus the total op count.
    """
    grouped: dict[type, list[tuple[int, Op]]] = defaultdict(list)
    total = 0
    for position, op in enumerate(ops):
        grouped[type(op)].append((position, op))
        total += 1
    return grouped, total
def _convert_ns(namespace: Union[str, list]) -> tuple[str, ...]:
if isinstance(namespace, list):
return tuple(namespace)
return tuple(namespace.split("."))
|
0 | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store | lc_public_repos/langgraph/libs/checkpoint-duckdb/langgraph/store/duckdb/__init__.py | from langgraph.store.duckdb.aio import AsyncDuckDBStore
from langgraph.store.duckdb.base import DuckDBStore
__all__ = ["AsyncDuckDBStore", "DuckDBStore"]
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/cli/Makefile | .PHONY: test lint format test-integration
######################
# TESTING AND COVERAGE
######################
# Run the unit test suite.
test:
	poetry run pytest tests/unit_tests
# Run the integration test suite.
test-integration:
	poetry run pytest tests/integration_tests
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
# Target-specific overrides: narrow the file set per lint/format target.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --relative --diff-filter=d main . | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langgraph_cli
lint_tests: PYTHON_FILES=tests
# Test linting uses its own mypy cache directory.
lint_tests: MYPY_CACHE=.mypy_cache_test
# Lint targets: ruff check, format diff, import ordering, then mypy.
# Each guarded step is skipped when PYTHON_FILES is empty.
# NOTE: the mypy line previously read `... || mkdir -p $(MYPY_CACHE) || poetry run mypy ...`,
# so whenever mkdir succeeded (the normal case) the second `||` short-circuited
# and mypy never ran. Grouping mkdir and mypy with `{ ... && ...; }` keeps the
# empty-file guard intact while actually running mypy.
lint lint_diff lint_package lint_tests:
	poetry run ruff check .
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || { mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE); }
# Auto-format with ruff and fix import ordering in place.
format format_diff:
	poetry run ruff format $(PYTHON_FILES)
	poetry run ruff check --select I --fix $(PYTHON_FILES)
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/cli/LICENSE | MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/cli/poetry.lock | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = true
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anyio"
version = "4.6.2.post1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = true
python-versions = ">=3.9"
files = [
{file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
{file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
]
[package.dependencies]
idna = ">=2.8"
sniffio = ">=1.1"
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
version = "2024.8.30"
description = "Python package for providing Mozilla's CA Bundle."
optional = true
python-versions = ">=3.6"
files = [
{file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
]
[[package]]
name = "cffi"
version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = true
python-versions = ">=3.8"
files = [
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
{file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
{file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
{file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
{file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
{file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
{file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
{file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
{file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
{file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
{file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
{file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
{file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
{file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
{file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
{file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
{file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
{file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
{file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
{file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
{file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
{file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
{file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]
[package.dependencies]
pycparser = "*"
[[package]]
name = "charset-normalizer"
version = "3.4.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = true
python-versions = ">=3.7.0"
files = [
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
{file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
[[package]]
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "codespell"
version = "2.3.0"
description = "Codespell"
optional = false
python-versions = ">=3.8"
files = [
{file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
{file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
]
[package.extras]
dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"]
hard-encoding-detection = ["chardet"]
toml = ["tomli"]
types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"]
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "cryptography"
version = "43.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = true
python-versions = ">=3.7"
files = [
{file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"},
{file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"},
{file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"},
{file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"},
{file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"},
{file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"},
{file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"},
]
[package.dependencies]
cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""}
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"]
nox = ["nox"]
pep8test = ["check-sdist", "click", "mypy", "ruff"]
sdist = ["build"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]
[[package]]
name = "docopt"
version = "0.6.2"
description = "Pythonic argument parser, that will make you smile"
optional = false
python-versions = "*"
files = [
{file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = true
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = true
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.27.2"
description = "The next generation HTTP client."
optional = true
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "httpx-sse"
version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = true
python-versions = ">=3.8"
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = true
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
{file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
]
[package.dependencies]
jsonpointer = ">=1.9"
[[package]]
name = "jsonpointer"
version = "3.0.0"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = true
python-versions = ">=3.7"
files = [
{file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
{file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
]
[[package]]
name = "jsonschema-rs"
version = "0.25.1"
description = "A high-performance JSON Schema validator for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "jsonschema_rs-0.25.1-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0dc49a465a02f97a8c747e852bc82322dd56ff7d66b3058c9656e15b8a5e381c"},
{file = "jsonschema_rs-0.25.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cf196d06dea3af58a23e6a03e363140ee307fc80fc77d8c71068df3de1bca588"},
{file = "jsonschema_rs-0.25.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f7bc2cac3891b75c301090effba458b6e7108d568aa2aeadb5695d7788991fa3"},
{file = "jsonschema_rs-0.25.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bc130df50b530c57524c7edc29808700d51304f94477833fb0062e0f3df1dfb"},
{file = "jsonschema_rs-0.25.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dace81a4652029493a19e24454bb5f821acdacf65da4f6472bbdc6751c45b79"},
{file = "jsonschema_rs-0.25.1-cp310-none-win32.whl", hash = "sha256:dc9771bbe5672f89a974b7302f5e4421559a8d0903eab2c38510dd61bd526086"},
{file = "jsonschema_rs-0.25.1-cp310-none-win_amd64.whl", hash = "sha256:7be237cc251c0fe9fe196adf4a7a8402339bbf58ef8710ab76b9e4b845fc94bd"},
{file = "jsonschema_rs-0.25.1-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:df22e50adba344efc018322305386357bf30c4b6a9b48349f565ff410d1eb78e"},
{file = "jsonschema_rs-0.25.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:db9fb2086a8a64e40e95e0f6bb0a91ea387419d0d239fc59d468586702286a89"},
{file = "jsonschema_rs-0.25.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a168559e9f1fff42d9a2a78c46143fda7ee58a90b85a7d11140dfe16e609e92b"},
{file = "jsonschema_rs-0.25.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e4f659487071009749077b4f5ee69300772fa321f92fa831e71fd4218839958"},
{file = "jsonschema_rs-0.25.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b1e9f7c05c8c9656bb98ecc93a0b91408a6e73063956dea7452cd64277f3b9f"},
{file = "jsonschema_rs-0.25.1-cp311-none-win32.whl", hash = "sha256:406a18dafac01b7799dd86b1b05bf14e8628d1f8bc0a1a9c187458f246c2f6cc"},
{file = "jsonschema_rs-0.25.1-cp311-none-win_amd64.whl", hash = "sha256:3546274ae6e11fcc1b058cdd763803c4db24e49bac125076bc4fab0ab61d4786"},
{file = "jsonschema_rs-0.25.1-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:cfaed89ef79cc972d11c50889fa02e8265aa5b6b6826c40749b956888712909d"},
{file = "jsonschema_rs-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:87432343bcbd91475af28c765caa0bbde534bbfb6fac1245735a5a1dd6b522a4"},
{file = "jsonschema_rs-0.25.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e69c9faed321b7cfbeb37e98e385a0392ad8d25896fd3def56afd080e56e294c"},
{file = "jsonschema_rs-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f004018ffb7fbffc8f1dbeb1bda44f322c924e007bb6966ab3466bdffbc06e62"},
{file = "jsonschema_rs-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b05e2f6bf86ced9d912d72329a19d39a86fcc49a85e31b923456d982733eaa7f"},
{file = "jsonschema_rs-0.25.1-cp312-none-win32.whl", hash = "sha256:b3d2ea6217e3618ba587d374b89f4660de59293b0a2fa43278131bc2cb339f57"},
{file = "jsonschema_rs-0.25.1-cp312-none-win_amd64.whl", hash = "sha256:6ceeaec97e77d14d6355aec9e55b2356d6376fdf94f6fed10c29cadc8e51fdf6"},
{file = "jsonschema_rs-0.25.1-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4696a580f54855cda68c05bbd7c25328eadca34b6a31b36495961235c511148d"},
{file = "jsonschema_rs-0.25.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:a2d9844d70e5a481a1c363b8b02a3862cc1dbc3b14551a8060436e53243d5447"},
{file = "jsonschema_rs-0.25.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd168893519bae7c09fb7226f4ac4b6e73bc1ce9397c3846870cf980135762e8"},
{file = "jsonschema_rs-0.25.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e6627f4ecf1cb12765734f39a536bb4a062e35c9db756d52581b4d045489025"},
{file = "jsonschema_rs-0.25.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4784b0c1a0596595e0ea482d6656ccd33d7f14fbf32456e76fead0a67c6a7ec5"},
{file = "jsonschema_rs-0.25.1-cp313-none-win32.whl", hash = "sha256:7e6984721dbaaffc6a32ba15b4988df56e4069c9f867370a8a3c48a69a311ac3"},
{file = "jsonschema_rs-0.25.1-cp313-none-win_amd64.whl", hash = "sha256:62234e768a1cc57690602711e37f2b936b0f081dcd34a9c2b3e20ba0dafcbceb"},
{file = "jsonschema_rs-0.25.1-cp38-cp38-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:346a46f25d974b4ae1d36ac3c02677a699b46404b019a688ddabef40840019fc"},
{file = "jsonschema_rs-0.25.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:32f4908bc5c958a0a94a25260f5be59fba0062a60a40b61f0faedf12eed82063"},
{file = "jsonschema_rs-0.25.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ce090f2038f1a01836adf9f821f7e8c82073710c7a55b002a2e7a1625c3f3954"},
{file = "jsonschema_rs-0.25.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4ef0f3b0760ab151c35f5c8e090cdb201fb81eae2cf5ea1771f4d827d0cfbb1"},
{file = "jsonschema_rs-0.25.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c732bd71da96d6600ae550401e7997fb470f73f95af8defefe4fcbc5e4e9043"},
{file = "jsonschema_rs-0.25.1-cp38-none-win32.whl", hash = "sha256:b9ea15dfa5c47b1658e77c3a04e7c38b66e6a617617e1b9f5dba53b67712da1e"},
{file = "jsonschema_rs-0.25.1-cp38-none-win_amd64.whl", hash = "sha256:0d8438bea4c09994973ac60c645f4735808329908f4b9421f07d7f2ddf8ac860"},
{file = "jsonschema_rs-0.25.1-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:6c9ea0b69fab4a27e0c8c0edbee24db2da44d2a97657469ee30a2b980b0b445c"},
{file = "jsonschema_rs-0.25.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9de3946a1cd66daae805578ad5cbf21ff4d590d7206048f0eb2999aab056a78a"},
{file = "jsonschema_rs-0.25.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3bd2ae54066e840cb5595d135b40c9458a0155e482998e00e922ea6b5ab57a24"},
{file = "jsonschema_rs-0.25.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb7e2a6777e78a699d09e5c6e84318e97a8e80727939fcde3d0faff7f9da85f3"},
{file = "jsonschema_rs-0.25.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a87babb64a93bad6bfbf2ddad0e33cab6c3f395e9572fe027387c7767e9741c"},
{file = "jsonschema_rs-0.25.1-cp39-none-win32.whl", hash = "sha256:570dffc76f0b4e9fa93cf4678a6b07d6fa446f1e5557525703160a94c0ab7968"},
{file = "jsonschema_rs-0.25.1-cp39-none-win_amd64.whl", hash = "sha256:9117e105f979f11f55ae940949a544e7e1a304b56ae65f535fa82cc8af4dc2e1"},
{file = "jsonschema_rs-0.25.1.tar.gz", hash = "sha256:f2fe71253bb0315061a5025b9336fd49660bca4094b04948f53e3acfd4197a64"},
]
[package.extras]
bench = ["fastjsonschema (>=2.20.0)", "jsonschema (>=4.23.0)", "pytest-benchmark (>=4.0.0)"]
tests = ["flask (>=2.2.5)", "hypothesis (>=6.79.4)", "pytest (>=7.4.4)"]
[[package]]
name = "langchain-core"
version = "0.3.21"
description = "Building applications with LLMs through composability"
optional = true
python-versions = "<4.0,>=3.9"
files = [
{file = "langchain_core-0.3.21-py3-none-any.whl", hash = "sha256:7e723dff80946a1198976c6876fea8326dc82566ef9bcb5f8d9188f738733665"},
{file = "langchain_core-0.3.21.tar.gz", hash = "sha256:561b52b258ffa50a9fb11d7a1940ebfd915654d1ec95b35e81dfd5ee84143411"},
]
[package.dependencies]
jsonpatch = ">=1.33,<2.0"
langsmith = ">=0.1.125,<0.2.0"
packaging = ">=23.2,<25"
pydantic = [
{version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0"
typing-extensions = ">=4.7"
[[package]]
name = "langgraph"
version = "0.2.53"
description = "Building stateful, multi-actor applications with LLMs"
optional = true
python-versions = "<4.0,>=3.9.0"
files = [
{file = "langgraph-0.2.53-py3-none-any.whl", hash = "sha256:b34b67d0a12ae0ba6f03af97ad0f744bc609bd0328e8b734618cc039985cfdea"},
{file = "langgraph-0.2.53.tar.gz", hash = "sha256:b83232a04f2b536cbeac542f9ad7e0265f41ac6b7c6706ba8e031e0e80cb13a6"},
]
[package.dependencies]
langchain-core = ">=0.2.43,<0.3.0 || >0.3.0,<0.3.1 || >0.3.1,<0.3.2 || >0.3.2,<0.3.3 || >0.3.3,<0.3.4 || >0.3.4,<0.3.5 || >0.3.5,<0.3.6 || >0.3.6,<0.3.7 || >0.3.7,<0.3.8 || >0.3.8,<0.3.9 || >0.3.9,<0.3.10 || >0.3.10,<0.3.11 || >0.3.11,<0.3.12 || >0.3.12,<0.3.13 || >0.3.13,<0.3.14 || >0.3.14,<0.4.0"
langgraph-checkpoint = ">=2.0.4,<3.0.0"
langgraph-sdk = ">=0.1.32,<0.2.0"
[[package]]
name = "langgraph-api"
version = "0.0.6"
description = ""
optional = true
python-versions = "<4.0,>=3.11.0"
files = [
{file = "langgraph_api-0.0.6-py3-none-any.whl", hash = "sha256:f64b13959d721143f6a023af5b9ffc9aa054064af98d21d5d8090cda7e7bffd2"},
{file = "langgraph_api-0.0.6.tar.gz", hash = "sha256:badac44fa1ec979509e56fc0da57eeb5f278ee5871f27803f73ea6d8822c21b9"},
]
[package.dependencies]
cryptography = ">=43.0.3,<44.0.0"
httpx = ">=0.27.0"
jsonschema-rs = ">=0.25.0,<0.26.0"
langchain-core = ">=0.2.38,<0.4.0"
langgraph = ">=0.2.52,<0.3.0"
langgraph-checkpoint = ">=2.0.7,<3.0"
langsmith = ">=0.1.63,<0.2.0"
orjson = ">=3.10.1"
pyjwt = ">=2.9.0,<3.0.0"
sse-starlette = ">=2.1.0,<3.0.0"
starlette = ">=0.38.6"
structlog = ">=24.4.0,<25.0.0"
tenacity = ">=8.3.0,<9.0.0"
uvicorn = ">=0.26.0"
watchfiles = ">=0.13"
[[package]]
name = "langgraph-checkpoint"
version = "2.0.7"
description = "Library with base interfaces for LangGraph checkpoint savers."
optional = true
python-versions = "<4.0.0,>=3.9.0"
files = [
{file = "langgraph_checkpoint-2.0.7-py3-none-any.whl", hash = "sha256:9709f672e1c5a47e13352067c2ffa114dd91d443967b7ce8a1d36d6fc170370e"},
{file = "langgraph_checkpoint-2.0.7.tar.gz", hash = "sha256:88d648a331d20aa8ce65280de34a34a9190380b004f6afcc5f9894fe3abeed08"},
]
[package.dependencies]
langchain-core = ">=0.2.38,<0.4"
msgpack = ">=1.1.0,<2.0.0"
[[package]]
name = "langgraph-sdk"
version = "0.1.40"
description = "SDK for interacting with LangGraph API"
optional = true
python-versions = "<4.0.0,>=3.9.0"
files = [
{file = "langgraph_sdk-0.1.40-py3-none-any.whl", hash = "sha256:8810cca5e4144cf3a5441fc76b4ee6e658ec95f932d3a0bf9ad63de117e925b9"},
{file = "langgraph_sdk-0.1.40.tar.gz", hash = "sha256:ab2719ac7274612a791a7a0ad9395d250357106cba8ba81bca9968fc91009af2"},
]
[package.dependencies]
httpx = ">=0.25.2"
httpx-sse = ">=0.4.0"
orjson = ">=3.10.1"
[[package]]
name = "langsmith"
version = "0.1.147"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = true
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.147-py3-none-any.whl", hash = "sha256:7166fc23b965ccf839d64945a78e9f1157757add228b086141eb03a60d699a15"},
{file = "langsmith-0.1.147.tar.gz", hash = "sha256:2e933220318a4e73034657103b3b1a3a6109cc5db3566a7e8e03be8d6d7def7a"},
]
[package.dependencies]
httpx = ">=0.23.0,<1"
orjson = {version = ">=3.9.14,<4.0.0", markers = "platform_python_implementation != \"PyPy\""}
pydantic = [
{version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
requests = ">=2,<3"
requests-toolbelt = ">=1.0.0,<2.0.0"
[package.extras]
langsmith-pyo3 = ["langsmith-pyo3 (>=0.1.0rc2,<0.2.0)"]
[[package]]
name = "msgpack"
version = "1.1.0"
description = "MessagePack serializer"
optional = true
python-versions = ">=3.8"
files = [
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"},
{file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"},
{file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"},
{file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"},
{file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"},
{file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"},
{file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"},
{file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"},
{file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"},
{file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"},
{file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"},
{file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"},
{file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"},
{file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"},
]
[[package]]
name = "mypy"
version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
faster-cache = ["orjson"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "orjson"
version = "3.10.12"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = true
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.12-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ece01a7ec71d9940cc654c482907a6b65df27251255097629d0dea781f255c6d"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c34ec9aebc04f11f4b978dd6caf697a2df2dd9b47d35aa4cc606cabcb9df69d7"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd6ec8658da3480939c79b9e9e27e0db31dffcd4ba69c334e98c9976ac29140e"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f17e6baf4cf01534c9de8a16c0c611f3d94925d1701bf5f4aff17003677d8ced"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6402ebb74a14ef96f94a868569f5dccf70d791de49feb73180eb3c6fda2ade56"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0000758ae7c7853e0a4a6063f534c61656ebff644391e1f81698c1b2d2fc8cd2"},
{file = "orjson-3.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:888442dcee99fd1e5bd37a4abb94930915ca6af4db50e23e746cdf4d1e63db13"},
{file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c1f7a3ce79246aa0e92f5458d86c54f257fb5dfdc14a192651ba7ec2c00f8a05"},
{file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:802a3935f45605c66fb4a586488a38af63cb37aaad1c1d94c982c40dcc452e85"},
{file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1da1ef0113a2be19bb6c557fb0ec2d79c92ebd2fed4cfb1b26bab93f021fb885"},
{file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a3273e99f367f137d5b3fecb5e9f45bcdbfac2a8b2f32fbc72129bbd48789c2"},
{file = "orjson-3.10.12-cp310-none-win32.whl", hash = "sha256:475661bf249fd7907d9b0a2a2421b4e684355a77ceef85b8352439a9163418c3"},
{file = "orjson-3.10.12-cp310-none-win_amd64.whl", hash = "sha256:87251dc1fb2b9e5ab91ce65d8f4caf21910d99ba8fb24b49fd0c118b2362d509"},
{file = "orjson-3.10.12-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a734c62efa42e7df94926d70fe7d37621c783dea9f707a98cdea796964d4cf74"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:750f8b27259d3409eda8350c2919a58b0cfcd2054ddc1bd317a643afc646ef23"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb52c22bfffe2857e7aa13b4622afd0dd9d16ea7cc65fd2bf318d3223b1b6252"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:440d9a337ac8c199ff8251e100c62e9488924c92852362cd27af0e67308c16ef"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9e15c06491c69997dfa067369baab3bf094ecb74be9912bdc4339972323f252"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:362d204ad4b0b8724cf370d0cd917bb2dc913c394030da748a3bb632445ce7c4"},
{file = "orjson-3.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b57cbb4031153db37b41622eac67329c7810e5f480fda4cfd30542186f006ae"},
{file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:165c89b53ef03ce0d7c59ca5c82fa65fe13ddf52eeb22e859e58c237d4e33b9b"},
{file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:5dee91b8dfd54557c1a1596eb90bcd47dbcd26b0baaed919e6861f076583e9da"},
{file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:77a4e1cfb72de6f905bdff061172adfb3caf7a4578ebf481d8f0530879476c07"},
{file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:038d42c7bc0606443459b8fe2d1f121db474c49067d8d14c6a075bbea8bf14dd"},
{file = "orjson-3.10.12-cp311-none-win32.whl", hash = "sha256:03b553c02ab39bed249bedd4abe37b2118324d1674e639b33fab3d1dafdf4d79"},
{file = "orjson-3.10.12-cp311-none-win_amd64.whl", hash = "sha256:8b8713b9e46a45b2af6b96f559bfb13b1e02006f4242c156cbadef27800a55a8"},
{file = "orjson-3.10.12-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:53206d72eb656ca5ac7d3a7141e83c5bbd3ac30d5eccfe019409177a57634b0d"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac8010afc2150d417ebda810e8df08dd3f544e0dd2acab5370cfa6bcc0662f8f"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed459b46012ae950dd2e17150e838ab08215421487371fa79d0eced8d1461d70"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dcb9673f108a93c1b52bfc51b0af422c2d08d4fc710ce9c839faad25020bb69"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22a51ae77680c5c4652ebc63a83d5255ac7d65582891d9424b566fb3b5375ee9"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910fdf2ac0637b9a77d1aad65f803bac414f0b06f720073438a7bd8906298192"},
{file = "orjson-3.10.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:24ce85f7100160936bc2116c09d1a8492639418633119a2224114f67f63a4559"},
{file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a76ba5fc8dd9c913640292df27bff80a685bed3a3c990d59aa6ce24c352f8fc"},
{file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ff70ef093895fd53f4055ca75f93f047e088d1430888ca1229393a7c0521100f"},
{file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f4244b7018b5753ecd10a6d324ec1f347da130c953a9c88432c7fbc8875d13be"},
{file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:16135ccca03445f37921fa4b585cff9a58aa8d81ebcb27622e69bfadd220b32c"},
{file = "orjson-3.10.12-cp312-none-win32.whl", hash = "sha256:2d879c81172d583e34153d524fcba5d4adafbab8349a7b9f16ae511c2cee8708"},
{file = "orjson-3.10.12-cp312-none-win_amd64.whl", hash = "sha256:fc23f691fa0f5c140576b8c365bc942d577d861a9ee1142e4db468e4e17094fb"},
{file = "orjson-3.10.12-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47962841b2a8aa9a258b377f5188db31ba49af47d4003a32f55d6f8b19006543"},
{file = "orjson-3.10.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6334730e2532e77b6054e87ca84f3072bee308a45a452ea0bffbbbc40a67e296"},
{file = "orjson-3.10.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:accfe93f42713c899fdac2747e8d0d5c659592df2792888c6c5f829472e4f85e"},
{file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a7974c490c014c48810d1dede6c754c3cc46598da758c25ca3b4001ac45b703f"},
{file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3f250ce7727b0b2682f834a3facff88e310f52f07a5dcfd852d99637d386e79e"},
{file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f31422ff9486ae484f10ffc51b5ab2a60359e92d0716fcce1b3593d7bb8a9af6"},
{file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5f29c5d282bb2d577c2a6bbde88d8fdcc4919c593f806aac50133f01b733846e"},
{file = "orjson-3.10.12-cp313-none-win32.whl", hash = "sha256:f45653775f38f63dc0e6cd4f14323984c3149c05d6007b58cb154dd080ddc0dc"},
{file = "orjson-3.10.12-cp313-none-win_amd64.whl", hash = "sha256:229994d0c376d5bdc91d92b3c9e6be2f1fbabd4cc1b59daae1443a46ee5e9825"},
{file = "orjson-3.10.12-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7d69af5b54617a5fac5c8e5ed0859eb798e2ce8913262eb522590239db6c6763"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ed119ea7d2953365724a7059231a44830eb6bbb0cfead33fcbc562f5fd8f935"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c5fc1238ef197e7cad5c91415f524aaa51e004be5a9b35a1b8a84ade196f73f"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43509843990439b05f848539d6f6198d4ac86ff01dd024b2f9a795c0daeeab60"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f72e27a62041cfb37a3de512247ece9f240a561e6c8662276beaf4d53d406db4"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a904f9572092bb6742ab7c16c623f0cdccbad9eeb2d14d4aa06284867bddd31"},
{file = "orjson-3.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:855c0833999ed5dc62f64552db26f9be767434917d8348d77bacaab84f787d7b"},
{file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:897830244e2320f6184699f598df7fb9db9f5087d6f3f03666ae89d607e4f8ed"},
{file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0b32652eaa4a7539f6f04abc6243619c56f8530c53bf9b023e1269df5f7816dd"},
{file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:36b4aa31e0f6a1aeeb6f8377769ca5d125db000f05c20e54163aef1d3fe8e833"},
{file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5535163054d6cbf2796f93e4f0dbc800f61914c0e3c4ed8499cf6ece22b4a3da"},
{file = "orjson-3.10.12-cp38-none-win32.whl", hash = "sha256:90a5551f6f5a5fa07010bf3d0b4ca2de21adafbbc0af6cb700b63cd767266cb9"},
{file = "orjson-3.10.12-cp38-none-win_amd64.whl", hash = "sha256:703a2fb35a06cdd45adf5d733cf613cbc0cb3ae57643472b16bc22d325b5fb6c"},
{file = "orjson-3.10.12-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f29de3ef71a42a5822765def1febfb36e0859d33abf5c2ad240acad5c6a1b78d"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de365a42acc65d74953f05e4772c974dad6c51cfc13c3240899f534d611be967"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a5a0158648a67ff0004cb0df5df7dcc55bfc9ca154d9c01597a23ad54c8d0c"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c47ce6b8d90fe9646a25b6fb52284a14ff215c9595914af63a5933a49972ce36"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0eee4c2c5bfb5c1b47a5db80d2ac7aaa7e938956ae88089f098aff2c0f35d5d8"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35d3081bbe8b86587eb5c98a73b97f13d8f9fea685cf91a579beddacc0d10566"},
{file = "orjson-3.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73c23a6e90383884068bc2dba83d5222c9fcc3b99a0ed2411d38150734236755"},
{file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5472be7dc3269b4b52acba1433dac239215366f89dc1d8d0e64029abac4e714e"},
{file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:7319cda750fca96ae5973efb31b17d97a5c5225ae0bc79bf5bf84df9e1ec2ab6"},
{file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:74d5ca5a255bf20b8def6a2b96b1e18ad37b4a122d59b154c458ee9494377f80"},
{file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ff31d22ecc5fb85ef62c7d4afe8301d10c558d00dd24274d4bbe464380d3cd69"},
{file = "orjson-3.10.12-cp39-none-win32.whl", hash = "sha256:c22c3ea6fba91d84fcb4cda30e64aff548fcf0c44c876e681f47d61d24b12e6b"},
{file = "orjson-3.10.12-cp39-none-win_amd64.whl", hash = "sha256:be604f60d45ace6b0b33dd990a66b4526f1a7a186ac411c942674625456ca548"},
{file = "orjson-3.10.12.tar.gz", hash = "sha256:0a78bbda3aea0f9f079057ee1ee8a1ecf790d4f1af88dd67493c6b8ee52506ff"},
]
[[package]]
name = "packaging"
version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pycparser"
version = "2.22"
description = "C parser in Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]
[[package]]
name = "pydantic"
version = "2.10.2"
description = "Data validation using Python type hints"
optional = true
python-versions = ">=3.8"
files = [
{file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"},
{file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.27.1"
typing-extensions = ">=4.12.2"
[package.extras]
email = ["email-validator (>=2.0.0)"]
timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
version = "2.27.1"
description = "Core functionality for Pydantic validation and serialization"
optional = true
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
{file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
{file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
{file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
{file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
{file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
{file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
{file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
{file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
{file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
{file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
{file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
{file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
{file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
{file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
{file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
{file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
]
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pyjwt"
version = "2.10.1"
description = "JSON Web Token implementation in Python"
optional = true
python-versions = ">=3.9"
files = [
{file = "PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"},
{file = "pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"},
]
[package.extras]
crypto = ["cryptography (>=3.4.0)"]
dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pytest"
version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.21.2"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"},
{file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]
[package.dependencies]
pytest = ">=6.2.5"
[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]
[[package]]
name = "pytest-watch"
version = "4.2.0"
description = "Local continuous test runner with pytest and watchdog."
optional = false
python-versions = "*"
files = [
{file = "pytest-watch-4.2.0.tar.gz", hash = "sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9"},
]
[package.dependencies]
colorama = ">=0.3.3"
docopt = ">=0.4.0"
pytest = ">=2.6.4"
watchdog = ">=0.6.0"
[[package]]
name = "python-dotenv"
version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = true
python-versions = ">=3.8"
files = [
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
]
[package.extras]
cli = ["click (>=5.0)"]
[[package]]
name = "pyyaml"
version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = true
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
]
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
[[package]]
name = "ruff"
version = "0.6.9"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
{file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
{file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
{file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
{file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
{file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
{file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = true
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "sse-starlette"
version = "2.1.3"
description = "SSE plugin for Starlette"
optional = true
python-versions = ">=3.8"
files = [
{file = "sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"},
{file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"},
]
[package.dependencies]
anyio = "*"
starlette = "*"
uvicorn = "*"
[package.extras]
examples = ["fastapi"]
[[package]]
name = "starlette"
version = "0.41.3"
description = "The little ASGI library that shines."
optional = true
python-versions = ">=3.8"
files = [
{file = "starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7"},
{file = "starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835"},
]
[package.dependencies]
anyio = ">=3.4.0,<5"
[package.extras]
full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"]
[[package]]
name = "structlog"
version = "24.4.0"
description = "Structured Logging for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "structlog-24.4.0-py3-none-any.whl", hash = "sha256:597f61e80a91cc0749a9fd2a098ed76715a1c8a01f73e336b746504d1aad7610"},
{file = "structlog-24.4.0.tar.gz", hash = "sha256:b27bfecede327a6d2da5fbc96bd859f114ecc398a6389d664f62085ee7ae6fc4"},
]
[package.extras]
dev = ["freezegun (>=0.2.8)", "mypy (>=1.4)", "pretend", "pytest (>=6.0)", "pytest-asyncio (>=0.17)", "rich", "simplejson", "twisted"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "sphinxext-opengraph", "twisted"]
tests = ["freezegun (>=0.2.8)", "pretend", "pytest (>=6.0)", "pytest-asyncio (>=0.17)", "simplejson"]
typing = ["mypy (>=1.4)", "rich", "twisted"]
[[package]]
name = "tenacity"
version = "8.5.0"
description = "Retry code until it succeeds"
optional = true
python-versions = ">=3.8"
files = [
{file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"},
{file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"},
]
[package.extras]
doc = ["reno", "sphinx"]
test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "tomli"
version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
{file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
{file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
{file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
{file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
{file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
{file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
{file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
{file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
{file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
{file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
{file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
{file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = true
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "uvicorn"
version = "0.32.1"
description = "The lightning-fast ASGI server."
optional = true
python-versions = ">=3.8"
files = [
{file = "uvicorn-0.32.1-py3-none-any.whl", hash = "sha256:82ad92fd58da0d12af7482ecdb5f2470a04c9c9a53ced65b9bbb4a205377602e"},
{file = "uvicorn-0.32.1.tar.gz", hash = "sha256:ee9519c246a72b1c084cea8d3b44ed6026e78a4a309cbedae9c37e4cb9fbb175"},
]
[package.dependencies]
click = ">=7.0"
h11 = ">=0.8"
[package.extras]
standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
[[package]]
name = "watchdog"
version = "6.0.0"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.9"
files = [
{file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"},
{file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"},
{file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"},
{file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"},
{file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"},
{file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"},
{file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"},
{file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"},
{file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"},
{file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"},
{file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"},
{file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"},
{file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"},
{file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"},
{file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"},
{file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "watchfiles"
version = "1.0.0"
description = "Simple, modern and high performance file watching and code reload in python."
optional = true
python-versions = ">=3.9"
files = [
{file = "watchfiles-1.0.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1d19df28f99d6a81730658fbeb3ade8565ff687f95acb59665f11502b441be5f"},
{file = "watchfiles-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28babb38cf2da8e170b706c4b84aa7e4528a6fa4f3ee55d7a0866456a1662041"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12ab123135b2f42517f04e720526d41448667ae8249e651385afb5cda31fedc0"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:13a4f9ee0cd25682679eea5c14fc629e2eaa79aab74d963bc4e21f43b8ea1877"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e1d9284cc84de7855fcf83472e51d32daf6f6cecd094160192628bc3fee1b78"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ee5edc939f53466b329bbf2e58333a5461e6c7b50c980fa6117439e2c18b42d"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dccfc70480087567720e4e36ec381bba1ed68d7e5f368fe40c93b3b1eba0105"},
{file = "watchfiles-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c83a6d33a9eda0af6a7470240d1af487807adc269704fe76a4972dd982d16236"},
{file = "watchfiles-1.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:905f69aad276639eff3893759a07d44ea99560e67a1cf46ff389cd62f88872a2"},
{file = "watchfiles-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09551237645d6bff3972592f2aa5424df9290e7a2e15d63c5f47c48cde585935"},
{file = "watchfiles-1.0.0-cp310-none-win32.whl", hash = "sha256:d2b39aa8edd9e5f56f99a2a2740a251dc58515398e9ed5a4b3e5ff2827060755"},
{file = "watchfiles-1.0.0-cp310-none-win_amd64.whl", hash = "sha256:2de52b499e1ab037f1a87cb8ebcb04a819bf087b1015a4cf6dcf8af3c2a2613e"},
{file = "watchfiles-1.0.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:fbd0ab7a9943bbddb87cbc2bf2f09317e74c77dc55b1f5657f81d04666c25269"},
{file = "watchfiles-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:774ef36b16b7198669ce655d4f75b4c3d370e7f1cbdfb997fb10ee98717e2058"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b4fb98100267e6a5ebaff6aaa5d20aea20240584647470be39fe4823012ac96"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0fc3bf0effa2d8075b70badfdd7fb839d7aa9cea650d17886982840d71fdeabf"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:648e2b6db53eca6ef31245805cd528a16f56fa4cc15aeec97795eaf713c11435"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa13d604fcb9417ae5f2e3de676e66aa97427d888e83662ad205bed35a313176"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:936f362e7ff28311b16f0b97ec51e8f2cc451763a3264640c6ed40fb252d1ee4"},
{file = "watchfiles-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:245fab124b9faf58430da547512d91734858df13f2ddd48ecfa5e493455ffccb"},
{file = "watchfiles-1.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4ff9c7e84e8b644a8f985c42bcc81457240316f900fc72769aaedec9d088055a"},
{file = "watchfiles-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9c9a8d8fd97defe935ef8dd53d562e68942ad65067cd1c54d6ed8a088b1d931d"},
{file = "watchfiles-1.0.0-cp311-none-win32.whl", hash = "sha256:a0abf173975eb9dd17bb14c191ee79999e650997cc644562f91df06060610e62"},
{file = "watchfiles-1.0.0-cp311-none-win_amd64.whl", hash = "sha256:2a825ba4b32c214e3855b536eb1a1f7b006511d8e64b8215aac06eb680642d84"},
{file = "watchfiles-1.0.0-cp311-none-win_arm64.whl", hash = "sha256:a5a7a06cfc65e34fd0a765a7623c5ba14707a0870703888e51d3d67107589817"},
{file = "watchfiles-1.0.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:28fb64b5843d94e2c2483f7b024a1280662a44409bedee8f2f51439767e2d107"},
{file = "watchfiles-1.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e3750434c83b61abb3163b49c64b04180b85b4dabb29a294513faec57f2ffdb7"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bedf84835069f51c7b026b3ca04e2e747ea8ed0a77c72006172c72d28c9f69fc"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:90004553be36427c3d06ec75b804233f8f816374165d5225b93abd94ba6e7234"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b46e15c34d4e401e976d6949ad3a74d244600d5c4b88c827a3fdf18691a46359"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:487d15927f1b0bd24e7df921913399bb1ab94424c386bea8b267754d698f8f0e"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ff236d7a3f4b0a42f699a22fc374ba526bc55048a70cbb299661158e1bb5e1f"},
{file = "watchfiles-1.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c01446626574561756067f00b37e6b09c8622b0fc1e9fdbc7cbcea328d4e514"},
{file = "watchfiles-1.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b551c465a59596f3d08170bd7e1c532c7260dd90ed8135778038e13c5d48aa81"},
{file = "watchfiles-1.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1ed613ee107269f66c2df631ec0fc8efddacface85314d392a4131abe299f00"},
{file = "watchfiles-1.0.0-cp312-none-win32.whl", hash = "sha256:5f75cd42e7e2254117cf37ff0e68c5b3f36c14543756b2da621408349bd9ca7c"},
{file = "watchfiles-1.0.0-cp312-none-win_amd64.whl", hash = "sha256:cf517701a4a872417f4e02a136e929537743461f9ec6cdb8184d9a04f4843545"},
{file = "watchfiles-1.0.0-cp312-none-win_arm64.whl", hash = "sha256:8a2127cd68950787ee36753e6d401c8ea368f73beaeb8e54df5516a06d1ecd82"},
{file = "watchfiles-1.0.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:95de85c254f7fe8cbdf104731f7f87f7f73ae229493bebca3722583160e6b152"},
{file = "watchfiles-1.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:533a7cbfe700e09780bb31c06189e39c65f06c7f447326fee707fd02f9a6e945"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2218e78e2c6c07b1634a550095ac2a429026b2d5cbcd49a594f893f2bb8c936"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9122b8fdadc5b341315d255ab51d04893f417df4e6c1743b0aac8bf34e96e025"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9272fdbc0e9870dac3b505bce1466d386b4d8d6d2bacf405e603108d50446940"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a3b33c3aefe9067ebd87846806cd5fc0b017ab70d628aaff077ab9abf4d06b3"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc338ce9f8846543d428260fa0f9a716626963148edc937d71055d01d81e1525"},
{file = "watchfiles-1.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ac778a460ea22d63c7e6fb0bc0f5b16780ff0b128f7f06e57aaec63bd339285"},
{file = "watchfiles-1.0.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:53ae447f06f8f29f5ab40140f19abdab822387a7c426a369eb42184b021e97eb"},
{file = "watchfiles-1.0.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1f73c2147a453315d672c1ad907abe6d40324e34a185b51e15624bc793f93cc6"},
{file = "watchfiles-1.0.0-cp313-none-win32.whl", hash = "sha256:eba98901a2eab909dbd79681190b9049acc650f6111fde1845484a4450761e98"},
{file = "watchfiles-1.0.0-cp313-none-win_amd64.whl", hash = "sha256:d562a6114ddafb09c33246c6ace7effa71ca4b6a2324a47f4b09b6445ea78941"},
{file = "watchfiles-1.0.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3d94fd83ed54266d789f287472269c0def9120a2022674990bd24ad989ebd7a0"},
{file = "watchfiles-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48051d1c504448b2fcda71c5e6e3610ae45de6a0b8f5a43b961f250be4bdf5a8"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29cf884ad4285d23453c702ed03d689f9c0e865e3c85d20846d800d4787de00f"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d3572d4c34c4e9c33d25b3da47d9570d5122f8433b9ac6519dca49c2740d23cd"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c2696611182c85eb0e755b62b456f48debff484b7306b56f05478b843ca8ece"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:550109001920a993a4383b57229c717fa73627d2a4e8fcb7ed33c7f1cddb0c85"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b555a93c15bd2c71081922be746291d776d47521a00703163e5fbe6d2a402399"},
{file = "watchfiles-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:947ccba18a38b85c366dafeac8df2f6176342d5992ca240a9d62588b214d731f"},
{file = "watchfiles-1.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ffd98a299b0a74d1b704ef0ed959efb753e656a4e0425c14e46ae4c3cbdd2919"},
{file = "watchfiles-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f8c4f3a1210ed099a99e6a710df4ff2f8069411059ffe30fa5f9467ebed1256b"},
{file = "watchfiles-1.0.0-cp39-none-win32.whl", hash = "sha256:1e176b6b4119b3f369b2b4e003d53a226295ee862c0962e3afd5a1c15680b4e3"},
{file = "watchfiles-1.0.0-cp39-none-win_amd64.whl", hash = "sha256:2d9c0518fabf4a3f373b0a94bb9e4ea7a1df18dec45e26a4d182aa8918dee855"},
{file = "watchfiles-1.0.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f159ac795785cde4899e0afa539f4c723fb5dd336ce5605bc909d34edd00b79b"},
{file = "watchfiles-1.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3d258d78341d5d54c0c804a5b7faa66cd30ba50b2756a7161db07ce15363b8d"},
{file = "watchfiles-1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bbd0311588c2de7f9ea5cf3922ccacfd0ec0c1922870a2be503cc7df1ca8be7"},
{file = "watchfiles-1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9a13ac46b545a7d0d50f7641eefe47d1597e7d1783a5d89e09d080e6dff44b0"},
{file = "watchfiles-1.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2bca898c1dc073912d3db7fa6926cc08be9575add9e84872de2c99c688bac4e"},
{file = "watchfiles-1.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:06d828fe2adc4ac8a64b875ca908b892a3603d596d43e18f7948f3fef5fc671c"},
{file = "watchfiles-1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:074c7618cd6c807dc4eaa0982b4a9d3f8051cd0b72793511848fd64630174b17"},
{file = "watchfiles-1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95dc785bc284552d044e561b8f4fe26d01ab5ca40d35852a6572d542adfeb4bc"},
{file = "watchfiles-1.0.0.tar.gz", hash = "sha256:37566c844c9ce3b5deb964fe1a23378e575e74b114618d211fbda8f59d7b5dab"},
]
[package.dependencies]
anyio = ">=3.0.0"
[extras]
inmem = ["langgraph-api", "python-dotenv"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9.0,<4.0"
content-hash = "8eaaa66d9e6e447699e3bcee336dfe779b58c956f8c2ad6678008a07be935838"
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/cli/README.md | # LangGraph CLI
The official command-line interface for LangGraph, providing tools to create, develop, and deploy LangGraph applications.
## Installation
Install via pip:
```bash
pip install langgraph-cli
```
For development mode with hot reloading:
```bash
pip install "langgraph-cli[inmem]"
```
## Commands
### `langgraph new` 🌱
Create a new LangGraph project from a template
```bash
langgraph new [PATH] --template TEMPLATE_NAME
```
### `langgraph dev` 🏃‍♀️
Run LangGraph API server in development mode with hot reloading
```bash
langgraph dev [OPTIONS]
--host TEXT Host to bind to (default: 127.0.0.1)
--port INTEGER Port to bind to (default: 2024)
--no-reload Disable auto-reload
--debug-port INTEGER Enable remote debugging
--no-browser Skip opening browser window
-c, --config FILE Config file path (default: langgraph.json)
```
### `langgraph up` 🚀
Launch LangGraph API server in Docker
```bash
langgraph up [OPTIONS]
-p, --port INTEGER Port to expose (default: 8123)
--wait Wait for services to start
--watch Restart on file changes
--verbose Show detailed logs
-c, --config FILE Config file path
-d, --docker-compose Additional services file
```
### `langgraph build`
Build a Docker image for your LangGraph application
```bash
langgraph build -t IMAGE_TAG [OPTIONS]
--platform TEXT Target platforms (e.g., linux/amd64,linux/arm64)
--pull / --no-pull Use latest/local base image
-c, --config FILE Config file path
```
### `langgraph dockerfile`
Generate a Dockerfile for custom deployments
```bash
langgraph dockerfile SAVE_PATH [OPTIONS]
-c, --config FILE Config file path
```
## Configuration
The CLI uses a `langgraph.json` configuration file with these key settings:
```json
{
"dependencies": ["langchain_openai", "./your_package"], // Required: Package dependencies
"graphs": {
"my_graph": "./your_package/file.py:graph" // Required: Graph definitions
},
"env": "./.env", // Optional: Environment variables
"python_version": "3.11", // Optional: Python version (3.11/3.12)
"pip_config_file": "./pip.conf", // Optional: pip configuration
"dockerfile_lines": [] // Optional: Additional Dockerfile commands
}
```
See the [full documentation](https://langchain-ai.github.io/langgraph/docs/cloud/reference/cli.html) for detailed configuration options.
## Development
To develop the CLI itself:
1. Clone the repository
2. Navigate to the CLI directory: `cd libs/cli`
3. Install development dependencies: `poetry install`
4. Make your changes to the CLI code
5. Test your changes:
```bash
# Run CLI commands directly
poetry run langgraph --help
# Or use the examples
cd examples
poetry install
poetry run langgraph dev # or other commands
```
## License
This project is licensed under the terms specified in the repository's LICENSE file.
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/cli/pyproject.toml | [tool.poetry]
name = "langgraph-cli"
version = "0.1.61"
description = "CLI for interacting with LangGraph API"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/langchain-ai/langgraph"
packages = [{ include = "langgraph_cli" }]
[tool.poetry.scripts]
langgraph = "langgraph_cli.cli:cli"
[tool.poetry.dependencies]
python = "^3.9.0,<4.0"
click = "^8.1.7"
langgraph-api = { version = ">=0.0.6,<0.1.0", optional = true, python = ">=3.11,<4.0" }
python-dotenv = { version = ">=0.8.0", optional = true }
[tool.poetry.group.dev.dependencies]
ruff = "^0.6.2"
codespell = "^2.2.0"
pytest = "^7.2.1"
pytest-asyncio = "^0.21.1"
pytest-mock = "^3.11.1"
pytest-watch = "^4.2.0"
mypy = "^1.10.0"
[tool.poetry.extras]
inmem = ["langgraph-api", "python-dotenv"]
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
addopts = "--strict-markers --strict-config --durations=5 -vv"
asyncio_mode = "auto"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.ruff]
lint.select = [
# pycodestyle
"E",
# Pyflakes
"F",
# pyupgrade
"UP",
# flake8-bugbear
"B",
# isort
"I",
]
lint.ignore = ["E501", "B008"]
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/.dockerignore | node_modules
dist |
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"target": "ES2021",
"lib": ["ES2021", "ES2022.Object", "DOM"],
"module": "NodeNext",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"noImplicitReturns": true,
"declaration": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true,
"strictFunctionTypes": false,
"outDir": "dist",
"types": ["jest", "node"],
"resolveJsonModule": true
},
"include": ["**/*.ts", "**/*.js"],
"exclude": ["node_modules", "dist"]
}
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/LICENSE | MIT License
Copyright (c) 2024 LangChain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/.editorconfig | root = true
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,json,yml}]
charset = utf-8
indent_style = space
indent_size = 2
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/yarn.lock | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@ampproject/remapping@^2.2.0":
version "2.3.0"
resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4"
integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==
dependencies:
"@jridgewell/gen-mapping" "^0.3.5"
"@jridgewell/trace-mapping" "^0.3.24"
"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.7.tgz#882fd9e09e8ee324e496bd040401c6f046ef4465"
integrity sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==
dependencies:
"@babel/highlight" "^7.24.7"
picocolors "^1.0.0"
"@babel/compat-data@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.2.tgz#e41928bd33475305c586f6acbbb7e3ade7a6f7f5"
integrity sha512-bYcppcpKBvX4znYaPEeFau03bp89ShqNMLs+rmdptMw+heSZh9+z84d2YG+K7cYLbWwzdjtDoW/uqZmPjulClQ==
"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.23.9":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.2.tgz#ed8eec275118d7613e77a352894cd12ded8eba77"
integrity sha512-BBt3opiCOxUr9euZ5/ro/Xv8/V7yJ5bjYMqG/C1YAo8MIKAnumZalCN+msbci3Pigy4lIQfPUpfMM27HMGaYEA==
dependencies:
"@ampproject/remapping" "^2.2.0"
"@babel/code-frame" "^7.24.7"
"@babel/generator" "^7.25.0"
"@babel/helper-compilation-targets" "^7.25.2"
"@babel/helper-module-transforms" "^7.25.2"
"@babel/helpers" "^7.25.0"
"@babel/parser" "^7.25.0"
"@babel/template" "^7.25.0"
"@babel/traverse" "^7.25.2"
"@babel/types" "^7.25.2"
convert-source-map "^2.0.0"
debug "^4.1.0"
gensync "^1.0.0-beta.2"
json5 "^2.2.3"
semver "^6.3.1"
"@babel/generator@^7.25.0", "@babel/generator@^7.7.2":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.25.0.tgz#f858ddfa984350bc3d3b7f125073c9af6988f18e"
integrity sha512-3LEEcj3PVW8pW2R1SR1M89g/qrYk/m/mB/tLqn7dn4sbBUQyTqnlod+II2U4dqiGtUmkcnAmkMDralTFZttRiw==
dependencies:
"@babel/types" "^7.25.0"
"@jridgewell/gen-mapping" "^0.3.5"
"@jridgewell/trace-mapping" "^0.3.25"
jsesc "^2.5.1"
"@babel/helper-compilation-targets@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz#e1d9410a90974a3a5a66e84ff55ef62e3c02d06c"
integrity sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==
dependencies:
"@babel/compat-data" "^7.25.2"
"@babel/helper-validator-option" "^7.24.8"
browserslist "^4.23.1"
lru-cache "^5.1.1"
semver "^6.3.1"
"@babel/helper-module-imports@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz#f2f980392de5b84c3328fc71d38bd81bbb83042b"
integrity sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==
dependencies:
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
"@babel/helper-module-transforms@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz#ee713c29768100f2776edf04d4eb23b8d27a66e6"
integrity sha512-BjyRAbix6j/wv83ftcVJmBt72QtHI56C7JXZoG2xATiLpmoC7dpd8WnkikExHDVPpi/3qCmO6WY1EaXOluiecQ==
dependencies:
"@babel/helper-module-imports" "^7.24.7"
"@babel/helper-simple-access" "^7.24.7"
"@babel/helper-validator-identifier" "^7.24.7"
"@babel/traverse" "^7.25.2"
"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.24.7", "@babel/helper-plugin-utils@^7.8.0":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz#94ee67e8ec0e5d44ea7baeb51e571bd26af07878"
integrity sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==
"@babel/helper-simple-access@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz#bcade8da3aec8ed16b9c4953b74e506b51b5edb3"
integrity sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==
dependencies:
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
"@babel/helper-string-parser@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz#5b3329c9a58803d5df425e5785865881a81ca48d"
integrity sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==
"@babel/helper-validator-identifier@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz#75b889cfaf9e35c2aaf42cf0d72c8e91719251db"
integrity sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==
"@babel/helper-validator-option@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz#3725cdeea8b480e86d34df15304806a06975e33d"
integrity sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==
"@babel/helpers@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.25.0.tgz#e69beb7841cb93a6505531ede34f34e6a073650a"
integrity sha512-MjgLZ42aCm0oGjJj8CtSM3DB8NOOf8h2l7DCTePJs29u+v7yO/RBX9nShlKMgFnRks/Q4tBAe7Hxnov9VkGwLw==
dependencies:
"@babel/template" "^7.25.0"
"@babel/types" "^7.25.0"
"@babel/highlight@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.7.tgz#a05ab1df134b286558aae0ed41e6c5f731bf409d"
integrity sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==
dependencies:
"@babel/helper-validator-identifier" "^7.24.7"
chalk "^2.4.2"
js-tokens "^4.0.0"
picocolors "^1.0.0"
"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.25.0", "@babel/parser@^7.25.3":
version "7.25.3"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.25.3.tgz#91fb126768d944966263f0657ab222a642b82065"
integrity sha512-iLTJKDbJ4hMvFPgQwwsVoxtHyWpKKPBrxkANrSYewDPaPpT5py5yeVkgPIJ7XYXhndxJpaA3PyALSXQ7u8e/Dw==
dependencies:
"@babel/types" "^7.25.2"
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d"
integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-bigint@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz#4c9a6f669f5d0cdf1b90a1671e9a146be5300cea"
integrity sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-class-properties@^7.12.13":
version "7.12.13"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10"
integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==
dependencies:
"@babel/helper-plugin-utils" "^7.12.13"
"@babel/plugin-syntax-class-static-block@^7.14.5":
version "7.14.5"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz#195df89b146b4b78b3bf897fd7a257c84659d406"
integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==
dependencies:
"@babel/helper-plugin-utils" "^7.14.5"
"@babel/plugin-syntax-import-attributes@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz#b4f9ea95a79e6912480c4b626739f86a076624ca"
integrity sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-import-meta@^7.10.4":
version "7.10.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz#ee601348c370fa334d2207be158777496521fd51"
integrity sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==
dependencies:
"@babel/helper-plugin-utils" "^7.10.4"
"@babel/plugin-syntax-json-strings@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a"
integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-jsx@^7.7.2":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz#39a1fa4a7e3d3d7f34e2acc6be585b718d30e02d"
integrity sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-logical-assignment-operators@^7.10.4":
version "7.10.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699"
integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==
dependencies:
"@babel/helper-plugin-utils" "^7.10.4"
"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9"
integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-numeric-separator@^7.10.4":
version "7.10.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97"
integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==
dependencies:
"@babel/helper-plugin-utils" "^7.10.4"
"@babel/plugin-syntax-object-rest-spread@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871"
integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-optional-catch-binding@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1"
integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-optional-chaining@^7.8.3":
version "7.8.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a"
integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
"@babel/plugin-syntax-private-property-in-object@^7.14.5":
version "7.14.5"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz#0dc6671ec0ea22b6e94a1114f857970cd39de1ad"
integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==
dependencies:
"@babel/helper-plugin-utils" "^7.14.5"
"@babel/plugin-syntax-top-level-await@^7.14.5":
version "7.14.5"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c"
integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==
dependencies:
"@babel/helper-plugin-utils" "^7.14.5"
"@babel/plugin-syntax-typescript@^7.7.2":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz#58d458271b4d3b6bb27ee6ac9525acbb259bad1c"
integrity sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/template@^7.25.0", "@babel/template@^7.3.3":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.0.tgz#e733dc3134b4fede528c15bc95e89cb98c52592a"
integrity sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==
dependencies:
"@babel/code-frame" "^7.24.7"
"@babel/parser" "^7.25.0"
"@babel/types" "^7.25.0"
"@babel/traverse@^7.24.7", "@babel/traverse@^7.25.2":
version "7.25.3"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.25.3.tgz#f1b901951c83eda2f3e29450ce92743783373490"
integrity sha512-HefgyP1x754oGCsKmV5reSmtV7IXj/kpaE1XYY+D9G5PvKKoFfSbiS4M77MdjuwlZKDIKFCffq9rPU+H/s3ZdQ==
dependencies:
"@babel/code-frame" "^7.24.7"
"@babel/generator" "^7.25.0"
"@babel/parser" "^7.25.3"
"@babel/template" "^7.25.0"
"@babel/types" "^7.25.2"
debug "^4.3.1"
globals "^11.1.0"
"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.24.7", "@babel/types@^7.25.0", "@babel/types@^7.25.2", "@babel/types@^7.3.3":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.25.2.tgz#55fb231f7dc958cd69ea141a4c2997e819646125"
integrity sha512-YTnYtra7W9e6/oAZEHj0bJehPRUlLH9/fbpT5LfB0NhQXyALCRkRs3zH9v07IYhkgpqX6Z78FnuccZr/l4Fs4Q==
dependencies:
"@babel/helper-string-parser" "^7.24.8"
"@babel/helper-validator-identifier" "^7.24.7"
to-fast-properties "^2.0.0"
"@bcoe/v8-coverage@^0.2.3":
version "0.2.3"
resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39"
integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==
"@eslint-community/eslint-utils@^4.2.0":
version "4.4.0"
resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==
dependencies:
eslint-visitor-keys "^3.3.0"
"@eslint-community/regexpp@^4.4.0", "@eslint-community/regexpp@^4.6.1":
version "4.11.0"
resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.0.tgz#b0ffd0312b4a3fd2d6f77237e7248a5ad3a680ae"
integrity sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==
"@eslint/eslintrc@^2.1.4":
version "2.1.4"
resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.4.tgz#388a269f0f25c1b6adc317b5a2c55714894c70ad"
integrity sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==
dependencies:
ajv "^6.12.4"
debug "^4.3.2"
espree "^9.6.0"
globals "^13.19.0"
ignore "^5.2.0"
import-fresh "^3.2.1"
js-yaml "^4.1.0"
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
"@eslint/eslintrc@^3.1.0":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-3.1.0.tgz#dbd3482bfd91efa663cbe7aa1f506839868207b6"
integrity sha512-4Bfj15dVJdoy3RfZmmo86RK1Fwzn6SstsvK9JS+BaVKqC6QQQQyXekNaC+g+LKNgkQ+2VhGAzm6hO40AhMR3zQ==
dependencies:
ajv "^6.12.4"
debug "^4.3.2"
espree "^10.0.1"
globals "^14.0.0"
ignore "^5.2.0"
import-fresh "^3.2.1"
js-yaml "^4.1.0"
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
"@eslint/js@8.57.0":
version "8.57.0"
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.0.tgz#a5417ae8427873f1dd08b70b3574b453e67b5f7f"
integrity sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==
"@eslint/js@^9.9.1":
version "9.9.1"
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.9.1.tgz#4a97e85e982099d6c7ee8410aacb55adaa576f06"
integrity sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==
"@humanwhocodes/config-array@^0.11.14":
version "0.11.14"
resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.14.tgz#d78e481a039f7566ecc9660b4ea7fe6b1fec442b"
integrity sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==
dependencies:
"@humanwhocodes/object-schema" "^2.0.2"
debug "^4.3.1"
minimatch "^3.0.5"
"@humanwhocodes/module-importer@^1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c"
integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==
"@humanwhocodes/object-schema@^2.0.2":
version "2.0.3"
resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz#4a2868d75d6d6963e423bcf90b7fd1be343409d3"
integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==
"@istanbuljs/load-nyc-config@^1.0.0":
version "1.1.0"
resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced"
integrity sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==
dependencies:
camelcase "^5.3.1"
find-up "^4.1.0"
get-package-type "^0.1.0"
js-yaml "^3.13.1"
resolve-from "^5.0.0"
"@istanbuljs/schema@^0.1.2", "@istanbuljs/schema@^0.1.3":
version "0.1.3"
resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98"
integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==
"@jest/console@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.7.0.tgz#cd4822dbdb84529265c5a2bdb529a3c9cc950ffc"
integrity sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==
dependencies:
"@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
jest-message-util "^29.7.0"
jest-util "^29.7.0"
slash "^3.0.0"
"@jest/core@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.7.0.tgz#b6cccc239f30ff36609658c5a5e2291757ce448f"
integrity sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==
dependencies:
"@jest/console" "^29.7.0"
"@jest/reporters" "^29.7.0"
"@jest/test-result" "^29.7.0"
"@jest/transform" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
ci-info "^3.2.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
jest-changed-files "^29.7.0"
jest-config "^29.7.0"
jest-haste-map "^29.7.0"
jest-message-util "^29.7.0"
jest-regex-util "^29.6.3"
jest-resolve "^29.7.0"
jest-resolve-dependencies "^29.7.0"
jest-runner "^29.7.0"
jest-runtime "^29.7.0"
jest-snapshot "^29.7.0"
jest-util "^29.7.0"
jest-validate "^29.7.0"
jest-watcher "^29.7.0"
micromatch "^4.0.4"
pretty-format "^29.7.0"
slash "^3.0.0"
strip-ansi "^6.0.0"
"@jest/environment@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7"
integrity sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==
dependencies:
"@jest/fake-timers" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
jest-mock "^29.7.0"
"@jest/expect-utils@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.7.0.tgz#023efe5d26a8a70f21677d0a1afc0f0a44e3a1c6"
integrity sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==
dependencies:
jest-get-type "^29.6.3"
"@jest/expect@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.7.0.tgz#76a3edb0cb753b70dfbfe23283510d3d45432bf2"
integrity sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==
dependencies:
expect "^29.7.0"
jest-snapshot "^29.7.0"
"@jest/fake-timers@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.7.0.tgz#fd91bf1fffb16d7d0d24a426ab1a47a49881a565"
integrity sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==
dependencies:
"@jest/types" "^29.6.3"
"@sinonjs/fake-timers" "^10.0.2"
"@types/node" "*"
jest-message-util "^29.7.0"
jest-mock "^29.7.0"
jest-util "^29.7.0"
"@jest/globals@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.7.0.tgz#8d9290f9ec47ff772607fa864ca1d5a2efae1d4d"
integrity sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==
dependencies:
"@jest/environment" "^29.7.0"
"@jest/expect" "^29.7.0"
"@jest/types" "^29.6.3"
jest-mock "^29.7.0"
"@jest/reporters@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.7.0.tgz#04b262ecb3b8faa83b0b3d321623972393e8f4c7"
integrity sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==
dependencies:
"@bcoe/v8-coverage" "^0.2.3"
"@jest/console" "^29.7.0"
"@jest/test-result" "^29.7.0"
"@jest/transform" "^29.7.0"
"@jest/types" "^29.6.3"
"@jridgewell/trace-mapping" "^0.3.18"
"@types/node" "*"
chalk "^4.0.0"
collect-v8-coverage "^1.0.0"
exit "^0.1.2"
glob "^7.1.3"
graceful-fs "^4.2.9"
istanbul-lib-coverage "^3.0.0"
istanbul-lib-instrument "^6.0.0"
istanbul-lib-report "^3.0.0"
istanbul-lib-source-maps "^4.0.0"
istanbul-reports "^3.1.3"
jest-message-util "^29.7.0"
jest-util "^29.7.0"
jest-worker "^29.7.0"
slash "^3.0.0"
string-length "^4.0.1"
strip-ansi "^6.0.0"
v8-to-istanbul "^9.0.1"
"@jest/schemas@^29.6.3":
version "29.6.3"
resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03"
integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==
dependencies:
"@sinclair/typebox" "^0.27.8"
"@jest/source-map@^29.6.3":
version "29.6.3"
resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.3.tgz#d90ba772095cf37a34a5eb9413f1b562a08554c4"
integrity sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==
dependencies:
"@jridgewell/trace-mapping" "^0.3.18"
callsites "^3.0.0"
graceful-fs "^4.2.9"
"@jest/test-result@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.7.0.tgz#8db9a80aa1a097bb2262572686734baed9b1657c"
integrity sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==
dependencies:
"@jest/console" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/istanbul-lib-coverage" "^2.0.0"
collect-v8-coverage "^1.0.0"
"@jest/test-sequencer@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz#6cef977ce1d39834a3aea887a1726628a6f072ce"
integrity sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==
dependencies:
"@jest/test-result" "^29.7.0"
graceful-fs "^4.2.9"
jest-haste-map "^29.7.0"
slash "^3.0.0"
"@jest/transform@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.7.0.tgz#df2dd9c346c7d7768b8a06639994640c642e284c"
integrity sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==
dependencies:
"@babel/core" "^7.11.6"
"@jest/types" "^29.6.3"
"@jridgewell/trace-mapping" "^0.3.18"
babel-plugin-istanbul "^6.1.1"
chalk "^4.0.0"
convert-source-map "^2.0.0"
fast-json-stable-stringify "^2.1.0"
graceful-fs "^4.2.9"
jest-haste-map "^29.7.0"
jest-regex-util "^29.6.3"
jest-util "^29.7.0"
micromatch "^4.0.4"
pirates "^4.0.4"
slash "^3.0.0"
write-file-atomic "^4.0.2"
"@jest/types@^29.6.3":
version "29.6.3"
resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59"
integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==
dependencies:
"@jest/schemas" "^29.6.3"
"@types/istanbul-lib-coverage" "^2.0.0"
"@types/istanbul-reports" "^3.0.0"
"@types/node" "*"
"@types/yargs" "^17.0.8"
chalk "^4.0.0"
"@jridgewell/gen-mapping@^0.3.5":
version "0.3.5"
resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36"
integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==
dependencies:
"@jridgewell/set-array" "^1.2.1"
"@jridgewell/sourcemap-codec" "^1.4.10"
"@jridgewell/trace-mapping" "^0.3.24"
"@jridgewell/resolve-uri@^3.1.0":
version "3.1.2"
resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6"
integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==
"@jridgewell/set-array@^1.2.1":
version "1.2.1"
resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280"
integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==
"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14":
version "1.5.0"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a"
integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==
"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25":
version "0.3.25"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0"
integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==
dependencies:
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
"@langchain/core@^0.3.2":
version "0.3.2"
resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.3.2.tgz#aff6d83149a40e0e735910f583aca0f1dd7d1bab"
integrity sha512-FeoDOStP8l1YdxgykpXnVoEnl4lxGNSOdYzUJN/EdFtkc6cIjDDS5+xewajme0+egaUsO4tGLezKaFpoWxAyQA==
dependencies:
ansi-styles "^5.0.0"
camelcase "6"
decamelize "1.2.0"
js-tiktoken "^1.0.12"
langsmith "^0.1.56"
mustache "^4.2.0"
p-queue "^6.6.2"
p-retry "4"
uuid "^10.0.0"
zod "^3.22.4"
zod-to-json-schema "^3.22.3"
"@langchain/langgraph-checkpoint@~0.0.6":
version "0.0.6"
resolved "https://registry.yarnpkg.com/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-0.0.6.tgz#69f0c5c9aeefd48dcf0fa1ffa0744d8139a9f27d"
integrity sha512-hQsznlUMFKyOCaN9VtqNSSemfKATujNy5ePM6NX7lruk/Mmi2t7R9SsBnf9G2Yts+IaIwv3vJJaAFYEHfqbc5g==
dependencies:
uuid "^10.0.0"
"@langchain/langgraph@^0.2.5":
version "0.2.5"
resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.2.5.tgz#c42743a59adef03f2e1fea0c198a01694ae34d51"
integrity sha512-H4OgZyGRWZHBaiXXIb9avyB8zI6+3OewKn+UOZ+wUzYLKyF3cnq0cNF4/Ps+gxCa5RtOnsHIqQyRkojfXIOqgA==
dependencies:
"@langchain/langgraph-checkpoint" "~0.0.6"
double-ended-queue "^2.1.0-0"
uuid "^10.0.0"
zod "^3.23.8"
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5"
integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==
dependencies:
"@nodelib/fs.stat" "2.0.5"
run-parallel "^1.1.9"
"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
version "2.0.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b"
integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
"@nodelib/fs.walk@^1.2.3", "@nodelib/fs.walk@^1.2.8":
version "1.2.8"
resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a"
integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==
dependencies:
"@nodelib/fs.scandir" "2.1.5"
fastq "^1.6.0"
"@sinclair/typebox@^0.27.8":
version "0.27.8"
resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e"
integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==
"@sinonjs/commons@^3.0.0":
version "3.0.1"
resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.1.tgz#1029357e44ca901a615585f6d27738dbc89084cd"
integrity sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==
dependencies:
type-detect "4.0.8"
"@sinonjs/fake-timers@^10.0.2":
version "10.3.0"
resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz#55fdff1ecab9f354019129daf4df0dd4d923ea66"
integrity sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==
dependencies:
"@sinonjs/commons" "^3.0.0"
"@tsconfig/recommended@^1.0.7":
version "1.0.7"
resolved "https://registry.yarnpkg.com/@tsconfig/recommended/-/recommended-1.0.7.tgz#fdd95fc2c8d643c8b4a8ca45fd68eea248512407"
integrity sha512-xiNMgCuoy4mCL4JTywk9XFs5xpRUcKxtWEcMR6FNMtsgewYTIgIR+nvlP4A4iRCAzRsHMnPhvTRrzp4AGcRTEA==
"@types/babel__core@^7.1.14":
version "7.20.5"
resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.5.tgz#3df15f27ba85319caa07ba08d0721889bb39c017"
integrity sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==
dependencies:
"@babel/parser" "^7.20.7"
"@babel/types" "^7.20.7"
"@types/babel__generator" "*"
"@types/babel__template" "*"
"@types/babel__traverse" "*"
"@types/babel__generator@*":
version "7.6.8"
resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.8.tgz#f836c61f48b1346e7d2b0d93c6dacc5b9535d3ab"
integrity sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==
dependencies:
"@babel/types" "^7.0.0"
"@types/babel__template@*":
version "7.4.4"
resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.4.tgz#5672513701c1b2199bc6dad636a9d7491586766f"
integrity sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==
dependencies:
"@babel/parser" "^7.1.0"
"@babel/types" "^7.0.0"
"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6":
version "7.20.6"
resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.6.tgz#8dc9f0ae0f202c08d8d4dab648912c8d6038e3f7"
integrity sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==
dependencies:
"@babel/types" "^7.20.7"
"@types/graceful-fs@^4.1.3":
version "4.1.9"
resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4"
integrity sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==
dependencies:
"@types/node" "*"
"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1":
version "2.0.6"
resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7"
integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==
"@types/istanbul-lib-report@*":
version "3.0.3"
resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz#53047614ae72e19fc0401d872de3ae2b4ce350bf"
integrity sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==
dependencies:
"@types/istanbul-lib-coverage" "*"
"@types/istanbul-reports@^3.0.0":
version "3.0.4"
resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz#0f03e3d2f670fbdac586e34b433783070cc16f54"
integrity sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==
dependencies:
"@types/istanbul-lib-report" "*"
"@types/jest@^29.5.0":
version "29.5.12"
resolved "https://registry.yarnpkg.com/@types/jest/-/jest-29.5.12.tgz#7f7dc6eb4cf246d2474ed78744b05d06ce025544"
integrity sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==
dependencies:
expect "^29.0.0"
pretty-format "^29.0.0"
"@types/json-schema@^7.0.9":
version "7.0.15"
resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841"
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
"@types/json5@^0.0.29":
version "0.0.29"
resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee"
integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==
"@types/node@*":
version "22.4.1"
resolved "https://registry.yarnpkg.com/@types/node/-/node-22.4.1.tgz#9b595d292c65b94c20923159e2ce947731b6fdce"
integrity sha512-1tbpb9325+gPnKK0dMm+/LMriX0vKxf6RnB0SZUqfyVkQ4fMgUSySqhxE/y8Jvs4NyF1yHzTfG9KlnkIODxPKg==
dependencies:
undici-types "~6.19.2"
"@types/retry@0.12.0":
version "0.12.0"
resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d"
integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==
"@types/semver@^7.3.12":
version "7.5.8"
resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e"
integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==
"@types/stack-utils@^2.0.0":
version "2.0.3"
resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8"
integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==
"@types/uuid@^10.0.0":
version "10.0.0"
resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-10.0.0.tgz#e9c07fe50da0f53dc24970cca94d619ff03f6f6d"
integrity sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==
"@types/yargs-parser@*":
version "21.0.3"
resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15"
integrity sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==
"@types/yargs@^17.0.8":
version "17.0.33"
resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.33.tgz#8c32303da83eec050a84b3c7ae7b9f922d13e32d"
integrity sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==
dependencies:
"@types/yargs-parser" "*"
"@typescript-eslint/eslint-plugin@^5.59.8":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz#aeef0328d172b9e37d9bab6dbc13b87ed88977db"
integrity sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==
dependencies:
"@eslint-community/regexpp" "^4.4.0"
"@typescript-eslint/scope-manager" "5.62.0"
"@typescript-eslint/type-utils" "5.62.0"
"@typescript-eslint/utils" "5.62.0"
debug "^4.3.4"
graphemer "^1.4.0"
ignore "^5.2.0"
natural-compare-lite "^1.4.0"
semver "^7.3.7"
tsutils "^3.21.0"
"@typescript-eslint/parser@^5.59.8":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.62.0.tgz#1b63d082d849a2fcae8a569248fbe2ee1b8a56c7"
integrity sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==
dependencies:
"@typescript-eslint/scope-manager" "5.62.0"
"@typescript-eslint/types" "5.62.0"
"@typescript-eslint/typescript-estree" "5.62.0"
debug "^4.3.4"
"@typescript-eslint/scope-manager@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz#d9457ccc6a0b8d6b37d0eb252a23022478c5460c"
integrity sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==
dependencies:
"@typescript-eslint/types" "5.62.0"
"@typescript-eslint/visitor-keys" "5.62.0"
"@typescript-eslint/type-utils@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz#286f0389c41681376cdad96b309cedd17d70346a"
integrity sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==
dependencies:
"@typescript-eslint/typescript-estree" "5.62.0"
"@typescript-eslint/utils" "5.62.0"
debug "^4.3.4"
tsutils "^3.21.0"
"@typescript-eslint/types@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.62.0.tgz#258607e60effa309f067608931c3df6fed41fd2f"
integrity sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==
"@typescript-eslint/typescript-estree@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz#7d17794b77fabcac615d6a48fb143330d962eb9b"
integrity sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==
dependencies:
"@typescript-eslint/types" "5.62.0"
"@typescript-eslint/visitor-keys" "5.62.0"
debug "^4.3.4"
globby "^11.1.0"
is-glob "^4.0.3"
semver "^7.3.7"
tsutils "^3.21.0"
"@typescript-eslint/utils@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.62.0.tgz#141e809c71636e4a75daa39faed2fb5f4b10df86"
integrity sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
"@types/json-schema" "^7.0.9"
"@types/semver" "^7.3.12"
"@typescript-eslint/scope-manager" "5.62.0"
"@typescript-eslint/types" "5.62.0"
"@typescript-eslint/typescript-estree" "5.62.0"
eslint-scope "^5.1.1"
semver "^7.3.7"
"@typescript-eslint/visitor-keys@5.62.0":
version "5.62.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz#2174011917ce582875954ffe2f6912d5931e353e"
integrity sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==
dependencies:
"@typescript-eslint/types" "5.62.0"
eslint-visitor-keys "^3.3.0"
"@ungap/structured-clone@^1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406"
integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==
acorn-jsx@^5.3.2:
version "5.3.2"
resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937"
integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
acorn@^8.12.0, acorn@^8.9.0:
version "8.12.1"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.12.1.tgz#71616bdccbe25e27a54439e0046e89ca76df2248"
integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==
ajv@^6.12.4:
version "6.12.6"
resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
dependencies:
fast-deep-equal "^3.1.1"
fast-json-stable-stringify "^2.0.0"
json-schema-traverse "^0.4.1"
uri-js "^4.2.2"
ansi-escapes@^4.2.1:
version "4.3.2"
resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e"
integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==
dependencies:
type-fest "^0.21.3"
ansi-regex@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
ansi-styles@^3.2.1:
version "3.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
dependencies:
color-convert "^1.9.0"
ansi-styles@^4.0.0, ansi-styles@^4.1.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
dependencies:
color-convert "^2.0.1"
ansi-styles@^5.0.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b"
integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==
anymatch@^3.0.3:
version "3.1.3"
resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e"
integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==
dependencies:
normalize-path "^3.0.0"
picomatch "^2.0.4"
argparse@^1.0.7:
version "1.0.10"
resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911"
integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
dependencies:
sprintf-js "~1.0.2"
argparse@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
array-buffer-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f"
integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==
dependencies:
call-bind "^1.0.5"
is-array-buffer "^3.0.4"
array-includes@^3.1.7:
version "3.1.8"
resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.8.tgz#5e370cbe172fdd5dd6530c1d4aadda25281ba97d"
integrity sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.2"
es-object-atoms "^1.0.0"
get-intrinsic "^1.2.4"
is-string "^1.0.7"
array-union@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d"
integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
array.prototype.findlastindex@^1.2.3:
version "1.2.5"
resolved "https://registry.yarnpkg.com/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz#8c35a755c72908719453f87145ca011e39334d0d"
integrity sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.2"
es-errors "^1.3.0"
es-object-atoms "^1.0.0"
es-shim-unscopables "^1.0.2"
array.prototype.flat@^1.3.2:
version "1.3.2"
resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz#1476217df8cff17d72ee8f3ba06738db5b387d18"
integrity sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==
dependencies:
call-bind "^1.0.2"
define-properties "^1.2.0"
es-abstract "^1.22.1"
es-shim-unscopables "^1.0.0"
array.prototype.flatmap@^1.3.2:
version "1.3.2"
resolved "https://registry.yarnpkg.com/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz#c9a7c6831db8e719d6ce639190146c24bbd3e527"
integrity sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==
dependencies:
call-bind "^1.0.2"
define-properties "^1.2.0"
es-abstract "^1.22.1"
es-shim-unscopables "^1.0.0"
arraybuffer.prototype.slice@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6"
integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==
dependencies:
array-buffer-byte-length "^1.0.1"
call-bind "^1.0.5"
define-properties "^1.2.1"
es-abstract "^1.22.3"
es-errors "^1.2.1"
get-intrinsic "^1.2.3"
is-array-buffer "^3.0.4"
is-shared-array-buffer "^1.0.2"
async@^3.2.3:
version "3.2.6"
resolved "https://registry.yarnpkg.com/async/-/async-3.2.6.tgz#1b0728e14929d51b85b449b7f06e27c1145e38ce"
integrity sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==
available-typed-arrays@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846"
integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==
dependencies:
possible-typed-array-names "^1.0.0"
babel-jest@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.7.0.tgz#f4369919225b684c56085998ac63dbd05be020d5"
integrity sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==
dependencies:
"@jest/transform" "^29.7.0"
"@types/babel__core" "^7.1.14"
babel-plugin-istanbul "^6.1.1"
babel-preset-jest "^29.6.3"
chalk "^4.0.0"
graceful-fs "^4.2.9"
slash "^3.0.0"
babel-plugin-istanbul@^6.1.1:
version "6.1.1"
resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz#fa88ec59232fd9b4e36dbbc540a8ec9a9b47da73"
integrity sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==
dependencies:
"@babel/helper-plugin-utils" "^7.0.0"
"@istanbuljs/load-nyc-config" "^1.0.0"
"@istanbuljs/schema" "^0.1.2"
istanbul-lib-instrument "^5.0.4"
test-exclude "^6.0.0"
babel-plugin-jest-hoist@^29.6.3:
version "29.6.3"
resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz#aadbe943464182a8922c3c927c3067ff40d24626"
integrity sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==
dependencies:
"@babel/template" "^7.3.3"
"@babel/types" "^7.3.3"
"@types/babel__core" "^7.1.14"
"@types/babel__traverse" "^7.0.6"
babel-preset-current-node-syntax@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz#9a929eafece419612ef4ae4f60b1862ebad8ef30"
integrity sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==
dependencies:
"@babel/plugin-syntax-async-generators" "^7.8.4"
"@babel/plugin-syntax-bigint" "^7.8.3"
"@babel/plugin-syntax-class-properties" "^7.12.13"
"@babel/plugin-syntax-class-static-block" "^7.14.5"
"@babel/plugin-syntax-import-attributes" "^7.24.7"
"@babel/plugin-syntax-import-meta" "^7.10.4"
"@babel/plugin-syntax-json-strings" "^7.8.3"
"@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
"@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
"@babel/plugin-syntax-numeric-separator" "^7.10.4"
"@babel/plugin-syntax-object-rest-spread" "^7.8.3"
"@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
"@babel/plugin-syntax-optional-chaining" "^7.8.3"
"@babel/plugin-syntax-private-property-in-object" "^7.14.5"
"@babel/plugin-syntax-top-level-await" "^7.14.5"
babel-preset-jest@^29.6.3:
version "29.6.3"
resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz#fa05fa510e7d493896d7b0dd2033601c840f171c"
integrity sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==
dependencies:
babel-plugin-jest-hoist "^29.6.3"
babel-preset-current-node-syntax "^1.0.0"
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
base64-js@^1.5.1:
version "1.5.1"
resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
brace-expansion@^1.1.7:
version "1.1.11"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
dependencies:
balanced-match "^1.0.0"
concat-map "0.0.1"
brace-expansion@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae"
integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==
dependencies:
balanced-match "^1.0.0"
braces@^3.0.3:
version "3.0.3"
resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789"
integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==
dependencies:
fill-range "^7.1.1"
browserslist@^4.23.1:
version "4.23.3"
resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.3.tgz#debb029d3c93ebc97ffbc8d9cbb03403e227c800"
integrity sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==
dependencies:
caniuse-lite "^1.0.30001646"
electron-to-chromium "^1.5.4"
node-releases "^2.0.18"
update-browserslist-db "^1.1.0"
bs-logger@0.x:
version "0.2.6"
resolved "https://registry.yarnpkg.com/bs-logger/-/bs-logger-0.2.6.tgz#eb7d365307a72cf974cc6cda76b68354ad336bd8"
integrity sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==
dependencies:
fast-json-stable-stringify "2.x"
bser@2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05"
integrity sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==
dependencies:
node-int64 "^0.4.0"
buffer-from@^1.0.0:
version "1.1.2"
resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9"
integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==
dependencies:
es-define-property "^1.0.0"
es-errors "^1.3.0"
function-bind "^1.1.2"
get-intrinsic "^1.2.4"
set-function-length "^1.2.1"
callsites@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
camelcase@6, camelcase@^6.2.0:
version "6.3.0"
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
camelcase@^5.3.1:
version "5.3.1"
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
caniuse-lite@^1.0.30001646:
version "1.0.30001651"
resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001651.tgz#52de59529e8b02b1aedcaaf5c05d9e23c0c28138"
integrity sha512-9Cf+Xv1jJNe1xPZLGuUXLNkE1BoDkqRqYyFJ9TDYSqhduqA4hu4oR9HluGoWYQC/aj8WHjsGVV+bwkh0+tegRg==
chalk@^2.4.2:
version "2.4.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
dependencies:
ansi-styles "^3.2.1"
escape-string-regexp "^1.0.5"
supports-color "^5.3.0"
chalk@^4.0.0, chalk@^4.0.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01"
integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==
dependencies:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
char-regex@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf"
integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==
ci-info@^3.2.0:
version "3.9.0"
resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4"
integrity sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==
cjs-module-lexer@^1.0.0:
version "1.3.1"
resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.3.1.tgz#c485341ae8fd999ca4ee5af2d7a1c9ae01e0099c"
integrity sha512-a3KdPAANPbNE4ZUv9h6LckSl9zLsYOP4MBmhIPkRaeyybt+r4UghLvq+xw/YwUcC1gqylCkL4rdVs3Lwupjm4Q==
cliui@^8.0.1:
version "8.0.1"
resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa"
integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==
dependencies:
string-width "^4.2.0"
strip-ansi "^6.0.1"
wrap-ansi "^7.0.0"
co@^4.6.0:
version "4.6.0"
resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==
collect-v8-coverage@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9"
integrity sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==
color-convert@^1.9.0:
version "1.9.3"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
dependencies:
color-name "1.1.3"
color-convert@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
dependencies:
color-name "~1.1.4"
color-name@1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25"
integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
color-name@~1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
commander@^10.0.1:
version "10.0.1"
resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06"
integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==
concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
convert-source-map@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a"
integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==
create-jest@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/create-jest/-/create-jest-29.7.0.tgz#a355c5b3cb1e1af02ba177fe7afd7feee49a5320"
integrity sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==
dependencies:
"@jest/types" "^29.6.3"
chalk "^4.0.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
jest-config "^29.7.0"
jest-util "^29.7.0"
prompts "^2.0.1"
cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.6"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
dependencies:
path-key "^3.1.0"
shebang-command "^2.0.0"
which "^2.0.1"
data-view-buffer@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2"
integrity sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-data-view "^1.0.1"
data-view-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz#90721ca95ff280677eb793749fce1011347669e2"
integrity sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
is-data-view "^1.0.1"
data-view-byte-offset@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz#5e0bbfb4828ed2d1b9b400cd8a7d119bca0ff18a"
integrity sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-data-view "^1.0.1"
debug@^3.2.7:
version "3.2.7"
resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a"
integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==
dependencies:
ms "^2.1.1"
debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4:
version "4.3.6"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.6.tgz#2ab2c38fbaffebf8aa95fdfe6d88438c7a13c52b"
integrity sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==
dependencies:
ms "2.1.2"
decamelize@1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==
dedent@^1.0.0:
version "1.5.3"
resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.3.tgz#99aee19eb9bae55a67327717b6e848d0bf777e5a"
integrity sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==
deep-is@^0.1.3:
version "0.1.4"
resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831"
integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
deepmerge@^4.2.2:
version "4.3.1"
resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a"
integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==
define-data-property@^1.0.1, define-data-property@^1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e"
integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==
dependencies:
es-define-property "^1.0.0"
es-errors "^1.3.0"
gopd "^1.0.1"
define-properties@^1.2.0, define-properties@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c"
integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==
dependencies:
define-data-property "^1.0.1"
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
detect-newline@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651"
integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==
diff-sequences@^29.6.3:
version "29.6.3"
resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921"
integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==
dir-glob@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
dependencies:
path-type "^4.0.0"
doctrine@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d"
integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==
dependencies:
esutils "^2.0.2"
doctrine@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961"
integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==
dependencies:
esutils "^2.0.2"
dotenv@^16.4.5:
version "16.4.5"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f"
integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==
double-ended-queue@^2.1.0-0:
version "2.1.0-0"
resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c"
integrity sha512-+BNfZ+deCo8hMNpDqDnvT+c0XpJ5cUa6mqYq89bho2Ifze4URTqRkcwR399hWoTrTkbZ/XJYDgP6rc7pRgffEQ==
ejs@^3.1.10:
version "3.1.10"
resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.10.tgz#69ab8358b14e896f80cc39e62087b88500c3ac3b"
integrity sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==
dependencies:
jake "^10.8.5"
electron-to-chromium@^1.5.4:
version "1.5.12"
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.12.tgz#ee31756eaa2e06f2aa606f170b7ad06dd402b4e4"
integrity sha512-tIhPkdlEoCL1Y+PToq3zRNehUaKp3wBX/sr7aclAWdIWjvqAe/Im/H0SiCM4c1Q8BLPHCdoJTol+ZblflydehA==
emittery@^0.13.1:
version "0.13.1"
resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad"
integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==
emoji-regex@^8.0.0:
version "8.0.0"
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
error-ex@^1.3.1:
version "1.3.2"
resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf"
integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==
dependencies:
is-arrayish "^0.2.1"
es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2:
version "1.23.3"
resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.3.tgz#8f0c5a35cd215312573c5a27c87dfd6c881a0aa0"
integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==
dependencies:
array-buffer-byte-length "^1.0.1"
arraybuffer.prototype.slice "^1.0.3"
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
data-view-buffer "^1.0.1"
data-view-byte-length "^1.0.1"
data-view-byte-offset "^1.0.0"
es-define-property "^1.0.0"
es-errors "^1.3.0"
es-object-atoms "^1.0.0"
es-set-tostringtag "^2.0.3"
es-to-primitive "^1.2.1"
function.prototype.name "^1.1.6"
get-intrinsic "^1.2.4"
get-symbol-description "^1.0.2"
globalthis "^1.0.3"
gopd "^1.0.1"
has-property-descriptors "^1.0.2"
has-proto "^1.0.3"
has-symbols "^1.0.3"
hasown "^2.0.2"
internal-slot "^1.0.7"
is-array-buffer "^3.0.4"
is-callable "^1.2.7"
is-data-view "^1.0.1"
is-negative-zero "^2.0.3"
is-regex "^1.1.4"
is-shared-array-buffer "^1.0.3"
is-string "^1.0.7"
is-typed-array "^1.1.13"
is-weakref "^1.0.2"
object-inspect "^1.13.1"
object-keys "^1.1.1"
object.assign "^4.1.5"
regexp.prototype.flags "^1.5.2"
safe-array-concat "^1.1.2"
safe-regex-test "^1.0.3"
string.prototype.trim "^1.2.9"
string.prototype.trimend "^1.0.8"
string.prototype.trimstart "^1.0.8"
typed-array-buffer "^1.0.2"
typed-array-byte-length "^1.0.1"
typed-array-byte-offset "^1.0.2"
typed-array-length "^1.0.6"
unbox-primitive "^1.0.2"
which-typed-array "^1.1.15"
es-define-property@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845"
integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==
dependencies:
get-intrinsic "^1.2.4"
es-errors@^1.2.1, es-errors@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
es-object-atoms@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.0.0.tgz#ddb55cd47ac2e240701260bc2a8e31ecb643d941"
integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==
dependencies:
es-errors "^1.3.0"
es-set-tostringtag@^2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777"
integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==
dependencies:
get-intrinsic "^1.2.4"
has-tostringtag "^1.0.2"
hasown "^2.0.1"
es-shim-unscopables@^1.0.0, es-shim-unscopables@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763"
integrity sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==
dependencies:
hasown "^2.0.0"
es-to-primitive@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a"
integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==
dependencies:
is-callable "^1.1.4"
is-date-object "^1.0.1"
is-symbol "^1.0.2"
escalade@^3.1.1, escalade@^3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27"
integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==
escape-string-regexp@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
escape-string-regexp@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344"
integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==
escape-string-regexp@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
eslint-config-prettier@^8.8.0:
version "8.10.0"
resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz#3a06a662130807e2502fc3ff8b4143d8a0658e11"
integrity sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==
eslint-import-resolver-node@^0.3.9:
version "0.3.9"
resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz#d4eaac52b8a2e7c3cd1903eb00f7e053356118ac"
integrity sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==
dependencies:
debug "^3.2.7"
is-core-module "^2.13.0"
resolve "^1.22.4"
eslint-module-utils@^2.8.0:
version "2.8.2"
resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.8.2.tgz#2ecad69d71e1fa81f17f7f24d5d3e46b168de663"
integrity sha512-3XnC5fDyc8M4J2E8pt8pmSVRX2M+5yWMCfI/kDZwauQeFgzQOuhcRBFKjTeJagqgk4sFKxe1mvNVnaWwImx/Tg==
dependencies:
debug "^3.2.7"
eslint-plugin-import@^2.27.5:
version "2.29.1"
resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.29.1.tgz#d45b37b5ef5901d639c15270d74d46d161150643"
integrity sha512-BbPC0cuExzhiMo4Ff1BTVwHpjjv28C5R+btTOGaCRC7UEz801up0JadwkeSk5Ued6TG34uaczuVuH6qyy5YUxw==
dependencies:
array-includes "^3.1.7"
array.prototype.findlastindex "^1.2.3"
array.prototype.flat "^1.3.2"
array.prototype.flatmap "^1.3.2"
debug "^3.2.7"
doctrine "^2.1.0"
eslint-import-resolver-node "^0.3.9"
eslint-module-utils "^2.8.0"
hasown "^2.0.0"
is-core-module "^2.13.1"
is-glob "^4.0.3"
minimatch "^3.1.2"
object.fromentries "^2.0.7"
object.groupby "^1.0.1"
object.values "^1.1.7"
semver "^6.3.1"
tsconfig-paths "^3.15.0"
eslint-plugin-no-instanceof@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/eslint-plugin-no-instanceof/-/eslint-plugin-no-instanceof-1.0.1.tgz#5d9fc86d160df6991b654b294a62390207f1bb97"
integrity sha512-zlqQ7EsfzbRO68uI+p8FIE7zYB4njs+nNbkNjSb5QmLi2et67zQLqSeaao5U9SpnlZTTJC87nS2oyHo2ACtajw==
eslint-plugin-prettier@^4.2.1:
version "4.2.1"
resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-4.2.1.tgz#651cbb88b1dab98bfd42f017a12fa6b2d993f94b"
integrity sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==
dependencies:
prettier-linter-helpers "^1.0.0"
eslint-scope@^5.1.1:
version "5.1.1"
resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c"
integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
dependencies:
esrecurse "^4.3.0"
estraverse "^4.1.1"
eslint-scope@^7.2.2:
version "7.2.2"
resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.2.tgz#deb4f92563390f32006894af62a22dba1c46423f"
integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==
dependencies:
esrecurse "^4.3.0"
estraverse "^5.2.0"
eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3:
version "3.4.3"
resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800"
integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
eslint-visitor-keys@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-4.0.0.tgz#e3adc021aa038a2a8e0b2f8b0ce8f66b9483b1fb"
integrity sha512-OtIRv/2GyiF6o/d8K7MYKKbXrOUBIK6SfkIRM4Z0dY3w+LiQ0vy3F57m0Z71bjbyeiWFiHJ8brqnmE6H6/jEuw==
eslint@^8.41.0:
version "8.57.0"
resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.57.0.tgz#c786a6fd0e0b68941aaf624596fb987089195668"
integrity sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
"@eslint-community/regexpp" "^4.6.1"
"@eslint/eslintrc" "^2.1.4"
"@eslint/js" "8.57.0"
"@humanwhocodes/config-array" "^0.11.14"
"@humanwhocodes/module-importer" "^1.0.1"
"@nodelib/fs.walk" "^1.2.8"
"@ungap/structured-clone" "^1.2.0"
ajv "^6.12.4"
chalk "^4.0.0"
cross-spawn "^7.0.2"
debug "^4.3.2"
doctrine "^3.0.0"
escape-string-regexp "^4.0.0"
eslint-scope "^7.2.2"
eslint-visitor-keys "^3.4.3"
espree "^9.6.1"
esquery "^1.4.2"
esutils "^2.0.2"
fast-deep-equal "^3.1.3"
file-entry-cache "^6.0.1"
find-up "^5.0.0"
glob-parent "^6.0.2"
globals "^13.19.0"
graphemer "^1.4.0"
ignore "^5.2.0"
imurmurhash "^0.1.4"
is-glob "^4.0.0"
is-path-inside "^3.0.3"
js-yaml "^4.1.0"
json-stable-stringify-without-jsonify "^1.0.1"
levn "^0.4.1"
lodash.merge "^4.6.2"
minimatch "^3.1.2"
natural-compare "^1.4.0"
optionator "^0.9.3"
strip-ansi "^6.0.1"
text-table "^0.2.0"
espree@^10.0.1:
version "10.1.0"
resolved "https://registry.yarnpkg.com/espree/-/espree-10.1.0.tgz#8788dae611574c0f070691f522e4116c5a11fc56"
integrity sha512-M1M6CpiE6ffoigIOWYO9UDP8TMUw9kqb21tf+08IgDYjCsOvCuDt4jQcZmoYxx+w7zlKw9/N0KXfto+I8/FrXA==
dependencies:
acorn "^8.12.0"
acorn-jsx "^5.3.2"
eslint-visitor-keys "^4.0.0"
espree@^9.6.0, espree@^9.6.1:
version "9.6.1"
resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f"
integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==
dependencies:
acorn "^8.9.0"
acorn-jsx "^5.3.2"
eslint-visitor-keys "^3.4.1"
esprima@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
esquery@^1.4.2:
version "1.6.0"
resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7"
integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==
dependencies:
estraverse "^5.1.0"
esrecurse@^4.3.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921"
integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==
dependencies:
estraverse "^5.2.0"
estraverse@^4.1.1:
version "4.3.0"
resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d"
integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
estraverse@^5.1.0, estraverse@^5.2.0:
version "5.3.0"
resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123"
integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==
esutils@^2.0.2:
version "2.0.3"
resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64"
integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
eventemitter3@^4.0.4:
version "4.0.7"
resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f"
integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==
execa@^5.0.0:
version "5.1.1"
resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd"
integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==
dependencies:
cross-spawn "^7.0.3"
get-stream "^6.0.0"
human-signals "^2.1.0"
is-stream "^2.0.0"
merge-stream "^2.0.0"
npm-run-path "^4.0.1"
onetime "^5.1.2"
signal-exit "^3.0.3"
strip-final-newline "^2.0.0"
exit@^0.1.2:
version "0.1.2"
resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==
expect@^29.0.0, expect@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/expect/-/expect-29.7.0.tgz#578874590dcb3214514084c08115d8aee61e11bc"
integrity sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==
dependencies:
"@jest/expect-utils" "^29.7.0"
jest-get-type "^29.6.3"
jest-matcher-utils "^29.7.0"
jest-message-util "^29.7.0"
jest-util "^29.7.0"
fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
version "3.1.3"
resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525"
integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
fast-diff@^1.1.2:
version "1.3.0"
resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0"
integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==
fast-glob@^3.2.9:
version "3.3.2"
resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129"
integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==
dependencies:
"@nodelib/fs.stat" "^2.0.2"
"@nodelib/fs.walk" "^1.2.3"
glob-parent "^5.1.2"
merge2 "^1.3.0"
micromatch "^4.0.4"
fast-json-stable-stringify@2.x, fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633"
integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
fast-levenshtein@^2.0.6:
version "2.0.6"
resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
fastq@^1.6.0:
version "1.17.1"
resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47"
integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==
dependencies:
reusify "^1.0.4"
fb-watchman@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.2.tgz#e9524ee6b5c77e9e5001af0f85f3adbb8623255c"
integrity sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==
dependencies:
bser "2.1.1"
file-entry-cache@^6.0.1:
version "6.0.1"
resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027"
integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==
dependencies:
flat-cache "^3.0.4"
filelist@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5"
integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==
dependencies:
minimatch "^5.0.1"
fill-range@^7.1.1:
version "7.1.1"
resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292"
integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==
dependencies:
to-regex-range "^5.0.1"
find-up@^4.0.0, find-up@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19"
integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
dependencies:
locate-path "^5.0.0"
path-exists "^4.0.0"
find-up@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc"
integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==
dependencies:
locate-path "^6.0.0"
path-exists "^4.0.0"
flat-cache@^3.0.4:
version "3.2.0"
resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.2.0.tgz#2c0c2d5040c99b1632771a9d105725c0115363ee"
integrity sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==
dependencies:
flatted "^3.2.9"
keyv "^4.5.3"
rimraf "^3.0.2"
flatted@^3.2.9:
version "3.3.1"
resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a"
integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==
for-each@^0.3.3:
version "0.3.3"
resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e"
integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==
dependencies:
is-callable "^1.1.3"
fs.realpath@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
fsevents@^2.3.2:
version "2.3.3"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==
function-bind@^1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c"
integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
function.prototype.name@^1.1.6:
version "1.1.6"
resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd"
integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==
dependencies:
call-bind "^1.0.2"
define-properties "^1.2.0"
es-abstract "^1.22.1"
functions-have-names "^1.2.3"
functions-have-names@^1.2.3:
version "1.2.3"
resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834"
integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
gensync@^1.0.0-beta.2:
version "1.0.0-beta.2"
resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0"
integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==
get-caller-file@^2.0.5:
version "2.0.5"
resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4:
version "1.2.4"
resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd"
integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==
dependencies:
es-errors "^1.3.0"
function-bind "^1.1.2"
has-proto "^1.0.1"
has-symbols "^1.0.3"
hasown "^2.0.0"
get-package-type@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a"
integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==
get-stream@^6.0.0:
version "6.0.1"
resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7"
integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==
get-symbol-description@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5"
integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==
dependencies:
call-bind "^1.0.5"
es-errors "^1.3.0"
get-intrinsic "^1.2.4"
glob-parent@^5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
dependencies:
is-glob "^4.0.1"
glob-parent@^6.0.2:
version "6.0.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3"
integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==
dependencies:
is-glob "^4.0.3"
glob@^7.1.3, glob@^7.1.4:
version "7.2.3"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b"
integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
dependencies:
fs.realpath "^1.0.0"
inflight "^1.0.4"
inherits "2"
minimatch "^3.1.1"
once "^1.3.0"
path-is-absolute "^1.0.0"
globals@^11.1.0:
version "11.12.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e"
integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
globals@^13.19.0:
version "13.24.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-13.24.0.tgz#8432a19d78ce0c1e833949c36adb345400bb1171"
integrity sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==
dependencies:
type-fest "^0.20.2"
globals@^14.0.0:
version "14.0.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-14.0.0.tgz#898d7413c29babcf6bafe56fcadded858ada724e"
integrity sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==
globalthis@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236"
integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==
dependencies:
define-properties "^1.2.1"
gopd "^1.0.1"
globby@^11.1.0:
version "11.1.0"
resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b"
integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==
dependencies:
array-union "^2.1.0"
dir-glob "^3.0.1"
fast-glob "^3.2.9"
ignore "^5.2.0"
merge2 "^1.4.1"
slash "^3.0.0"
gopd@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c"
integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==
dependencies:
get-intrinsic "^1.1.3"
graceful-fs@^4.2.9:
version "4.2.11"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
graphemer@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6"
integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==
has-bigints@^1.0.1, has-bigints@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa"
integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==
has-flag@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==
has-flag@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854"
integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==
dependencies:
es-define-property "^1.0.0"
has-proto@^1.0.1, has-proto@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd"
integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==
has-symbols@^1.0.2, has-symbols@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8"
integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
has-tostringtag@^1.0.0, has-tostringtag@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
dependencies:
has-symbols "^1.0.3"
hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
dependencies:
function-bind "^1.1.2"
html-escaper@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453"
integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==
human-signals@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0"
integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==
ignore@^5.2.0:
version "5.3.2"
resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5"
integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==
import-fresh@^3.2.1:
version "3.3.0"
resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b"
integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==
dependencies:
parent-module "^1.0.0"
resolve-from "^4.0.0"
import-local@^3.0.2:
version "3.2.0"
resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.2.0.tgz#c3d5c745798c02a6f8b897726aba5100186ee260"
integrity sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==
dependencies:
pkg-dir "^4.2.0"
resolve-cwd "^3.0.0"
imurmurhash@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==
inflight@^1.0.4:
version "1.0.6"
resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==
dependencies:
once "^1.3.0"
wrappy "1"
inherits@2:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
internal-slot@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802"
integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==
dependencies:
es-errors "^1.3.0"
hasown "^2.0.0"
side-channel "^1.0.4"
is-array-buffer@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98"
integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==
dependencies:
call-bind "^1.0.2"
get-intrinsic "^1.2.1"
is-arrayish@^0.2.1:
version "0.2.1"
resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
is-bigint@^1.0.1:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3"
integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==
dependencies:
has-bigints "^1.0.1"
is-boolean-object@^1.1.0:
version "1.1.2"
resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719"
integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==
dependencies:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7:
version "1.2.7"
resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055"
integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==
is-core-module@^2.13.0:
version "2.15.0"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.0.tgz#71c72ec5442ace7e76b306e9d48db361f22699ea"
integrity sha512-Dd+Lb2/zvk9SKy1TGCt1wFJFo/MWBPMX5x7KcvLajWTGuomczdQX61PvY5yK6SVACwpoexWo81IfFyoKY2QnTA==
dependencies:
hasown "^2.0.2"
is-core-module@^2.13.1:
version "2.15.1"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.1.tgz#a7363a25bee942fefab0de13bf6aa372c82dcc37"
integrity sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==
dependencies:
hasown "^2.0.2"
is-data-view@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f"
integrity sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==
dependencies:
is-typed-array "^1.1.13"
is-date-object@^1.0.1:
version "1.0.5"
resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f"
integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==
dependencies:
has-tostringtag "^1.0.0"
is-extglob@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==
is-fullwidth-code-point@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d"
integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
is-generator-fn@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118"
integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==
is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3:
version "4.0.3"
resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
dependencies:
is-extglob "^2.1.1"
is-negative-zero@^2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747"
integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==
is-number-object@^1.0.4:
version "1.0.7"
resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc"
integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==
dependencies:
has-tostringtag "^1.0.0"
is-number@^7.0.0:
version "7.0.0"
resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
is-path-inside@^3.0.3:
version "3.0.3"
resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283"
integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==
is-regex@^1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958"
integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==
dependencies:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688"
integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==
dependencies:
call-bind "^1.0.7"
is-stream@^2.0.0:
version "2.0.1"
resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077"
integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==
is-string@^1.0.5, is-string@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd"
integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==
dependencies:
has-tostringtag "^1.0.0"
is-symbol@^1.0.2, is-symbol@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c"
integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==
dependencies:
has-symbols "^1.0.2"
is-typed-array@^1.1.13:
version "1.1.13"
resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229"
integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==
dependencies:
which-typed-array "^1.1.14"
is-weakref@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2"
integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==
dependencies:
call-bind "^1.0.2"
isarray@^2.0.5:
version "2.0.5"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723"
integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==
isexe@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==
istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0:
version "3.2.2"
resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756"
integrity sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==
istanbul-lib-instrument@^5.0.4:
version "5.2.1"
resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz#d10c8885c2125574e1c231cacadf955675e1ce3d"
integrity sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==
dependencies:
"@babel/core" "^7.12.3"
"@babel/parser" "^7.14.7"
"@istanbuljs/schema" "^0.1.2"
istanbul-lib-coverage "^3.2.0"
semver "^6.3.0"
istanbul-lib-instrument@^6.0.0:
version "6.0.3"
resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz#fa15401df6c15874bcb2105f773325d78c666765"
integrity sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==
dependencies:
"@babel/core" "^7.23.9"
"@babel/parser" "^7.23.9"
"@istanbuljs/schema" "^0.1.3"
istanbul-lib-coverage "^3.2.0"
semver "^7.5.4"
istanbul-lib-report@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz#908305bac9a5bd175ac6a74489eafd0fc2445a7d"
integrity sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==
dependencies:
istanbul-lib-coverage "^3.0.0"
make-dir "^4.0.0"
supports-color "^7.1.0"
istanbul-lib-source-maps@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz#895f3a709fcfba34c6de5a42939022f3e4358551"
integrity sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==
dependencies:
debug "^4.1.1"
istanbul-lib-coverage "^3.0.0"
source-map "^0.6.1"
istanbul-reports@^3.1.3:
version "3.1.7"
resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.7.tgz#daed12b9e1dca518e15c056e1e537e741280fa0b"
integrity sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==
dependencies:
html-escaper "^2.0.0"
istanbul-lib-report "^3.0.0"
jake@^10.8.5:
version "10.9.2"
resolved "https://registry.yarnpkg.com/jake/-/jake-10.9.2.tgz#6ae487e6a69afec3a5e167628996b59f35ae2b7f"
integrity sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==
dependencies:
async "^3.2.3"
chalk "^4.0.2"
filelist "^1.0.4"
minimatch "^3.1.2"
jest-changed-files@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a"
integrity sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==
dependencies:
execa "^5.0.0"
jest-util "^29.7.0"
p-limit "^3.1.0"
jest-circus@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.7.0.tgz#b6817a45fcc835d8b16d5962d0c026473ee3668a"
integrity sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==
dependencies:
"@jest/environment" "^29.7.0"
"@jest/expect" "^29.7.0"
"@jest/test-result" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
co "^4.6.0"
dedent "^1.0.0"
is-generator-fn "^2.0.0"
jest-each "^29.7.0"
jest-matcher-utils "^29.7.0"
jest-message-util "^29.7.0"
jest-runtime "^29.7.0"
jest-snapshot "^29.7.0"
jest-util "^29.7.0"
p-limit "^3.1.0"
pretty-format "^29.7.0"
pure-rand "^6.0.0"
slash "^3.0.0"
stack-utils "^2.0.3"
jest-cli@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.7.0.tgz#5592c940798e0cae677eec169264f2d839a37995"
integrity sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==
dependencies:
"@jest/core" "^29.7.0"
"@jest/test-result" "^29.7.0"
"@jest/types" "^29.6.3"
chalk "^4.0.0"
create-jest "^29.7.0"
exit "^0.1.2"
import-local "^3.0.2"
jest-config "^29.7.0"
jest-util "^29.7.0"
jest-validate "^29.7.0"
yargs "^17.3.1"
jest-config@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.7.0.tgz#bcbda8806dbcc01b1e316a46bb74085a84b0245f"
integrity sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==
dependencies:
"@babel/core" "^7.11.6"
"@jest/test-sequencer" "^29.7.0"
"@jest/types" "^29.6.3"
babel-jest "^29.7.0"
chalk "^4.0.0"
ci-info "^3.2.0"
deepmerge "^4.2.2"
glob "^7.1.3"
graceful-fs "^4.2.9"
jest-circus "^29.7.0"
jest-environment-node "^29.7.0"
jest-get-type "^29.6.3"
jest-regex-util "^29.6.3"
jest-resolve "^29.7.0"
jest-runner "^29.7.0"
jest-util "^29.7.0"
jest-validate "^29.7.0"
micromatch "^4.0.4"
parse-json "^5.2.0"
pretty-format "^29.7.0"
slash "^3.0.0"
strip-json-comments "^3.1.1"
jest-diff@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.7.0.tgz#017934a66ebb7ecf6f205e84699be10afd70458a"
integrity sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==
dependencies:
chalk "^4.0.0"
diff-sequences "^29.6.3"
jest-get-type "^29.6.3"
pretty-format "^29.7.0"
jest-docblock@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.7.0.tgz#8fddb6adc3cdc955c93e2a87f61cfd350d5d119a"
integrity sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==
dependencies:
detect-newline "^3.0.0"
jest-each@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.7.0.tgz#162a9b3f2328bdd991beaabffbb74745e56577d1"
integrity sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==
dependencies:
"@jest/types" "^29.6.3"
chalk "^4.0.0"
jest-get-type "^29.6.3"
jest-util "^29.7.0"
pretty-format "^29.7.0"
jest-environment-node@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376"
integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==
dependencies:
"@jest/environment" "^29.7.0"
"@jest/fake-timers" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
jest-mock "^29.7.0"
jest-util "^29.7.0"
jest-get-type@^29.6.3:
version "29.6.3"
resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.6.3.tgz#36f499fdcea197c1045a127319c0481723908fd1"
integrity sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==
jest-haste-map@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.7.0.tgz#3c2396524482f5a0506376e6c858c3bbcc17b104"
integrity sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==
dependencies:
"@jest/types" "^29.6.3"
"@types/graceful-fs" "^4.1.3"
"@types/node" "*"
anymatch "^3.0.3"
fb-watchman "^2.0.0"
graceful-fs "^4.2.9"
jest-regex-util "^29.6.3"
jest-util "^29.7.0"
jest-worker "^29.7.0"
micromatch "^4.0.4"
walker "^1.0.8"
optionalDependencies:
fsevents "^2.3.2"
jest-leak-detector@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz#5b7ec0dadfdfec0ca383dc9aa016d36b5ea4c728"
integrity sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==
dependencies:
jest-get-type "^29.6.3"
pretty-format "^29.7.0"
jest-matcher-utils@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz#ae8fec79ff249fd592ce80e3ee474e83a6c44f12"
integrity sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==
dependencies:
chalk "^4.0.0"
jest-diff "^29.7.0"
jest-get-type "^29.6.3"
pretty-format "^29.7.0"
jest-message-util@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.7.0.tgz#8bc392e204e95dfe7564abbe72a404e28e51f7f3"
integrity sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==
dependencies:
"@babel/code-frame" "^7.12.13"
"@jest/types" "^29.6.3"
"@types/stack-utils" "^2.0.0"
chalk "^4.0.0"
graceful-fs "^4.2.9"
micromatch "^4.0.4"
pretty-format "^29.7.0"
slash "^3.0.0"
stack-utils "^2.0.3"
jest-mock@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.7.0.tgz#4e836cf60e99c6fcfabe9f99d017f3fdd50a6347"
integrity sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==
dependencies:
"@jest/types" "^29.6.3"
"@types/node" "*"
jest-util "^29.7.0"
jest-pnp-resolver@^1.2.2:
version "1.2.3"
resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz#930b1546164d4ad5937d5540e711d4d38d4cad2e"
integrity sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==
jest-regex-util@^29.6.3:
version "29.6.3"
resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.6.3.tgz#4a556d9c776af68e1c5f48194f4d0327d24e8a52"
integrity sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==
jest-resolve-dependencies@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz#1b04f2c095f37fc776ff40803dc92921b1e88428"
integrity sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==
dependencies:
jest-regex-util "^29.6.3"
jest-snapshot "^29.7.0"
jest-resolve@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.7.0.tgz#64d6a8992dd26f635ab0c01e5eef4399c6bcbc30"
integrity sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==
dependencies:
chalk "^4.0.0"
graceful-fs "^4.2.9"
jest-haste-map "^29.7.0"
jest-pnp-resolver "^1.2.2"
jest-util "^29.7.0"
jest-validate "^29.7.0"
resolve "^1.20.0"
resolve.exports "^2.0.0"
slash "^3.0.0"
jest-runner@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.7.0.tgz#809af072d408a53dcfd2e849a4c976d3132f718e"
integrity sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==
dependencies:
"@jest/console" "^29.7.0"
"@jest/environment" "^29.7.0"
"@jest/test-result" "^29.7.0"
"@jest/transform" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
emittery "^0.13.1"
graceful-fs "^4.2.9"
jest-docblock "^29.7.0"
jest-environment-node "^29.7.0"
jest-haste-map "^29.7.0"
jest-leak-detector "^29.7.0"
jest-message-util "^29.7.0"
jest-resolve "^29.7.0"
jest-runtime "^29.7.0"
jest-util "^29.7.0"
jest-watcher "^29.7.0"
jest-worker "^29.7.0"
p-limit "^3.1.0"
source-map-support "0.5.13"
jest-runtime@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.7.0.tgz#efecb3141cf7d3767a3a0cc8f7c9990587d3d817"
integrity sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==
dependencies:
"@jest/environment" "^29.7.0"
"@jest/fake-timers" "^29.7.0"
"@jest/globals" "^29.7.0"
"@jest/source-map" "^29.6.3"
"@jest/test-result" "^29.7.0"
"@jest/transform" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
cjs-module-lexer "^1.0.0"
collect-v8-coverage "^1.0.0"
glob "^7.1.3"
graceful-fs "^4.2.9"
jest-haste-map "^29.7.0"
jest-message-util "^29.7.0"
jest-mock "^29.7.0"
jest-regex-util "^29.6.3"
jest-resolve "^29.7.0"
jest-snapshot "^29.7.0"
jest-util "^29.7.0"
slash "^3.0.0"
strip-bom "^4.0.0"
jest-snapshot@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.7.0.tgz#c2c574c3f51865da1bb329036778a69bf88a6be5"
integrity sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==
dependencies:
"@babel/core" "^7.11.6"
"@babel/generator" "^7.7.2"
"@babel/plugin-syntax-jsx" "^7.7.2"
"@babel/plugin-syntax-typescript" "^7.7.2"
"@babel/types" "^7.3.3"
"@jest/expect-utils" "^29.7.0"
"@jest/transform" "^29.7.0"
"@jest/types" "^29.6.3"
babel-preset-current-node-syntax "^1.0.0"
chalk "^4.0.0"
expect "^29.7.0"
graceful-fs "^4.2.9"
jest-diff "^29.7.0"
jest-get-type "^29.6.3"
jest-matcher-utils "^29.7.0"
jest-message-util "^29.7.0"
jest-util "^29.7.0"
natural-compare "^1.4.0"
pretty-format "^29.7.0"
semver "^7.5.3"
jest-util@^29.0.0, jest-util@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.7.0.tgz#23c2b62bfb22be82b44de98055802ff3710fc0bc"
integrity sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==
dependencies:
"@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
ci-info "^3.2.0"
graceful-fs "^4.2.9"
picomatch "^2.2.3"
jest-validate@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.7.0.tgz#7bf705511c64da591d46b15fce41400d52147d9c"
integrity sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==
dependencies:
"@jest/types" "^29.6.3"
camelcase "^6.2.0"
chalk "^4.0.0"
jest-get-type "^29.6.3"
leven "^3.1.0"
pretty-format "^29.7.0"
jest-watcher@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.7.0.tgz#7810d30d619c3a62093223ce6bb359ca1b28a2f2"
integrity sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==
dependencies:
"@jest/test-result" "^29.7.0"
"@jest/types" "^29.6.3"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
emittery "^0.13.1"
jest-util "^29.7.0"
string-length "^4.0.1"
jest-worker@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.7.0.tgz#acad073acbbaeb7262bd5389e1bcf43e10058d4a"
integrity sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==
dependencies:
"@types/node" "*"
jest-util "^29.7.0"
merge-stream "^2.0.0"
supports-color "^8.0.0"
jest@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest/-/jest-29.7.0.tgz#994676fc24177f088f1c5e3737f5697204ff2613"
integrity sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==
dependencies:
"@jest/core" "^29.7.0"
"@jest/types" "^29.6.3"
import-local "^3.0.2"
jest-cli "^29.7.0"
js-tiktoken@^1.0.12:
version "1.0.14"
resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.14.tgz#756f353262d559da16b58b5bcecfd93330076da2"
integrity sha512-Pk3l3WOgM9joguZY2k52+jH82RtABRgB5RdGFZNUGbOKGMVlNmafcPA3b0ITcCZPu1L9UclP1tne6aw7ZI4Myg==
dependencies:
base64-js "^1.5.1"
js-tokens@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
js-yaml@^3.13.1:
version "3.14.1"
resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537"
integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
dependencies:
argparse "^1.0.7"
esprima "^4.0.0"
js-yaml@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
dependencies:
argparse "^2.0.1"
jsesc@^2.5.1:
version "2.5.2"
resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
json-buffer@3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
json-parse-even-better-errors@^2.3.0:
version "2.3.1"
resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==
json-schema-traverse@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==
json-stable-stringify-without-jsonify@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==
json5@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==
dependencies:
minimist "^1.2.0"
json5@^2.2.3:
version "2.2.3"
resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283"
integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==
keyv@^4.5.3:
version "4.5.4"
resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93"
integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==
dependencies:
json-buffer "3.0.1"
kleur@^3.0.3:
version "3.0.3"
resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e"
integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==
langsmith@^0.1.56:
version "0.1.58"
resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.58.tgz#502aa6c22fecd15fa65c14ffbe7fcc4643201f47"
integrity sha512-crbJbfw6hLBbVDQlMRWRVYwppApiDMncsqqBtTP1udUvilAsw4btIpBq0Tf+Jr8iQs6cEpZr/h7lGr5DdzAGew==
dependencies:
"@types/uuid" "^10.0.0"
commander "^10.0.1"
p-queue "^6.6.2"
p-retry "4"
semver "^7.6.3"
uuid "^10.0.0"
leven@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2"
integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==
levn@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade"
integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==
dependencies:
prelude-ls "^1.2.1"
type-check "~0.4.0"
lines-and-columns@^1.1.6:
version "1.2.4"
resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632"
integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==
locate-path@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0"
integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==
dependencies:
p-locate "^4.1.0"
locate-path@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286"
integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==
dependencies:
p-locate "^5.0.0"
lodash.memoize@4.x:
version "4.1.2"
resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe"
integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==
lodash.merge@^4.6.2:
version "4.6.2"
resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==
lru-cache@^5.1.1:
version "5.1.1"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920"
integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==
dependencies:
yallist "^3.0.2"
make-dir@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e"
integrity sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==
dependencies:
semver "^7.5.3"
make-error@1.x:
version "1.3.6"
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2"
integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==
makeerror@1.0.12:
version "1.0.12"
resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.12.tgz#3e5dd2079a82e812e983cc6610c4a2cb0eaa801a"
integrity sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==
dependencies:
tmpl "1.0.5"
merge-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==
merge2@^1.3.0, merge2@^1.4.1:
version "1.4.1"
resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
micromatch@^4.0.4:
version "4.0.8"
resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
dependencies:
braces "^3.0.3"
picomatch "^2.3.1"
mimic-fn@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b"
integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==
minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
dependencies:
brace-expansion "^1.1.7"
minimatch@^5.0.1:
version "5.1.6"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96"
integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==
dependencies:
brace-expansion "^2.0.1"
minimist@^1.2.0, minimist@^1.2.6:
version "1.2.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
ms@2.1.2:
version "2.1.2"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
ms@^2.1.1:
version "2.1.3"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
mustache@^4.2.0:
version "4.2.0"
resolved "https://registry.yarnpkg.com/mustache/-/mustache-4.2.0.tgz#e5892324d60a12ec9c2a73359edca52972bf6f64"
integrity sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==
natural-compare-lite@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz#17b09581988979fddafe0201e931ba933c96cbb4"
integrity sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==
natural-compare@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==
node-int64@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
node-releases@^2.0.18:
version "2.0.18"
resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f"
integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==
normalize-path@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65"
integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==
npm-run-path@^4.0.1:
version "4.0.1"
resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea"
integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==
dependencies:
path-key "^3.0.0"
object-inspect@^1.13.1:
version "1.13.2"
resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff"
integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==
object-keys@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
object.assign@^4.1.5:
version "4.1.5"
resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0"
integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==
dependencies:
call-bind "^1.0.5"
define-properties "^1.2.1"
has-symbols "^1.0.3"
object-keys "^1.1.1"
object.fromentries@^2.0.7:
version "2.0.8"
resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.8.tgz#f7195d8a9b97bd95cbc1999ea939ecd1a2b00c65"
integrity sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.2"
es-object-atoms "^1.0.0"
object.groupby@^1.0.1:
version "1.0.3"
resolved "https://registry.yarnpkg.com/object.groupby/-/object.groupby-1.0.3.tgz#9b125c36238129f6f7b61954a1e7176148d5002e"
integrity sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.2"
object.values@^1.1.7:
version "1.2.0"
resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.2.0.tgz#65405a9d92cee68ac2d303002e0b8470a4d9ab1b"
integrity sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
once@^1.3.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==
dependencies:
wrappy "1"
onetime@^5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e"
integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==
dependencies:
mimic-fn "^2.1.0"
optionator@^0.9.3:
version "0.9.4"
resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.4.tgz#7ea1c1a5d91d764fb282139c88fe11e182a3a734"
integrity sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==
dependencies:
deep-is "^0.1.3"
fast-levenshtein "^2.0.6"
levn "^0.4.1"
prelude-ls "^1.2.1"
type-check "^0.4.0"
word-wrap "^1.2.5"
p-finally@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae"
integrity sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==
p-limit@^2.2.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1"
integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
dependencies:
p-try "^2.0.0"
p-limit@^3.0.2, p-limit@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b"
integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==
dependencies:
yocto-queue "^0.1.0"
p-locate@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07"
integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==
dependencies:
p-limit "^2.2.0"
p-locate@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834"
integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==
dependencies:
p-limit "^3.0.2"
p-queue@^6.6.2:
version "6.6.2"
resolved "https://registry.yarnpkg.com/p-queue/-/p-queue-6.6.2.tgz#2068a9dcf8e67dd0ec3e7a2bcb76810faa85e426"
integrity sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==
dependencies:
eventemitter3 "^4.0.4"
p-timeout "^3.2.0"
p-retry@4:
version "4.6.2"
resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-4.6.2.tgz#9baae7184057edd4e17231cee04264106e092a16"
integrity sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==
dependencies:
"@types/retry" "0.12.0"
retry "^0.13.1"
p-timeout@^3.2.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-3.2.0.tgz#c7e17abc971d2a7962ef83626b35d635acf23dfe"
integrity sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==
dependencies:
p-finally "^1.0.0"
p-try@^2.0.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
parent-module@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
dependencies:
callsites "^3.0.0"
parse-json@^5.2.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd"
integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==
dependencies:
"@babel/code-frame" "^7.0.0"
error-ex "^1.3.1"
json-parse-even-better-errors "^2.3.0"
lines-and-columns "^1.1.6"
path-exists@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==
path-is-absolute@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==
path-key@^3.0.0, path-key@^3.1.0:
version "3.1.1"
resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375"
integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
path-parse@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
path-type@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==
picocolors@^1.0.0, picocolors@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1"
integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
picomatch@^2.0.4, picomatch@^2.2.3, picomatch@^2.3.1:
version "2.3.1"
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42"
integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
pirates@^4.0.4:
version "4.0.6"
resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.6.tgz#3018ae32ecfcff6c29ba2267cbf21166ac1f36b9"
integrity sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==
pkg-dir@^4.2.0:
version "4.2.0"
resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3"
integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==
dependencies:
find-up "^4.0.0"
possible-typed-array-names@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f"
integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==
prelude-ls@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396"
integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==
prettier-linter-helpers@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz#d23d41fe1375646de2d0104d3454a3008802cf7b"
integrity sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==
dependencies:
fast-diff "^1.1.2"
prettier@^3.3.3:
version "3.3.3"
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.3.tgz#30c54fe0be0d8d12e6ae61dbb10109ea00d53105"
integrity sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==
pretty-format@^29.0.0, pretty-format@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812"
integrity sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==
dependencies:
"@jest/schemas" "^29.6.3"
ansi-styles "^5.0.0"
react-is "^18.0.0"
prompts@^2.0.1:
version "2.4.2"
resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069"
integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==
dependencies:
kleur "^3.0.3"
sisteransi "^1.0.5"
punycode@^2.1.0:
version "2.3.1"
resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5"
integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==
pure-rand@^6.0.0:
version "6.1.0"
resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2"
integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==
queue-microtask@^1.2.2:
version "1.2.3"
resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243"
integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
react-is@^18.0.0:
version "18.3.1"
resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e"
integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==
regexp.prototype.flags@^1.5.2:
version "1.5.2"
resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334"
integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==
dependencies:
call-bind "^1.0.6"
define-properties "^1.2.1"
es-errors "^1.3.0"
set-function-name "^2.0.1"
require-directory@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==
resolve-cwd@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d"
integrity sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==
dependencies:
resolve-from "^5.0.0"
resolve-from@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6"
integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==
resolve-from@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69"
integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==
resolve.exports@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.2.tgz#f8c934b8e6a13f539e38b7098e2e36134f01e800"
integrity sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==
resolve@^1.20.0, resolve@^1.22.4:
version "1.22.8"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d"
integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==
dependencies:
is-core-module "^2.13.0"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
retry@^0.13.1:
version "0.13.1"
resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658"
integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==
reusify@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76"
integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
rimraf@^3.0.2:
version "3.0.2"
resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a"
integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
dependencies:
glob "^7.1.3"
run-parallel@^1.1.9:
version "1.2.0"
resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee"
integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==
dependencies:
queue-microtask "^1.2.2"
safe-array-concat@^1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb"
integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==
dependencies:
call-bind "^1.0.7"
get-intrinsic "^1.2.4"
has-symbols "^1.0.3"
isarray "^2.0.5"
safe-regex-test@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377"
integrity sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-regex "^1.1.4"
semver@^6.3.0, semver@^6.3.1:
version "6.3.1"
resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4"
integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
semver@^7.3.7, semver@^7.5.3, semver@^7.5.4, semver@^7.6.3:
version "7.6.3"
resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143"
integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==
set-function-length@^1.2.1:
version "1.2.2"
resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449"
integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==
dependencies:
define-data-property "^1.1.4"
es-errors "^1.3.0"
function-bind "^1.1.2"
get-intrinsic "^1.2.4"
gopd "^1.0.1"
has-property-descriptors "^1.0.2"
set-function-name@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985"
integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==
dependencies:
define-data-property "^1.1.4"
es-errors "^1.3.0"
functions-have-names "^1.2.3"
has-property-descriptors "^1.0.2"
shebang-command@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea"
integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==
dependencies:
shebang-regex "^3.0.0"
shebang-regex@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172"
integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
side-channel@^1.0.4:
version "1.0.6"
resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2"
integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
get-intrinsic "^1.2.4"
object-inspect "^1.13.1"
signal-exit@^3.0.3, signal-exit@^3.0.7:
version "3.0.7"
resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9"
integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==
sisteransi@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed"
integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==
slash@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634"
integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==
source-map-support@0.5.13:
version "0.5.13"
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932"
integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==
dependencies:
buffer-from "^1.0.0"
source-map "^0.6.0"
source-map@^0.6.0, source-map@^0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
sprintf-js@~1.0.2:
version "1.0.3"
resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==
stack-utils@^2.0.3:
version "2.0.6"
resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f"
integrity sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==
dependencies:
escape-string-regexp "^2.0.0"
string-length@^4.0.1:
version "4.0.2"
resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.2.tgz#a8a8dc7bd5c1a82b9b3c8b87e125f66871b6e57a"
integrity sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==
dependencies:
char-regex "^1.0.2"
strip-ansi "^6.0.0"
string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3:
version "4.2.3"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
dependencies:
emoji-regex "^8.0.0"
is-fullwidth-code-point "^3.0.0"
strip-ansi "^6.0.1"
string.prototype.trim@^1.2.9:
version "1.2.9"
resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz#b6fa326d72d2c78b6df02f7759c73f8f6274faa4"
integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.0"
es-object-atoms "^1.0.0"
string.prototype.trimend@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz#3651b8513719e8a9f48de7f2f77640b26652b229"
integrity sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
string.prototype.trimstart@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde"
integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
strip-ansi@^6.0.0, strip-ansi@^6.0.1:
version "6.0.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
dependencies:
ansi-regex "^5.0.1"
strip-bom@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3"
integrity sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==
strip-bom@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878"
integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==
strip-final-newline@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad"
integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==
strip-json-comments@^3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006"
integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
supports-color@^5.3.0:
version "5.5.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
dependencies:
has-flag "^3.0.0"
supports-color@^7.1.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
dependencies:
has-flag "^4.0.0"
supports-color@^8.0.0:
version "8.1.1"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c"
integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==
dependencies:
has-flag "^4.0.0"
supports-preserve-symlinks-flag@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
test-exclude@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e"
integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==
dependencies:
"@istanbuljs/schema" "^0.1.2"
glob "^7.1.4"
minimatch "^3.0.4"
text-table@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
tmpl@1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc"
integrity sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==
to-fast-properties@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"
integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==
to-regex-range@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"
integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==
dependencies:
is-number "^7.0.0"
ts-jest@^29.1.0:
version "29.2.4"
resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.2.4.tgz#38ccf487407d7a63054a72689f6f99b075e296e5"
integrity sha512-3d6tgDyhCI29HlpwIq87sNuI+3Q6GLTTCeYRHCs7vDz+/3GCMwEtV9jezLyl4ZtnBgx00I7hm8PCP8cTksMGrw==
dependencies:
bs-logger "0.x"
ejs "^3.1.10"
fast-json-stable-stringify "2.x"
jest-util "^29.0.0"
json5 "^2.2.3"
lodash.memoize "4.x"
make-error "1.x"
semver "^7.5.3"
yargs-parser "^21.0.1"
tsconfig-paths@^3.15.0:
version "3.15.0"
resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz#5299ec605e55b1abb23ec939ef15edaf483070d4"
integrity sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==
dependencies:
"@types/json5" "^0.0.29"
json5 "^1.0.2"
minimist "^1.2.6"
strip-bom "^3.0.0"
tslib@^1.8.1:
version "1.14.1"
resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
tsutils@^3.21.0:
version "3.21.0"
resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.21.0.tgz#b48717d394cea6c1e096983eed58e9d61715b623"
integrity sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==
dependencies:
tslib "^1.8.1"
type-check@^0.4.0, type-check@~0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1"
integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==
dependencies:
prelude-ls "^1.2.1"
type-detect@4.0.8:
version "4.0.8"
resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c"
integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==
type-fest@^0.20.2:
version "0.20.2"
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4"
integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==
type-fest@^0.21.3:
version "0.21.3"
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37"
integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==
typed-array-buffer@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3"
integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
is-typed-array "^1.1.13"
typed-array-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67"
integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==
dependencies:
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
typed-array-byte-offset@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063"
integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==
dependencies:
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
typed-array-length@^1.0.6:
version "1.0.6"
resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3"
integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==
dependencies:
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
possible-typed-array-names "^1.0.0"
typescript@^5.3.3:
version "5.5.4"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.5.4.tgz#d9852d6c82bad2d2eda4fd74a5762a8f5909e9ba"
integrity sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==
unbox-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"
integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==
dependencies:
call-bind "^1.0.2"
has-bigints "^1.0.2"
has-symbols "^1.0.3"
which-boxed-primitive "^1.0.2"
undici-types@~6.19.2:
version "6.19.8"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02"
integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==
update-browserslist-db@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e"
integrity sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==
dependencies:
escalade "^3.1.2"
picocolors "^1.0.1"
uri-js@^4.2.2:
version "4.4.1"
resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e"
integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==
dependencies:
punycode "^2.1.0"
uuid@^10.0.0:
version "10.0.0"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294"
integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==
v8-to-istanbul@^9.0.1:
version "9.3.0"
resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175"
integrity sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==
dependencies:
"@jridgewell/trace-mapping" "^0.3.12"
"@types/istanbul-lib-coverage" "^2.0.1"
convert-source-map "^2.0.0"
walker@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f"
integrity sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==
dependencies:
makeerror "1.0.12"
which-boxed-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6"
integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==
dependencies:
is-bigint "^1.0.1"
is-boolean-object "^1.1.0"
is-number-object "^1.0.4"
is-string "^1.0.5"
is-symbol "^1.0.3"
which-typed-array@^1.1.14, which-typed-array@^1.1.15:
version "1.1.15"
resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d"
integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==
dependencies:
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-tostringtag "^1.0.2"
which@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1"
integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==
dependencies:
isexe "^2.0.0"
word-wrap@^1.2.5:
version "1.2.5"
resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34"
integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==
wrap-ansi@^7.0.0:
version "7.0.0"
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43"
integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
dependencies:
ansi-styles "^4.0.0"
string-width "^4.1.0"
strip-ansi "^6.0.0"
wrappy@1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==
write-file-atomic@^4.0.2:
version "4.0.2"
resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd"
integrity sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==
dependencies:
imurmurhash "^0.1.4"
signal-exit "^3.0.7"
y18n@^5.0.5:
version "5.0.8"
resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55"
integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==
yallist@^3.0.2:
version "3.1.1"
resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd"
integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==
yargs-parser@^21.0.1, yargs-parser@^21.1.1:
version "21.1.1"
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35"
integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==
yargs@^17.3.1:
version "17.7.2"
resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269"
integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==
dependencies:
cliui "^8.0.1"
escalade "^3.1.1"
get-caller-file "^2.0.5"
require-directory "^2.1.1"
string-width "^4.2.3"
y18n "^5.0.5"
yargs-parser "^21.1.1"
yocto-queue@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b"
integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==
zod-to-json-schema@^3.22.3:
version "3.23.2"
resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.23.2.tgz#bc7e379c8050462538383e382964c03d8fe008f9"
integrity sha512-uSt90Gzc/tUfyNqxnjlfBs8W6WSGpNBv0rVsNxP/BVSMHMKGdthPYff4xtCHYloJGM0CFxFsb3NbC0eqPhfImw==
zod@^3.22.4, zod@^3.23.8:
version "3.23.8"
resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d"
integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/README.md | # New LangGraph.js Project
[](https://github.com/langchain-ai/new-langgraphjs-project/actions/workflows/unit-tests.yml)
[](https://github.com/langchain-ai/new-langgraphjs-project/actions/workflows/integration-tests.yml)
[](https://langgraph-studio.vercel.app/templates/open?githubUrl=https://github.com/langchain-ai/new-langgraphjs-project)
This template demonstrates a simple chatbot implemented using [LangGraph.js](https://github.com/langchain-ai/langgraphjs), designed for [LangGraph Studio](https://github.com/langchain-ai/langgraph-studio). The chatbot maintains persistent chat memory, allowing for coherent conversations across multiple interactions.

The core logic, defined in `src/agent/graph.ts`, showcases a straightforward chatbot that responds to user queries while maintaining context from previous messages.
## What it does
The simple chatbot:
1. Takes a user **message** as input
2. Maintains a history of the conversation
3. Returns a placeholder response, updating the conversation history
This template provides a foundation that can be easily customized and extended to create more complex conversational agents.
## Getting Started
Assuming you have already [installed LangGraph Studio](https://github.com/langchain-ai/langgraph-studio?tab=readme-ov-file#download), to set up:
1. Create a `.env` file. This template does not require any environment variables by default, but you will likely want to add some when customizing.
```bash
cp .env.example .env
```
<!--
Setup instruction auto-generated by `langgraph template lock`. DO NOT EDIT MANUALLY.
-->
<!--
End setup instructions
-->
2. Open the folder in LangGraph Studio!
3. Customize the code as needed.
## How to customize
1. **Add an LLM call**: You can select and install a chat model wrapper from [the LangChain.js ecosystem](https://js.langchain.com/docs/integrations/chat/), or use LangGraph.js without LangChain.js.
2. **Extend the graph**: The core logic of the chatbot is defined in [graph.ts](./src/agent/graph.ts). You can modify this file to add new nodes, edges, or change the flow of the conversation.
You can also extend this template by:
- Adding [custom tools or functions](https://js.langchain.com/docs/how_to/tool_calling) to enhance the chatbot's capabilities.
- Implementing additional logic for handling specific types of user queries or tasks.
- Add retrieval-augmented generation (RAG) capabilities by integrating [external APIs or databases](https://langchain-ai.github.io/langgraphjs/tutorials/rag/langgraph_agentic_rag/) to provide more customized responses.
## Development
While iterating on your graph, you can edit past state and rerun your app from previous states to debug specific nodes. Local changes will be automatically applied via hot reload. Try experimenting with:
- Modifying the system prompt to give your chatbot a unique personality.
- Adding new nodes to the graph for more complex conversation flows.
- Implementing conditional logic to handle different types of user inputs.
Follow-up requests will be appended to the same thread. You can create an entirely new thread, clearing previous history, using the `+` button in the top right.
For more advanced features and examples, refer to the [LangGraph.js documentation](https://github.com/langchain-ai/langgraphjs). These resources can help you adapt this template for your specific use case and build more sophisticated conversational agents.
LangGraph Studio also integrates with [LangSmith](https://smith.langchain.com/) for more in-depth tracing and collaboration with teammates, allowing you to analyze and optimize your chatbot's performance.
<!--
Configuration auto-generated by `langgraph template lock`. DO NOT EDIT MANUALLY.
{
"config_schemas": {
"agent": {
"type": "object",
"properties": {}
}
}
}
-->
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/.eslintrc.cjs | module.exports = {
extends: [
"eslint:recommended",
"prettier",
"plugin:@typescript-eslint/recommended",
],
parserOptions: {
ecmaVersion: 12,
parser: "@typescript-eslint/parser",
project: "./tsconfig.json",
sourceType: "module",
},
plugins: ["import", "@typescript-eslint", "no-instanceof"],
ignorePatterns: [
".eslintrc.cjs",
"scripts",
"src/utils/lodash/*",
"node_modules",
"dist",
"dist-cjs",
"*.js",
"*.cjs",
"*.d.ts",
],
rules: {
"no-process-env": 2,
"no-instanceof/no-instanceof": 2,
"@typescript-eslint/explicit-module-boundary-types": 0,
"@typescript-eslint/no-empty-function": 0,
"@typescript-eslint/no-shadow": 0,
"@typescript-eslint/no-empty-interface": 0,
"@typescript-eslint/no-use-before-define": ["error", "nofunc"],
"@typescript-eslint/no-unused-vars": ["warn", { args: "none" }],
"@typescript-eslint/no-floating-promises": "error",
"@typescript-eslint/no-misused-promises": "error",
camelcase: 0,
"class-methods-use-this": 0,
"import/extensions": [2, "ignorePackages"],
"import/no-extraneous-dependencies": [
"error",
{ devDependencies: ["**/*.test.ts"] },
],
"import/no-unresolved": 0,
"import/prefer-default-export": 0,
"keyword-spacing": "error",
"max-classes-per-file": 0,
"max-len": 0,
"no-await-in-loop": 0,
"no-bitwise": 0,
"no-console": 0,
"no-restricted-syntax": 0,
"no-shadow": 0,
"no-continue": 0,
"no-underscore-dangle": 0,
"no-use-before-define": 0,
"no-useless-constructor": 0,
"no-return-await": 0,
"consistent-return": 0,
"no-else-return": 0,
"new-cap": ["error", { properties: false, capIsNew: false }],
},
};
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/.env.example | # Copy this over:
# cp .env.example .env
# Then modify to suit your needs |
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/package.json | {
"name": "example-graph",
"version": "0.0.1",
"description": "A starter template for creating a LangGraph workflow.",
"packageManager": "yarn@1.22.22",
"main": "my_app/graph.ts",
"author": "Your Name",
"license": "MIT",
"private": true,
"type": "module",
"scripts": {
"build": "tsc",
"clean": "rm -rf dist",
"test": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.test\\.ts$ --testPathIgnorePatterns=\\.int\\.test\\.ts$",
"test:int": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.int\\.test\\.ts$",
"format": "prettier --write .",
"lint": "eslint src",
"format:check": "prettier --check .",
"lint:langgraph-json": "node scripts/checkLanggraphPaths.js",
"lint:all": "yarn lint & yarn lint:langgraph-json & yarn format:check",
"test:all": "yarn test && yarn test:int && yarn lint:langgraph"
},
"dependencies": {
"@langchain/core": "^0.3.2",
"@langchain/langgraph": "^0.2.5"
},
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.9.1",
"@tsconfig/recommended": "^1.0.7",
"@types/jest": "^29.5.0",
"@typescript-eslint/eslint-plugin": "^5.59.8",
"@typescript-eslint/parser": "^5.59.8",
"dotenv": "^16.4.5",
"eslint": "^8.41.0",
"eslint-config-prettier": "^8.8.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.7.0",
"prettier": "^3.3.3",
"ts-jest": "^29.1.0",
"typescript": "^5.3.3"
}
}
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/jest.config.js | export default {
preset: "ts-jest/presets/default-esm",
moduleNameMapper: {
"^(\\.{1,2}/.*)\\.js$": "$1",
},
transform: {
"^.+\\.tsx?$": [
"ts-jest",
{
useESM: true,
},
],
},
extensionsToTreatAsEsm: [".ts"],
setupFiles: ["dotenv/config"],
passWithNoTests: true,
testTimeout: 20_000,
};
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/js-examples/langgraph.json | {
"node_version": "20",
"graphs": {
"agent": "./src/agent/graph.ts:graph"
},
"env": ".env",
"dependencies": ["."]
}
|
0 | lc_public_repos/langgraph/libs/cli/js-examples | lc_public_repos/langgraph/libs/cli/js-examples/tests/agent.test.ts | import { describe, it, expect } from "@jest/globals";
import { route } from "../src/agent/graph.js";
describe("Routers", () => {
it("Test route", async () => {
const res = route({ messages: [] });
expect(res).toEqual("callModel");
}, 100_000);
});
|
0 | lc_public_repos/langgraph/libs/cli/js-examples | lc_public_repos/langgraph/libs/cli/js-examples/tests/graph.int.test.ts | import { describe, it, expect } from "@jest/globals";
import { graph } from "../src/agent/graph.js";
describe("Graph", () => {
it("should process input through the graph", async () => {
const input = "What is the capital of France?";
const result = await graph.invoke({ input });
expect(result).toBeDefined();
expect(typeof result).toBe("object");
expect(result.messages).toBeDefined();
expect(Array.isArray(result.messages)).toBe(true);
expect(result.messages.length).toBeGreaterThan(0);
const lastMessage = result.messages[result.messages.length - 1];
expect(lastMessage.content.toString().toLowerCase()).toContain("hi");
}, 30000); // Increased timeout to 30 seconds
});
|
0 | lc_public_repos/langgraph/libs/cli/js-examples/src | lc_public_repos/langgraph/libs/cli/js-examples/src/agent/state.ts | import { BaseMessage, BaseMessageLike } from "@langchain/core/messages";
import { Annotation, messagesStateReducer } from "@langchain/langgraph";
/**
* A graph's StateAnnotation defines three main things:
* 1. The structure of the data to be passed between nodes (which "channels" to read from/write to and their types)
* 2. Default values for each field
* 3. Reducers for the state's. Reducers are functions that determine how to apply updates to the state.
* See [Reducers](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#reducers) for more information.
*/
// Primary agent state: a single reducer-managed `messages` channel.
export const StateAnnotation = Annotation.Root({
  /**
   * Messages track the primary execution state of the agent.
   *
   * The typical accumulation pattern is: HumanMessage input; AIMessage
   * with .tool_calls; ToolMessage responses from the executed tools
   * (repeating as needed); then an AIMessage answer without tool calls —
   * and around again for each user turn.
   *
   * `messagesStateReducer` merges the update list into the existing list:
   * message-like objects are coerced into LangChain message classes,
   * messages without an id are assigned one by LangGraph, and an incoming
   * message replaces an existing one only when their ids match. In every
   * other case the channel behaves as append-only.
   */
  messages: Annotation<BaseMessage[], BaseMessageLike[]>({
    reducer: messagesStateReducer,
    default: () => [],
  }),
  /**
   * Add further channels here as needed (retrieved documents, extracted
   * entities, API connections, ...). A simple field whose value should be
   * overwritten by a node's return value needs no reducer or default:
   */
  // additionalField: Annotation<string>,
});
|
0 | lc_public_repos/langgraph/libs/cli/js-examples/src | lc_public_repos/langgraph/libs/cli/js-examples/src/agent/graph.ts | /**
* Starter LangGraph.js Template
* Make this code your own!
*/
import { StateGraph } from "@langchain/langgraph";
import { RunnableConfig } from "@langchain/core/runnables";
import { StateAnnotation } from "./state.js";
/**
* Define a node, these do the work of the graph and should have most of the logic.
* Must return a subset of the properties set in StateAnnotation.
* @param state The current state of the graph.
* @param config Extra parameters passed into the state graph.
* @returns Some subset of parameters of the graph state, used to update the state
* for the edges and nodes executed next.
*/
const callModel = async (
  state: typeof StateAnnotation.State,
  _config: RunnableConfig,
): Promise<typeof StateAnnotation.Update> => {
  // Do some work... (e.g. call an LLM). For example, with LangChain:
  //
  //   $ npm i @langchain/anthropic
  //
  //   import { ChatAnthropic } from "@langchain/anthropic";
  //   const model = new ChatAnthropic({
  //     model: "claude-3-5-sonnet-20240620",
  //     apiKey: process.env.ANTHROPIC_API_KEY,
  //   });
  //   const res = await model.invoke(state.messages);
  //
  // Or, with an SDK directly:
  //
  //   $ npm i openai
  //
  //   import OpenAI from "openai";
  //   const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  //   const chatCompletion = await openai.chat.completions.create({
  //     messages: [{
  //       role: state.messages[0]._getType(),
  //       content: state.messages[0].content,
  //     }],
  //     model: "gpt-4o-mini",
  //   });
  console.log("Current state:", state);
  // Placeholder reply — swap in a real model call (see examples above).
  return {
    messages: [
      {
        role: "assistant",
        content: `Hi there! How are you?`,
      },
    ],
  };
};
/**
* Routing function: Determines whether to continue research or end the builder.
* This function decides if the gathered information is satisfactory or if more research is needed.
*
* @param state - The current state of the research builder
* @returns Either "callModel" to continue research or END to finish the builder
*/
export const route = (
  state: typeof StateAnnotation.State,
): "__end__" | "callModel" => {
  // Finish once at least one message exists; otherwise loop back to the model.
  return state.messages.length > 0 ? "__end__" : "callModel";
};
// Finally, create the graph itself.
const builder = new StateGraph(StateAnnotation)
  // Add the nodes to do the work.
  // Chaining the nodes together in this way
  // updates the types of the StateGraph instance
  // so you have static type checking when it comes time
  // to add the edges.
  .addNode("callModel", callModel)
  // Regular edges mean "always transition to node B after node A is done"
  // The "__start__" and "__end__" nodes are "virtual" nodes that are always present
  // and represent the beginning and end of the builder.
  .addEdge("__start__", "callModel")
  // Conditional edges optionally route to different nodes (or end)
  .addConditionalEdges("callModel", route);
export const graph = builder.compile();
// Display name for the compiled graph.
graph.name = "New Agent";
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/integration_tests/test_cli.py | import pytest
import requests
from langgraph_cli.templates import TEMPLATE_ID_TO_CONFIG
@pytest.mark.parametrize("template_key", TEMPLATE_ID_TO_CONFIG.keys())
def test_template_urls_work(template_key: str) -> None:
    """Integration test to verify that all template URLs are reachable."""
    _, _, template_url = TEMPLATE_ID_TO_CONFIG[template_key]
    # Bound the request so an unresponsive endpoint fails the test instead of
    # hanging the suite indefinitely (requests has no default timeout).
    response = requests.head(template_url, timeout=10)
    # Returns 302 on a successful HEAD request
    # (requests.head does not follow redirects by default).
    assert response.status_code == 302, f"URL {template_url} is not reachable."
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/test_docker.py | from langgraph_cli.docker import (
DEFAULT_POSTGRES_URI,
DockerCapabilities,
Version,
compose,
)
from langgraph_cli.util import clean_empty_lines
# Baseline capabilities used by these tests: Docker 26.1.1 / Compose 2.27.0
# with healthcheck start_interval support disabled.
DEFAULT_DOCKER_CAPABILITIES = DockerCapabilities(
    version_docker=Version(26, 1, 1),
    version_compose=Version(2, 27, 0),
    healthcheck_start_interval=False,
)
def test_compose_with_no_debugger_and_custom_db():
    """With an external Postgres URI, compose() emits no postgres service or
    volume and points the API container at the custom URI."""
    port = 8123
    custom_postgres_uri = "custom_postgres_uri"
    actual_compose_str = compose(
        DEFAULT_DOCKER_CAPABILITIES, port=port, postgres_uri=custom_postgres_uri
    )
    expected_compose_str = f"""services:
    langgraph-redis:
        image: redis:6
        healthcheck:
            test: redis-cli ping
            interval: 5s
            timeout: 1s
            retries: 5
    langgraph-api:
        ports:
            - "{port}:8000"
        depends_on:
            langgraph-redis:
                condition: service_healthy
        environment:
            REDIS_URI: redis://langgraph-redis:6379
            POSTGRES_URI: {custom_postgres_uri}"""
    assert clean_empty_lines(actual_compose_str) == expected_compose_str
def test_compose_with_no_debugger_and_custom_db_with_healthcheck():
    """When healthcheck_start_interval is enabled, the API service gains a
    healthcheck block in the generated compose file."""
    port = 8123
    custom_postgres_uri = "custom_postgres_uri"
    actual_compose_str = compose(
        DEFAULT_DOCKER_CAPABILITIES._replace(healthcheck_start_interval=True),
        port=port,
        postgres_uri=custom_postgres_uri,
    )
    expected_compose_str = f"""services:
    langgraph-redis:
        image: redis:6
        healthcheck:
            test: redis-cli ping
            interval: 5s
            timeout: 1s
            retries: 5
    langgraph-api:
        ports:
            - "{port}:8000"
        depends_on:
            langgraph-redis:
                condition: service_healthy
        environment:
            REDIS_URI: redis://langgraph-redis:6379
            POSTGRES_URI: {custom_postgres_uri}
        healthcheck:
            test: python /api/healthcheck.py
            interval: 60s
            start_interval: 1s
            start_period: 10s"""
    assert clean_empty_lines(actual_compose_str) == expected_compose_str
def test_compose_with_debugger_and_custom_db():
    """Compose output with a custom Postgres URI.

    NOTE(review): despite the name, this calls compose() with exactly the
    same arguments as test_compose_with_no_debugger_and_custom_db and expects
    identical output — confirm whether a debugger option was meant to be
    passed here.
    """
    port = 8123
    custom_postgres_uri = "custom_postgres_uri"
    actual_compose_str = compose(
        DEFAULT_DOCKER_CAPABILITIES,
        port=port,
        postgres_uri=custom_postgres_uri,
    )
    expected_compose_str = f"""services:
    langgraph-redis:
        image: redis:6
        healthcheck:
            test: redis-cli ping
            interval: 5s
            timeout: 1s
            retries: 5
    langgraph-api:
        ports:
            - "{port}:8000"
        depends_on:
            langgraph-redis:
                condition: service_healthy
        environment:
            REDIS_URI: redis://langgraph-redis:6379
            POSTGRES_URI: {custom_postgres_uri}"""
    assert clean_empty_lines(actual_compose_str) == expected_compose_str
def test_compose_with_debugger_and_default_db():
    """Without a custom URI, compose() provisions its own postgres service
    plus a named volume, and wires the API container to it.

    NOTE(review): the name mentions a debugger, but no debugger option is
    passed and no debugger service appears in the expected output — confirm
    the intended coverage.
    """
    port = 8123
    actual_compose_str = compose(DEFAULT_DOCKER_CAPABILITIES, port=port)
    expected_compose_str = f"""volumes:
    langgraph-data:
        driver: local
services:
    langgraph-redis:
        image: redis:6
        healthcheck:
            test: redis-cli ping
            interval: 5s
            timeout: 1s
            retries: 5
    langgraph-postgres:
        image: postgres:16
        ports:
            - "5433:5432"
        environment:
            POSTGRES_DB: postgres
            POSTGRES_USER: postgres
            POSTGRES_PASSWORD: postgres
        volumes:
            - langgraph-data:/var/lib/postgresql/data
        healthcheck:
            test: pg_isready -U postgres
            start_period: 10s
            timeout: 1s
            retries: 5
            interval: 5s
    langgraph-api:
        ports:
            - "{port}:8000"
        depends_on:
            langgraph-redis:
                condition: service_healthy
            langgraph-postgres:
                condition: service_healthy
        environment:
            REDIS_URI: redis://langgraph-redis:6379
            POSTGRES_URI: {DEFAULT_POSTGRES_URI}"""
    assert clean_empty_lines(actual_compose_str) == expected_compose_str
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/helpers.py | def clean_empty_lines(input_str: str):
return "\n".join(filter(None, input_str.splitlines()))
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/agent.py | import asyncio
import os
from typing import Annotated, Sequence, TypedDict
from langchain_core.language_models.fake_chat_models import FakeListChatModel
from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage
from langgraph.graph import END, StateGraph, add_messages
# check that env var is present
os.environ["SOME_ENV_VAR"]
class AgentState(TypedDict):
    """Graph state for the fake agent exercised by the CLI tests."""
    # Bytes-valued fields; call_model asserts these round-trip intact
    # between steps.
    some_bytes: bytes
    some_byte_array: bytearray
    dict_with_bytes: dict[str, bytes]
    # Reducer-managed conversation history.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Optional delay in seconds applied by call_model before responding.
    sleep: int
async def call_model(state, config):
    """Fake model node: optionally sleeps, verifies the bytes payloads on
    later turns, resets the fake model on fresh human input, and returns the
    model reply together with the bytes payloads."""
    delay = state.get("sleep")
    if delay:
        await asyncio.sleep(delay)
    history = state["messages"]
    if len(history) > 1:
        # After the first turn, the bytes fields written below must have
        # survived intact.
        assert state["some_bytes"] == b"some_bytes"
        assert state["some_byte_array"] == bytearray(b"some_byte_array")
        assert state["dict_with_bytes"] == {"more_bytes": b"more_bytes"}
    if isinstance(history[-1], HumanMessage):
        # hacky way to reset model to the "first" response
        model.i = 0
    response = await model.ainvoke(history)
    return {
        "messages": [response],
        "some_bytes": b"some_bytes",
        "some_byte_array": bytearray(b"some_byte_array"),
        "dict_with_bytes": {"more_bytes": b"more_bytes"},
    }
def call_tool(state):
    """Fake tool node: echoes the last message content back as a ToolMessage."""
    content = state["messages"][-1].content
    tool_reply = ToolMessage(
        f"tool_call__{content}", tool_call_id="tool_call_id"
    )
    return {"messages": [tool_reply]}
def should_continue(state):
    """Route to END once the model answers "end"; otherwise go to the tool."""
    last = state["messages"][-1]
    return END if last.content == "end" else "tool"
# NOTE: the model cycles through responses infinitely here
model = FakeListChatModel(responses=["begin", "end"])
# Wiring: agent -> tool -> agent ... until should_continue sees "end".
workflow = StateGraph(AgentState)
workflow.add_node("agent", call_model)
workflow.add_node("tool", call_tool)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
    "agent",
    should_continue,
)
workflow.add_edge("tool", "agent")
# Compiled graph exposed at module level for the test harness to load.
graph = workflow.compile()
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/conftest.py | import os
from unittest.mock import patch
import pytest
@pytest.fixture(autouse=True)
def disable_analytics_env() -> None:
    """Pin the LANGGRAPH_CLI_NO_ANALYTICS env var for every unit test."""
    # First check if the environment variable is already set, if so, log a warning prior
    # to overriding it.
    if "LANGGRAPH_CLI_NO_ANALYTICS" in os.environ:
        print("⚠️ LANGGRAPH_CLI_NO_ANALYTICS is set. Overriding it for the test.")
    # NOTE(review): the fixture name says "disable" but the variable is forced
    # to "0" — confirm that "0" (not "1") is the value langgraph_cli treats as
    # analytics-disabled.
    with patch.dict(os.environ, {"LANGGRAPH_CLI_NO_ANALYTICS": "0"}):
        # patch.dict restores the prior value when the test finishes.
        yield
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/test_config.json | {
"python_version": "3.12",
"pip_config_file": "pipconfig.txt",
"dockerfile_lines": [
"ARG meow=woof"
],
"dependencies": [
"langchain_openai",
"."
],
"graphs": {
"agent": "graphs/agent.py:graph"
},
"env": ".env"
}
|
0 | lc_public_repos/langgraph/libs/cli/tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/test_config.py | import json
import os
import pathlib
import tempfile
import click
import pytest
from langgraph_cli.config import (
config_to_compose,
config_to_docker,
validate_config,
validate_config_file,
)
from langgraph_cli.util import clean_empty_lines
PATH_TO_CONFIG = pathlib.Path(__file__).parent / "test_config.json"
def test_validate_config():
    """validate_config fills defaults for a minimal config, round-trips a
    full config, and rejects malformed/unsupported python_version values and
    missing required keys."""
    # minimal config
    expected_config = {
        "dependencies": ["."],
        "graphs": {
            "agent": "./agent.py:graph",
        },
    }
    expected_config = {
        "python_version": "3.11",
        "pip_config_file": None,
        "dockerfile_lines": [],
        "env": {},
        "store": None,
        **expected_config,
    }
    actual_config = validate_config(expected_config)
    assert actual_config == expected_config
    # full config
    env = ".env"
    expected_config = {
        "python_version": "3.12",
        "pip_config_file": "pipconfig.txt",
        "dockerfile_lines": ["ARG meow"],
        "dependencies": [".", "langchain"],
        "graphs": {
            "agent": "./agent.py:graph",
        },
        "env": env,
        "store": None,
    }
    actual_config = validate_config(expected_config)
    assert actual_config == expected_config
    expected_config["python_version"] = "3.13"
    actual_config = validate_config(expected_config)
    assert actual_config == expected_config
    # check wrong python version raises
    with pytest.raises(click.UsageError):
        validate_config(
            {
                "python_version": "3.9",
            }
        )
    # check missing dependencies key raises
    with pytest.raises(click.UsageError):
        validate_config(
            {"python_version": "3.9", "graphs": {"agent": "./agent.py:graph"}},
        )
    # check missing graphs key raises
    with pytest.raises(click.UsageError):
        validate_config({"python_version": "3.9", "dependencies": ["."]})
    with pytest.raises(click.UsageError) as exc_info:
        validate_config({"python_version": "3.11.0"})
    assert "Invalid Python version format" in str(exc_info.value)
    with pytest.raises(click.UsageError) as exc_info:
        validate_config({"python_version": "3"})
    assert "Invalid Python version format" in str(exc_info.value)
    with pytest.raises(click.UsageError) as exc_info:
        validate_config({"python_version": "abc.def"})
    assert "Invalid Python version format" in str(exc_info.value)
    with pytest.raises(click.UsageError) as exc_info:
        validate_config({"python_version": "3.10"})
    assert "Minimum required version" in str(exc_info.value)
def test_validate_config_file():
    """validate_config_file enforces package.json 'engines' rules for Node
    configs (major-only, >= 20, node engine only, valid JSON) and ignores
    package.json problems for Python configs."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir_path = pathlib.Path(tmpdir)
        config_path = tmpdir_path / "langgraph.json"
        node_config = {"node_version": "20", "graphs": {"agent": "./agent.js:graph"}}
        with open(config_path, "w") as f:
            json.dump(node_config, f)
        validate_config_file(config_path)
        package_json = {"name": "test", "engines": {"node": "20"}}
        with open(tmpdir_path / "package.json", "w") as f:
            json.dump(package_json, f)
        validate_config_file(config_path)
        package_json["engines"]["node"] = "20.18"
        with open(tmpdir_path / "package.json", "w") as f:
            json.dump(package_json, f)
        with pytest.raises(click.UsageError, match="Use major version only"):
            validate_config_file(config_path)
        package_json["engines"] = {"node": "18"}
        with open(tmpdir_path / "package.json", "w") as f:
            json.dump(package_json, f)
        with pytest.raises(click.UsageError, match="must be >= 20"):
            validate_config_file(config_path)
        package_json["engines"] = {"node": "20", "deno": "1.0"}
        with open(tmpdir_path / "package.json", "w") as f:
            json.dump(package_json, f)
        with pytest.raises(click.UsageError, match="Only 'node' engine is supported"):
            validate_config_file(config_path)
        with open(tmpdir_path / "package.json", "w") as f:
            f.write("{invalid json")
        with pytest.raises(click.UsageError, match="Invalid package.json"):
            validate_config_file(config_path)
        python_config = {
            "python_version": "3.11",
            "dependencies": ["."],
            "graphs": {"agent": "./agent.py:graph"},
        }
        with open(config_path, "w") as f:
            json.dump(python_config, f)
        validate_config_file(config_path)
        # With a Python config, every previously-invalid package.json
        # content must validate cleanly.
        for package_content in [
            {"name": "test"},
            {"engines": {"node": "18"}},
            {"engines": {"node": "20", "deno": "1.0"}},
            "{invalid json",
        ]:
            with open(tmpdir_path / "package.json", "w") as f:
                if isinstance(package_content, dict):
                    json.dump(package_content, f)
                else:
                    f.write(package_content)
            validate_config_file(config_path)
# config_to_docker
def test_config_to_docker_simple():
    """Minimal local-dir dependency: the project is wrapped in a generated
    __outer_* pyproject and installed editable."""
    graphs = {"agent": "./agent.py:graph"}
    actual_docker_stdin = config_to_docker(
        PATH_TO_CONFIG,
        validate_config({"dependencies": ["."], "graphs": graphs}),
        "langchain/langgraph-api",
    )
    expected_docker_stdin = """\
FROM langchain/langgraph-api:3.11
ADD . /deps/__outer_unit_tests/unit_tests
RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
WORKDIR /deps/__outer_unit_tests/unit_tests\
"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
def test_config_to_docker_pipconfig():
    """pip_config_file is ADDed into the image and exported via
    PIP_CONFIG_FILE for the install step."""
    graphs = {"agent": "./agent.py:graph"}
    actual_docker_stdin = config_to_docker(
        PATH_TO_CONFIG,
        validate_config(
            {
                "dependencies": ["."],
                "graphs": graphs,
                "pip_config_file": "pipconfig.txt",
            }
        ),
        "langchain/langgraph-api",
    )
    expected_docker_stdin = """\
FROM langchain/langgraph-api:3.11
ADD pipconfig.txt /pipconfig.txt
ADD . /deps/__outer_unit_tests/unit_tests
RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
RUN PIP_CONFIG_FILE=/pipconfig.txt PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
WORKDIR /deps/__outer_unit_tests/unit_tests\
"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
def test_config_to_docker_invalid_inputs():
    """Missing local dependency dirs or graph modules raise FileNotFoundError."""
    # test missing local dependencies
    with pytest.raises(FileNotFoundError):
        graphs = {"agent": "tests/unit_tests/agent.py:graph"}
        config_to_docker(
            PATH_TO_CONFIG,
            validate_config({"dependencies": ["./missing"], "graphs": graphs}),
            "langchain/langgraph-api",
        )
    # test missing local module
    with pytest.raises(FileNotFoundError):
        graphs = {"agent": "./missing_agent.py:graph"}
        config_to_docker(
            PATH_TO_CONFIG,
            validate_config({"dependencies": ["."], "graphs": graphs}),
            "langchain/langgraph-api",
        )
def test_config_to_docker_local_deps():
    """A local subdirectory dependency is mounted under __outer_<name>/src
    and graph paths are rewritten accordingly; no WORKDIR is emitted."""
    graphs = {"agent": "./graphs/agent.py:graph"}
    actual_docker_stdin = config_to_docker(
        PATH_TO_CONFIG,
        validate_config(
            {
                "dependencies": ["./graphs"],
                "graphs": graphs,
            }
        ),
        "langchain/langgraph-api-custom",
    )
    expected_docker_stdin = """\
FROM langchain/langgraph-api-custom:3.11
ADD ./graphs /deps/__outer_graphs/src
RUN set -ex && \\
            for line in '[project]' \\
            'name = "graphs"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_graphs/pyproject.toml; \\
            done
RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_graphs/src/agent.py:graph"}'\
"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
def test_config_to_docker_pyproject():
    """When the project root already has a pyproject.toml, config_to_docker
    must not generate the __outer_* wrapper package.

    The temporary pyproject.toml is always removed, even when the call or a
    later assertion fails, so a failure here cannot poison sibling tests
    that assume the file does not exist.
    """
    pyproject_str = """[project]
name = "custom"
version = "0.1"
dependencies = ["langchain"]"""
    pyproject_path = "tests/unit_tests/pyproject.toml"
    with open(pyproject_path, "w") as f:
        f.write(pyproject_str)
    try:
        graphs = {"agent": "./graphs/agent.py:graph"}
        actual_docker_stdin = config_to_docker(
            PATH_TO_CONFIG,
            validate_config(
                {
                    "dependencies": ["."],
                    "graphs": graphs,
                }
            ),
            "langchain/langgraph-api",
        )
    finally:
        # Clean up unconditionally (the original removed the file only on
        # the success path, leaking it on any exception).
        os.remove(pyproject_path)
    expected_docker_stdin = """FROM langchain/langgraph-api:3.11
ADD . /deps/unit_tests
RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
ENV LANGSERVE_GRAPHS='{"agent": "/deps/unit_tests/graphs/agent.py:graph"}'
WORKDIR /deps/unit_tests"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
def test_config_to_docker_end_to_end():
    """All docker options combined: custom python version, extra dockerfile
    lines, pip config, pypi deps installed separately from the local dir."""
    graphs = {"agent": "./graphs/agent.py:graph"}
    actual_docker_stdin = config_to_docker(
        PATH_TO_CONFIG,
        validate_config(
            {
                "python_version": "3.12",
                "dependencies": ["./graphs/", "langchain", "langchain_openai"],
                "graphs": graphs,
                "pip_config_file": "pipconfig.txt",
                "dockerfile_lines": ["ARG meow", "ARG foo"],
            }
        ),
        "langchain/langgraph-api",
    )
    expected_docker_stdin = """FROM langchain/langgraph-api:3.12
ARG meow
ARG foo
ADD pipconfig.txt /pipconfig.txt
RUN PIP_CONFIG_FILE=/pipconfig.txt PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt langchain langchain_openai
ADD ./graphs/ /deps/__outer_graphs/src
RUN set -ex && \\
            for line in '[project]' \\
            'name = "graphs"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_graphs/pyproject.toml; \\
            done
RUN PIP_CONFIG_FILE=/pipconfig.txt PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_graphs/src/agent.py:graph"}'"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
# node.js build used for LangGraph Cloud
def test_config_to_docker_nodejs():
    """Node configs use the langgraphjs image, npm install, unrewritten
    graph paths, and an optional prebuild script step."""
    graphs = {"agent": "./graphs/agent.js:graph"}
    actual_docker_stdin = config_to_docker(
        PATH_TO_CONFIG,
        validate_config(
            {
                "node_version": "20",
                "graphs": graphs,
                "dockerfile_lines": ["ARG meow", "ARG foo"],
            }
        ),
        "langchain/langgraphjs-api",
    )
    expected_docker_stdin = """FROM langchain/langgraphjs-api:20
ARG meow
ARG foo
ADD . /deps/unit_tests
RUN cd /deps/unit_tests && npm i
ENV LANGSERVE_GRAPHS='{"agent": "./graphs/agent.js:graph"}'
WORKDIR /deps/unit_tests
RUN (test ! -f /api/langgraph_api/js/build.mts && echo "Prebuild script not found, skipping") || tsx /api/langgraph_api/js/build.mts"""
    assert clean_empty_lines(actual_docker_stdin) == expected_docker_stdin
# config_to_compose
def test_config_to_compose_simple_config():
    """config_to_compose inlines the generated Dockerfile via
    dockerfile_inline with pull_policy: build."""
    graphs = {"agent": "./agent.py:graph"}
    expected_compose_stdin = """\
pull_policy: build
build:
    context: .
    dockerfile_inline: |
        FROM langchain/langgraph-api:3.11
        ADD . /deps/__outer_unit_tests/unit_tests
        RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
        RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
        ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
        WORKDIR /deps/__outer_unit_tests/unit_tests
    """
    actual_compose_stdin = config_to_compose(
        PATH_TO_CONFIG,
        validate_config({"dependencies": ["."], "graphs": graphs}),
        "langchain/langgraph-api",
    )
    assert clean_empty_lines(actual_compose_stdin) == expected_compose_stdin
def test_config_to_compose_env_vars():
    """Inline env dict entries are rendered ahead of the build section."""
    graphs = {"agent": "./agent.py:graph"}
    expected_compose_stdin = """ OPENAI_API_KEY: "key"
pull_policy: build
build:
    context: .
    dockerfile_inline: |
        FROM langchain/langgraph-api-custom:3.11
        ADD . /deps/__outer_unit_tests/unit_tests
        RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
        RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
        ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
        WORKDIR /deps/__outer_unit_tests/unit_tests
    """
    openai_api_key = "key"
    actual_compose_stdin = config_to_compose(
        PATH_TO_CONFIG,
        validate_config(
            {
                "dependencies": ["."],
                "graphs": graphs,
                "env": {"OPENAI_API_KEY": openai_api_key},
            }
        ),
        "langchain/langgraph-api-custom",
    )
    assert clean_empty_lines(actual_compose_stdin) == expected_compose_stdin
def test_config_to_compose_env_file():
    """A string env value becomes an env_file entry in the compose output."""
    graphs = {"agent": "./agent.py:graph"}
    expected_compose_stdin = """\
env_file: .env
pull_policy: build
build:
    context: .
    dockerfile_inline: |
        FROM langchain/langgraph-api:3.11
        ADD . /deps/__outer_unit_tests/unit_tests
        RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
        RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
        ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
        WORKDIR /deps/__outer_unit_tests/unit_tests
    """
    actual_compose_stdin = config_to_compose(
        PATH_TO_CONFIG,
        validate_config({"dependencies": ["."], "graphs": graphs, "env": ".env"}),
        "langchain/langgraph-api",
    )
    assert clean_empty_lines(actual_compose_stdin) == expected_compose_stdin
def test_config_to_compose_watch():
    """watch=True appends a develop/watch section that rebuilds on changes
    to the config file or the project directory."""
    graphs = {"agent": "./agent.py:graph"}
    expected_compose_stdin = """\
pull_policy: build
build:
    context: .
    dockerfile_inline: |
        FROM langchain/langgraph-api:3.11
        ADD . /deps/__outer_unit_tests/unit_tests
        RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
        RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
        ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
        WORKDIR /deps/__outer_unit_tests/unit_tests
    develop:
        watch:
            - path: test_config.json
              action: rebuild
            - path: .
              action: rebuild\
    """
    actual_compose_stdin = config_to_compose(
        PATH_TO_CONFIG,
        validate_config({"dependencies": ["."], "graphs": graphs}),
        "langchain/langgraph-api",
        watch=True,
    )
    assert clean_empty_lines(actual_compose_stdin) == expected_compose_stdin
def test_config_to_compose_end_to_end():
    """env_file and watch options combined in one compose render."""
    # test all of the above + langgraph API path
    graphs = {"agent": "./agent.py:graph"}
    expected_compose_stdin = """\
env_file: .env
pull_policy: build
build:
    context: .
    dockerfile_inline: |
        FROM langchain/langgraph-api:3.11
        ADD . /deps/__outer_unit_tests/unit_tests
        RUN set -ex && \\
            for line in '[project]' \\
            'name = "unit_tests"' \\
            'version = "0.1"' \\
            '[tool.setuptools.package-data]' \\
            '"*" = ["**/*"]'; do \\
              echo "$line" >> /deps/__outer_unit_tests/pyproject.toml; \\
            done
        RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
        ENV LANGSERVE_GRAPHS='{"agent": "/deps/__outer_unit_tests/unit_tests/agent.py:graph"}'
        WORKDIR /deps/__outer_unit_tests/unit_tests
    develop:
        watch:
            - path: test_config.json
              action: rebuild
            - path: .
              action: rebuild\
    """
    actual_compose_stdin = config_to_compose(
        PATH_TO_CONFIG,
        validate_config({"dependencies": ["."], "graphs": graphs, "env": ".env"}),
        "langchain/langgraph-api",
        watch=True,
    )
    assert clean_empty_lines(actual_compose_stdin) == expected_compose_stdin
|
0 | lc_public_repos/langgraph/libs/cli/tests/unit_tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/cli/test_cli.py | import json
import pathlib
import shutil
import tempfile
from contextlib import contextmanager
from pathlib import Path
from click.testing import CliRunner
from langgraph_cli.cli import cli, prepare_args_and_stdin
from langgraph_cli.config import Config, validate_config
from langgraph_cli.docker import DEFAULT_POSTGRES_URI, DockerCapabilities, Version
from langgraph_cli.util import clean_empty_lines
# Capability fixture for the CLI tests: Docker 26.1.1 / Compose 2.27.0 with
# healthcheck start_interval support enabled.
DEFAULT_DOCKER_CAPABILITIES = DockerCapabilities(
    version_docker=Version(26, 1, 1),
    version_compose=Version(2, 27, 0),
    healthcheck_start_interval=True,
)
@contextmanager
def temporary_config_folder(config_content: dict):
    """Yield a temp directory containing ``config.json`` with the given content.

    The directory and everything in it are removed when the context exits,
    even if the body raises.
    """
    # TemporaryDirectory handles both creation and cleanup, replacing the
    # manual mkdtemp / try / finally / rmtree dance.
    with tempfile.TemporaryDirectory() as temp_dir:
        # Write the provided dictionary content to config.json
        config_path = Path(temp_dir) / "config.json"
        with open(config_path, "w", encoding="utf-8") as config_file:
            json.dump(config_content, config_file)
        # Yield the directory (not the file) for use within the context
        yield config_path.parent
def test_prepare_args_and_stdin() -> None:
    """End-to-end render of compose args and stdin, including the debugger
    service wired to the requested debugger port and base URL."""
    # this basically serves as an end-to-end test for using config and docker helpers
    config_path = pathlib.Path("./langgraph.json")
    config = validate_config(
        Config(dependencies=["."], graphs={"agent": "agent.py:graph"})
    )
    port = 8000
    debugger_port = 8001
    debugger_graph_url = f"http://127.0.0.1:{port}"
    actual_args, actual_stdin = prepare_args_and_stdin(
        capabilities=DEFAULT_DOCKER_CAPABILITIES,
        config_path=config_path,
        config=config,
        docker_compose=pathlib.Path("custom-docker-compose.yml"),
        port=port,
        debugger_port=debugger_port,
        debugger_base_url=debugger_graph_url,
        watch=True,
    )
    expected_args = [
        "--project-directory",
        ".",
        "-f",
        "custom-docker-compose.yml",
        "-f",
        "-",
    ]
    expected_stdin = f"""volumes:
    langgraph-data:
        driver: local
services:
    langgraph-redis:
        image: redis:6
        healthcheck:
            test: redis-cli ping
            interval: 5s
            timeout: 1s
            retries: 5
    langgraph-postgres:
        image: postgres:16
        ports:
            - "5433:5432"
        environment:
            POSTGRES_DB: postgres
            POSTGRES_USER: postgres
            POSTGRES_PASSWORD: postgres
        volumes:
            - langgraph-data:/var/lib/postgresql/data
        healthcheck:
            test: pg_isready -U postgres
            start_period: 10s
            timeout: 1s
            retries: 5
            interval: 60s
            start_interval: 1s
    langgraph-debugger:
        image: langchain/langgraph-debugger
        restart: on-failure
        depends_on:
            langgraph-postgres:
                condition: service_healthy
        ports:
            - "{debugger_port}:3968"
        environment:
            VITE_STUDIO_LOCAL_GRAPH_URL: {debugger_graph_url}
    langgraph-api:
        ports:
            - "8000:8000"
        depends_on:
            langgraph-redis:
                condition: service_healthy
            langgraph-postgres:
                condition: service_healthy
        environment:
            REDIS_URI: redis://langgraph-redis:6379
            POSTGRES_URI: {DEFAULT_POSTGRES_URI}
        healthcheck:
            test: python /api/healthcheck.py
            interval: 60s
            start_interval: 1s
            start_period: 10s
        pull_policy: build
        build:
            context: .
            dockerfile_inline: |
                FROM langchain/langgraph-api:3.11
                ADD . /deps/
                RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt -e /deps/*
                ENV LANGSERVE_GRAPHS='{{"agent": "agent.py:graph"}}'
                WORKDIR /deps/
        develop:
            watch:
                - path: langgraph.json
                  action: rebuild
                - path: .
                  action: rebuild\
    """
    assert actual_args == expected_args
    assert clean_empty_lines(actual_stdin) == expected_stdin
def test_version_option() -> None:
    """Invoke the CLI with --version and verify it prints version info."""
    result = CliRunner().invoke(cli, ["--version"])
    # Exit code 0 means the option was handled without error.
    assert result.exit_code == 0, "Expected exit code 0 for --version option"
    # The banner must identify the tool by name.
    assert (
        "LangGraph CLI, version" in result.output
    ), "Expected version information in output"
def test_dockerfile_command_basic() -> None:
    """Generate a Dockerfile from a minimal node-based configuration."""
    runner = CliRunner()
    cfg = {
        # Minimal valid config: a node version plus one graph entry.
        "node_version": "20",
        "graphs": {"agent": "agent.py:graph"},
    }
    with temporary_config_folder(cfg) as temp_dir:
        target = temp_dir / "Dockerfile"
        result = runner.invoke(
            cli,
            ["dockerfile", str(target), "--config", str(temp_dir / "config.json")],
        )
        # Command must succeed and report the created file...
        assert result.exit_code == 0, result.output
        assert "✅ Created: Dockerfile" in result.output
        # ...and the Dockerfile must actually exist on disk.
        assert target.exists()
def test_dockerfile_command_with_docker_compose() -> None:
    """Generate Dockerfile plus Docker Compose scaffolding from a config."""
    runner = CliRunner()
    cfg = {
        "dependencies": ["./my_agent"],
        "graphs": {"agent": "./my_agent/agent.py:graph"},
        "env": ".env",
    }
    with temporary_config_folder(cfg) as temp_dir:
        dockerfile = temp_dir / "Dockerfile"
        # The referenced graph module must exist for the config to validate.
        agent_file = temp_dir / "my_agent" / "agent.py"
        agent_file.parent.mkdir(parents=True, exist_ok=True)
        agent_file.touch()
        result = runner.invoke(
            cli,
            [
                "dockerfile",
                str(dockerfile),
                "--config",
                str(temp_dir / "config.json"),
                "--add-docker-compose",
            ],
        )
        assert result.exit_code == 0
        # Every generated artifact is reported; .env may be skipped if present.
        assert "✅ Created: Dockerfile" in result.output
        assert "✅ Created: .dockerignore" in result.output
        assert "✅ Created: docker-compose.yml" in result.output
        assert (
            "✅ Created: .env" in result.output or "➖ Skipped: .env" in result.output
        )
        assert "🎉 Files generated successfully" in result.output
        # And every artifact must exist on disk.
        assert dockerfile.exists()
        assert (temp_dir / ".dockerignore").exists()
        assert (temp_dir / "docker-compose.yml").exists()
        assert (temp_dir / ".env").exists() or "➖ Skipped: .env" in result.output
def test_dockerfile_command_with_bad_config() -> None:
    """Test the 'dockerfile' command when the --config path does not exist."""
    runner = CliRunner()
    config_content = {
        "node_version": "20"  # Add any other necessary configuration fields
    }
    with temporary_config_folder(config_content) as temp_dir:
        save_path = temp_dir / "Dockerfile"
        # Point --config at a file name that was never written to disk.
        result = runner.invoke(
            cli,
            ["dockerfile", str(save_path), "--config", str(temp_dir / "conf.json")],
        )
        # Click reports bad option values (nonexistent path) with exit code 2.
        assert result.exit_code == 2
        assert "conf.json' does not exist" in result.output
|
0 | lc_public_repos/langgraph/libs/cli/tests/unit_tests | lc_public_repos/langgraph/libs/cli/tests/unit_tests/cli/test_templates.py | """Unit tests for the 'new' CLI command.
This command creates a new LangGraph project using a specified template.
"""
import os
from io import BytesIO
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import MagicMock, patch
from urllib import request
from zipfile import ZipFile
from click.testing import CliRunner
from langgraph_cli.cli import cli
from langgraph_cli.templates import TEMPLATE_ID_TO_CONFIG
@patch.object(request, "urlopen")
def test_create_new_with_mocked_download(mock_urlopen: MagicMock) -> None:
    """Run the 'new' command against a stubbed urllib template download."""
    # Build an in-memory ZIP archive to stand in for the template payload.
    archive = BytesIO()
    with ZipFile(archive, "w") as zf:
        zf.writestr("test-file.txt", "Test content.")
    # urlopen is used as a context manager, so the mock must support `with`.
    response = MagicMock()
    response.read.return_value = archive.getvalue()
    response.__enter__.return_value = response
    response.status = 200
    mock_urlopen.return_value = response
    with TemporaryDirectory() as temp_dir:
        # Pick the first registered template id for the test.
        template = next(iter(TEMPLATE_ID_TO_CONFIG))
        result = CliRunner().invoke(cli, ["new", temp_dir, "--template", template])
        # Command must succeed and announce the new project.
        assert result.exit_code == 0, result.output
        assert (
            "New project created" in result.output
        ), "Expected success message in output."
        # The archive contents must have been extracted into the target dir.
        assert os.listdir(temp_dir), "Expected files to be created in temp directory."
        extracted = [f.name for f in Path(temp_dir).glob("*")]
        assert (
            "test-file.txt" in extracted
        ), "Expected 'test-file.txt' in the extracted content."
def test_invalid_template_id() -> None:
    """An unknown template id should fail with a helpful error message."""
    result = CliRunner().invoke(
        cli, ["new", "dummy_path", "--template", "invalid-template-id"]
    )
    # The command must not succeed for an unknown template.
    assert result.exit_code != 0, "Expected non-zero exit code for invalid template."
    assert (
        "Template 'invalid-template-id' not found" in result.output
    ), "Expected error message in output."
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/Makefile | .PHONY: run_w_override
# All of these targets run commands rather than build files of the same
# name — declare them phony so make always executes them.
.PHONY: run run_faux run_graphs_reqs_a run_graphs_reqs_b

# Run the example with live file watching, using local images.
run:
	poetry run langgraph up --watch --no-pull

# Run from the nested graphs/ project.
run_faux:
	cd graphs && poetry run langgraph up --no-pull

run_graphs_reqs_a:
	cd graphs_reqs_a && poetry run langgraph up --no-pull

run_graphs_reqs_b:
	cd graphs_reqs_b && poetry run langgraph up --no-pull
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/poetry.lock | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]
[[package]]
name = "certifi"
version = "2024.7.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
{file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
]
[[package]]
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.1"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"},
{file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
[[package]]
name = "httpx-sse"
version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
{file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]
[[package]]
name = "langgraph-cli"
version = "0.1.52"
description = "CLI for interacting with LangGraph API"
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
click = "^8.1.7"
[package.source]
type = "directory"
url = ".."
[[package]]
name = "langgraph-sdk"
version = "0.1.29"
description = "SDK for interacting with LangGraph API"
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
httpx = ">=0.25.2"
httpx-sse = ">=0.4.0"
orjson = ">=3.10.1"
[package.source]
type = "directory"
url = "../../sdk-py"
[[package]]
name = "orjson"
version = "3.10.5"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"},
{file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"},
{file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"},
{file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"},
{file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"},
{file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"},
{file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"},
{file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"},
{file = "orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"},
{file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"},
{file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"},
{file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"},
{file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"},
{file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"},
{file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"},
{file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"},
{file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"},
{file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"},
{file = "orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"},
{file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"},
{file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"},
{file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"},
{file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"},
{file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"},
{file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"},
{file = "orjson-3.10.5-cp312-none-win32.whl", hash = "sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"},
{file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = "sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"},
{file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"},
{file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"},
{file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"},
{file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"},
{file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"},
{file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"},
{file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"},
{file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"},
{file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"},
{file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"},
{file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"},
{file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"},
{file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"},
{file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"},
{file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"},
{file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"},
{file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"},
{file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"},
{file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[metadata]
lock-version = "2.0"
python-versions = "^3.9.0,<4.0"
content-hash = "ec5109729f30d2033a10a10e8f8d3ed94c7d96d5d31025b4815b0123664bb063"
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/pyproject.toml | [tool.poetry]
name = "langgraph-examples"
version = "0.1.0"
description = ""
authors = []
readme = "README.md"
packages = []
package-mode = false
[tool.poetry.dependencies]
python = "^3.9.0,<4.0"
langgraph-cli = {path = "../../cli", develop = true}
langgraph-sdk = {path = "../../sdk-py", develop = true}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/.env.example | OPENAI_API_KEY=placeholder
ANTHROPIC_API_KEY=placeholder
TAVILY_API_KEY=placeholder
LANGCHAIN_TRACING_V2=false
LANGCHAIN_ENDPOINT=placeholder
LANGCHAIN_API_KEY=placeholder
LANGCHAIN_PROJECT=placeholder
LANGGRAPH_AUTH_TYPE=noop
LANGSMITH_AUTH_ENDPOINT=placeholder
LANGSMITH_TENANT_ID=placeholder |
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/langgraph.json | {
"pip_config_file": "./pipconf.txt",
"dependencies": [
"langchain_community",
"langchain_anthropic",
"langchain_openai",
"wikipedia",
"scikit-learn",
"./graphs"
],
"graphs": {
"agent": "./graphs/agent.py:graph",
"storm": "./graphs/storm.py:graph"
},
"env": ".env"
}
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/examples/pipconf.txt | [global]
timeout = 60
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs/agent.py | from typing import Annotated, Literal, Sequence, TypedDict
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, add_messages
from langgraph.prebuilt import ToolNode
tools = [TavilySearchResults(max_results=1)]
model_anth = ChatAnthropic(temperature=0, model_name="claude-3-sonnet-20240229")
model_oai = ChatOpenAI(temperature=0)
model_anth = model_anth.bind_tools(tools)
model_oai = model_oai.bind_tools(tools)
class AgentState(TypedDict):
messages: Annotated[Sequence[BaseMessage], add_messages]
# Define the function that determines whether to continue or not
def should_continue(state):
    """Route to "continue" while the last message requests tool calls, else "end"."""
    last_message = state["messages"][-1]
    # Pending tool calls mean the agent wants to take another action step.
    return "continue" if last_message.tool_calls else "end"
# Define the function that calls the model
def call_model(state, config):
    """Invoke the configured chat model ("anthropic" by default) on the history."""
    provider = config["configurable"].get("model", "anthropic")
    # Pick the tool-bound model instance matching the requested provider.
    chosen = model_anth if provider == "anthropic" else model_oai
    response = chosen.invoke(state["messages"])
    # Returned as a list so the add_messages reducer appends it to state.
    return {"messages": [response]}
# Define the function to execute tools
tool_node = ToolNode(tools)
class ConfigSchema(TypedDict):
model: Literal["anthropic", "openai"]
# Define a new graph
workflow = StateGraph(AgentState, config_schema=ConfigSchema)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
# First, we define the start node. We use `agent`.
# This means these are the edges taken after the `agent` node is called.
"agent",
# Next, we pass in the function that will determine which node is called next.
should_continue,
# Finally we pass in a mapping.
# The keys are strings, and the values are other nodes.
# END is a special node marking that the graph should finish.
# What will happen is we will call `should_continue`, and then the output of that
# will be matched against the keys in this mapping.
# Based on which one it matches, that node will then be called.
{
# If `tools`, then we call the tool node.
"continue": "action",
# Otherwise we finish.
"end": END,
},
)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("action", "agent")
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
graph = workflow.compile()
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs/langgraph.json | {
"python_version": "3.12",
"dependencies": [
"langchain_community",
"langchain_anthropic",
"langchain_openai",
"wikipedia",
"scikit-learn",
"."
],
"graphs": {
"agent": "./agent.py:graph",
"storm": "./storm.py:graph"
},
"env": "../.env"
}
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs/storm.py | import asyncio
import json
from typing import Annotated, List, Optional
from langchain_community.retrievers import WikipediaRetriever
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.vectorstores import SKLearnVectorStore
from langchain_core.documents import Document
from langchain_core.messages import (
AIMessage,
AnyMessage,
HumanMessage,
ToolMessage,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_core.runnables import chain as as_runnable
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langgraph.graph import END, StateGraph
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
fast_llm = ChatOpenAI(model="gpt-3.5-turbo")
# Uncomment for a Fireworks model
# fast_llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", max_tokens=32_000)
long_context_llm = ChatOpenAI(model="gpt-4-turbo-preview")
direct_gen_outline_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a Wikipedia writer. Write an outline for a Wikipedia page about a user-provided topic. Be comprehensive and specific.",
),
("user", "{topic}"),
]
)
class Subsection(BaseModel):
    """One subsection of a Wikipedia-style outline (structured LLM output)."""
    subsection_title: str = Field(..., title="Title of the subsection")
    description: str = Field(..., title="Content of the subsection")
    @property
    def as_str(self) -> str:
        """Render the subsection as a markdown H3 heading plus its body."""
        return f"### {self.subsection_title}\n\n{self.description}".strip()
class Section(BaseModel):
    """A top-level outline section, optionally containing subsections."""
    section_title: str = Field(..., title="Title of the section")
    description: str = Field(..., title="Content of the section")
    subsections: Optional[List[Subsection]] = Field(
        default=None,
        title="Titles and descriptions for each subsection of the Wikipedia page.",
    )
    @property
    def as_str(self) -> str:
        """Render as markdown: H2 heading, body, then each subsection as H3."""
        subsections = "\n\n".join(
            f"### {subsection.subsection_title}\n\n{subsection.description}"
            for subsection in self.subsections or []
        )
        return f"## {self.section_title}\n\n{self.description}\n\n{subsections}".strip()
class Outline(BaseModel):
    """The complete page outline: a title plus its ordered sections."""
    page_title: str = Field(..., title="Title of the Wikipedia page")
    sections: List[Section] = Field(
        default_factory=list,
        title="Titles and descriptions for each section of the Wikipedia page.",
    )
    @property
    def as_str(self) -> str:
        """Render the whole outline as one markdown document (H1 + sections)."""
        sections = "\n\n".join(section.as_str for section in self.sections)
        return f"# {self.page_title}\n\n{sections}".strip()
generate_outline_direct = direct_gen_outline_prompt | fast_llm.with_structured_output(
Outline
)
gen_related_topics_prompt = ChatPromptTemplate.from_template(
"""I'm writing a Wikipedia page for a topic mentioned below. Please identify and recommend some Wikipedia pages on closely related subjects. I'm looking for examples that provide insights into interesting aspects commonly associated with this topic, or examples that help me understand the typical content and structure included in Wikipedia pages for similar topics.
Please list the as many subjects and urls as you can.
Topic of interest: {topic}
"""
)
class RelatedSubjects(BaseModel):
    """Structured output: related topics to research before outlining."""
    topics: List[str] = Field(
        description="Comprehensive list of related subjects as background research.",
    )
expand_chain = gen_related_topics_prompt | fast_llm.with_structured_output(
RelatedSubjects
)
class Editor(BaseModel):
    """An editor persona that interviews the expert from a particular angle."""
    affiliation: str = Field(
        description="Primary affiliation of the editor.",
    )
    name: str = Field(
        description="Name of the editor.",
    )
    role: str = Field(
        description="Role of the editor in the context of the topic.",
    )
    description: str = Field(
        description="Description of the editor's focus, concerns, and motives.",
    )
    @property
    def persona(self) -> str:
        """Multi-line persona summary used to condition the question prompt."""
        return f"Name: {self.name}\nRole: {self.role}\nAffiliation: {self.affiliation}\nDescription: {self.description}\n"
class Perspectives(BaseModel):
    """Structured output: the full panel of editor personas for one topic."""
    editors: List[Editor] = Field(
        description="Comprehensive list of editors with their roles and affiliations.",
        # Add a pydantic validation/restriction to be at most M editors
    )
gen_perspectives_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You need to select a diverse (and distinct) group of Wikipedia editors who will work together to create a comprehensive article on the topic. Each of them represents a different perspective, role, or affiliation related to this topic.\
You can use other Wikipedia pages of related topics for inspiration. For each editor, add a description of what they will focus on.
Wiki page outlines of related topics for inspiration:
{examples}""",
),
("user", "Topic of interest: {topic}"),
]
)
gen_perspectives_chain = gen_perspectives_prompt | ChatOpenAI(
model="gpt-3.5-turbo"
).with_structured_output(Perspectives)
wikipedia_retriever = WikipediaRetriever(load_all_available_meta=True, top_k_results=1)
def format_doc(doc, max_length=1000):
    """Summarize a retrieved Wikipedia document as markdown, capped at max_length."""
    categories = "- ".join(doc.metadata["categories"])
    text = f"### {doc.metadata['title']}\n\nSummary: {doc.page_content}\n\nRelated\n{categories}"
    # Truncate to keep downstream prompts bounded.
    return text[:max_length]
def format_docs(docs):
    """Concatenate per-document summaries, separated by blank lines."""
    formatted = (format_doc(d) for d in docs)
    return "\n\n".join(formatted)
@as_runnable
async def survey_subjects(topic: str):
    """Collect outlines of related Wikipedia pages and derive editor personas."""
    related = await expand_chain.ainvoke({"topic": topic})
    # Fetch one page per related topic; per-item failures come back as
    # exception objects rather than raising.
    results = await wikipedia_retriever.abatch(related.topics, return_exceptions=True)
    # Flatten the successful retrievals, dropping any that errored out.
    docs = [
        doc
        for batch in results
        if not isinstance(batch, BaseException)
        for doc in batch
    ]
    return await gen_perspectives_chain.ainvoke(
        {"examples": format_docs(docs), "topic": topic}
    )
def add_messages(left, right):
    """State reducer: concatenate message updates, coercing scalars to lists."""
    left_list = left if isinstance(left, list) else [left]
    right_list = right if isinstance(right, list) else [right]
    return left_list + right_list
def update_references(references, new_references):
    """State reducer: merge new reference entries into the existing mapping."""
    # Start a fresh dict when no references have been accumulated yet.
    target = references if references else {}
    target.update(new_references)
    return target
def update_editor(editor, new_editor):
    """State reducer: keep the first editor ever set; later updates are ignored."""
    return editor if editor else new_editor
class InterviewState(TypedDict):
    """Shared state for one editor-expert interview conversation."""
    # Conversation history; add_messages concatenates updates.
    messages: Annotated[List[AnyMessage], add_messages]
    # Accumulated references merged via update_references
    # (presumably url -> content — confirm against gen_answer usage).
    references: Annotated[Optional[dict], update_references]
    # The interviewing persona; update_editor keeps only the first value set.
    editor: Annotated[Optional[Editor], update_editor]
gen_qn_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an experienced Wikipedia writer and want to edit a specific page. \
Besides your identity as a Wikipedia writer, you have a specific focus when researching the topic. \
Now, you are chatting with an expert to get information. Ask good questions to get more useful information.
When you have no more questions to ask, say "Thank you so much for your help!" to end the conversation.\
Please only ask one question at a time and don't ask what you have asked before.\
Your questions should be related to the topic you want to write.
Be comprehensive and curious, gaining as much unique insight from the expert as possible.\
Stay true to your specific perspective:
{persona}""",
),
MessagesPlaceholder(variable_name="messages", optional=True),
]
)
def tag_with_name(ai_message: AIMessage, name: str):
    """Attach a sanitized speaker name (spaces/dots -> underscores) to the message."""
    sanitized = name.replace(" ", "_").replace(".", "_")
    ai_message.name = sanitized
    return ai_message
def swap_roles(state: InterviewState, name: str):
    """Re-cast messages so that, from *name*'s point of view, everyone else is human."""
    relabeled = []
    for msg in state["messages"]:
        if isinstance(msg, AIMessage) and msg.name != name:
            # Other participants' AI messages become human turns for this speaker.
            msg = HumanMessage(**msg.dict(exclude={"type"}))
        relabeled.append(msg)
    return {"messages": relabeled}
@as_runnable
async def generate_question(state: InterviewState):
    """Have the editor persona produce the next interview question."""
    editor = state["editor"]
    swap = RunnableLambda(swap_roles).bind(name=editor.name)
    tag = RunnableLambda(tag_with_name).bind(name=editor.name)
    chain = swap | gen_qn_prompt.partial(persona=editor.persona) | fast_llm | tag
    question = await chain.ainvoke(state)
    return {"messages": [question]}
class Queries(BaseModel):
    """Structured output: search-engine queries the expert should run."""

    queries: List[str] = Field(
        description="Comprehensive list of search engine queries to answer the user's questions.",
    )
# Prompt + structured-output chain turning the editor's question into search
# queries. include_raw=True keeps the raw AIMessage (and its tool calls)
# alongside the parsed Queries object, which gen_answer() relies on.
gen_queries_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful research assistant. Query the search engine to answer the user's questions.",
        ),
        MessagesPlaceholder(variable_name="messages", optional=True),
    ]
)
gen_queries_chain = gen_queries_prompt | ChatOpenAI(
    model="gpt-3.5-turbo"
).with_structured_output(Queries, include_raw=True)
class AnswerWithCitations(BaseModel):
    """Structured expert answer plus the URLs it cites."""

    answer: str = Field(
        description="Comprehensive answer to the user's question with citations.",
    )
    cited_urls: List[str] = Field(
        description="List of urls cited in the answer.",
    )

    @property
    def as_str(self) -> str:
        """Render the answer followed by a 1-based numbered footnote list of URLs."""
        footnotes = "\n".join(
            f"[{num}]: {url}" for num, url in enumerate(self.cited_urls, start=1)
        )
        return f"{self.answer}\n\nCitations:\n\n" + footnotes
gen_answer_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert who can use information effectively. You are chatting with a Wikipedia writer who wants\
to write a Wikipedia page on the topic you know. You have gathered the related information and will now use the information to form a response.
Make your response as informative as possible and make sure every sentence is supported by the gathered information.
Each response must be backed up by a citation from a reliable source, formatted as a footnote, reproducing the URLS after your response.""",
),
MessagesPlaceholder(variable_name="messages", optional=True),
]
)
gen_answer_chain = gen_answer_prompt | fast_llm.with_structured_output(
AnswerWithCitations, include_raw=True
).with_config(run_name="GenerateAnswer")
# Tavily is typically a better search engine, but your free queries are limited
tavily_search = TavilySearchResults(max_results=4)


@tool
async def search_engine(query: str):
    """Search engine to the internet."""
    # NOTE: the underlying Tavily call here is synchronous; results are trimmed
    # down to just content + url before returning.
    results = tavily_search.invoke(query)
    return [{"content": r["content"], "url": r["url"]} for r in results]
async def gen_answer(
    state: InterviewState,
    config: Optional[RunnableConfig] = None,
    name: str = "Subject_Matter_Expert",
    max_str_len: int = 15000,
):
    """Answer the editor's latest question as the expert, grounded in web search.

    Generates search queries, runs them, feeds the (truncated) results back to
    the model, and returns the cited answer plus the references it used.
    """
    swapped_state = swap_roles(state, name)  # Convert all other AI messages
    queries = await gen_queries_chain.ainvoke(swapped_state)
    # NOTE(review): queries["parsed"] can be None when structured-output parsing
    # fails, which would raise AttributeError here — confirm upstream guarantees.
    query_results = await search_engine.abatch(
        queries["parsed"].queries, config, return_exceptions=True
    )
    successful_results = [
        res for res in query_results if not isinstance(res, Exception)
    ]
    # Flatten to a url -> content map (later duplicates overwrite earlier ones).
    all_query_results = {
        res["url"]: res["content"] for results in successful_results for res in results
    }
    # We could be more precise about handling max token length if we wanted to here
    dumped = json.dumps(all_query_results)[:max_str_len]
    ai_message: AIMessage = queries["raw"]
    tool_call = queries["raw"].tool_calls[0]
    tool_id = tool_call["id"]
    # Pair the model's tool call with a ToolMessage carrying the search results,
    # completing the tool-calling turn before asking for the final answer.
    tool_message = ToolMessage(tool_call_id=tool_id, content=dumped)
    swapped_state["messages"].extend([ai_message, tool_message])
    # Only update the shared state with the final answer to avoid
    # polluting the dialogue history with intermediate messages
    generated = await gen_answer_chain.ainvoke(swapped_state)
    cited_urls = set(generated["parsed"].cited_urls)
    # Save the retrieved information to the shared state for future reference
    cited_references = {k: v for k, v in all_query_results.items() if k in cited_urls}
    formatted_message = AIMessage(name=name, content=generated["parsed"].as_str)
    return {"messages": [formatted_message], "references": cited_references}
# Hard cap on expert answers per interview before the conversation is cut off.
max_num_turns = 5


def route_messages(state: InterviewState, name: str = "Subject_Matter_Expert"):
    """Decide whether the interview loop continues or ends.

    Ends when the expert (*name*) has answered ``max_num_turns`` times, or when
    the editor's previous message was the closing thank-you phrase; otherwise
    routes back to "ask_question".
    """
    messages = state["messages"]
    num_responses = len(
        [m for m in messages if isinstance(m, AIMessage) and m.name == name]
    )
    if num_responses >= max_num_turns:
        return END
    # Fix: guard short histories before indexing messages[-2] — the original
    # raised IndexError when fewer than two messages were present.
    if len(messages) < 2:
        return "ask_question"
    last_question = messages[-2]
    if last_question.content.endswith("Thank you so much for your help!"):
        return END
    return "ask_question"
# Interview sub-graph: the editor asks, the expert answers, looping until
# route_messages() returns END.
builder = StateGraph(InterviewState)
builder.add_node("ask_question", generate_question)
builder.add_node("answer_question", gen_answer)
# route_messages returns END or "ask_question" after each expert answer.
builder.add_conditional_edges("answer_question", route_messages)
builder.add_edge("ask_question", "answer_question")
builder.set_entry_point("ask_question")
interview_graph = builder.compile().with_config(run_name="Conduct Interviews")
# Prompt + chain to refine the initial outline using interview transcripts.
refine_outline_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a Wikipedia writer. You have gathered information from experts and search engines. Now, you are refining the outline of the Wikipedia page. \
You need to make sure that the outline is comprehensive and specific. \
Topic you are writing about: {topic}
Old outline:
{old_outline}""",
        ),
        (
            "user",
            "Refine the outline based on your conversations with subject-matter experts:\n\nConversations:\n\n{conversations}\n\nWrite the refined Wikipedia outline:",
        ),
    ]
)
# Using turbo preview since the context can get quite long
refine_outline_chain = refine_outline_prompt | long_context_llm.with_structured_output(
    Outline
)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
# Example of indexing retrieved references once a final interview state exists:
# reference_docs = [
#     Document(page_content=v, metadata={"source": k})
#     for k, v in final_state["references"].items()
# ]
# # This really doesn't need to be a vectorstore for this size of data.
# # It could just be a numpy matrix. Or you could store documents
# # across requests if you want.
# vectorstore = SKLearnVectorStore.from_documents(
#     reference_docs,
#     embedding=embeddings,
# )
# retriever = vectorstore.as_retriever(k=10)
# Empty store populated later by index_references(); retrieval returns top 10.
vectorstore = SKLearnVectorStore(embedding=embeddings)
retriever = vectorstore.as_retriever(k=10)
class SubSection(BaseModel):
    """One drafted subsection of a Wikipedia section."""

    subsection_title: str = Field(..., title="Title of the subsection")
    content: str = Field(
        ...,
        title="Full content of the subsection. Include [#] citations to the cited sources where relevant.",
    )

    @property
    def as_str(self) -> str:
        """Render as a markdown level-3 heading plus body."""
        return f"### {self.subsection_title}\n\n{self.content}".strip()
class WikiSection(BaseModel):
    """A fully drafted Wikipedia section with optional subsections and citations."""

    section_title: str = Field(..., title="Title of the section")
    content: str = Field(..., title="Full content of the section")
    # NOTE(review): this annotation references `Subsection` (presumably the
    # outline model defined earlier in the file), not the `SubSection` drafting
    # model defined just above — confirm which type is intended.
    subsections: Optional[List[Subsection]] = Field(
        default=None,
        title="Titles and descriptions for each subsection of the Wikipedia page.",
    )
    citations: List[str] = Field(default_factory=list)

    @property
    def as_str(self) -> str:
        """Render as markdown: level-2 heading, body, subsections, then citations."""
        subsections = "\n\n".join(
            subsection.as_str for subsection in self.subsections or []
        )
        # Citations are numbered from 0 here (display-only detail).
        citations = "\n".join([f" [{i}] {cit}" for i, cit in enumerate(self.citations)])
        return (
            f"## {self.section_title}\n\n{self.content}\n\n{subsections}".strip()
            + f"\n\n{citations}".strip()
        )
# Prompt for drafting one section; {docs} is filled in by retrieve() below.
# NOTE(review): the second "<Documents>" looks like it was meant to be a
# closing "</Documents>" tag — confirm before changing the prompt text.
section_writer_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are an expert Wikipedia writer. Complete your assigned WikiSection from the following outline:\n\n"
            "{outline}\n\nCite your sources, using the following references:\n\n<Documents>\n{docs}\n<Documents>",
        ),
        ("user", "Write the full WikiSection for the {section} section."),
    ]
)
async def retrieve(inputs: dict):
    """Fetch reference docs for a section and format them as <Document> blocks."""
    query = inputs["topic"] + ": " + inputs["section"]
    docs = await retriever.ainvoke(query)
    blocks = [
        f'<Document href="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>'
        for doc in docs
    ]
    return {"docs": "\n".join(blocks), **inputs}
# retrieve -> section prompt -> structured WikiSection output.
section_writer = (
    retrieve
    | section_writer_prompt
    | long_context_llm.with_structured_output(WikiSection)
)
# Final-pass prompt that stitches the section drafts into one article.
writer_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are an expert Wikipedia author. Write the complete wiki article on {topic} using the following section drafts:\n\n"
            "{draft}\n\nStrictly follow Wikipedia format guidelines.",
        ),
        (
            "user",
            'Write the complete Wiki article using markdown format. Organize citations using footnotes like "[1]",'
            " avoiding duplicates in the footer. Include URLs in the footer.",
        ),
    ]
)
# Plain-text output: the article is returned as a markdown string.
writer = writer_prompt | long_context_llm | StrOutputParser()
class ResearchState(TypedDict):
    # Overall state for the full research/writing pipeline.
    topic: str
    # Current article outline; replaced by each refinement step.
    outline: Outline
    # Editor personas produced by survey_subjects().
    editors: List[Editor]
    # Final state of each per-editor interview sub-graph run.
    interview_results: List[InterviewState]
    # The final sections output
    sections: List[WikiSection]
    # Completed article markdown.
    article: str
async def initialize_research(state: ResearchState):
    """Kick off research: draft an outline and survey editor perspectives in parallel."""
    topic = state["topic"]
    outline, perspectives = await asyncio.gather(
        generate_outline_direct.ainvoke({"topic": topic}),
        survey_subjects.ainvoke(topic),
    )
    return {**state, "outline": outline, "editors": perspectives.editors}
async def conduct_interviews(state: ResearchState):
    """Run one interview sub-graph per editor, in parallel via abatch."""
    topic = state["topic"]
    initial_states = []
    for editor in state["editors"]:
        opener = AIMessage(
            content=f"So you said you were writing an article on {topic}?",
            name="Subject_Matter_Expert",
        )
        initial_states.append({"editor": editor, "messages": [opener]})
    # We call in to the sub-graph here to parallelize the interviews.
    interview_results = await interview_graph.abatch(initial_states)
    return {**state, "interview_results": interview_results}
def format_conversation(interview_state):
    """Render one interview as a 'name: content' transcript with a header line."""
    transcript = "\n".join(
        f"{message.name}: {message.content}"
        for message in interview_state["messages"]
    )
    header = f'Conversation with {interview_state["editor"].name}\n\n'
    return header + transcript
async def refine_outline(state: ResearchState):
    """Revise the outline using everything learned during the interviews."""
    transcripts = [
        format_conversation(result) for result in state["interview_results"]
    ]
    updated_outline = await refine_outline_chain.ainvoke(
        {
            "topic": state["topic"],
            "old_outline": state["outline"].as_str,
            "conversations": "\n\n".join(transcripts),
        }
    )
    return {**state, "outline": updated_outline}
async def index_references(state: ResearchState):
    """Index every reference cited during the interviews into the vector store."""
    docs = [
        Document(page_content=content, metadata={"source": url})
        for result in state["interview_results"]
        for url, content in result["references"].items()
    ]
    await vectorstore.aadd_documents(docs)
    return state
async def write_sections(state: ResearchState):
    """Draft every outline section in parallel with the section writer chain."""
    outline = state["outline"]
    requests = [
        {
            "outline": outline.as_str,
            "section": section.section_title,
            "topic": state["topic"],
        }
        for section in outline.sections
    ]
    sections = await section_writer.abatch(requests)
    return {**state, "sections": sections}
async def write_article(state: ResearchState):
    """Assemble the section drafts and write the final article."""
    draft = "\n\n".join(section.as_str for section in state["sections"])
    article = await writer.ainvoke({"topic": state["topic"], "draft": draft})
    return {**state, "article": article}
# Wire the research pipeline as a straight line through each phase.
builder_of_storm = StateGraph(ResearchState)
nodes = [
    ("init_research", initialize_research),
    ("conduct_interviews", conduct_interviews),
    ("refine_outline", refine_outline),
    ("index_references", index_references),
    ("write_sections", write_sections),
    ("write_article", write_article),
]
previous_name = None
for name, node in nodes:
    builder_of_storm.add_node(name, node)
    # Chain each phase to its predecessor.
    if previous_name is not None:
        builder_of_storm.add_edge(previous_name, name)
    previous_name = name
builder_of_storm.set_entry_point(nodes[0][0])
builder_of_storm.set_finish_point(nodes[-1][0])
graph = builder_of_storm.compile()
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b/requirements.txt | requests
langchain_anthropic
langchain_openai
langchain_community
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b/hello.py | from graphs_submod.agent import graph # noqa
from utils.greeter import greet
greet()
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b/langgraph.json | {
"dependencies": [
"."
],
"env": "../.env",
"graphs": {
"graph": "./hello.py:graph"
}
}
|
0 | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b/utils/greeter.py | def greet():
print("Hello, world!")
|
0 | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_b/graphs_submod/agent.py | from pathlib import Path
from typing import Annotated, Sequence, TypedDict
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, add_messages
from langgraph.prebuilt import ToolNode
tools = [TavilySearchResults(max_results=1)]
model_anth = ChatAnthropic(temperature=0, model_name="claude-3-sonnet-20240229")
model_oai = ChatOpenAI(temperature=0)
model_anth = model_anth.bind_tools(tools)
model_oai = model_oai.bind_tools(tools)
# Fix: read prompts via pathlib so the file handles are closed promptly
# (bare open(...).read() leaked the handle). Paths are unchanged:
# prompt.txt stays cwd-relative, subprompt.txt stays package-relative.
prompt = Path("prompt.txt").read_text()
subprompt = (Path(__file__).parent / "subprompt.txt").read_text()
class AgentState(TypedDict):
    # Conversation history; the add_messages reducer appends new messages.
    messages: Annotated[Sequence[BaseMessage], add_messages]
# Define the function that determines whether to continue or not
def should_continue(state):
    """Return "continue" when the last message requests tool calls, else "end"."""
    last_message = state["messages"][-1]
    return "continue" if last_message.tool_calls else "end"
# Define the function that calls the model
def call_model(state, config):
    """Run the configured chat model ("anthropic" by default) on the history."""
    selected = config["configurable"].get("model", "anthropic")
    model = model_anth if selected == "anthropic" else model_oai
    response = model.invoke(state["messages"])
    # Returned as a list so the add_messages reducer appends it.
    return {"messages": [response]}
# Define the function to execute tools
tool_node = ToolNode(tools)
# Define a new graph
workflow = StateGraph(AgentState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `tools`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("action", "agent")
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
graph = workflow.compile()
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_a/requirements.txt | requests
langchain_anthropic
langchain_openai
langchain_community
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_a/hello.py | from graphs_reqs_a.graphs_submod.agent import graph # noqa
|
0 | lc_public_repos/langgraph/libs/cli/examples | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_a/langgraph.json | {
"dependencies": [
"."
],
"env": "../.env",
"graphs": {
"graph": "./hello.py:graph"
}
}
|
0 | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_a | lc_public_repos/langgraph/libs/cli/examples/graphs_reqs_a/graphs_submod/agent.py | from pathlib import Path
from typing import Annotated, Sequence, TypedDict
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, add_messages
from langgraph.prebuilt import ToolNode
tools = [TavilySearchResults(max_results=1)]
model_anth = ChatAnthropic(temperature=0, model_name="claude-3-sonnet-20240229")
model_oai = ChatOpenAI(temperature=0)
model_anth = model_anth.bind_tools(tools)
model_oai = model_oai.bind_tools(tools)
# Fix: read prompts via pathlib so the file handles are closed promptly
# (bare open(...).read() leaked the handle). Paths are unchanged:
# prompt.txt stays cwd-relative, subprompt.txt stays package-relative.
prompt = Path("prompt.txt").read_text()
subprompt = (Path(__file__).parent / "subprompt.txt").read_text()
class AgentState(TypedDict):
    # Conversation history; the add_messages reducer appends new messages.
    messages: Annotated[Sequence[BaseMessage], add_messages]
# Define the function that determines whether to continue or not
def should_continue(state):
    """Return "continue" when the last message requests tool calls, else "end"."""
    last_message = state["messages"][-1]
    return "continue" if last_message.tool_calls else "end"
# Define the function that calls the model
def call_model(state, config):
    """Run the configured chat model ("anthropic" by default) on the history."""
    selected = config["configurable"].get("model", "anthropic")
    model = model_anth if selected == "anthropic" else model_oai
    response = model.invoke(state["messages"])
    # Returned as a list so the add_messages reducer appends it.
    return {"messages": [response]}
# Define the function to execute tools
tool_node = ToolNode(tools)
# Define a new graph
workflow = StateGraph(AgentState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `tools`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("action", "agent")
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
graph = workflow.compile()
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/exec.py | import asyncio
import signal
import sys
from contextlib import contextmanager
from typing import Callable, Optional, cast
import click.exceptions
@contextmanager
def Runner():
    """Yield an object with .run(coro): asyncio.Runner on 3.11+, else a shim over asyncio.run()."""
    if hasattr(asyncio, "Runner"):
        with asyncio.Runner() as runner:
            yield runner
        return

    class _FallbackRunner:
        """Minimal stand-in for asyncio.Runner on Python < 3.11."""

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            pass

        def run(self, coro):
            return asyncio.run(coro)

    yield _FallbackRunner()
async def subp_exec(
    cmd: str,
    *args: str,
    input: Optional[str] = None,
    wait: Optional[float] = None,
    verbose: bool = False,
    collect: bool = False,
    on_stdout: Optional[Callable[[str], Optional[bool]]] = None,
) -> tuple[Optional[str], Optional[str]]:
    """Run *cmd* as a subprocess, monitoring and optionally collecting output.

    Args:
        cmd: Executable to run.
        *args: Arguments passed to the executable.
        input: Optional text fed to the process's stdin.
        wait: Optional delay in seconds before starting the process.
        verbose: Echo the command line and stream its output.
        collect: Return decoded (stdout, stderr) instead of (None, None).
        on_stdout: Per-line stdout callback; returning True disables the
            callback and turns on display of subsequent output.

    Raises:
        click.exceptions.Exit: when the process exits non-zero
            (exit code 130, user interrupt, is tolerated).
    """
    if verbose:
        cmd_str = f"+ {cmd} {' '.join(map(str, args))}"
        if input:
            print(cmd_str, " <\n", "\n".join(filter(None, input.splitlines())), sep="")
        else:
            print(cmd_str)
    if wait:
        await asyncio.sleep(wait)
    try:
        proc = await asyncio.create_subprocess_exec(
            cmd,
            *args,
            stdin=asyncio.subprocess.PIPE if input else None,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

        def signal_handler():
            # make sure process exists, then terminate it
            if proc.returncode is None:
                proc.terminate()

        original_sigint_handler = signal.getsignal(signal.SIGINT)
        if sys.platform == "win32":
            # Windows has no loop.add_signal_handler; chain our handler onto
            # the pre-existing SIGINT handler instead.
            def handle_windows_signal(signum, frame):
                signal_handler()
                original_sigint_handler(signum, frame)

            signal.signal(signal.SIGINT, handle_windows_signal)
            # NOTE: we're not adding a handler for SIGTERM since it's ignored on Windows
        else:
            loop = asyncio.get_event_loop()
            loop.add_signal_handler(signal.SIGINT, signal_handler)
            loop.add_signal_handler(signal.SIGTERM, signal_handler)
        # Pre-resolved future stands in for the stdin feeder when there is no input.
        empty_fut: asyncio.Future = asyncio.Future()
        empty_fut.set_result(None)
        stdout, stderr, _ = await asyncio.gather(
            monitor_stream(
                cast(asyncio.StreamReader, proc.stdout),
                collect=True,
                display=verbose,
                on_line=on_stdout,
            ),
            monitor_stream(
                cast(asyncio.StreamReader, proc.stderr),
                collect=True,
                display=verbose,
            ),
            proc._feed_stdin(input.encode()) if input else empty_fut,  # type: ignore[attr-defined]
        )
        returncode = await proc.wait()
        if (
            returncode is not None
            and returncode != 0  # success
            and returncode != 130  # user interrupt
        ):
            # Surface captured output before signalling failure to click.
            sys.stdout.write(stdout.decode() if stdout else "")
            sys.stderr.write(stderr.decode() if stderr else "")
            raise click.exceptions.Exit(returncode)
        if collect:
            return (
                stdout.decode() if stdout else None,
                stderr.decode() if stderr else None,
            )
        else:
            return None, None
    finally:
        try:
            if proc.returncode is None:
                try:
                    proc.terminate()
                except (ProcessLookupError, KeyboardInterrupt):
                    pass
            # Restore the signal handling installed above.
            if sys.platform == "win32":
                signal.signal(signal.SIGINT, original_sigint_handler)
            else:
                loop.remove_signal_handler(signal.SIGINT)
                loop.remove_signal_handler(signal.SIGTERM)
        except UnboundLocalError:
            # proc/loop may be unbound if create_subprocess_exec itself failed.
            pass
async def monitor_stream(
    stream: asyncio.StreamReader,
    collect: bool = False,
    display: bool = False,
    on_line: Optional[Callable[[str], Optional[bool]]] = None,
) -> Optional[bytearray]:
    """Drain *stream* line by line until EOF.

    Args:
        stream: Subprocess output stream to read.
        collect: Accumulate all bytes and return them as a bytearray.
        display: Echo each line to stdout.
        on_line: Per-line callback on the decoded text; returning True disables
            the callback and enables display for the rest of the stream.

    Returns:
        The collected bytes when ``collect`` is True, else None.
    """
    if collect:
        ba = bytearray()

    def handle(line: bytes, overrun: bool):
        nonlocal on_line
        nonlocal display
        if display:
            sys.stdout.buffer.write(line)
        # Overrun chunks are partial lines beyond the reader's limit: shown,
        # but neither collected nor handed to on_line.
        if overrun:
            return
        if collect:
            ba.extend(line)
        if on_line:
            if on_line(line.decode()):
                on_line = None
                display = True

    """Adapted from asyncio.StreamReader.readline() to handle LimitOverrunError."""
    sep = b"\n"
    seplen = len(sep)
    while True:
        try:
            line = await stream.readuntil(sep)
            overrun = False
        except asyncio.IncompleteReadError as e:
            # EOF before a separator: e.partial holds the trailing bytes.
            line = e.partial
            overrun = False
        except asyncio.LimitOverrunError as e:
            # Line exceeds the stream limit; flush what is buffered.
            # NOTE: relies on private StreamReader internals
            # (_buffer, _maybe_resume_transport).
            if stream._buffer.startswith(sep, e.consumed):
                line = stream._buffer[: e.consumed + seplen]
            else:
                line = stream._buffer.clear()
            overrun = True
            stream._maybe_resume_transport()
        # handle() may block (stdout write, user callback), so run it off-loop.
        await asyncio.to_thread(handle, line, overrun)
        if line == b"":
            break
    if collect:
        return ba
    else:
        return None
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/version.py | """Main entrypoint into package."""
from importlib import metadata

# Resolve the installed package version from distribution metadata.
try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available (e.g. running from source).
    __version__ = ""
del metadata  # optional, avoids polluting the results of dir(__package__)
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/config.py | import json
import os
import pathlib
import textwrap
from typing import NamedTuple, Optional, TypedDict, Union
import click
# Minimum supported runtime versions, enforced by validate_config().
MIN_NODE_VERSION = "20"
MIN_PYTHON_VERSION = "3.11"
class IndexConfig(TypedDict, total=False):
    """Configuration for indexing documents for semantic search in the store."""

    dims: int
    """Number of dimensions in the embedding vectors.

    Common embedding models have the following dimensions:
        - openai:text-embedding-3-large: 3072
        - openai:text-embedding-3-small: 1536
        - openai:text-embedding-ada-002: 1536
        - cohere:embed-english-v3.0: 1024
        - cohere:embed-english-light-v3.0: 384
        - cohere:embed-multilingual-v3.0: 1024
        - cohere:embed-multilingual-light-v3.0: 384
    """

    embed: str
    """Optional model (string) to generate embeddings from text or path to model or function.

    Examples:
        - "openai:text-embedding-3-large"
        - "cohere:embed-multilingual-v3.0"
        - "src/app.py:embeddings"
    """

    fields: Optional[list[str]]
    """Fields to extract text from for embedding generation.

    Defaults to the root ["$"], which embeds the json object as a whole.
    """
class StoreConfig(TypedDict, total=False):
    """Optional persistent-store settings from langgraph.json."""

    embed: Optional[IndexConfig]
    """Configuration for vector embeddings in store."""
class Config(TypedDict, total=False):
    """Parsed langgraph.json configuration (Python or Node flavor)."""

    # Python 'major.minor' version, e.g. "3.11" (Python flavor only).
    python_version: str
    # Node.js major version, e.g. "20"; presence selects the Node flavor.
    node_version: Optional[str]
    # Optional pip config file copied into the image (Python flavor only).
    pip_config_file: Optional[str]
    # Extra lines appended verbatim to the generated Dockerfile.
    dockerfile_lines: list[str]
    # PyPI requirements and/or local paths (entries starting with ".").
    dependencies: list[str]
    # graph id -> "<module-or-file>:<attribute>" import string.
    graphs: dict[str, str]
    # Env vars as a mapping, or a path to an env file.
    env: Union[dict[str, str], str]
    # Optional store / semantic-search configuration.
    store: Optional[StoreConfig]
def _parse_version(version_str: str) -> tuple[int, int]:
"""Parse a version string into a tuple of (major, minor)."""
try:
major, minor = map(int, version_str.split("."))
return (major, minor)
except ValueError:
raise click.UsageError(f"Invalid version format: {version_str}") from None
def _parse_node_version(version_str: str) -> int:
"""Parse a Node.js version string into a major version number."""
try:
if "." in version_str:
raise ValueError("Node.js version must be major version only")
return int(version_str)
except ValueError:
raise click.UsageError(
f"Invalid Node.js version format: {version_str}. "
"Use major version only (e.g., '20')."
) from None
def validate_config(config: Config) -> Config:
    """Normalize and validate a langgraph.json configuration dict.

    Selects the Node flavor when ``node_version`` is present, otherwise the
    Python flavor with defaults, then enforces minimum runtime versions and
    the presence of dependencies/graphs.

    Raises:
        click.UsageError: on any invalid or unsupported setting.
    """
    # Rebuild the dict with only the keys relevant to the chosen flavor.
    config = (
        {
            "node_version": config.get("node_version"),
            "dockerfile_lines": config.get("dockerfile_lines", []),
            "graphs": config.get("graphs", {}),
            "env": config.get("env", {}),
            "store": config.get("store"),
        }
        if config.get("node_version")
        else {
            "python_version": config.get("python_version", "3.11"),
            "pip_config_file": config.get("pip_config_file"),
            "dockerfile_lines": config.get("dockerfile_lines", []),
            "dependencies": config.get("dependencies", []),
            "graphs": config.get("graphs", {}),
            "env": config.get("env", {}),
            "store": config.get("store"),
        }
    )
    if config.get("node_version"):
        node_version = config["node_version"]
        try:
            major = _parse_node_version(node_version)
            min_major = _parse_node_version(MIN_NODE_VERSION)
            if major < min_major:
                raise click.UsageError(
                    f"Node.js version {node_version} is not supported. "
                    f"Minimum required version is {MIN_NODE_VERSION}."
                )
        except ValueError as e:
            raise click.UsageError(str(e)) from None
    if config.get("python_version"):
        pyversion = config["python_version"]
        # Require exactly 'major.minor'; patch versions are rejected.
        if not pyversion.count(".") == 1 or not all(
            part.isdigit() for part in pyversion.split(".")
        ):
            raise click.UsageError(
                f"Invalid Python version format: {pyversion}. "
                "Use 'major.minor' format (e.g., '3.11'). "
                "Patch version cannot be specified."
            )
        if _parse_version(pyversion) < _parse_version(MIN_PYTHON_VERSION):
            raise click.UsageError(
                f"Python version {pyversion} is not supported. "
                f"Minimum required version is {MIN_PYTHON_VERSION}."
            )
        if not config["dependencies"]:
            raise click.UsageError(
                "No dependencies found in config. "
                "Add at least one dependency to 'dependencies' list."
            )
    if not config["graphs"]:
        raise click.UsageError(
            "No graphs found in config. "
            "Add at least one graph to 'graphs' dictionary."
        )
    return config
def validate_config_file(config_path: pathlib.Path) -> Config:
    """Load and validate a langgraph.json file from disk.

    For Node configs, additionally checks that a sibling package.json does not
    pin an incompatible Node.js engine version.

    Raises:
        click.UsageError: on invalid config or package.json contents.
    """
    with open(config_path) as f:
        config = json.load(f)
    validated = validate_config(config)
    # Enforce the package.json doesn't enforce an
    # incompatible Node.js version
    if validated.get("node_version"):
        package_json_path = config_path.parent / "package.json"
        if package_json_path.is_file():
            try:
                with open(package_json_path) as f:
                    package_json = json.load(f)
                if "engines" in package_json:
                    engines = package_json["engines"]
                    # Only the 'node' engine is meaningful for deployment.
                    if any(engine != "node" for engine in engines.keys()):
                        raise click.UsageError(
                            "Only 'node' engine is supported in package.json engines."
                            f" Got engines: {list(engines.keys())}"
                        )
                    if engines:
                        node_version = engines["node"]
                        try:
                            major = _parse_node_version(node_version)
                            min_major = _parse_node_version(MIN_NODE_VERSION)
                            if major < min_major:
                                raise click.UsageError(
                                    f"Node.js version in package.json engines must be >= {MIN_NODE_VERSION} "
                                    f"(major version only), got '{node_version}'. Minor/patch versions "
                                    "(like '20.x.y') are not supported to prevent deployment issues "
                                    "when new Node.js versions are released."
                                )
                        except ValueError as e:
                            raise click.UsageError(str(e)) from None
            except json.JSONDecodeError:
                raise click.UsageError(
                    "Invalid package.json found in langgraph "
                    f"config directory {package_json_path}: file is not valid JSON"
                ) from None
    return validated
class LocalDeps(NamedTuple):
    """Classification of local (path-based) dependencies for the Docker build."""

    # (host requirements.txt path relative to config dir, container path) pairs.
    pip_reqs: list[tuple[pathlib.Path, str]]
    # Installable packages (pyproject.toml/setup.py): resolved path -> dep ref.
    real_pkgs: dict[pathlib.Path, str]
    # Bare directories needing a generated pyproject: path -> (dep ref, container path).
    faux_pkgs: dict[pathlib.Path, tuple[str, str]]
    # if . is in dependencies, use it as working_dir
    working_dir: Optional[str] = None
def _assemble_local_deps(config_path: pathlib.Path, config: Config) -> LocalDeps:
    """Classify each local ("." -prefixed) dependency for the Docker build.

    Separates installable packages (pyproject.toml/setup.py present) from bare
    directories that need a generated pyproject ("faux" packages), records any
    requirements.txt files to install, and selects the container working dir
    when "." is listed as a dependency.

    Raises:
        FileNotFoundError, NotADirectoryError, ValueError: on invalid deps.
    """
    # ensure reserved package names are not used
    reserved = {
        "src",
        "langgraph-api",
        "langgraph_api",
        "langgraph",
        "langchain-core",
        "langchain_core",
        "pydantic",
        "orjson",
        "fastapi",
        "uvicorn",
        "psycopg",
        "httpx",
        "langsmith",
    }

    def check_reserved(name: str, ref: str):
        # Adding the name afterwards also guards against two local deps
        # claiming the same importable package name.
        if name in reserved:
            raise ValueError(
                f"Package name '{name}' used in local dep '{ref}' is reserved. "
                "Rename the directory."
            )
        reserved.add(name)

    pip_reqs = []
    real_pkgs = {}
    faux_pkgs = {}
    working_dir = None
    for local_dep in config["dependencies"]:
        # Only path dependencies are handled here; PyPI deps are skipped.
        if not local_dep.startswith("."):
            continue
        resolved = config_path.parent / local_dep
        # validate local dependency
        if not resolved.exists():
            raise FileNotFoundError(f"Could not find local dependency: {resolved}")
        elif not resolved.is_dir():
            raise NotADirectoryError(
                f"Local dependency must be a directory: {resolved}"
            )
        elif not resolved.is_relative_to(config_path.parent):
            raise ValueError(
                f"Local dependency '{resolved}' must be a subdirectory of '{config_path.parent}'"
            )
        # if it's installable, add it to local_pkgs
        # otherwise, add it to faux_pkgs, and create a pyproject.toml
        files = os.listdir(resolved)
        if "pyproject.toml" in files:
            real_pkgs[resolved] = local_dep
            if local_dep == ".":
                working_dir = f"/deps/{resolved.name}"
        elif "setup.py" in files:
            real_pkgs[resolved] = local_dep
            if local_dep == ".":
                working_dir = f"/deps/{resolved.name}"
        else:
            if any(file == "__init__.py" for file in files):
                # flat layout
                if "-" in resolved.name:
                    raise ValueError(
                        f"Package name '{resolved.name}' contains a hyphen. "
                        "Rename the directory to use it as flat-layout package."
                    )
                check_reserved(resolved.name, local_dep)
                container_path = f"/deps/__outer_{resolved.name}/{resolved.name}"
            else:
                # src layout
                container_path = f"/deps/__outer_{resolved.name}/src"
                # Reserve every subdirectory containing .py files, since each
                # becomes an importable top-level package in the container.
                for file in files:
                    rfile = resolved / file
                    if (
                        rfile.is_dir()
                        and file != "__pycache__"
                        and not file.startswith(".")
                    ):
                        try:
                            for subfile in os.listdir(rfile):
                                if subfile.endswith(".py"):
                                    check_reserved(file, local_dep)
                                    break
                        except PermissionError:
                            pass
            faux_pkgs[resolved] = (local_dep, container_path)
            if local_dep == ".":
                working_dir = container_path
            # Faux packages may still ship a requirements.txt to install.
            if "requirements.txt" in files:
                rfile = resolved / "requirements.txt"
                pip_reqs.append(
                    (
                        rfile.relative_to(config_path.parent),
                        f"{container_path}/requirements.txt",
                    )
                )
    return LocalDeps(pip_reqs, real_pkgs, faux_pkgs, working_dir)
def _update_graph_paths(
    config_path: pathlib.Path, config: Config, local_deps: LocalDeps
) -> None:
    """Rewrite file-based graph import paths to their in-container locations.

    Mutates ``config["graphs"]`` in place: "<path>:<attr>" entries pointing at
    local files are remapped under /deps/... based on which local dependency
    contains them. Module-style entries ("pkg.mod:attr") are left untouched.

    Raises:
        ValueError, FileNotFoundError, IsADirectoryError: on bad entries.
    """
    for graph_id, import_str in config["graphs"].items():
        module_str, _, attr_str = import_str.partition(":")
        if not module_str or not attr_str:
            message = (
                'Import string "{import_str}" must be in format "<module>:<attribute>".'
            )
            raise ValueError(message.format(import_str=import_str))
        # A slash means a file path rather than a dotted module path.
        if "/" in module_str:
            resolved = config_path.parent / module_str
            if not resolved.exists():
                raise FileNotFoundError(f"Could not find local module: {resolved}")
            elif not resolved.is_file():
                raise IsADirectoryError(f"Local module must be a file: {resolved}")
            else:
                # Try installable packages first, then faux packages.
                for path in local_deps.real_pkgs:
                    if resolved.is_relative_to(path):
                        module_str = f"/deps/{path.name}/{resolved.relative_to(path)}"
                        break
                else:
                    for faux_pkg, (_, destpath) in local_deps.faux_pkgs.items():
                        if resolved.is_relative_to(faux_pkg):
                            module_str = f"{destpath}/{resolved.relative_to(faux_pkg)}"
                            break
                    else:
                        raise ValueError(
                            f"Module '{import_str}' not found in 'dependencies' list. "
                            "Add its containing package to 'dependencies' list."
                        )
            # update the config
            config["graphs"][graph_id] = f"{module_str}:{attr_str}"
def python_config_to_docker(config_path: pathlib.Path, config: Config, base_image: str):
    """Render the Dockerfile (as a string) for a Python LangGraph project.

    Combines pip configuration, PyPI deps, local requirements files, real and
    faux local packages, and graph env vars into a single Dockerfile text.
    """
    # configure pip
    pip_install = (
        "PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir -c /api/constraints.txt"
    )
    if config.get("pip_config_file"):
        pip_install = f"PIP_CONFIG_FILE=/pipconfig.txt {pip_install}"
    pip_config_file_str = (
        f"ADD {config['pip_config_file']} /pipconfig.txt"
        if config.get("pip_config_file")
        else ""
    )
    # collect dependencies
    pypi_deps = [dep for dep in config["dependencies"] if not dep.startswith(".")]
    local_deps = _assemble_local_deps(config_path, config)
    # rewrite graph paths
    _update_graph_paths(config_path, config, local_deps)
    pip_pkgs_str = f"RUN {pip_install} {' '.join(pypi_deps)}" if pypi_deps else ""
    if local_deps.pip_reqs:
        # ADD each requirements.txt, then install them all in one RUN layer.
        pip_reqs_str = os.linesep.join(
            f"ADD {reqpath} {destpath}" for reqpath, destpath in local_deps.pip_reqs
        )
        pip_reqs_str += f'{os.linesep}RUN {pip_install} {" ".join("-r " + r for _,r in local_deps.pip_reqs)}'
    else:
        pip_reqs_str = ""
    # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#package-data
    # https://til.simonwillison.net/python/pyproject
    # Faux packages get a synthesized pyproject.toml so setuptools installs
    # them with all package data included.
    faux_pkgs_str = f"{os.linesep}{os.linesep}".join(
        f"""ADD {relpath} {destpath}
RUN set -ex && \\
for line in '[project]' \\
'name = "{fullpath.name}"' \\
'version = "0.1"' \\
'[tool.setuptools.package-data]' \\
'"*" = ["**/*"]'; do \\
echo "$line" >> /deps/__outer_{fullpath.name}/pyproject.toml; \\
done"""
        for fullpath, (relpath, destpath) in local_deps.faux_pkgs.items()
    )
    local_pkgs_str = os.linesep.join(
        f"ADD {relpath} /deps/{fullpath.name}"
        for fullpath, relpath in local_deps.real_pkgs.items()
    )
    installs = f"{os.linesep}{os.linesep}".join(
        filter(
            None,
            [
                pip_config_file_str,
                pip_pkgs_str,
                pip_reqs_str,
                local_pkgs_str,
                faux_pkgs_str,
            ],
        )
    )
    store_config = config.get("store")
    env_additional_config = (
        ""
        if not store_config
        else f"""
ENV LANGGRAPH_STORE='{json.dumps(store_config)}'
"""
    )
    return f"""FROM {base_image}:{config['python_version']}
{os.linesep.join(config["dockerfile_lines"])}
{installs}
RUN {pip_install} -e /deps/*
{env_additional_config}
ENV LANGSERVE_GRAPHS='{json.dumps(config["graphs"])}'
{f"WORKDIR {local_deps.working_dir}" if local_deps.working_dir else ""}"""
def node_config_to_docker(config_path: pathlib.Path, config: Config, base_image: str):
    """Render the Dockerfile (as a string) for a Node.js LangGraph project.

    The install command is chosen from whichever lockfile is present, in
    priority order: yarn.lock > pnpm-lock.yaml > package-lock.json > none.
    """
    faux_path = f"/deps/{config_path.parent.name}"
    def test_file(file_name):
        # True if the lockfile exists; treat permission errors as "absent".
        full_path = config_path.parent / file_name
        try:
            return full_path.is_file()
        except OSError:
            return False
    npm, yarn, pnpm = [
        test_file("package-lock.json"),
        test_file("yarn.lock"),
        test_file("pnpm-lock.yaml"),
    ]
    if yarn:
        install_cmd = "yarn install --frozen-lockfile"
    elif pnpm:
        install_cmd = "pnpm i --frozen-lockfile"
    elif npm:
        install_cmd = "npm ci"
    else:
        install_cmd = "npm i"
    store_config = config.get("store")
    env_additional_config = (
        ""
        if not store_config
        else f"""
ENV LANGGRAPH_STORE='{json.dumps(store_config)}'
"""
    )
    return f"""FROM {base_image}:{config['node_version']}
{os.linesep.join(config["dockerfile_lines"])}
ADD . {faux_path}
RUN cd {faux_path} && {install_cmd}
{env_additional_config}
ENV LANGSERVE_GRAPHS='{json.dumps(config["graphs"])}'
WORKDIR {faux_path}
RUN (test ! -f /api/langgraph_api/js/build.mts && echo "Prebuild script not found, skipping") || tsx /api/langgraph_api/js/build.mts"""
def config_to_docker(config_path: pathlib.Path, config: Config, base_image: str):
    """Render a Dockerfile for the given config, dispatching on project runtime.

    Node.js projects (any "node_version" set) use the JS builder; everything
    else falls back to the Python builder.
    """
    builder = (
        node_config_to_docker
        if config.get("node_version")
        else python_config_to_docker
    )
    return builder(config_path, config, base_image)
def config_to_compose(
    config_path: pathlib.Path,
    config: Config,
    base_image: str,
    watch: bool = False,
):
    """Render the fragment of a compose service for the LangGraph API.

    The generated Dockerfile is inlined via `dockerfile_inline`; when `watch`
    is set, a `develop.watch` section rebuilds on changes to the config file
    and any local dependencies.
    """
    # Mapping-style env goes inline; string-style env becomes an env_file entry.
    env_vars = config["env"].items() if isinstance(config["env"], dict) else {}
    env_vars_str = "\n".join(f' {k}: "{v}"' for k, v in env_vars)
    env_file_str = (
        f"env_file: {config['env']}" if isinstance(config["env"], str) else ""
    )
    if watch:
        # Watch the config file itself plus every local (".") dependency.
        watch_paths = [config_path.name] + [
            dep for dep in config["dependencies"] if dep.startswith(".")
        ]
        watch_actions = "\n".join(
            f"""- path: {path}
action: rebuild"""
            for path in watch_paths
        )
        watch_str = f"""
develop:
watch:
{textwrap.indent(watch_actions, " ")}
"""
    else:
        watch_str = ""
    return f"""
{textwrap.indent(env_vars_str, " ")}
{env_file_str}
pull_policy: build
build:
context: .
dockerfile_inline: |
{textwrap.indent(config_to_docker(config_path, config, base_image), " ")}
{watch_str}
"""
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/util.py | def clean_empty_lines(input_str: str):
return "\n".join(filter(None, input_str.splitlines()))
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/analytics.py | import functools
import json
import os
import pathlib
import platform
import threading
import urllib.error
import urllib.request
from typing import Any, TypedDict
from langgraph_cli.constants import (
DEFAULT_CONFIG,
DEFAULT_PORT,
SUPABASE_PUBLIC_API_KEY,
SUPABASE_URL,
)
from langgraph_cli.version import __version__
class LogData(TypedDict):
    """Anonymous analytics record posted for each CLI command invocation."""

    os: str  # platform.system()
    os_version: str  # platform.version()
    python_version: str  # platform.python_version()
    cli_version: str  # langgraph_cli.version.__version__
    cli_command: str  # name of the invoked command function
    params: dict[str, Any]  # anonymized params; see get_anonymized_params()
def get_anonymized_params(kwargs: dict[str, Any]) -> dict[str, bool]:
    """Reduce CLI kwargs to an anonymized analytics dict.

    Options that carry values are reported only as presence flags (True when
    customized); boolean flags are reported verbatim. Option values
    themselves are never included.
    """
    anonymized: dict[str, bool] = {}
    # Valued options: report only that they were customized, never the value.
    config = kwargs.get("config")
    if config and config != pathlib.Path(DEFAULT_CONFIG).resolve():
        anonymized["config"] = True
    port = kwargs.get("port")
    if port and port != DEFAULT_PORT:
        anonymized["port"] = True
    for present_param in ("docker_compose", "debugger_port", "postgres_uri"):
        if kwargs.get(present_param):
            anonymized[present_param] = True
    # Boolean flags carry no user data, so their exact values are safe.
    for flag in ("recreate", "pull", "watch", "wait", "verbose"):
        if kwargs.get(flag):
            anonymized[flag] = kwargs[flag]
    return anonymized
def log_data(data: LogData) -> None:
    """POST one analytics record to the telemetry endpoint, best-effort.

    Any network failure is swallowed: analytics must never break the CLI.
    """
    http_request = urllib.request.Request(
        f"{SUPABASE_URL}/rest/v1/logs",
        data=json.dumps(data).encode("utf-8"),
        headers={
            "Content-Type": "application/json",
            "apikey": SUPABASE_PUBLIC_API_KEY,
            "User-Agent": "Mozilla/5.0",
        },
        method="POST",
    )
    try:
        urllib.request.urlopen(http_request)
    except urllib.error.URLError:
        pass
def log_command(func):
    """Decorator that fires an anonymous analytics event for a CLI command.

    Opt out with LANGGRAPH_CLI_NO_ANALYTICS=1. The HTTP POST runs on a
    daemonless background thread so it never delays the wrapped command.
    """
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if os.getenv("LANGGRAPH_CLI_NO_ANALYTICS") == "1":
            return func(*args, **kwargs)
        data = {
            "os": platform.system(),
            "os_version": platform.version(),
            "python_version": platform.python_version(),
            "cli_version": __version__,
            "cli_command": func.__name__,
            "params": get_anonymized_params(kwargs),
        }
        # Fire-and-forget: do not block the CLI on the network call.
        background_thread = threading.Thread(target=log_data, args=(data,))
        background_thread.start()
        return func(*args, **kwargs)
    return decorator
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/templates.py | import os
import shutil
import sys
from io import BytesIO
from typing import Dict, Optional
from urllib import error, request
from zipfile import ZipFile
import click
# Registry of starter templates: display name -> description plus ZIP archive
# URLs for the Python and JS variants (GitHub "main" branch archives).
TEMPLATES: Dict[str, Dict[str, str]] = {
    "New LangGraph Project": {
        "description": "A simple, minimal chatbot with memory.",
        "python": "https://github.com/langchain-ai/new-langgraph-project/archive/refs/heads/main.zip",
        "js": "https://github.com/langchain-ai/new-langgraphjs-project/archive/refs/heads/main.zip",
    },
    "ReAct Agent": {
        "description": "A simple agent that can be flexibly extended to many tools.",
        "python": "https://github.com/langchain-ai/react-agent/archive/refs/heads/main.zip",
        "js": "https://github.com/langchain-ai/react-agent-js/archive/refs/heads/main.zip",
    },
    "Memory Agent": {
        "description": "A ReAct-style agent with an additional tool to store memories for use across conversational threads.",
        "python": "https://github.com/langchain-ai/memory-agent/archive/refs/heads/main.zip",
        "js": "https://github.com/langchain-ai/memory-agent-js/archive/refs/heads/main.zip",
    },
    "Retrieval Agent": {
        "description": "An agent that includes a retrieval-based question-answering system.",
        "python": "https://github.com/langchain-ai/retrieval-agent-template/archive/refs/heads/main.zip",
        "js": "https://github.com/langchain-ai/retrieval-agent-template-js/archive/refs/heads/main.zip",
    },
    "Data-enrichment Agent": {
        "description": "An agent that performs web searches and organizes its findings into a structured format.",
        "python": "https://github.com/langchain-ai/data-enrichment/archive/refs/heads/main.zip",
        "js": "https://github.com/langchain-ai/data-enrichment-js/archive/refs/heads/main.zip",
    },
}
# Generate TEMPLATE_IDS programmatically
# e.g. "new-langgraph-project-python" -> ("New LangGraph Project", "python", url)
TEMPLATE_ID_TO_CONFIG = {
    f"{name.lower().replace(' ', '-')}-{lang}": (name, lang, url)
    for name, versions in TEMPLATES.items()
    for lang, url in versions.items()
    if lang in {"python", "js"}
}
TEMPLATE_IDS = list(TEMPLATE_ID_TO_CONFIG.keys())
# Help text for the --template option, listing every valid template id.
TEMPLATE_HELP_STRING = (
    "The name of the template to use. Available options:\n"
    + "\n".join(f"{id_}" for id_ in TEMPLATE_ID_TO_CONFIG)
)
def _choose_template() -> str:
    """Presents a list of templates to the user and prompts them to select one.

    Recurses until a valid template number and language choice are entered.

    Returns:
        str: The URL of the selected template.
    """
    click.secho("🌟 Please select a template:", bold=True, fg="yellow")
    for idx, (template_name, template_info) in enumerate(TEMPLATES.items(), 1):
        click.secho(f"{idx}. ", nl=False, fg="cyan")
        click.secho(template_name, fg="cyan", nl=False)
        click.secho(f" - {template_info['description']}", fg="white")
    # Get the template choice from the user, defaulting to the first template if blank
    template_choice: Optional[int] = click.prompt(
        "Enter the number of your template choice (default is 1)",
        type=int,
        default=1,
        show_default=False,
    )
    template_keys = list(TEMPLATES.keys())
    if 1 <= template_choice <= len(template_keys):
        selected_template: str = template_keys[template_choice - 1]
    else:
        click.secho("❌ Invalid choice. Please try again.", fg="red")
        # Out-of-range number: restart the whole selection flow.
        return _choose_template()
    # Prompt the user to choose between Python or JS/TS version
    click.secho(
        f"\nYou selected: {selected_template} - {TEMPLATES[selected_template]['description']}",
        fg="green",
    )
    version_choice: int = click.prompt(
        "Choose language (1 for Python 🐍, 2 for JS/TS 🌐)", type=int
    )
    if version_choice == 1:
        return TEMPLATES[selected_template]["python"]
    elif version_choice == 2:
        return TEMPLATES[selected_template]["js"]
    else:
        click.secho("❌ Invalid choice. Please try again.", fg="red")
        return _choose_template()
def _download_repo_with_requests(repo_url: str, path: str) -> None:
    """Download a ZIP archive from the given URL and extract it to the specified path.

    GitHub branch archives wrap everything in a "<repo>-main" directory; its
    contents are flattened into ``path`` after extraction.

    Args:
        repo_url (str): The URL of the repository ZIP archive to download.
        path (str): The path where the repository should be extracted.

    Raises:
        SystemExit: if the download fails for any network-related reason.
    """
    click.secho("📥 Attempting to download repository as a ZIP archive...", fg="yellow")
    click.secho(f"URL: {repo_url}", fg="yellow")
    try:
        with request.urlopen(repo_url) as response:
            if response.status == 200:
                with ZipFile(BytesIO(response.read())) as zip_file:
                    zip_file.extractall(path)
                    # Move extracted contents to path
                    for item in os.listdir(path):
                        if item.endswith("-main"):
                            extracted_dir = os.path.join(path, item)
                            for filename in os.listdir(extracted_dir):
                                shutil.move(os.path.join(extracted_dir, filename), path)
                            shutil.rmtree(extracted_dir)
                    click.secho(
                        f"✅ Downloaded and extracted repository to {path}", fg="green"
                    )
    except error.URLError as e:
        # URLError is the base of HTTPError, so this also covers DNS and
        # connection failures that previously escaped as raw tracebacks.
        click.secho(
            f"❌ Error: Failed to download repository.\n" f"Details: {e}\n",
            fg="red",
            bold=True,
            err=True,
        )
        sys.exit(1)
def _get_template_url(template_name: str) -> Optional[str]:
    """
    Retrieves the template URL based on the provided template name.

    Prompts interactively for the language variant (Python vs JS/TS).

    Args:
        template_name (str): The name of the template.

    Returns:
        Optional[str]: The URL of the template if found, else None.
    """
    if template_name in TEMPLATES:
        click.secho(f"Template selected: {template_name}", fg="green")
        version_choice: int = click.prompt(
            "Choose version (1 for Python 🐍, 2 for JS/TS 🌐)", type=int
        )
        if version_choice == 1:
            return TEMPLATES[template_name]["python"]
        elif version_choice == 2:
            return TEMPLATES[template_name]["js"]
        else:
            # Invalid language choice: caller treats None as "not found".
            click.secho("❌ Invalid choice. Please try again.", fg="red")
            return None
    else:
        click.secho(
            f"Template '{template_name}' not found. Please select from the available options.",
            fg="red",
        )
        return None
def create_new(path: Optional[str], template: Optional[str]) -> None:
    """Create a new LangGraph project at the specified PATH using the chosen TEMPLATE.

    Args:
        path (Optional[str]): The path where the new project will be created.
        template (Optional[str]): The name of the template to use.

    Raises:
        SystemExit: if the target directory is non-empty or the template id
            is unknown.
    """
    # Prompt for path if not provided
    if not path:
        path = click.prompt(
            "📂 Please specify the path to create the application", default="."
        )
    path = os.path.abspath(path)  # Ensure path is absolute
    # Check if path exists and is not empty
    if os.path.exists(path) and os.listdir(path):
        click.secho(
            "❌ The specified directory already exists and is not empty. "
            "Aborting to prevent overwriting files.",
            fg="red",
            bold=True,
        )
        sys.exit(1)
    # Get template URL either from command-line argument or
    # through interactive selection
    if template:
        if template not in TEMPLATE_ID_TO_CONFIG:
            # Format available options in a readable way with descriptions
            template_options = ""
            for id_ in TEMPLATE_IDS:
                name, lang, _ = TEMPLATE_ID_TO_CONFIG[id_]
                description = TEMPLATES[name]["description"]
                # Add each template option with color formatting
                template_options += (
                    click.style("- ", fg="yellow", bold=True)
                    + click.style(f"{id_}", fg="cyan")
                    + click.style(f": {description}", fg="white")
                    + "\n"
                )
            # Display error message with colors and formatting
            click.secho("❌ Error:", fg="red", bold=True, nl=False)
            click.secho(f" Template '{template}' not found.", fg="red")
            click.secho(
                "Please select from the available options:\n", fg="yellow", bold=True
            )
            click.secho(template_options, fg="cyan")
            sys.exit(1)
        _, _, template_url = TEMPLATE_ID_TO_CONFIG[template]
    else:
        template_url = _choose_template()
    # Download and extract the template
    _download_repo_with_requests(template_url, path)
    click.secho(f"🎉 New project created at {path}", fg="green", bold=True)
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/docker.py | import json
import pathlib
import shutil
from typing import Literal, NamedTuple, Optional
import click.exceptions
from langgraph_cli.exec import subp_exec
ROOT = pathlib.Path(__file__).parent.resolve()
DEFAULT_POSTGRES_URI = (
"postgres://postgres:postgres@langgraph-postgres:5432/postgres?sslmode=disable"
)
class Version(NamedTuple):
major: int
minor: int
patch: int
DockerComposeType = Literal["plugin", "standalone"]
class DockerCapabilities(NamedTuple):
version_docker: Version
version_compose: Version
healthcheck_start_interval: bool
compose_type: DockerComposeType = "plugin"
def _parse_version(version: str) -> Version:
parts = version.split(".", 2)
if len(parts) == 1:
major = parts[0]
minor = "0"
patch = "0"
elif len(parts) == 2:
major, minor = parts
patch = "0"
else:
major, minor, patch = parts
return Version(int(major.lstrip("v")), int(minor), int(patch.split("-")[0]))
def check_capabilities(runner) -> DockerCapabilities:
    """Inspect the local Docker installation and return its feature set.

    Args:
        runner: Runner used to execute docker CLI subprocesses.

    Raises:
        click.UsageError: if docker (or compose) is missing, or the docker
            daemon is not running.
    """
    # check docker available
    if shutil.which("docker") is None:
        raise click.UsageError("Docker not installed") from None
    try:
        stdout, _ = runner.run(subp_exec("docker", "info", "-f", "json", collect=True))
        info = json.loads(stdout)
    except (click.exceptions.Exit, json.JSONDecodeError):
        raise click.UsageError("Docker not installed or not running") from None
    if not info["ServerVersion"]:
        raise click.UsageError("Docker not running") from None
    compose_type: DockerComposeType
    try:
        # Prefer the "docker compose" CLI plugin; fall back to the
        # standalone docker-compose binary if the plugin is absent.
        compose = next(
            p for p in info["ClientInfo"]["Plugins"] if p["Name"] == "compose"
        )
        compose_version_str = compose["Version"]
        compose_type = "plugin"
    except (KeyError, StopIteration):
        if shutil.which("docker-compose") is None:
            raise click.UsageError("Docker Compose not installed") from None
        compose_version_str, _ = runner.run(
            subp_exec("docker-compose", "--version", "--short", collect=True)
        )
        compose_type = "standalone"
    # parse versions
    docker_version = _parse_version(info["ServerVersion"])
    compose_version = _parse_version(compose_version_str)
    # check capabilities
    return DockerCapabilities(
        version_docker=docker_version,
        version_compose=compose_version,
        # healthcheck start_interval needs Docker Engine >= 25.
        healthcheck_start_interval=docker_version >= Version(25, 0, 0),
        compose_type=compose_type,
    )
def debugger_compose(
    *, port: Optional[int] = None, base_url: Optional[str] = None
) -> dict:
    """Return the compose service definition for the LangGraph debugger UI.

    Args:
        port: Host port to publish the debugger UI on. If None, the debugger
            is disabled.
        base_url: URL the debugger uses to reach the LangGraph API; when set
            it is exported as VITE_STUDIO_LOCAL_GRAPH_URL.

    Returns:
        A mapping with a single "langgraph-debugger" service, or an empty
        mapping when ``port`` is None.
    """
    if port is None:
        # Previously returned "" here, contradicting the declared `dict`
        # return type; {} is equally falsy and type-correct.
        return {}
    service = {
        "image": "langchain/langgraph-debugger",
        "restart": "on-failure",
        "depends_on": {
            "langgraph-postgres": {"condition": "service_healthy"},
        },
        "ports": [f'"{port}:3968"'],
    }
    if base_url:
        service["environment"] = {"VITE_STUDIO_LOCAL_GRAPH_URL": base_url}
    return {"langgraph-debugger": service}
# Function to convert dictionary to YAML
def dict_to_yaml(d: dict, *, indent: int = 0) -> str:
    """Convert a dictionary to a YAML string.

    Nesting uses one space per level; near-top-level keys (indent < 2) are
    separated by a blank line for readability.
    """
    space = " " * indent
    chunks: list = []
    for idx, (key, value) in enumerate(d.items()):
        # Blank line between consecutive top-level(ish) keys only.
        if idx >= 1 and indent < 2:
            chunks.append("\n")
        if isinstance(value, dict):
            chunks.append(f"{space}{key}:\n")
            chunks.append(dict_to_yaml(value, indent=indent + 1))
        elif isinstance(value, list):
            chunks.append(f"{space}{key}:\n")
            for item in value:
                chunks.append(f"{space} - {item}\n")
        else:
            chunks.append(f"{space}{key}: {value}\n")
    return "".join(chunks)
def compose_as_dict(
    capabilities: DockerCapabilities,
    *,
    port: int,
    debugger_port: Optional[int] = None,
    debugger_base_url: Optional[str] = None,
    # postgres://user:password@host:port/database?option=value
    postgres_uri: Optional[str] = None,
) -> dict:
    """Create a docker compose file as a dictionary in YML style.

    Args:
        capabilities: Detected Docker feature set (drives healthcheck opts).
        port: Host port to publish the LangGraph API on.
        debugger_port: If set, also include the debugger UI service.
        debugger_base_url: API URL handed to the debugger service.
        postgres_uri: External Postgres URI; if None, a local Postgres
            service (plus its volume) is included in the compose file.
    """
    if postgres_uri is None:
        include_db = True
        postgres_uri = DEFAULT_POSTGRES_URI
    else:
        include_db = False
    # The services below are defined in a non-intuitive order to match
    # the existing unit tests for this function.
    # It's fine to re-order just requires updating the unit tests, so it should
    # be done with caution.
    # Define the Redis service first as per the test order
    services = {
        "langgraph-redis": {
            "image": "redis:6",
            "healthcheck": {
                "test": "redis-cli ping",
                "interval": "5s",
                "timeout": "1s",
                "retries": 5,
            },
        }
    }
    # Add Postgres service before langgraph-api if it is needed
    if include_db:
        services["langgraph-postgres"] = {
            "image": "postgres:16",
            "ports": ['"5433:5432"'],
            "environment": {
                "POSTGRES_DB": "postgres",
                "POSTGRES_USER": "postgres",
                "POSTGRES_PASSWORD": "postgres",
            },
            "volumes": ["langgraph-data:/var/lib/postgresql/data"],
            "healthcheck": {
                "test": "pg_isready -U postgres",
                "start_period": "10s",
                "timeout": "1s",
                "retries": 5,
            },
        }
        # start_interval is only honored by Docker Engine >= 25; otherwise
        # fall back to a short plain interval.
        if capabilities.healthcheck_start_interval:
            services["langgraph-postgres"]["healthcheck"]["interval"] = "60s"
            services["langgraph-postgres"]["healthcheck"]["start_interval"] = "1s"
        else:
            services["langgraph-postgres"]["healthcheck"]["interval"] = "5s"
    # Add optional debugger service if debugger_port is specified
    if debugger_port:
        services["langgraph-debugger"] = debugger_compose(
            port=debugger_port, base_url=debugger_base_url
        )["langgraph-debugger"]
    # Add langgraph-api service
    services["langgraph-api"] = {
        "ports": [f'"{port}:8000"'],
        "depends_on": {
            "langgraph-redis": {"condition": "service_healthy"},
        },
        "environment": {
            "REDIS_URI": "redis://langgraph-redis:6379",
            "POSTGRES_URI": postgres_uri,
        },
    }
    # If Postgres is included, add it to the dependencies of langgraph-api
    if include_db:
        services["langgraph-api"]["depends_on"]["langgraph-postgres"] = {
            "condition": "service_healthy"
        }
    # Additional healthcheck for langgraph-api if required
    if capabilities.healthcheck_start_interval:
        services["langgraph-api"]["healthcheck"] = {
            "test": "python /api/healthcheck.py",
            "interval": "60s",
            "start_interval": "1s",
            "start_period": "10s",
        }
    # Final compose dictionary with volumes included if needed
    compose_dict = {}
    if include_db:
        compose_dict["volumes"] = {"langgraph-data": {"driver": "local"}}
    compose_dict["services"] = services
    return compose_dict
def compose(
    capabilities: DockerCapabilities,
    *,
    port: int,
    debugger_port: Optional[int] = None,
    debugger_base_url: Optional[str] = None,
    # postgres://user:password@host:port/database?option=value
    postgres_uri: Optional[str] = None,
) -> str:
    """Create a docker compose file as a string.

    Thin wrapper: builds the compose dict and serializes it to YAML text.
    """
    return dict_to_yaml(
        compose_as_dict(
            capabilities,
            port=port,
            debugger_port=debugger_port,
            debugger_base_url=debugger_base_url,
            postgres_uri=postgres_uri,
        )
    )
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/cli.py | import os
import pathlib
import shutil
import sys
from typing import Callable, Optional, Sequence
import click
import click.exceptions
from click import secho
import langgraph_cli.config
import langgraph_cli.docker
from langgraph_cli.analytics import log_command
from langgraph_cli.config import Config
from langgraph_cli.constants import DEFAULT_CONFIG, DEFAULT_PORT
from langgraph_cli.docker import DockerCapabilities
from langgraph_cli.exec import Runner, subp_exec
from langgraph_cli.progress import Progress
from langgraph_cli.templates import TEMPLATE_HELP_STRING, create_new
from langgraph_cli.version import __version__
OPT_DOCKER_COMPOSE = click.option(
"--docker-compose",
"-d",
help="Advanced: Path to docker-compose.yml file with additional services to launch.",
type=click.Path(
exists=True,
file_okay=True,
dir_okay=False,
resolve_path=True,
path_type=pathlib.Path,
),
)
OPT_CONFIG = click.option(
"--config",
"-c",
help="""Path to configuration file declaring dependencies, graphs and environment variables.
\b
Config file must be a JSON file that has the following keys:
- "dependencies": array of dependencies for langgraph API server. Dependencies can be one of the following:
- ".", which would look for local python packages, as well as pyproject.toml, setup.py or requirements.txt in the app directory
- "./local_package"
- "<package_name>
- "graphs": mapping from graph ID to path where the compiled graph is defined, i.e. ./your_package/your_file.py:variable, where
"variable" is an instance of langgraph.graph.graph.CompiledGraph
- "env": (optional) path to .env file or a mapping from environment variable to its value
- "python_version": (optional) 3.11, 3.12, or 3.13. Defaults to 3.11
- "pip_config_file": (optional) path to pip config file
- "dockerfile_lines": (optional) array of additional lines to add to Dockerfile following the import from parent image
\b
Example:
langgraph up -c langgraph.json
\b
Example:
{
"dependencies": [
"langchain_openai",
"./your_package"
],
"graphs": {
"my_graph_id": "./your_package/your_file.py:variable"
},
"env": "./.env"
}
\b
Example:
{
"python_version": "3.11",
"dependencies": [
"langchain_openai",
"."
],
"graphs": {
"my_graph_id": "./your_package/your_file.py:variable"
},
"env": {
"OPENAI_API_KEY": "secret-key"
}
}
Defaults to looking for langgraph.json in the current directory.""",
default=DEFAULT_CONFIG,
type=click.Path(
exists=True,
file_okay=True,
dir_okay=False,
resolve_path=True,
path_type=pathlib.Path,
),
)
OPT_PORT = click.option(
"--port",
"-p",
type=int,
default=DEFAULT_PORT,
show_default=True,
help="""
Port to expose.
\b
Example:
langgraph up --port 8000
\b
""",
)
OPT_RECREATE = click.option(
"--recreate/--no-recreate",
default=False,
show_default=True,
help="Recreate containers even if their configuration and image haven't changed",
)
OPT_PULL = click.option(
"--pull/--no-pull",
default=True,
show_default=True,
help="""
Pull latest images. Use --no-pull for running the server with locally-built images.
\b
Example:
langgraph up --no-pull
\b
""",
)
OPT_VERBOSE = click.option(
"--verbose",
is_flag=True,
default=False,
help="Show more output from the server logs",
)
OPT_WATCH = click.option("--watch", is_flag=True, help="Restart on file changes")
OPT_DEBUGGER_PORT = click.option(
"--debugger-port",
type=int,
help="Pull the debugger image locally and serve the UI on specified port",
)
OPT_DEBUGGER_BASE_URL = click.option(
"--debugger-base-url",
type=str,
help="URL used by the debugger to access LangGraph API. Defaults to http://127.0.0.1:[PORT]",
)
OPT_POSTGRES_URI = click.option(
"--postgres-uri",
help="Postgres URI to use for the database. Defaults to launching a local database",
)
@click.group()
@click.version_option(version=__version__, prog_name="LangGraph CLI")
def cli():
    # Root command group; subcommands (up, build, dockerfile, dev, ...) are
    # registered via @cli.command below. Intentionally no docstring: click
    # would surface it as help text.
    pass
@OPT_RECREATE
@OPT_PULL
@OPT_PORT
@OPT_DOCKER_COMPOSE
@OPT_CONFIG
@OPT_VERBOSE
@OPT_DEBUGGER_PORT
@OPT_DEBUGGER_BASE_URL
@OPT_WATCH
@OPT_POSTGRES_URI
@click.option(
    "--wait",
    is_flag=True,
    help="Wait for services to start before returning. Implies --detach",
)
@cli.command(help="🚀 Launch LangGraph API server.")
@log_command
def up(
    config: pathlib.Path,
    docker_compose: Optional[pathlib.Path],
    port: int,
    recreate: bool,
    pull: bool,
    watch: bool,
    wait: bool,
    verbose: bool,
    debugger_port: Optional[int],
    debugger_base_url: Optional[str],
    postgres_uri: Optional[str],
):
    # Launch the full compose stack (redis, optional postgres, API, optional
    # debugger) for the project described by `config`.
    click.secho("Starting LangGraph API server...", fg="green")
    click.secho(
        """For local dev, requires env var LANGSMITH_API_KEY with access to LangGraph Cloud closed beta.
For production use, requires a license key in env var LANGGRAPH_CLOUD_LICENSE_KEY.""",
    )
    # NOTE: `set` (Progress's message setter) shadows the builtin in this scope.
    with Runner() as runner, Progress(message="Pulling...") as set:
        capabilities = langgraph_cli.docker.check_capabilities(runner)
        args, stdin = prepare(
            runner,
            capabilities=capabilities,
            config_path=config,
            docker_compose=docker_compose,
            port=port,
            pull=pull,
            watch=watch,
            verbose=verbose,
            debugger_port=debugger_port,
            debugger_base_url=debugger_base_url,
            postgres_uri=postgres_uri,
        )
        # add up + options
        args.extend(["up", "--remove-orphans"])
        if recreate:
            args.extend(["--force-recreate", "--renew-anon-volumes"])
            # Best effort: drop the stale postgres data volume too.
            try:
                runner.run(subp_exec("docker", "volume", "rm", "langgraph-data"))
            except click.exceptions.Exit:
                pass
        if watch:
            args.append("--watch")
        if wait:
            args.append("--wait")
        else:
            args.append("--abort-on-container-exit")
        # run docker compose
        set("Building...")
        def on_stdout(line: str):
            # Progress callback: flips the status message on docker output and
            # prints the "Ready!" banner once the API reports startup complete.
            if "unpacking to docker.io" in line:
                set("Starting...")
            elif "Application startup complete" in line:
                debugger_origin = (
                    f"http://localhost:{debugger_port}"
                    if debugger_port
                    else "https://smith.langchain.com"
                )
                debugger_base_url_query = (
                    debugger_base_url or f"http://127.0.0.1:{port}"
                )
                set("")
                sys.stdout.write(
                    f"""Ready!
- API: http://localhost:{port}
- Docs: http://localhost:{port}/docs
- LangGraph Studio: {debugger_origin}/studio/?baseUrl={debugger_base_url_query}
"""
                )
                sys.stdout.flush()
                return True
        if capabilities.compose_type == "plugin":
            compose_cmd = ["docker", "compose"]
        elif capabilities.compose_type == "standalone":
            compose_cmd = ["docker-compose"]
        runner.run(
            subp_exec(
                *compose_cmd,
                *args,
                input=stdin,
                verbose=verbose,
                on_stdout=on_stdout,
            )
        )
def _build(
    runner,
    set: Callable[[str], None],
    config: pathlib.Path,
    config_json: dict,
    base_image: Optional[str],
    pull: bool,
    tag: str,
    passthrough: Sequence[str] = (),
):
    """Build the LangGraph API server image with `docker build`.

    Args:
        runner: Runner used to execute subprocesses.
        set: Progress-message callback (shadows the builtin in callers).
        config: Path to the langgraph.json config file.
        config_json: Parsed/validated contents of that config.
        base_image: Base image override; inferred from the runtime if None.
        pull: Whether to pull the latest base image first.
        tag: Tag for the built image.
        passthrough: Extra args forwarded verbatim to `docker build`.
    """
    base_image = base_image or (
        "langchain/langgraphjs-api"
        if config_json.get("node_version")
        else "langchain/langgraph-api"
    )
    # pull latest images
    if pull:
        runner.run(
            subp_exec(
                "docker",
                "pull",
                (
                    f"{base_image}:{config_json['node_version']}"
                    if config_json.get("node_version")
                    else f"{base_image}:{config_json['python_version']}"
                ),
                verbose=True,
            )
        )
    set("Building...")
    # apply options
    args = [
        "-f",
        "-", # stdin: the Dockerfile is piped in, not read from disk
        "-t",
        tag,
    ]
    # apply config
    stdin = langgraph_cli.config.config_to_docker(config, config_json, base_image)
    # run docker build
    runner.run(
        subp_exec(
            "docker",
            "build",
            *args,
            *passthrough,
            str(config.parent),
            input=stdin,
            verbose=True,
        )
    )
@OPT_CONFIG
@OPT_PULL
@click.option(
    "--tag",
    "-t",
    help="""Tag for the docker image.
    \b
    Example:
    langgraph build -t my-image
    \b
    """,
    required=True,
)
@click.option(
    "--base-image",
    hidden=True,
)
@click.argument("docker_build_args", nargs=-1, type=click.UNPROCESSED)
@cli.command(
    help="📦 Build LangGraph API server Docker image.",
    context_settings=dict(
        ignore_unknown_options=True,
    ),
)
@log_command
def build(
    config: pathlib.Path,
    docker_build_args: Sequence[str],
    base_image: Optional[str],
    pull: bool,
    tag: str,
):
    # Validate the config file, then delegate the actual build to _build().
    with Runner() as runner, Progress(message="Pulling...") as set:
        if shutil.which("docker") is None:
            raise click.UsageError("Docker not installed") from None
        config_json = langgraph_cli.config.validate_config_file(config)
        _build(
            runner, set, config, config_json, base_image, pull, tag, docker_build_args
        )
def _get_docker_ignore_content() -> str:
"""Return the content of a .dockerignore file.
This file is used to exclude files and directories from the Docker build context.
It may be overly broad, but it's better to be safe than sorry.
The main goal is to exclude .env files by default.
"""
return """\
# Ignore node_modules and other dependency directories
node_modules
bower_components
vendor
# Ignore logs and temporary files
*.log
*.tmp
*.swp
# Ignore .env files and other environment files
.env
.env.*
*.local
# Ignore git-related files
.git
.gitignore
# Ignore Docker-related files and configs
.dockerignore
docker-compose.yml
# Ignore build and cache directories
dist
build
.cache
__pycache__
# Ignore IDE and editor configurations
.vscode
.idea
*.sublime-project
*.sublime-workspace
.DS_Store # macOS-specific
# Ignore test and coverage files
coverage
*.coverage
*.test.js
*.spec.js
tests
"""
@OPT_CONFIG
@click.argument("save_path", type=click.Path(resolve_path=True))
@cli.command(
    help="🐳 Generate a Dockerfile for the LangGraph API server, with Docker Compose options."
)
@click.option(
    # Add a flag for adding a docker-compose.yml file as part of the output
    "--add-docker-compose",
    help=(
        "Add additional files for running the LangGraph API server with "
        "docker-compose. These files include a docker-compose.yml, .env file, "
        "and a .dockerignore file."
    ),
    is_flag=True,
)
@log_command
def dockerfile(save_path: str, config: pathlib.Path, add_docker_compose: bool) -> None:
    # Write a standalone Dockerfile (and optionally compose scaffolding:
    # docker-compose.yml, .dockerignore, .env) to disk.
    save_path = pathlib.Path(save_path).absolute()
    secho(f"🔍 Validating configuration at path: {config}", fg="yellow")
    config_json = langgraph_cli.config.validate_config_file(config)
    secho("✅ Configuration validated!", fg="green")
    secho(f"📝 Generating Dockerfile at {save_path}", fg="yellow")
    with open(str(save_path), "w", encoding="utf-8") as f:
        f.write(
            langgraph_cli.config.config_to_docker(
                config,
                config_json,
                (
                    "langchain/langgraphjs-api"
                    if config_json.get("node_version")
                    else "langchain/langgraph-api"
                ),
            )
        )
    secho("✅ Created: Dockerfile", fg="green")
    if add_docker_compose:
        # Add docker compose and related files
        # Add .dockerignore file in the same directory as the Dockerfile
        with open(str(save_path.parent / ".dockerignore"), "w", encoding="utf-8") as f:
            f.write(_get_docker_ignore_content())
        secho("✅ Created: .dockerignore", fg="green")
        # Generate a docker-compose.yml file
        path = str(save_path.parent / "docker-compose.yml")
        with open(path, "w", encoding="utf-8") as f:
            with Runner() as runner:
                capabilities = langgraph_cli.docker.check_capabilities(runner)
                compose_dict = langgraph_cli.docker.compose_as_dict(
                    capabilities,
                    port=8123,
                )
                # Add .env file to the docker-compose.yml for the langgraph-api service
                compose_dict["services"]["langgraph-api"]["env_file"] = [".env"]
                # Add the Dockerfile to the build context
                compose_dict["services"]["langgraph-api"]["build"] = {
                    "context": ".",
                    "dockerfile": save_path.name,
                }
                f.write(langgraph_cli.docker.dict_to_yaml(compose_dict))
        secho("✅ Created: docker-compose.yml", fg="green")
        # Check if the .env file exists in the same directory as the Dockerfile
        if not (save_path.parent / ".env").exists():
            # Also add an empty .env file
            with open(str(save_path.parent / ".env"), "w", encoding="utf-8") as f:
                f.writelines(
                    [
                        "# Uncomment the following line to add your LangSmith API key",
                        "\n",
                        "# LANGSMITH_API_KEY=your-api-key",
                        "\n",
                        "# Or if you have a LangGraph Cloud license key, "
                        "then uncomment the following line: ",
                        "\n",
                        "# LANGGRAPH_CLOUD_LICENSE_KEY=your-license-key",
                        "\n",
                        "# Add any other environment variables go below...",
                    ]
                )
            secho("✅ Created: .env", fg="green")
        else:
            # Do nothing since the .env file already exists. Not a great
            # idea to overwrite in case the user has added custom env vars set
            # in the .env file already.
            secho("➖ Skipped: .env. It already exists!", fg="yellow")
    secho(
        f"🎉 Files generated successfully at path {save_path.parent}!",
        fg="cyan",
        bold=True,
    )
@click.option(
    "--host",
    default="127.0.0.1",
    help="Network interface to bind the development server to. Default 127.0.0.1 is recommended for security. Only use 0.0.0.0 in trusted networks",
)
@click.option(
    "--port",
    default=2024,
    type=int,
    help="Port number to bind the development server to. Example: langgraph dev --port 8000",
)
@click.option(
    "--no-reload",
    is_flag=True,
    help="Disable automatic reloading when code changes are detected",
)
@click.option(
    "--config",
    type=click.Path(exists=True),
    default="langgraph.json",
    help="Path to configuration file declaring dependencies, graphs and environment variables",
)
@click.option(
    "--n-jobs-per-worker",
    default=None,
    type=int,
    help="Maximum number of concurrent jobs each worker process can handle. Default: 10",
)
@click.option(
    "--no-browser",
    is_flag=True,
    help="Skip automatically opening the browser when the server starts",
)
@click.option(
    "--debug-port",
    default=None,
    type=int,
    help="Enable remote debugging by listening on specified port. Requires debugpy to be installed",
)
@click.option(
    "--wait-for-client",
    is_flag=True,
    help="Wait for a debugger client to connect to the debug port before starting the server",
    default=False,
)
@cli.command(
    "dev",
    help="🏃♀️➡️ Run LangGraph API server in development mode with hot reloading and debugging support",
)
@log_command
def dev(
    host: str,
    port: int,
    no_reload: bool,
    config: pathlib.Path,
    n_jobs_per_worker: Optional[int],
    no_browser: bool,
    debug_port: Optional[int],
    wait_for_client: bool,
):
    """CLI entrypoint for running the LangGraph API server.

    Imports the optional in-memory dev server, validates the project
    configuration, makes the project (and its local directory dependencies)
    importable, and starts the server.

    Raises:
        click.UsageError: if the optional ``langgraph-api-inmem`` dev-server
            dependency is missing or its installation is broken.
    """
    try:
        from langgraph_api.cli import run_server
    except ImportError:
        # `run_server` comes from the optional `inmem` extra. Work out whether
        # the extra is simply not installed (-> actionable install hint) or
        # installed but broken (-> generic diagnostic).
        install_hint = (
            "Required package 'langgraph-api-inmem' is not installed.\n"
            "Please install it with:\n\n"
            ' pip install -U "langgraph-cli[inmem]"\n\n'
            "If you're developing the langgraph-cli package locally, you can install in development mode:\n"
            " pip install -e ."
        )
        # BUGFIX: `pkg_resources` (setuptools) may itself be unavailable, e.g.
        # on Python >= 3.12 where setuptools is no longer pre-installed. The
        # previous code referenced `pkg_resources.DistributionNotFound` inside
        # an except clause that could run with `pkg_resources` unbound,
        # raising a confusing NameError instead of the intended UsageError.
        # Except clauses are evaluated at exception-handling time, so the two
        # failure modes must be separated.
        try:
            import pkg_resources
        except ImportError:
            raise click.UsageError(install_hint) from None
        try:
            pkg_resources.require("langgraph-api-inmem")
        except pkg_resources.DistributionNotFound:
            raise click.UsageError(install_hint) from None
        raise click.UsageError(
            "Could not import run_server. This likely means your installation is incomplete.\n"
            "Please ensure langgraph-cli is installed with the 'inmem' extra: pip install -U \"langgraph-cli[inmem]\""
        ) from None
    config_json = langgraph_cli.config.validate_config_file(config)
    # Make the project root and any local (directory) dependencies importable.
    cwd = os.getcwd()
    sys.path.append(cwd)
    dependencies = config_json.get("dependencies", [])
    for dep in dependencies:
        dep_path = pathlib.Path(cwd) / dep
        if dep_path.is_dir():  # is_dir() already implies existence
            sys.path.append(str(dep_path))
    graphs = config_json.get("graphs", {})
    run_server(
        host,
        port,
        not no_reload,  # CLI exposes the negative flag; the server wants `reload`
        graphs,
        n_jobs_per_worker=n_jobs_per_worker,
        open_browser=not no_browser,
        debug_port=debug_port,
        env=config_json.get("env"),
        store=config_json.get("store"),
        wait_for_client=wait_for_client,
    )
@click.argument("path", required=False)
@click.option(
    "--template",
    type=str,
    help=TEMPLATE_HELP_STRING,
)
@cli.command("new", help="🌱 Create a new LangGraph project from a template.")
@log_command
def new(path: Optional[str], template: Optional[str]) -> None:
    """Create a new LangGraph project from a template.

    Thin CLI wrapper: template selection and file generation at ``path``
    are fully delegated to ``create_new``.
    """
    return create_new(path, template)
def prepare_args_and_stdin(
    *,
    capabilities: DockerCapabilities,
    config_path: pathlib.Path,
    config: Config,
    docker_compose: Optional[pathlib.Path],
    port: int,
    watch: bool,
    debugger_port: Optional[int] = None,
    debugger_base_url: Optional[str] = None,
    postgres_uri: Optional[str] = None,
):
    """Build the ``docker compose`` CLI arguments and the YAML fed via stdin.

    Returns a ``(args, stdin)`` pair: ``args`` are the extra flags for the
    ``docker compose`` invocation and ``stdin`` is the generated compose
    file content.
    """
    # Compose flags: run relative to the config's directory, optionally
    # layer a user-supplied compose file, and always read our generated
    # YAML last so it takes precedence.
    args = ["--project-directory", str(config_path.parent)]
    if docker_compose:
        args += ["-f", str(docker_compose)]
    args += ["-f", "-"]  # "-" makes docker compose read the file from stdin
    # Base compose YAML matching the detected docker capabilities...
    base_yaml = langgraph_cli.docker.compose(
        capabilities,
        port=port,
        debugger_port=debugger_port,
        debugger_base_url=debugger_base_url,
        postgres_uri=postgres_uri,
    )
    # ...followed by the project-specific services derived from the config,
    # choosing the JS or Python base image from the configured runtime.
    overlay_yaml = langgraph_cli.config.config_to_compose(
        config_path,
        config,
        watch=watch,
        base_image=(
            "langchain/langgraphjs-api"
            if config.get("node_version")
            else "langchain/langgraph-api"
        ),
    )
    return args, base_yaml + overlay_yaml
def prepare(
    runner,
    *,
    capabilities: DockerCapabilities,
    config_path: pathlib.Path,
    docker_compose: Optional[pathlib.Path],
    port: int,
    pull: bool,
    watch: bool,
    verbose: bool,
    debugger_port: Optional[int] = None,
    debugger_base_url: Optional[str] = None,
    postgres_uri: Optional[str] = None,
):
    """Validate the project config, optionally pull the matching base image,
    and assemble the ``docker compose`` invocation (args + stdin YAML).
    """
    config_json = langgraph_cli.config.validate_config_file(config_path)
    if pull:
        # Refresh the base API image that matches the configured runtime:
        # JS projects use the langgraphjs image, Python projects the default.
        if config_json.get("node_version"):
            image = f"langchain/langgraphjs-api:{config_json['node_version']}"
        else:
            image = f"langchain/langgraph-api:{config_json['python_version']}"
        runner.run(subp_exec("docker", "pull", image, verbose=verbose))
    return prepare_args_and_stdin(
        capabilities=capabilities,
        config_path=config_path,
        config=config_json,
        docker_compose=docker_compose,
        port=port,
        watch=watch,
        debugger_port=debugger_port,
        # Default the debugger's API base URL to the locally exposed port.
        debugger_base_url=debugger_base_url or f"http://127.0.0.1:{port}",
        postgres_uri=postgres_uri,
    )
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/progress.py | import sys
import threading
import time
from typing import Callable
class Progress:
    """Terminal spinner shown while a long-running task executes.

    Used as a context manager: ``__enter__`` starts a background thread that
    animates the spinner and returns a callable for updating the displayed
    message.  Setting the message to the empty string stops the spinner.
    """

    # Seconds between spinner frames.
    delay: float = 0.1

    @staticmethod
    def spinning_cursor():
        """Yield the spinner frames ``| / - \\`` forever."""
        while True:
            yield from "|/-\\"

    def __init__(self, *, message: str = ""):
        self.message = message
        self.spinner_generator = self.spinning_cursor()

    def spinner_iteration(self) -> None:
        """Draw one spinner frame plus the current message, wait, erase it."""
        # Snapshot the message so draw and erase use the same length even if
        # another thread updates it mid-frame.
        message = self.message
        sys.stdout.write(next(self.spinner_generator) + " " + message)
        sys.stdout.flush()
        time.sleep(self.delay)
        # clear the spinner and message (+2 for the cursor char and space)
        sys.stdout.write(
            "\b" * (len(message) + 2)
            + " " * (len(message) + 2)
            + "\b" * (len(message) + 2)
        )
        sys.stdout.flush()

    def spinner_task(self) -> None:
        """Animate the spinner until the message becomes empty."""
        # Delegate each frame to spinner_iteration instead of duplicating the
        # draw/erase logic (the two copies were previously identical and
        # liable to drift apart).
        while self.message:
            self.spinner_iteration()

    def __enter__(self) -> Callable[[str], None]:
        self.thread = threading.Thread(target=self.spinner_task)
        self.thread.start()

        def set_message(message: str) -> None:
            self.message = message
            if not message:
                # An empty message ends spinner_task's loop; wait for it so
                # no stray frame is drawn after the caller moves on.
                self.thread.join()

        return set_message

    def __exit__(self, exception, value, tb):
        # Stop the spinner thread and drop the reference, even on error.
        self.message = ""
        try:
            self.thread.join()
        finally:
            del self.thread
        if exception is not None:
            return False  # propagate any exception raised in the with-block
|
0 | lc_public_repos/langgraph/libs/cli | lc_public_repos/langgraph/libs/cli/langgraph_cli/constants.py | DEFAULT_CONFIG = "langgraph.json"
DEFAULT_PORT = 8123
# analytics
SUPABASE_PUBLIC_API_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Imt6cmxwcG9qaW5wY3l5YWlweG5iIiwicm9sZSI6ImFub24iLCJpYXQiOjE3MTkyNTc1NzksImV4cCI6MjAzNDgzMzU3OX0.kkVOlLz3BxemA5nP-vat3K4qRtrDuO4SwZSR_htcX9c"
SUPABASE_URL = "https://kzrlppojinpcyyaipxnb.supabase.co"
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/Makefile | .PHONY: all format lint test test_watch integration_tests spell_check spell_fix benchmark profile
# Default target executed when no arguments are given to make.
all: help
######################
# TESTING AND COVERAGE
######################
# Benchmarks
OUTPUT ?= out/benchmark.json
benchmark:
mkdir -p out
rm -f $(OUTPUT)
poetry run python -m bench -o $(OUTPUT) --rigorous
benchmark-fast:
mkdir -p out
rm -f $(OUTPUT)
poetry run python -m bench -o $(OUTPUT) --fast
GRAPH ?= bench/fanout_to_subgraph.py
profile:
mkdir -p out
sudo poetry run py-spy record -g -o out/profile.svg -- python $(GRAPH)
# Run unit tests and generate a coverage report.
coverage:
poetry run pytest --cov \
--cov-config=.coveragerc \
--cov-report xml \
--cov-report term-missing:skip-covered
start-postgres:
docker compose -f tests/compose-postgres.yml up -V --force-recreate --wait --remove-orphans
stop-postgres:
docker compose -f tests/compose-postgres.yml down -v
TEST ?= .
test:
make start-postgres && poetry run pytest $(TEST); \
EXIT_CODE=$$?; \
make stop-postgres; \
exit $$EXIT_CODE
test_parallel:
make start-postgres && poetry run pytest -n auto --dist worksteal $(TEST); \
EXIT_CODE=$$?; \
make stop-postgres; \
exit $$EXIT_CODE
WORKERS ?= auto
XDIST_ARGS := $(if $(WORKERS),-n $(WORKERS) --dist worksteal,)
MAXFAIL ?=
MAXFAIL_ARGS := $(if $(MAXFAIL),--maxfail $(MAXFAIL),)
test_watch:
make start-postgres && poetry run ptw . -- --ff -vv -x $(XDIST_ARGS) $(MAXFAIL_ARGS) --snapshot-update --tb short $(TEST); \
EXIT_CODE=$$?; \
make stop-postgres; \
exit $$EXIT_CODE
test_watch_all:
npx concurrently -n langgraph,checkpoint,checkpoint-sqlite,postgres "make test_watch" "make -C ../checkpoint test_watch" "make -C ../checkpoint-sqlite test_watch" "make -C ../checkpoint-postgres test_watch"
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --relative --diff-filter=d main . | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langgraph
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
poetry run ruff check .
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE)
[ "$(PYTHON_FILES)" = "" ] || poetry run mypy langgraph --cache-dir $(MYPY_CACHE)
format format_diff:
poetry run ruff format $(PYTHON_FILES)
poetry run ruff check --select I --fix $(PYTHON_FILES)
spell_check:
poetry run codespell --toml pyproject.toml
spell_fix:
poetry run codespell --toml pyproject.toml -w
######################
# HELP
######################
help:
@echo '===================='
@echo '-- DOCUMENTATION --'
@echo '-- LINTING --'
@echo 'format - run code formatters'
@echo 'lint - run linters'
@echo 'spell_check - run codespell on the project'
@echo 'spell_fix - run codespell on the project and fix the errors'
@echo '-- TESTS --'
@echo 'coverage - run unit tests and generate coverage report'
@echo 'test - run unit tests'
@echo 'test TEST_FILE=<test_file> - run all tests in file'
@echo 'test_watch - run unit tests in watch mode'
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/LICENSE | MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/poetry.lock | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiosqlite"
version = "0.20.0"
description = "asyncio bridge to the standard sqlite3 module"
optional = false
python-versions = ">=3.8"
files = [
{file = "aiosqlite-0.20.0-py3-none-any.whl", hash = "sha256:36a1deaca0cac40ebe32aac9977a6e2bbc7f5189f23f4a54d5908986729e5bd6"},
{file = "aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7"},
]
[package.dependencies]
typing_extensions = ">=4.0"
[package.extras]
dev = ["attribution (==1.7.0)", "black (==24.2.0)", "coverage[toml] (==7.4.1)", "flake8 (==7.0.0)", "flake8-bugbear (==24.2.6)", "flit (==3.9.0)", "mypy (==1.8.0)", "ufmt (==2.3.0)", "usort (==1.0.8.post1)"]
docs = ["sphinx (==7.2.6)", "sphinx-mdinclude (==0.5.3)"]
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anyio"
version = "4.6.2.post1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
files = [
{file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
{file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "appnope"
version = "0.1.4"
description = "Disable App Nap on macOS >= 10.9"
optional = false
python-versions = ">=3.6"
files = [
{file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"},
{file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"},
]
[[package]]
name = "argon2-cffi"
version = "23.1.0"
description = "Argon2 for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"},
{file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"},
]
[package.dependencies]
argon2-cffi-bindings = "*"
[package.extras]
dev = ["argon2-cffi[tests,typing]", "tox (>4)"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"]
tests = ["hypothesis", "pytest"]
typing = ["mypy"]
[[package]]
name = "argon2-cffi-bindings"
version = "21.2.0"
description = "Low-level CFFI bindings for Argon2"
optional = false
python-versions = ">=3.6"
files = [
{file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"},
{file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"},
{file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"},
{file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"},
{file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"},
{file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"},
{file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"},
{file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"},
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"},
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"},
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"},
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"},
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"},
]
[package.dependencies]
cffi = ">=1.0.1"
[package.extras]
dev = ["cogapp", "pre-commit", "pytest", "wheel"]
tests = ["pytest"]
[[package]]
name = "arrow"
version = "1.3.0"
description = "Better dates & times for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"},
{file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"},
]
[package.dependencies]
python-dateutil = ">=2.7.0"
types-python-dateutil = ">=2.8.10"
[package.extras]
doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"]
test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"]
[[package]]
name = "asttokens"
version = "2.4.1"
description = "Annotate AST trees with source code positions"
optional = false
python-versions = "*"
files = [
{file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
{file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"},
]
[package.dependencies]
six = ">=1.12.0"
[package.extras]
astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"]
test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
[[package]]
name = "async-lru"
version = "2.0.4"
description = "Simple LRU cache for asyncio"
optional = false
python-versions = ">=3.8"
files = [
{file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"},
{file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"},
]
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
[[package]]
name = "attrs"
version = "24.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
{file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
{file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
]
[package.extras]
benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "babel"
version = "2.16.0"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.8"
files = [
{file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
{file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
]
[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
[[package]]
name = "beautifulsoup4"
version = "4.12.3"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
files = [
{file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
{file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
]
[package.dependencies]
soupsieve = ">1.2"
[package.extras]
cchardet = ["cchardet"]
chardet = ["chardet"]
charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"]
lxml = ["lxml"]
[[package]]
name = "bleach"
version = "6.2.0"
description = "An easy safelist-based HTML-sanitizing tool."
optional = false
python-versions = ">=3.9"
files = [
{file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
{file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
]
[package.dependencies]
webencodings = "*"
[package.extras]
css = ["tinycss2 (>=1.1.0,<1.5)"]
[[package]]
name = "certifi"
version = "2024.8.30"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
]
[[package]]
name = "cffi"
version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
files = [
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
{file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
{file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
{file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
{file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
{file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
{file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
{file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
{file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
{file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
{file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
{file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
{file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
{file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
{file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
{file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
{file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
{file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
{file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
{file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
{file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
{file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
{file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]
[package.dependencies]
pycparser = "*"
[[package]]
name = "charset-normalizer"
version = "3.4.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
{file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "comm"
version = "0.2.2"
description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc."
optional = false
python-versions = ">=3.8"
files = [
{file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"},
{file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"},
]
[package.dependencies]
traitlets = ">=4"
[package.extras]
test = ["pytest"]
[[package]]
name = "coverage"
version = "7.6.4"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.9"
files = [
{file = "coverage-7.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f8ae553cba74085db385d489c7a792ad66f7f9ba2ee85bfa508aeb84cf0ba07"},
{file = "coverage-7.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8165b796df0bd42e10527a3f493c592ba494f16ef3c8b531288e3d0d72c1f6f0"},
{file = "coverage-7.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c8b95bf47db6d19096a5e052ffca0a05f335bc63cef281a6e8fe864d450a72"},
{file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ed9281d1b52628e81393f5eaee24a45cbd64965f41857559c2b7ff19385df51"},
{file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0809082ee480bb8f7416507538243c8863ac74fd8a5d2485c46f0f7499f2b491"},
{file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d541423cdd416b78626b55f123412fcf979d22a2c39fce251b350de38c15c15b"},
{file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58809e238a8a12a625c70450b48e8767cff9eb67c62e6154a642b21ddf79baea"},
{file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9b8e184898ed014884ca84c70562b4a82cbc63b044d366fedc68bc2b2f3394a"},
{file = "coverage-7.6.4-cp310-cp310-win32.whl", hash = "sha256:6bd818b7ea14bc6e1f06e241e8234508b21edf1b242d49831831a9450e2f35fa"},
{file = "coverage-7.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:06babbb8f4e74b063dbaeb74ad68dfce9186c595a15f11f5d5683f748fa1d172"},
{file = "coverage-7.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73d2b73584446e66ee633eaad1a56aad577c077f46c35ca3283cd687b7715b0b"},
{file = "coverage-7.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51b44306032045b383a7a8a2c13878de375117946d68dcb54308111f39775a25"},
{file = "coverage-7.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3fb02fe73bed561fa12d279a417b432e5b50fe03e8d663d61b3d5990f29546"},
{file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed8fe9189d2beb6edc14d3ad19800626e1d9f2d975e436f84e19efb7fa19469b"},
{file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b369ead6527d025a0fe7bd3864e46dbee3aa8f652d48df6174f8d0bac9e26e0e"},
{file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ade3ca1e5f0ff46b678b66201f7ff477e8fa11fb537f3b55c3f0568fbfe6e718"},
{file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:27fb4a050aaf18772db513091c9c13f6cb94ed40eacdef8dad8411d92d9992db"},
{file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f704f0998911abf728a7783799444fcbbe8261c4a6c166f667937ae6a8aa522"},
{file = "coverage-7.6.4-cp311-cp311-win32.whl", hash = "sha256:29155cd511ee058e260db648b6182c419422a0d2e9a4fa44501898cf918866cf"},
{file = "coverage-7.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:8902dd6a30173d4ef09954bfcb24b5d7b5190cf14a43170e386979651e09ba19"},
{file = "coverage-7.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12394842a3a8affa3ba62b0d4ab7e9e210c5e366fbac3e8b2a68636fb19892c2"},
{file = "coverage-7.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b6b4c83d8e8ea79f27ab80778c19bc037759aea298da4b56621f4474ffeb117"},
{file = "coverage-7.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d5b8007f81b88696d06f7df0cb9af0d3b835fe0c8dbf489bad70b45f0e45613"},
{file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b57b768feb866f44eeed9f46975f3d6406380275c5ddfe22f531a2bf187eda27"},
{file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5915fcdec0e54ee229926868e9b08586376cae1f5faa9bbaf8faf3561b393d52"},
{file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b58c672d14f16ed92a48db984612f5ce3836ae7d72cdd161001cc54512571f2"},
{file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2fdef0d83a2d08d69b1f2210a93c416d54e14d9eb398f6ab2f0a209433db19e1"},
{file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cf717ee42012be8c0cb205dbbf18ffa9003c4cbf4ad078db47b95e10748eec5"},
{file = "coverage-7.6.4-cp312-cp312-win32.whl", hash = "sha256:7bb92c539a624cf86296dd0c68cd5cc286c9eef2d0c3b8b192b604ce9de20a17"},
{file = "coverage-7.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:1032e178b76a4e2b5b32e19d0fd0abbce4b58e77a1ca695820d10e491fa32b08"},
{file = "coverage-7.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:023bf8ee3ec6d35af9c1c6ccc1d18fa69afa1cb29eaac57cb064dbb262a517f9"},
{file = "coverage-7.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0ac3d42cb51c4b12df9c5f0dd2f13a4f24f01943627120ec4d293c9181219ba"},
{file = "coverage-7.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8fe4984b431f8621ca53d9380901f62bfb54ff759a1348cd140490ada7b693c"},
{file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fbd612f8a091954a0c8dd4c0b571b973487277d26476f8480bfa4b2a65b5d06"},
{file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dacbc52de979f2823a819571f2e3a350a7e36b8cb7484cdb1e289bceaf35305f"},
{file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dab4d16dfef34b185032580e2f2f89253d302facba093d5fa9dbe04f569c4f4b"},
{file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:862264b12ebb65ad8d863d51f17758b1684560b66ab02770d4f0baf2ff75da21"},
{file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5beb1ee382ad32afe424097de57134175fea3faf847b9af002cc7895be4e2a5a"},
{file = "coverage-7.6.4-cp313-cp313-win32.whl", hash = "sha256:bf20494da9653f6410213424f5f8ad0ed885e01f7e8e59811f572bdb20b8972e"},
{file = "coverage-7.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:182e6cd5c040cec0a1c8d415a87b67ed01193ed9ad458ee427741c7d8513d963"},
{file = "coverage-7.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a181e99301a0ae128493a24cfe5cfb5b488c4e0bf2f8702091473d033494d04f"},
{file = "coverage-7.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:df57bdbeffe694e7842092c5e2e0bc80fff7f43379d465f932ef36f027179806"},
{file = "coverage-7.6.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bcd1069e710600e8e4cf27f65c90c7843fa8edfb4520fb0ccb88894cad08b11"},
{file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99b41d18e6b2a48ba949418db48159d7a2e81c5cc290fc934b7d2380515bd0e3"},
{file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1e54712ba3474f34b7ef7a41e65bd9037ad47916ccb1cc78769bae324c01a"},
{file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53d202fd109416ce011578f321460795abfe10bb901b883cafd9b3ef851bacfc"},
{file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c48167910a8f644671de9f2083a23630fbf7a1cb70ce939440cd3328e0919f70"},
{file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc8ff50b50ce532de2fa7a7daae9dd12f0a699bfcd47f20945364e5c31799fef"},
{file = "coverage-7.6.4-cp313-cp313t-win32.whl", hash = "sha256:b8d3a03d9bfcaf5b0141d07a88456bb6a4c3ce55c080712fec8418ef3610230e"},
{file = "coverage-7.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:f3ddf056d3ebcf6ce47bdaf56142af51bb7fad09e4af310241e9db7a3a8022e1"},
{file = "coverage-7.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cb7fa111d21a6b55cbf633039f7bc2749e74932e3aa7cb7333f675a58a58bf3"},
{file = "coverage-7.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11a223a14e91a4693d2d0755c7a043db43d96a7450b4f356d506c2562c48642c"},
{file = "coverage-7.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a413a096c4cbac202433c850ee43fa326d2e871b24554da8327b01632673a076"},
{file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00a1d69c112ff5149cabe60d2e2ee948752c975d95f1e1096742e6077affd376"},
{file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f76846299ba5c54d12c91d776d9605ae33f8ae2b9d1d3c3703cf2db1a67f2c0"},
{file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fe439416eb6380de434886b00c859304338f8b19f6f54811984f3420a2e03858"},
{file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0294ca37f1ba500667b1aef631e48d875ced93ad5e06fa665a3295bdd1d95111"},
{file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6f01ba56b1c0e9d149f9ac85a2f999724895229eb36bd997b61e62999e9b0901"},
{file = "coverage-7.6.4-cp39-cp39-win32.whl", hash = "sha256:bc66f0bf1d7730a17430a50163bb264ba9ded56739112368ba985ddaa9c3bd09"},
{file = "coverage-7.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:c481b47f6b5845064c65a7bc78bc0860e635a9b055af0df46fdf1c58cebf8e8f"},
{file = "coverage-7.6.4-pp39.pp310-none-any.whl", hash = "sha256:3c65d37f3a9ebb703e710befdc489a38683a5b152242664b973a7b7b22348a4e"},
{file = "coverage-7.6.4.tar.gz", hash = "sha256:29fc0f17b1d3fea332f8001d4558f8214af7f1d87a345f3a133c901d60347c73"},
]
[package.dependencies]
tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
[package.extras]
toml = ["tomli"]
[[package]]
name = "debugpy"
version = "1.8.7"
description = "An implementation of the Debug Adapter Protocol for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b"},
{file = "debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9"},
{file = "debugpy-1.8.7-cp310-cp310-win32.whl", hash = "sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55"},
{file = "debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037"},
{file = "debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f"},
{file = "debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0"},
{file = "debugpy-1.8.7-cp311-cp311-win32.whl", hash = "sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2"},
{file = "debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211"},
{file = "debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706"},
{file = "debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2"},
{file = "debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca"},
{file = "debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39"},
{file = "debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40"},
{file = "debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7"},
{file = "debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba"},
{file = "debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa"},
{file = "debugpy-1.8.7-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:4b908291a1d051ef3331484de8e959ef3e66f12b5e610c203b5b75d2725613a7"},
{file = "debugpy-1.8.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da8df5b89a41f1fd31503b179d0a84a5fdb752dddd5b5388dbd1ae23cda31ce9"},
{file = "debugpy-1.8.7-cp38-cp38-win32.whl", hash = "sha256:b12515e04720e9e5c2216cc7086d0edadf25d7ab7e3564ec8b4521cf111b4f8c"},
{file = "debugpy-1.8.7-cp38-cp38-win_amd64.whl", hash = "sha256:93176e7672551cb5281577cdb62c63aadc87ec036f0c6a486f0ded337c504596"},
{file = "debugpy-1.8.7-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:90d93e4f2db442f8222dec5ec55ccfc8005821028982f1968ebf551d32b28907"},
{file = "debugpy-1.8.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6db2a370e2700557a976eaadb16243ec9c91bd46f1b3bb15376d7aaa7632c81"},
{file = "debugpy-1.8.7-cp39-cp39-win32.whl", hash = "sha256:a6cf2510740e0c0b4a40330640e4b454f928c7b99b0c9dbf48b11efba08a8cda"},
{file = "debugpy-1.8.7-cp39-cp39-win_amd64.whl", hash = "sha256:6a9d9d6d31846d8e34f52987ee0f1a904c7baa4912bf4843ab39dadf9b8f3e0d"},
{file = "debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae"},
{file = "debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e"},
]
[[package]]
name = "decorator"
version = "5.1.1"
description = "Decorators for Humans"
optional = false
python-versions = ">=3.5"
files = [
{file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
{file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
]
[[package]]
name = "defusedxml"
version = "0.7.1"
description = "XML bomb protection for Python stdlib modules"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
[[package]]
name = "duckdb"
version = "1.1.2"
description = "DuckDB in-process database"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:91e7f99cf5cab1d26f92cb014429153497d805e79689baa44f4c4585a8cb243f"},
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:0107de622fe208142a1108263a03c43956048dcc99be3702d8e5d2aeaf99554c"},
{file = "duckdb-1.1.2-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:8a09610f780857677725897856f8cdf3cafd8a991f871e6cb8ba88b2dbc8d737"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0f0ddac0482f0f3fece54d720d13819e82ae26c01a939ffa66a87be53f7f665"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84103373e818758dfa361d27781d0f096553843c5ffb9193260a0786c5248270"},
{file = "duckdb-1.1.2-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bfdfd23e2bf58014ad0673973bd0ed88cd048dfe8e82420814a71d7d52ef2288"},
{file = "duckdb-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:25889e6e29b87047b1dd56385ac08156e4713c59326cc6fff89657d01b2c417b"},
{file = "duckdb-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:312570fa5277c3079de18388b86c2d87cbe1044838bb152b235c0227581d5d42"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:568439ea4fce8cb72ec1f767cd510686a9e7e29a011fc7c56d990059a6e94e48"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:74974f2d7210623a5d61b1fb0cb589c6e5ffcbf7dbb757a04c5ba24adcfc8cac"},
{file = "duckdb-1.1.2-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:e26422a3358c816d764639070945b73eef55d1b4df990989e3492c85ef725c21"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87e972bd452eeeab197fe39dcaeecdb7c264b1f75a0ee67e532e235fe45b84df"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a6b73e70b73c8df85da383f6e557c03cad5c877868b9a7e41715761e8166c1e"},
{file = "duckdb-1.1.2-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:623cb1952466aae5907af84107bcdec25a5ca021a8b6441e961f41edc724f6f2"},
{file = "duckdb-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9fc0b550f96901fa7e76dc70a13f6477ad3e18ef1cb21d414c3a5569de3f27e"},
{file = "duckdb-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:181edb1973bd8f493bcb6ecfa035f1a592dff4667758592f300619012ba251c0"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:83372b1b411086cac01ab2071122772fa66170b1b41ddbc37527464066083668"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:db37441deddfee6ac35a0c742d2f9e90e4e50b9e76d586a060d122b8fc56dada"},
{file = "duckdb-1.1.2-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:19142a77e72874aeaa6fda30aeb13612c6de5e8c60fbcc3392cea6ef0694eeaf"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:099d99dd48d6e4682a3dd6233ceab73d977ebe1a87afaac54cf77c844e24514a"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be86e586ca7af7e807f72479a2b8d0983565360b19dbda4ef8a9d7b3909b8e2c"},
{file = "duckdb-1.1.2-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:578e0953e4d8ba8da0cd69fb2930c45f51ce47d213b77d8a4cd461f9c0960b87"},
{file = "duckdb-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:72b5eb5762c1a5e68849c7143f3b3747a9f15c040e34e41559f233a1569ad16f"},
{file = "duckdb-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:9b4c6b6a08180261d98330d97355503961a25ca31cd9ef296e0681f7895b4a2c"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:695dcbc561374b126e86659709feadf883c9969ed718e94713edd4ba15d16619"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_universal2.whl", hash = "sha256:ada29be1e889f486c6cf1f6dffd15463e748faf361f33996f2e862779edc24a9"},
{file = "duckdb-1.1.2-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:6ca722738fa9eb6218619740631de29acfdd132de6f6a6350fee5e291c2f6117"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c796d33f1e5a0c8c570d22da0c0b1db8578687e427029e1ce2c8ce3f9fffa6a3"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5c0996988a70dd3bc8111d9b9aeab7e38ed1999a52607c5f1b528e362b4dd1c"},
{file = "duckdb-1.1.2-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c37b039f6d6fed14d89450f5ccf54922b3304192d7412e12d6cc8d9e757f7a2"},
{file = "duckdb-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8c766b87f675c76d6d17103bf6fb9fb1a9e2fcb3d9b25c28bbc634bde31223e"},
{file = "duckdb-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:e3e6300b7ccaf64b609f4f0780a6e1d25ab8cf34cceed46e62c35b6c4c5cb63b"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a01fae9604a54ecbc26e7503c522311f15afbd2870e6d8f6fbef4545dfae550"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:492b1d86a696428bd3f14dc1c7c3230e2dbca8978f288be64b04a26e0e00fad5"},
{file = "duckdb-1.1.2-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bba58459ad897a78c4e478a097626fc266459a40338cecc68a49a8d5dc72fb7"},
{file = "duckdb-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d395a3bf510bf24686821eec15802624797dcb33e8f14f8a7cc8e17d909474af"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:fd800f75728727fe699ed1eb22b636867cf48c9dd105ee88b977e20c89df4509"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:d8caaf43909e49537e26df51d80d075ae2b25a610d28ed8bd31d6ccebeaf3c65"},
{file = "duckdb-1.1.2-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:564166811c68d9c7f9911eb707ad32ec9c2507b98336d894fbe658b85bf1c697"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19386aa09f0d6f97634ba2972096d1c80d880176dfb0e949eadc91c98262a663"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9e8387bcc9a591ad14011ddfec0d408d1d9b1889c6c9b495a04c7016a24b9b3"},
{file = "duckdb-1.1.2-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f8c5ff4970403ed3ff0ac71fe0ce1e6be3199df9d542afc84c424b444ba4ffe8"},
{file = "duckdb-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:9283dcca87c3260eb631a99d738fa72b8545ed45b475bc72ad254f7310e14284"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f87edaf20001530e63a4f7bda13b55dc3152d7171226915f2bf34e0813c8759e"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:efec169b3fe0b821e3207ba3e445f227d42dd62b4440ff79c37fa168a4fc5a71"},
{file = "duckdb-1.1.2-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:89164a2d29d56605a95ee5032aa415dd487028c4fd3e06d971497840e74c56e7"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6858e10c60ff7e70e61d3dd53d2545c8b2609942e45fd6de38cd0dee52932de3"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca967c5a57b1d0cb0fd5e539ab24110e5a59dcbedd365bb2dc80533d6e44a8d"},
{file = "duckdb-1.1.2-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ce949f1d7999aa6a046eb64067eee41d4c5c2872ba4fa408c9947742d0c7231"},
{file = "duckdb-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ba6d1f918e6ca47a368a0c32806016405cb9beb2c245806b0ca998f569d2bdf"},
{file = "duckdb-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:7111fd3e7b334a7be383313ce29918b7c643e4f6ef44d6d63c3ab3fa6716c114"},
{file = "duckdb-1.1.2.tar.gz", hash = "sha256:c8232861dc8ec6daa29067056d5a0e5789919f2ab22ab792787616d7cd52f02a"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "execnet"
version = "2.1.1"
description = "execnet: rapid multi-Python deployment"
optional = false
python-versions = ">=3.8"
files = [
{file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
{file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
]
[package.extras]
testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "executing"
version = "2.1.0"
description = "Get the currently executing AST node of a frame, and other information"
optional = false
python-versions = ">=3.8"
files = [
{file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"},
{file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"},
]
[package.extras]
tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
[[package]]
name = "fastjsonschema"
version = "2.20.0"
description = "Fastest Python implementation of JSON schema"
optional = false
python-versions = "*"
files = [
{file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"},
{file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"},
]
[package.extras]
devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]
[[package]]
name = "fqdn"
version = "1.5.1"
description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern browsers"
optional = false
python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4"
files = [
{file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"},
{file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"},
]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.6"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"},
{file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.26.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"},
{file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "importlib-metadata"
version = "8.5.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
{file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
]
[package.dependencies]
zipp = ">=3.20"
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "ipykernel"
version = "6.29.5"
description = "IPython Kernel for Jupyter"
optional = false
python-versions = ">=3.8"
files = [
{file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"},
{file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"},
]
[package.dependencies]
appnope = {version = "*", markers = "platform_system == \"Darwin\""}
comm = ">=0.1.1"
debugpy = ">=1.6.5"
ipython = ">=7.23.1"
jupyter-client = ">=6.1.12"
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
matplotlib-inline = ">=0.1"
nest-asyncio = "*"
packaging = "*"
psutil = "*"
pyzmq = ">=24"
tornado = ">=6.1"
traitlets = ">=5.4.0"
[package.extras]
cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"]
pyqt5 = ["pyqt5"]
pyside6 = ["pyside6"]
test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"]
[[package]]
name = "ipython"
version = "8.18.1"
description = "IPython: Productive Interactive Computing"
optional = false
python-versions = ">=3.9"
files = [
{file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"},
{file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
decorator = "*"
exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
jedi = ">=0.16"
matplotlib-inline = "*"
pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
prompt-toolkit = ">=3.0.41,<3.1.0"
pygments = ">=2.4.0"
stack-data = "*"
traitlets = ">=5"
typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
[package.extras]
all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
black = ["black"]
doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
kernel = ["ipykernel"]
nbconvert = ["nbconvert"]
nbformat = ["nbformat"]
notebook = ["ipywidgets", "notebook"]
parallel = ["ipyparallel"]
qtconsole = ["qtconsole"]
test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"]
test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"]
[[package]]
name = "ipywidgets"
version = "8.1.5"
description = "Jupyter interactive widgets"
optional = false
python-versions = ">=3.7"
files = [
{file = "ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245"},
{file = "ipywidgets-8.1.5.tar.gz", hash = "sha256:870e43b1a35656a80c18c9503bbf2d16802db1cb487eec6fab27d683381dde17"},
]
[package.dependencies]
comm = ">=0.1.3"
ipython = ">=6.1.0"
jupyterlab-widgets = ">=3.0.12,<3.1.0"
traitlets = ">=4.3.1"
widgetsnbextension = ">=4.0.12,<4.1.0"
[package.extras]
test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"]
[[package]]
name = "isoduration"
version = "20.11.0"
description = "Operations with ISO 8601 durations"
optional = false
python-versions = ">=3.7"
files = [
{file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"},
{file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"},
]
[package.dependencies]
arrow = ">=0.15.0"
[[package]]
name = "jedi"
version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
optional = false
python-versions = ">=3.6"
files = [
{file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
{file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
]
[package.dependencies]
parso = ">=0.8.3,<0.9.0"
[package.extras]
docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
[[package]]
name = "jinja2"
version = "3.1.4"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
{file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
{file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
]
[package.dependencies]
MarkupSafe = ">=2.0"
[package.extras]
i18n = ["Babel (>=2.7)"]
[[package]]
name = "json5"
version = "0.9.25"
description = "A Python implementation of the JSON5 data format."
optional = false
python-versions = ">=3.8"
files = [
{file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"},
{file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"},
]
[[package]]
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
{file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
]
[package.dependencies]
jsonpointer = ">=1.9"
[[package]]
name = "jsonpointer"
version = "3.0.0"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = false
python-versions = ">=3.7"
files = [
{file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
{file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
]
[[package]]
name = "jsonschema"
version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
]
[package.dependencies]
attrs = ">=22.2.0"
fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""}
jsonschema-specifications = ">=2023.03.6"
referencing = ">=0.28.4"
rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""}
rpds-py = ">=0.7.1"
uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""}
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
[[package]]
name = "jsonschema-specifications"
version = "2024.10.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
files = [
{file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"},
{file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"},
]
[package.dependencies]
referencing = ">=0.31.0"
[[package]]
name = "jupyter"
version = "1.1.1"
description = "Jupyter metapackage. Install all the Jupyter components in one go."
optional = false
python-versions = "*"
files = [
{file = "jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83"},
{file = "jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a"},
]
[package.dependencies]
ipykernel = "*"
ipywidgets = "*"
jupyter-console = "*"
jupyterlab = "*"
nbconvert = "*"
notebook = "*"
[[package]]
name = "jupyter-client"
version = "8.6.3"
description = "Jupyter protocol implementation and client libraries"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"},
{file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"},
]
[package.dependencies]
importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
python-dateutil = ">=2.8.2"
pyzmq = ">=23.0"
tornado = ">=6.2"
traitlets = ">=5.3"
[package.extras]
docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"]
[[package]]
name = "jupyter-console"
version = "6.6.3"
description = "Jupyter terminal console"
optional = false
python-versions = ">=3.7"
files = [
{file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"},
{file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"},
]
[package.dependencies]
ipykernel = ">=6.14"
ipython = "*"
jupyter-client = ">=7.0.0"
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
prompt-toolkit = ">=3.0.30"
pygments = "*"
pyzmq = ">=17"
traitlets = ">=5.4"
[package.extras]
test = ["flaky", "pexpect", "pytest"]
[[package]]
name = "jupyter-core"
version = "5.7.2"
description = "Jupyter core package. A base package on which Jupyter projects rely."
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"},
{file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"},
]
[package.dependencies]
platformdirs = ">=2.5"
pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
traitlets = ">=5.3"
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"]
[[package]]
name = "jupyter-events"
version = "0.10.0"
description = "Jupyter Event System library"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"},
{file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"},
]
[package.dependencies]
jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]}
python-json-logger = ">=2.0.4"
pyyaml = ">=5.3"
referencing = "*"
rfc3339-validator = "*"
rfc3986-validator = ">=0.1.1"
traitlets = ">=5.3"
[package.extras]
cli = ["click", "rich"]
docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"]
test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"]
[[package]]
name = "jupyter-lsp"
version = "2.2.5"
description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"},
{file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"},
]
[package.dependencies]
importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
jupyter-server = ">=1.1.2"
[[package]]
name = "jupyter-server"
version = "2.14.2"
description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications."
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"},
{file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"},
]
[package.dependencies]
anyio = ">=3.1.0"
argon2-cffi = ">=21.1"
jinja2 = ">=3.0.3"
jupyter-client = ">=7.4.4"
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
jupyter-events = ">=0.9.0"
jupyter-server-terminals = ">=0.4.4"
nbconvert = ">=6.4.4"
nbformat = ">=5.3.0"
overrides = ">=5.0"
packaging = ">=22.0"
prometheus-client = ">=0.9"
pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""}
pyzmq = ">=24"
send2trash = ">=1.8.2"
terminado = ">=0.8.3"
tornado = ">=6.2.0"
traitlets = ">=5.6.0"
websocket-client = ">=1.7"
[package.extras]
docs = ["ipykernel", "jinja2", "jupyter-client", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"]
test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0,<9)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.7)", "pytest-timeout", "requests"]
[[package]]
name = "jupyter-server-terminals"
version = "0.5.3"
description = "A Jupyter Server Extension Providing Terminals."
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"},
{file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"},
]
[package.dependencies]
pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""}
terminado = ">=0.8.3"
[package.extras]
docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"]
test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"]
[[package]]
name = "jupyterlab"
version = "4.2.5"
description = "JupyterLab computational environment"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyterlab-4.2.5-py3-none-any.whl", hash = "sha256:73b6e0775d41a9fee7ee756c80f58a6bed4040869ccc21411dc559818874d321"},
{file = "jupyterlab-4.2.5.tar.gz", hash = "sha256:ae7f3a1b8cb88b4f55009ce79fa7c06f99d70cd63601ee4aa91815d054f46f75"},
]
[package.dependencies]
async-lru = ">=1.0.0"
httpx = ">=0.25.0"
importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
ipykernel = ">=6.5.0"
jinja2 = ">=3.0.3"
jupyter-core = "*"
jupyter-lsp = ">=2.0.0"
jupyter-server = ">=2.4.0,<3"
jupyterlab-server = ">=2.27.1,<3"
notebook-shim = ">=0.2"
packaging = "*"
setuptools = ">=40.1.0"
tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
tornado = ">=6.2.0"
traitlets = "*"
[package.extras]
dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"]
docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"]
docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"]
upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"]
[[package]]
name = "jupyterlab-pygments"
version = "0.3.0"
description = "Pygments theme using JupyterLab CSS variables"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"},
{file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"},
]
[[package]]
name = "jupyterlab-server"
version = "2.27.3"
description = "A set of server components for JupyterLab and JupyterLab like applications."
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"},
{file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"},
]
[package.dependencies]
babel = ">=2.10"
importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
jinja2 = ">=3.0.3"
json5 = ">=0.9.0"
jsonschema = ">=4.18.0"
jupyter-server = ">=1.21,<3"
packaging = ">=21.3"
requests = ">=2.31"
[package.extras]
docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"]
openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"]
test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"]
[[package]]
name = "jupyterlab-widgets"
version = "3.0.13"
description = "Jupyter interactive widgets for JupyterLab"
optional = false
python-versions = ">=3.7"
files = [
{file = "jupyterlab_widgets-3.0.13-py3-none-any.whl", hash = "sha256:e3cda2c233ce144192f1e29914ad522b2f4c40e77214b0cc97377ca3d323db54"},
{file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"},
]
[[package]]
name = "langchain-core"
version = "0.3.15"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "langchain_core-0.3.15-py3-none-any.whl", hash = "sha256:3d4ca6dbb8ed396a6ee061063832a2451b0ce8c345570f7b086ffa7288e4fa29"},
{file = "langchain_core-0.3.15.tar.gz", hash = "sha256:b1a29787a4ffb7ec2103b4e97d435287201da7809b369740dd1e32f176325aba"},
]
[package.dependencies]
jsonpatch = ">=1.33,<2.0"
langsmith = ">=0.1.125,<0.2.0"
packaging = ">=23.2,<25"
pydantic = [
{version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0"
typing-extensions = ">=4.7"
[[package]]
name = "langgraph-checkpoint"
version = "2.0.8"
description = "Library with base interfaces for LangGraph checkpoint savers."
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
langchain-core = ">=0.2.38,<0.4"
msgpack = "^1.1.0"
[package.source]
type = "directory"
url = "../checkpoint"
[[package]]
name = "langgraph-checkpoint-duckdb"
version = "2.0.1"
description = "Library with a DuckDB implementation of LangGraph checkpoint saver."
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
duckdb = ">=1.1.2"
langgraph-checkpoint = "^2.0.2"
[package.source]
type = "directory"
url = "../checkpoint-duckdb"
[[package]]
name = "langgraph-checkpoint-postgres"
version = "2.0.7"
description = "Library with a Postgres implementation of LangGraph checkpoint saver."
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
langgraph-checkpoint = "^2.0.7"
orjson = ">=3.10.1"
psycopg = "^3.2.0"
psycopg-pool = "^3.2.0"
[package.source]
type = "directory"
url = "../checkpoint-postgres"
[[package]]
name = "langgraph-checkpoint-sqlite"
version = "2.0.1"
description = "Library with a SQLite implementation of LangGraph checkpoint saver."
optional = false
python-versions = "^3.9.0"
files = []
develop = true
[package.dependencies]
aiosqlite = "^0.20.0"
langgraph-checkpoint = "^2.0.2"
[package.source]
type = "directory"
url = "../checkpoint-sqlite"
[[package]]
name = "langgraph-sdk"
version = "0.1.42"
description = "SDK for interacting with LangGraph API"
optional = false
python-versions = "^3.9.0,<4.0"
files = []
develop = true
[package.dependencies]
httpx = ">=0.25.2"
orjson = ">=3.10.1"
[package.source]
type = "directory"
url = "../sdk-py"
[[package]]
name = "langsmith"
version = "0.1.138"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.138-py3-none-any.whl", hash = "sha256:5c2bd5c11c75f7b3d06a0f06b115186e7326ca969fd26d66ffc65a0669012aee"},
{file = "langsmith-0.1.138.tar.gz", hash = "sha256:1ecf613bb52f6bf17f1510e24ad8b70d4b0259bc9d3dbfd69b648c66d4644f0b"},
]
[package.dependencies]
httpx = ">=0.23.0,<1"
orjson = ">=3.9.14,<4.0.0"
pydantic = [
{version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
requests = ">=2,<3"
requests-toolbelt = ">=1.0.0,<2.0.0"
[[package]]
name = "markupsafe"
version = "3.0.2"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
files = [
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"},
{file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"},
{file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"},
{file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"},
{file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"},
{file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"},
{file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"},
{file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"},
{file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"},
{file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"},
{file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"},
{file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"},
{file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"},
{file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"},
{file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"},
{file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"},
{file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"},
{file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"},
{file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"},
{file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
]
[[package]]
name = "matplotlib-inline"
version = "0.1.7"
description = "Inline Matplotlib backend for Jupyter"
optional = false
python-versions = ">=3.8"
files = [
{file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"},
{file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"},
]
[package.dependencies]
traitlets = "*"
[[package]]
name = "mistune"
version = "3.0.2"
description = "A sane and fast Markdown parser with useful plugins and renderers"
optional = false
python-versions = ">=3.7"
files = [
{file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"},
{file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"},
]
[[package]]
name = "msgpack"
version = "1.1.0"
description = "MessagePack serializer"
optional = false
python-versions = ">=3.8"
files = [
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"},
{file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"},
{file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"},
{file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"},
{file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"},
{file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"},
{file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"},
{file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"},
{file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"},
{file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"},
{file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"},
{file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"},
{file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"},
{file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"},
{file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"},
{file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"},
{file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"},
{file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"},
{file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"},
{file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"},
{file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"},
{file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"},
{file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"},
{file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"},
{file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"},
{file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"},
{file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"},
{file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"},
{file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"},
{file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"},
]
[[package]]
name = "mypy"
version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
faster-cache = ["orjson"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "nbclient"
version = "0.10.0"
description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
optional = false
python-versions = ">=3.8.0"
files = [
{file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"},
{file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"},
]
[package.dependencies]
jupyter-client = ">=6.1.12"
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
nbformat = ">=5.1"
traitlets = ">=5.4"
[package.extras]
dev = ["pre-commit"]
docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"]
test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"]
[[package]]
name = "nbconvert"
version = "7.16.4"
description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)."
optional = false
python-versions = ">=3.8"
files = [
{file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"},
{file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"},
]
[package.dependencies]
beautifulsoup4 = "*"
bleach = "!=5.0.0"
defusedxml = "*"
importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
jinja2 = ">=3.0"
jupyter-core = ">=4.7"
jupyterlab-pygments = "*"
markupsafe = ">=2.0"
mistune = ">=2.0.3,<4"
nbclient = ">=0.5.0"
nbformat = ">=5.7"
packaging = "*"
pandocfilters = ">=1.4.1"
pygments = ">=2.4.1"
tinycss2 = "*"
traitlets = ">=5.1"
[package.extras]
all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"]
docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"]
qtpdf = ["pyqtwebengine (>=5.15)"]
qtpng = ["pyqtwebengine (>=5.15)"]
serve = ["tornado (>=6.1)"]
test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"]
webpdf = ["playwright"]
[[package]]
name = "nbformat"
version = "5.10.4"
description = "The Jupyter Notebook format"
optional = false
python-versions = ">=3.8"
files = [
{file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"},
{file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"},
]
[package.dependencies]
fastjsonschema = ">=2.15"
jsonschema = ">=2.6"
jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
traitlets = ">=5.1"
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
test = ["pep440", "pre-commit", "pytest", "testpath"]
[[package]]
name = "nest-asyncio"
version = "1.6.0"
description = "Patch asyncio to allow nested event loops"
optional = false
python-versions = ">=3.5"
files = [
{file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"},
{file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"},
]
[[package]]
name = "notebook"
version = "7.2.2"
description = "Jupyter Notebook - A web-based notebook environment for interactive computing"
optional = false
python-versions = ">=3.8"
files = [
{file = "notebook-7.2.2-py3-none-any.whl", hash = "sha256:c89264081f671bc02eec0ed470a627ed791b9156cad9285226b31611d3e9fe1c"},
{file = "notebook-7.2.2.tar.gz", hash = "sha256:2ef07d4220421623ad3fe88118d687bc0450055570cdd160814a59cf3a1c516e"},
]
[package.dependencies]
jupyter-server = ">=2.4.0,<3"
jupyterlab = ">=4.2.0,<4.3"
jupyterlab-server = ">=2.27.1,<3"
notebook-shim = ">=0.2,<0.3"
tornado = ">=6.2.0"
[package.extras]
dev = ["hatch", "pre-commit"]
docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
[[package]]
name = "notebook-shim"
version = "0.2.4"
description = "A shim layer for notebook traits and config"
optional = false
python-versions = ">=3.7"
files = [
{file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"},
{file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"},
]
[package.dependencies]
jupyter-server = ">=1.8,<3"
[package.extras]
test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"]
[[package]]
name = "orjson"
version = "3.10.10"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"},
{file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"},
{file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"},
{file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"},
{file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"},
{file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"},
{file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"},
{file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"},
{file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"},
{file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"},
{file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"},
{file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"},
{file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"},
{file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"},
{file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"},
{file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"},
{file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"},
{file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"},
{file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"},
{file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"},
{file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"},
{file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"},
{file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"},
{file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"},
{file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"},
{file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"},
{file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"},
{file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"},
{file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"},
{file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"},
{file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"},
{file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"},
{file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"},
{file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"},
{file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"},
{file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"},
{file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"},
{file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"},
{file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"},
{file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"},
{file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"},
{file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"},
{file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"},
{file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"},
{file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"},
{file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"},
{file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"},
{file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"},
{file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"},
{file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"},
{file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"},
{file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"},
{file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"},
{file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"},
{file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"},
{file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"},
{file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"},
{file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"},
]
[[package]]
name = "overrides"
version = "7.7.0"
description = "A decorator to automatically detect mismatch when overriding a method."
optional = false
python-versions = ">=3.6"
files = [
{file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"},
{file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"},
]
[[package]]
name = "packaging"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
name = "pandocfilters"
version = "1.5.1"
description = "Utilities for writing pandoc filters in python"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"},
{file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"},
]
[[package]]
name = "parso"
version = "0.8.4"
description = "A Python Parser"
optional = false
python-versions = ">=3.6"
files = [
{file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"},
{file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"},
]
[package.extras]
qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
testing = ["docopt", "pytest"]
[[package]]
name = "pexpect"
version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
optional = false
python-versions = "*"
files = [
{file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"},
{file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"},
]
[package.dependencies]
ptyprocess = ">=0.5"
[[package]]
name = "platformdirs"
version = "4.3.6"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
{file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
{file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
]
[package.extras]
docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
type = ["mypy (>=1.11.2)"]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "prometheus-client"
version = "0.21.0"
description = "Python client for the Prometheus monitoring system."
optional = false
python-versions = ">=3.8"
files = [
{file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"},
{file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"},
]
[package.extras]
twisted = ["twisted"]
[[package]]
name = "prompt-toolkit"
version = "3.0.48"
description = "Library for building powerful interactive command lines in Python"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"},
{file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"},
]
[package.dependencies]
wcwidth = "*"
[[package]]
name = "psutil"
version = "6.1.0"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"},
{file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"},
{file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"},
{file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"},
{file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"},
{file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"},
{file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"},
{file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"},
{file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"},
{file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"},
{file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"},
{file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"},
{file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"},
{file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"},
{file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"},
{file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"},
{file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"},
]
[package.extras]
dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"]
test = ["pytest", "pytest-xdist", "setuptools"]
[[package]]
name = "psycopg"
version = "3.2.3"
description = "PostgreSQL database adapter for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "psycopg-3.2.3-py3-none-any.whl", hash = "sha256:644d3973fe26908c73d4be746074f6e5224b03c1101d302d9a53bf565ad64907"},
{file = "psycopg-3.2.3.tar.gz", hash = "sha256:a5764f67c27bec8bfac85764d23c534af2c27b893550377e37ce59c12aac47a2"},
]
[package.dependencies]
psycopg-binary = {version = "3.2.3", optional = true, markers = "implementation_name != \"pypy\" and extra == \"binary\""}
typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""}
tzdata = {version = "*", markers = "sys_platform == \"win32\""}
[package.extras]
binary = ["psycopg-binary (==3.2.3)"]
c = ["psycopg-c (==3.2.3)"]
dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "mypy (>=1.11)", "types-setuptools (>=57.4)", "wheel (>=0.37)"]
docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.12)"]
pool = ["psycopg-pool"]
test = ["anyio (>=4.0)", "mypy (>=1.11)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"]
[[package]]
name = "psycopg-binary"
version = "3.2.3"
description = "PostgreSQL database adapter for Python -- C optimisation distribution"
optional = false
python-versions = ">=3.8"
files = [
{file = "psycopg_binary-3.2.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:965455eac8547f32b3181d5ec9ad8b9be500c10fe06193543efaaebe3e4ce70c"},
{file = "psycopg_binary-3.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:71adcc8bc80a65b776510bc39992edf942ace35b153ed7a9c6c573a6849ce308"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f73adc05452fb85e7a12ed3f69c81540a8875960739082e6ea5e28c373a30774"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8630943143c6d6ca9aefc88bbe5e76c90553f4e1a3b2dc339e67dc34aa86f7e"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bffb61e198a91f712cc3d7f2d176a697cb05b284b2ad150fb8edb308eba9002"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4fa2240c9fceddaa815a58f29212826fafe43ce80ff666d38c4a03fb036955"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:192a5f8496e6e1243fdd9ac20e117e667c0712f148c5f9343483b84435854c78"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64dc6e9ec64f592f19dc01a784e87267a64a743d34f68488924251253da3c818"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:79498df398970abcee3d326edd1d4655de7d77aa9aecd578154f8af35ce7bbd2"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:949551752930d5e478817e0b49956350d866b26578ced0042a61967e3fcccdea"},
{file = "psycopg_binary-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:80a2337e2dfb26950894c8301358961430a0304f7bfe729d34cc036474e9c9b1"},
{file = "psycopg_binary-3.2.3-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:6d8f2144e0d5808c2e2aed40fbebe13869cd00c2ae745aca4b3b16a435edb056"},
{file = "psycopg_binary-3.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:94253be2b57ef2fea7ffe08996067aabf56a1eb9648342c9e3bad9e10c46e045"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fda0162b0dbfa5eaed6cdc708179fa27e148cb8490c7d62e5cf30713909658ea"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c0419cdad8c70eaeb3116bb28e7b42d546f91baf5179d7556f230d40942dc78"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74fbf5dd3ef09beafd3557631e282f00f8af4e7a78fbfce8ab06d9cd5a789aae"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d784f614e4d53050cbe8abf2ae9d1aaacf8ed31ce57b42ce3bf2a48a66c3a5c"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4e76ce2475ed4885fe13b8254058be710ec0de74ebd8ef8224cf44a9a3358e5f"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5938b257b04c851c2d1e6cb2f8c18318f06017f35be9a5fe761ee1e2e344dfb7"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:257c4aea6f70a9aef39b2a77d0658a41bf05c243e2bf41895eb02220ac6306f3"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:06b5cc915e57621eebf2393f4173793ed7e3387295f07fed93ed3fb6a6ccf585"},
{file = "psycopg_binary-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:09baa041856b35598d335b1a74e19a49da8500acedf78164600694c0ba8ce21b"},
{file = "psycopg_binary-3.2.3-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:48f8ca6ee8939bab760225b2ab82934d54330eec10afe4394a92d3f2a0c37dd6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5361ea13c241d4f0ec3f95e0bf976c15e2e451e9cc7ef2e5ccfc9d170b197a40"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb987f14af7da7c24f803111dbc7392f5070fd350146af3345103f76ea82e339"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0463a11b1cace5a6aeffaf167920707b912b8986a9c7920341c75e3686277920"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b7be9a6c06518967b641fb15032b1ed682fd3b0443f64078899c61034a0bca6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64a607e630d9f4b2797f641884e52b9f8e239d35943f51bef817a384ec1678fe"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fa33ead69ed133210d96af0c63448b1385df48b9c0247eda735c5896b9e6dbbf"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1f8b0d0e99d8e19923e6e07379fa00570be5182c201a8c0b5aaa9a4d4a4ea20b"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:709447bd7203b0b2debab1acec23123eb80b386f6c29e7604a5d4326a11e5bd6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5e37d5027e297a627da3551a1e962316d0f88ee4ada74c768f6c9234e26346d9"},
{file = "psycopg_binary-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:261f0031ee6074765096a19b27ed0f75498a8338c3dcd7f4f0d831e38adf12d1"},
{file = "psycopg_binary-3.2.3-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:41fdec0182efac66b27478ac15ef54c9ebcecf0e26ed467eb7d6f262a913318b"},
{file = "psycopg_binary-3.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:07d019a786eb020c0f984691aa1b994cb79430061065a694cf6f94056c603d26"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c57615791a337378fe5381143259a6c432cdcbb1d3e6428bfb7ce59fff3fb5c"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8eb9a4e394926b93ad919cad1b0a918e9b4c846609e8c1cfb6b743683f64da0"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5905729668ef1418bd36fbe876322dcb0f90b46811bba96d505af89e6fbdce2f"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd65774ed7d65101b314808b6893e1a75b7664f680c3ef18d2e5c84d570fa393"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:700679c02f9348a0d0a2adcd33a0275717cd0d0aee9d4482b47d935023629505"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:96334bb64d054e36fed346c50c4190bad9d7c586376204f50bede21a913bf942"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9099e443d4cc24ac6872e6a05f93205ba1a231b1a8917317b07c9ef2b955f1f4"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1985ab05e9abebfbdf3163a16ebb37fbc5d49aff2bf5b3d7375ff0920bbb54cd"},
{file = "psycopg_binary-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:e90352d7b610b4693fad0feea48549d4315d10f1eba5605421c92bb834e90170"},
{file = "psycopg_binary-3.2.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:69320f05de8cdf4077ecd7fefdec223890eea232af0d58f2530cbda2871244a0"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4926ea5c46da30bec4a85907aa3f7e4ea6313145b2aa9469fdb861798daf1502"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c64c4cd0d50d5b2288ab1bcb26c7126c772bbdebdfadcd77225a77df01c4a57e"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05a1bdce30356e70a05428928717765f4a9229999421013f41338d9680d03a63"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad357e426b0ea5c3043b8ec905546fa44b734bf11d33b3da3959f6e4447d350"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:967b47a0fd237aa17c2748fdb7425015c394a6fb57cdad1562e46a6eb070f96d"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:71db8896b942770ed7ab4efa59b22eee5203be2dfdee3c5258d60e57605d688c"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2773f850a778575dd7158a6dd072f7925b67f3ba305e2003538e8831fec77a1d"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aeddf7b3b3f6e24ccf7d0edfe2d94094ea76b40e831c16eff5230e040ce3b76b"},
{file = "psycopg_binary-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:824c867a38521d61d62b60aca7db7ca013a2b479e428a0db47d25d8ca5067410"},
{file = "psycopg_binary-3.2.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:9994f7db390c17fc2bd4c09dca722fd792ff8a49bb3bdace0c50a83f22f1767d"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1303bf8347d6be7ad26d1362af2c38b3a90b8293e8d56244296488ee8591058e"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:842da42a63ecb32612bb7f5b9e9f8617eab9bc23bd58679a441f4150fcc51c96"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2bb342a01c76f38a12432848e6013c57eb630103e7556cf79b705b53814c3949"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd40af959173ea0d087b6b232b855cfeaa6738f47cb2a0fd10a7f4fa8b74293f"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9b60b465773a52c7d4705b0a751f7f1cdccf81dd12aee3b921b31a6e76b07b0e"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fc6d87a1c44df8d493ef44988a3ded751e284e02cdf785f746c2d357e99782a6"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f0b018e37608c3bfc6039a1dc4eb461e89334465a19916be0153c757a78ea426"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a29f5294b0b6360bfda69653697eff70aaf2908f58d1073b0acd6f6ab5b5a4f"},
{file = "psycopg_binary-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:e56b1fd529e5dde2d1452a7d72907b37ed1b4f07fdced5d8fb1e963acfff6749"},
]
[[package]]
name = "psycopg-pool"
version = "3.2.3"
description = "Connection Pool for Psycopg"
optional = false
python-versions = ">=3.8"
files = [
{file = "psycopg_pool-3.2.3-py3-none-any.whl", hash = "sha256:53bd8e640625e01b2927b2ad96df8ed8e8f91caea4597d45e7673fc7bbb85eb1"},
{file = "psycopg_pool-3.2.3.tar.gz", hash = "sha256:bb942f123bef4b7fbe4d55421bd3fb01829903c95c0f33fd42b7e94e5ac9b52a"},
]
[package.dependencies]
typing-extensions = ">=4.6"
[[package]]
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
optional = false
python-versions = "*"
files = [
{file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
{file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
]
[[package]]
name = "pure-eval"
version = "0.2.3"
description = "Safely evaluate AST nodes without side effects"
optional = false
python-versions = "*"
files = [
{file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"},
{file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"},
]
[package.extras]
tests = ["pytest"]
[[package]]
name = "py-spy"
version = "0.3.14"
description = "Sampling profiler for Python programs"
optional = false
python-versions = "*"
files = [
{file = "py_spy-0.3.14-py2.py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5b342cc5feb8d160d57a7ff308de153f6be68dcf506ad02b4d67065f2bae7f45"},
{file = "py_spy-0.3.14-py2.py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:fe7efe6c91f723442259d428bf1f9ddb9c1679828866b353d539345ca40d9dd2"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590905447241d789d9de36cff9f52067b6f18d8b5e9fb399242041568d414461"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd6211fe7f587b3532ba9d300784326d9a6f2b890af7bf6fff21a029ebbc812b"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e8e48032e71c94c3dd51694c39e762e4bbfec250df5bf514adcdd64e79371e0"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f59b0b52e56ba9566305236375e6fc68888261d0d36b5addbe3cf85affbefc0e"},
{file = "py_spy-0.3.14-py2.py3-none-win_amd64.whl", hash = "sha256:8f5b311d09f3a8e33dbd0d44fc6e37b715e8e0c7efefafcda8bfd63b31ab5a31"},
]
[[package]]
name = "pycparser"
version = "2.22"
description = "C parser in Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]
[[package]]
name = "pydantic"
version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
{file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
]
[package.extras]
email = ["email-validator (>=2.0.0)"]
timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
{file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
{file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
{file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
{file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
{file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
{file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
{file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
{file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
{file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
{file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
{file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
]
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygments"
version = "2.18.0"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.8"
files = [
{file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"},
{file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"},
]
[package.extras]
windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pyperf"
version = "2.8.0"
description = "Python module to run and analyze benchmarks"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyperf-2.8.0-py3-none-any.whl", hash = "sha256:1a775b5a09882f18bf876430ef78e07646f773f50774546f5f6a8b34d60e3968"},
{file = "pyperf-2.8.0.tar.gz", hash = "sha256:b30a20465819daf102b6543b512f6799a5a879ff2a123981e6cd732d0e6a7a79"},
]
[package.dependencies]
psutil = ">=5.9.0"
[package.extras]
dev = ["importlib-metadata", "tox"]
[[package]]
name = "pytest"
version = "8.3.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
{file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-cov"
version = "4.1.0"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
{file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
]
[package.dependencies]
coverage = {version = ">=5.2.1", extras = ["toml"]}
pytest = ">=4.6"
[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
[[package]]
name = "pytest-dotenv"
version = "0.5.2"
description = "A py.test plugin that parses environment files before running tests"
optional = false
python-versions = "*"
files = [
{file = "pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732"},
{file = "pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f"},
]
[package.dependencies]
pytest = ">=5.0.0"
python-dotenv = ">=0.9.1"
[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]
[package.dependencies]
pytest = ">=6.2.5"
[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]
[[package]]
name = "pytest-repeat"
version = "0.9.3"
description = "pytest plugin for repeating tests"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest_repeat-0.9.3-py3-none-any.whl", hash = "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed"},
{file = "pytest_repeat-0.9.3.tar.gz", hash = "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185"},
]
[package.dependencies]
pytest = "*"
[[package]]
name = "pytest-watcher"
version = "0.4.3"
description = "Automatically rerun your tests on file modifications"
optional = false
python-versions = "<4.0.0,>=3.7.0"
files = [
{file = "pytest_watcher-0.4.3-py3-none-any.whl", hash = "sha256:d59b1e1396f33a65ea4949b713d6884637755d641646960056a90b267c3460f9"},
{file = "pytest_watcher-0.4.3.tar.gz", hash = "sha256:0cb0e4661648c8c0ff2b2d25efa5a8e421784b9e4c60fcecbf9b7c30b2d731b3"},
]
[package.dependencies]
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
[[package]]
name = "pytest-xdist"
version = "3.6.1"
description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
{file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
]
[package.dependencies]
execnet = ">=2.1"
psutil = {version = ">=3.0", optional = true, markers = "extra == \"psutil\""}
pytest = ">=7.0.0"
[package.extras]
psutil = ["psutil (>=3.0)"]
setproctitle = ["setproctitle"]
testing = ["filelock"]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
]
[package.dependencies]
six = ">=1.5"
[[package]]
name = "python-dotenv"
version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.8"
files = [
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
]
[package.extras]
cli = ["click (>=5.0)"]
[[package]]
name = "python-json-logger"
version = "2.0.7"
description = "A python library adding a json log formatter"
optional = false
python-versions = ">=3.6"
files = [
{file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"},
{file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"},
]
[[package]]
name = "pywin32"
version = "308"
description = "Python for Window Extensions"
optional = false
python-versions = "*"
files = [
{file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"},
{file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"},
{file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"},
{file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"},
{file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"},
{file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"},
{file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"},
{file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"},
{file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"},
{file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"},
{file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"},
{file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"},
{file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"},
{file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"},
{file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"},
{file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"},
{file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"},
{file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"},
]
[[package]]
name = "pywinpty"
version = "2.0.14"
description = "Pseudo terminal support for Windows from Python."
optional = false
python-versions = ">=3.8"
files = [
{file = "pywinpty-2.0.14-cp310-none-win_amd64.whl", hash = "sha256:0b149c2918c7974f575ba79f5a4aad58bd859a52fa9eb1296cc22aa412aa411f"},
{file = "pywinpty-2.0.14-cp311-none-win_amd64.whl", hash = "sha256:cf2a43ac7065b3e0dc8510f8c1f13a75fb8fde805efa3b8cff7599a1ef497bc7"},
{file = "pywinpty-2.0.14-cp312-none-win_amd64.whl", hash = "sha256:55dad362ef3e9408ade68fd173e4f9032b3ce08f68cfe7eacb2c263ea1179737"},
{file = "pywinpty-2.0.14-cp313-none-win_amd64.whl", hash = "sha256:074fb988a56ec79ca90ed03a896d40707131897cefb8f76f926e3834227f2819"},
{file = "pywinpty-2.0.14-cp39-none-win_amd64.whl", hash = "sha256:5725fd56f73c0531ec218663bd8c8ff5acc43c78962fab28564871b5fce053fd"},
{file = "pywinpty-2.0.14.tar.gz", hash = "sha256:18bd9529e4a5daf2d9719aa17788ba6013e594ae94c5a0c27e83df3278b0660e"},
]
[[package]]
name = "pyyaml"
version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
name = "pyzmq"
version = "26.2.0"
description = "Python bindings for 0MQ"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"},
{file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"},
{file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"},
{file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"},
{file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"},
{file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"},
{file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"},
{file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"},
{file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"},
{file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"},
{file = "pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"},
{file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"},
{file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"},
{file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"},
{file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"},
{file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"},
{file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"},
{file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"},
{file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"},
{file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"},
{file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"},
{file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"},
{file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"},
{file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"},
{file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"},
{file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"},
{file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"},
{file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"},
{file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"},
{file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"},
{file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"},
{file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"},
{file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"},
{file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"},
{file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"},
{file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"},
{file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"},
{file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"},
{file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"},
{file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"},
{file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"},
{file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"},
{file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"},
{file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"},
{file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"},
{file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"},
{file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"},
{file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"},
{file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"},
{file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"},
{file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"},
{file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"},
{file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"},
{file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"},
{file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"},
{file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"},
{file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"},
{file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"},
{file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"},
{file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"},
{file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"},
{file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"},
{file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"},
{file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"},
{file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"},
{file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"},
{file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"},
{file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"},
{file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"},
{file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"},
{file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"},
{file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"},
{file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"},
{file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"},
{file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"},
{file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"},
{file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"},
{file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"},
{file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"},
{file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"},
{file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"},
{file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"},
{file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"},
{file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"},
{file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"},
{file = "pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"},
{file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"},
{file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"},
{file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"},
{file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"},
{file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"},
{file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"},
{file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"},
{file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"},
{file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"},
{file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"},
{file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"},
{file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"},
{file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"},
{file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"},
{file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"},
{file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"},
{file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"},
{file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"},
]
[package.dependencies]
cffi = {version = "*", markers = "implementation_name == \"pypy\""}
[[package]]
name = "referencing"
version = "0.35.1"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"},
{file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"},
]
[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
]
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
[[package]]
name = "rfc3339-validator"
version = "0.1.4"
description = "A pure python RFC3339 validator"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"},
{file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"},
]
[package.dependencies]
six = "*"
[[package]]
name = "rfc3986-validator"
version = "0.1.1"
description = "Pure python rfc3986 validator"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"},
{file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"},
]
[[package]]
name = "rpds-py"
version = "0.20.1"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
{file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"},
{file = "rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14511a539afee6f9ab492b543060c7491c99924314977a55c98bfa2ee29ce78c"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3ccb8ac2d3c71cda472b75af42818981bdacf48d2e21c36331b50b4f16930163"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c142b88039b92e7e0cb2552e8967077e3179b22359e945574f5e2764c3953dcf"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f19169781dddae7478a32301b499b2858bc52fc45a112955e798ee307e294977"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13c56de6518e14b9bf6edde23c4c39dac5b48dcf04160ea7bce8fca8397cdf86"},
{file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:925d176a549f4832c6f69fa6026071294ab5910e82a0fe6c6228fce17b0706bd"},
{file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78f0b6877bfce7a3d1ff150391354a410c55d3cdce386f862926a4958ad5ab7e"},
{file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dd645e2b0dcb0fd05bf58e2e54c13875847687d0b71941ad2e757e5d89d4356"},
{file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f676e21db2f8c72ff0936f895271e7a700aa1f8d31b40e4e43442ba94973899"},
{file = "rpds_py-0.20.1-cp310-none-win32.whl", hash = "sha256:648386ddd1e19b4a6abab69139b002bc49ebf065b596119f8f37c38e9ecee8ff"},
{file = "rpds_py-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:d9ecb51120de61e4604650666d1f2b68444d46ae18fd492245a08f53ad2b7711"},
{file = "rpds_py-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:762703bdd2b30983c1d9e62b4c88664df4a8a4d5ec0e9253b0231171f18f6d75"},
{file = "rpds_py-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0b581f47257a9fce535c4567782a8976002d6b8afa2c39ff616edf87cbeff712"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:842c19a6ce894493563c3bd00d81d5100e8e57d70209e84d5491940fdb8b9e3a"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42cbde7789f5c0bcd6816cb29808e36c01b960fb5d29f11e052215aa85497c93"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c8e9340ce5a52f95fa7d3b552b35c7e8f3874d74a03a8a69279fd5fca5dc751"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba6f89cac95c0900d932c9efb7f0fb6ca47f6687feec41abcb1bd5e2bd45535"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a916087371afd9648e1962e67403c53f9c49ca47b9680adbeef79da3a7811b0"},
{file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:200a23239781f46149e6a415f1e870c5ef1e712939fe8fa63035cd053ac2638e"},
{file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58b1d5dd591973d426cbb2da5e27ba0339209832b2f3315928c9790e13f159e8"},
{file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6b73c67850ca7cae0f6c56f71e356d7e9fa25958d3e18a64927c2d930859b8e4"},
{file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d8761c3c891cc51e90bc9926d6d2f59b27beaf86c74622c8979380a29cc23ac3"},
{file = "rpds_py-0.20.1-cp311-none-win32.whl", hash = "sha256:cd945871335a639275eee904caef90041568ce3b42f402c6959b460d25ae8732"},
{file = "rpds_py-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:7e21b7031e17c6b0e445f42ccc77f79a97e2687023c5746bfb7a9e45e0921b84"},
{file = "rpds_py-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:36785be22066966a27348444b40389f8444671630063edfb1a2eb04318721e17"},
{file = "rpds_py-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:142c0a5124d9bd0e2976089484af5c74f47bd3298f2ed651ef54ea728d2ea42c"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbddc10776ca7ebf2a299c41a4dde8ea0d8e3547bfd731cb87af2e8f5bf8962d"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15a842bb369e00295392e7ce192de9dcbf136954614124a667f9f9f17d6a216f"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be5ef2f1fc586a7372bfc355986226484e06d1dc4f9402539872c8bb99e34b01"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbcf360c9e3399b056a238523146ea77eeb2a596ce263b8814c900263e46031a"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd27a66740ffd621d20b9a2f2b5ee4129a56e27bfb9458a3bcc2e45794c96cb"},
{file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0b937b2a1988f184a3e9e577adaa8aede21ec0b38320d6009e02bd026db04fa"},
{file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6889469bfdc1eddf489729b471303739bf04555bb151fe8875931f8564309afc"},
{file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:19b73643c802f4eaf13d97f7855d0fb527fbc92ab7013c4ad0e13a6ae0ed23bd"},
{file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c6afcf2338e7f374e8edc765c79fbcb4061d02b15dd5f8f314a4af2bdc7feb5"},
{file = "rpds_py-0.20.1-cp312-none-win32.whl", hash = "sha256:dc73505153798c6f74854aba69cc75953888cf9866465196889c7cdd351e720c"},
{file = "rpds_py-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:8bbe951244a838a51289ee53a6bae3a07f26d4e179b96fc7ddd3301caf0518eb"},
{file = "rpds_py-0.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6ca91093a4a8da4afae7fe6a222c3b53ee4eef433ebfee4d54978a103435159e"},
{file = "rpds_py-0.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b9c2fe36d1f758b28121bef29ed1dee9b7a2453e997528e7d1ac99b94892527c"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f009c69bc8c53db5dfab72ac760895dc1f2bc1b62ab7408b253c8d1ec52459fc"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6740a3e8d43a32629bb9b009017ea5b9e713b7210ba48ac8d4cb6d99d86c8ee8"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32b922e13d4c0080d03e7b62991ad7f5007d9cd74e239c4b16bc85ae8b70252d"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe00a9057d100e69b4ae4a094203a708d65b0f345ed546fdef86498bf5390982"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fe9b04b6fa685bd39237d45fad89ba19e9163a1ccaa16611a812e682913496"},
{file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa7ac11e294304e615b43f8c441fee5d40094275ed7311f3420d805fde9b07b4"},
{file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aa97af1558a9bef4025f8f5d8c60d712e0a3b13a2fe875511defc6ee77a1ab7"},
{file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:483b29f6f7ffa6af845107d4efe2e3fa8fb2693de8657bc1849f674296ff6a5a"},
{file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37fe0f12aebb6a0e3e17bb4cd356b1286d2d18d2e93b2d39fe647138458b4bcb"},
{file = "rpds_py-0.20.1-cp313-none-win32.whl", hash = "sha256:a624cc00ef2158e04188df5e3016385b9353638139a06fb77057b3498f794782"},
{file = "rpds_py-0.20.1-cp313-none-win_amd64.whl", hash = "sha256:b71b8666eeea69d6363248822078c075bac6ed135faa9216aa85f295ff009b1e"},
{file = "rpds_py-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5b48e790e0355865197ad0aca8cde3d8ede347831e1959e158369eb3493d2191"},
{file = "rpds_py-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3e310838a5801795207c66c73ea903deda321e6146d6f282e85fa7e3e4854804"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249280b870e6a42c0d972339e9cc22ee98730a99cd7f2f727549af80dd5a963"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e79059d67bea28b53d255c1437b25391653263f0e69cd7dec170d778fdbca95e"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b431c777c9653e569986ecf69ff4a5dba281cded16043d348bf9ba505486f36"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da584ff96ec95e97925174eb8237e32f626e7a1a97888cdd27ee2f1f24dd0ad8"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a0629ec053fc013808a85178524e3cb63a61dbc35b22499870194a63578fb9"},
{file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fbf15aff64a163db29a91ed0868af181d6f68ec1a3a7d5afcfe4501252840bad"},
{file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:07924c1b938798797d60c6308fa8ad3b3f0201802f82e4a2c41bb3fafb44cc28"},
{file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4a5a844f68776a7715ecb30843b453f07ac89bad393431efbf7accca3ef599c1"},
{file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:518d2ca43c358929bf08f9079b617f1c2ca6e8848f83c1225c88caeac46e6cbc"},
{file = "rpds_py-0.20.1-cp38-none-win32.whl", hash = "sha256:3aea7eed3e55119635a74bbeb80b35e776bafccb70d97e8ff838816c124539f1"},
{file = "rpds_py-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:7dca7081e9a0c3b6490a145593f6fe3173a94197f2cb9891183ef75e9d64c425"},
{file = "rpds_py-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b41b6321805c472f66990c2849e152aff7bc359eb92f781e3f606609eac877ad"},
{file = "rpds_py-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a90c373ea2975519b58dece25853dbcb9779b05cc46b4819cb1917e3b3215b6"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16d4477bcb9fbbd7b5b0e4a5d9b493e42026c0bf1f06f723a9353f5153e75d30"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b8382a90539910b53a6307f7c35697bc7e6ffb25d9c1d4e998a13e842a5e83"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4888e117dd41b9d34194d9e31631af70d3d526efc363085e3089ab1a62c32ed1"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5265505b3d61a0f56618c9b941dc54dc334dc6e660f1592d112cd103d914a6db"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e75ba609dba23f2c95b776efb9dd3f0b78a76a151e96f96cc5b6b1b0004de66f"},
{file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1791ff70bc975b098fe6ecf04356a10e9e2bd7dc21fa7351c1742fdeb9b4966f"},
{file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d126b52e4a473d40232ec2052a8b232270ed1f8c9571aaf33f73a14cc298c24f"},
{file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c14937af98c4cc362a1d4374806204dd51b1e12dded1ae30645c298e5a5c4cb1"},
{file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3d089d0b88996df627693639d123c8158cff41c0651f646cd8fd292c7da90eaf"},
{file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"},
{file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68cb0a499f2c4a088fd2f521453e22ed3527154136a855c62e148b7883b99f9a"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3060d885657abc549b2a0f8e1b79699290e5d83845141717c6c90c2df38311"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95f3b65d2392e1c5cec27cff08fdc0080270d5a1a4b2ea1d51d5f4a2620ff08d"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cc3712a4b0b76a1d45a9302dd2f53ff339614b1c29603a911318f2357b04dd2"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d4eea0761e37485c9b81400437adb11c40e13ef513375bbd6973e34100aeb06"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f5179583d7a6cdb981151dd349786cbc318bab54963a192692d945dd3f6435d"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fbb0ffc754490aff6dabbf28064be47f0f9ca0b9755976f945214965b3ace7e"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a94e52537a0e0a85429eda9e49f272ada715506d3b2431f64b8a3e34eb5f3e75"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:92b68b79c0da2a980b1c4197e56ac3dd0c8a149b4603747c4378914a68706979"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:93da1d3db08a827eda74356f9f58884adb254e59b6664f64cc04cdff2cc19b0d"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:754bbed1a4ca48479e9d4182a561d001bbf81543876cdded6f695ec3d465846b"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca449520e7484534a2a44faf629362cae62b660601432d04c482283c47eaebab"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9c4cb04a16b0f199a8c9bf807269b2f63b7b5b11425e4a6bd44bd6961d28282c"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb63804105143c7e24cee7db89e37cb3f3941f8e80c4379a0b355c52a52b6780"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55cd1fa4ecfa6d9f14fbd97ac24803e6f73e897c738f771a9fe038f2f11ff07c"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f8f741b6292c86059ed175d80eefa80997125b7c478fb8769fd9ac8943a16c0"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fc212779bf8411667234b3cdd34d53de6c2b8b8b958e1e12cb473a5f367c338"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ad56edabcdb428c2e33bbf24f255fe2b43253b7d13a2cdbf05de955217313e6"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a3a1e9ee9728b2c1734f65d6a1d376c6f2f6fdcc13bb007a08cc4b1ff576dc5"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e13de156137b7095442b288e72f33503a469aa1980ed856b43c353ac86390519"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:07f59760ef99f31422c49038964b31c4dfcfeb5d2384ebfc71058a7c9adae2d2"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:59240685e7da61fb78f65a9f07f8108e36a83317c53f7b276b4175dc44151684"},
{file = "rpds_py-0.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:83cba698cfb3c2c5a7c3c6bac12fe6c6a51aae69513726be6411076185a8b24a"},
{file = "rpds_py-0.20.1.tar.gz", hash = "sha256:e1791c4aabd117653530dccd24108fa03cc6baf21f58b950d0a73c3b3b29a350"},
]
[[package]]
name = "ruff"
version = "0.6.9"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
{file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
{file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
{file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
{file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
{file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
{file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
]
[[package]]
name = "send2trash"
version = "1.8.3"
description = "Send file to trash natively under Mac OS X, Windows and Linux"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"},
{file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"},
]
[package.extras]
nativelib = ["pyobjc-framework-Cocoa", "pywin32"]
objc = ["pyobjc-framework-Cocoa"]
win32 = ["pywin32"]
[[package]]
name = "setuptools"
version = "75.3.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"},
{file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"},
]
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.12.*)", "pytest-mypy"]
[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "soupsieve"
version = "2.6"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
files = [
{file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
{file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
]
[[package]]
name = "stack-data"
version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
optional = false
python-versions = "*"
files = [
{file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
{file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
]
[package.dependencies]
asttokens = ">=2.1.0"
executing = ">=1.2.0"
pure-eval = "*"
[package.extras]
tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
[[package]]
name = "syrupy"
version = "4.7.2"
description = "Pytest Snapshot Test Utility"
optional = false
python-versions = ">=3.8.1"
files = [
{file = "syrupy-4.7.2-py3-none-any.whl", hash = "sha256:eae7ba6be5aed190237caa93be288e97ca1eec5ca58760e4818972a10c4acc64"},
{file = "syrupy-4.7.2.tar.gz", hash = "sha256:ea45e099f242de1bb53018c238f408a5bb6c82007bc687aefcbeaa0e1c2e935a"},
]
[package.dependencies]
pytest = ">=7.0.0,<9.0.0"
[[package]]
name = "tenacity"
version = "9.0.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
files = [
{file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
{file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
]
[package.extras]
doc = ["reno", "sphinx"]
test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "terminado"
version = "0.18.1"
description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library."
optional = false
python-versions = ">=3.8"
files = [
{file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"},
{file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"},
]
[package.dependencies]
ptyprocess = {version = "*", markers = "os_name != \"nt\""}
pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""}
tornado = ">=6.1.0"
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"]
typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"]
[[package]]
name = "tinycss2"
version = "1.4.0"
description = "A tiny CSS parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"},
{file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"},
]
[package.dependencies]
webencodings = ">=0.4"
[package.extras]
doc = ["sphinx", "sphinx_rtd_theme"]
test = ["pytest", "ruff"]
[[package]]
name = "tomli"
version = "2.0.2"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]
[[package]]
name = "tornado"
version = "6.4.1"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = false
python-versions = ">=3.8"
files = [
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
{file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
{file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
{file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
]
[[package]]
name = "traitlets"
version = "5.14.3"
description = "Traitlets Python configuration system"
optional = false
python-versions = ">=3.8"
files = [
{file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"},
{file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"},
]
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"]
[[package]]
name = "types-python-dateutil"
version = "2.9.0.20241003"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"},
{file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"},
]
[[package]]
name = "types-requests"
version = "2.32.0.20241016"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
{file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
]
[package.dependencies]
urllib3 = ">=2"
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "tzdata"
version = "2024.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
files = [
{file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
{file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
]
[[package]]
name = "uri-template"
version = "1.3.0"
description = "RFC 6570 URI Template Processor"
optional = false
python-versions = ">=3.7"
files = [
{file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"},
{file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"},
]
[package.extras]
dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"]
[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "uvloop"
version = "0.21.0b1"
description = "Fast implementation of asyncio event loop on top of libuv"
optional = false
python-versions = ">=3.8.0"
files = [
{file = "uvloop-0.21.0b1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b47c276e66f2a26b58eafd0745c788e7345c9445a9e4b7799dd7065445ca91bf"},
{file = "uvloop-0.21.0b1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5aec94e40549d8fd1b04dc50d1b4480d4e8e1ed61066798dade0b4ecd408e7ed"},
{file = "uvloop-0.21.0b1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e867c5ffde9ec8880253a484a33a961e5af40e26757eda67a34798aabe471af"},
{file = "uvloop-0.21.0b1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1299f155b8dbe3374d1db810cb994cf22a3fadf8c5a85032aa8f31e18745a9c6"},
{file = "uvloop-0.21.0b1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2abfc1738c3fbb5a5552ea9fb34cca5cbdf73868caf78bdacdcd6ffbab438870"},
{file = "uvloop-0.21.0b1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3ac2b88f32612f7c4d792b3ed9b63eed414a1e85e004881a6ff08031c4ecf6c"},
{file = "uvloop-0.21.0b1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a156feb70518fd4d748209726463adf92d4dde895a137442956c66d6d3867fb8"},
{file = "uvloop-0.21.0b1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:115c90a7ef29375104b153e474c7fdf1c2bbd409f0c13ecaa823ed92b2c145e7"},
{file = "uvloop-0.21.0b1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d0b7c1c1a98282ad3384bc4cf4f199431efa3f4e5eeda6785cb902703c9691"},
{file = "uvloop-0.21.0b1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586c229730e74308763147195d908e7568c0769d05bafc132f4faaf655f6cffe"},
{file = "uvloop-0.21.0b1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bcddc39a94971bb5b8c76f243a8b467f7b69674bd25531b85b4d25d5917dd52f"},
{file = "uvloop-0.21.0b1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6c0332893fa201a60c4db7d6d296b131eb91793a062cfc9845bdcdab9cc6c22a"},
{file = "uvloop-0.21.0b1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea815a3046d31e3a88c09c13d46956f9b872a6951dd7ddee02ac8e3aa642a2de"},
{file = "uvloop-0.21.0b1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cb788e15561dab81f5c562fb2496926a1b8b47d8ff1986d9b37acfa98b37faa9"},
{file = "uvloop-0.21.0b1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0029380131aae418f4034520d853c85880d049eb1232214fda209a40a41c806c"},
{file = "uvloop-0.21.0b1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d692df286fe1df2659c2e26e1d4e582b02bf32847e675f7e6a770cc107ca4987"},
{file = "uvloop-0.21.0b1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:19641b992c05a47169cc655b7fbe4628dd5f29cafc910ce87dbd1702609d3bb1"},
{file = "uvloop-0.21.0b1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61b1c1d32df0a1ed0c8dca000ed15bab59e008349787d1d21b2a9d21ac7e5c8a"},
{file = "uvloop-0.21.0b1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:51f9ce02856cec8c7346875e40068b58fdf9c1f5326dbdf342c751abbcff40df"},
{file = "uvloop-0.21.0b1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7fbd38cf672c6477ccd5d034a6c25db7fdb7ef3964f82d209cde41c9a2dfe09b"},
{file = "uvloop-0.21.0b1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d2c4ae86218435cd76cb2f556433281923e15c22417d4ecb2f464325ed0dde3"},
{file = "uvloop-0.21.0b1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea6c55bbbdbf6cb7bc3693aa52d93c5efb4ded5be903b7faf0eb08e57f8dbfd5"},
{file = "uvloop-0.21.0b1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c5038ebc2f436398a153926db21d235ce75b050450af6bad17faee6336f6ef0b"},
{file = "uvloop-0.21.0b1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6af42e66212598a507879518f1fa8f13a489d52285e3715d1b4c91bcc70dd0ff"},
{file = "uvloop-0.21.0b1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b83e50efae3199c94c18883356b5b964d02eb5c2ddb775596c92ee0715c0fc79"},
{file = "uvloop-0.21.0b1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:124410218ddbcc5eb4c2651b8f90b2cf2cc7d9f5da77e426d57ed44eb49a8919"},
{file = "uvloop-0.21.0b1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:349557a2bf1cf800ff73f95bd812124a7f35c4a7bdfa62bcffa1c5a30604023a"},
{file = "uvloop-0.21.0b1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d896b0ef27d2a568b294172fe32f33102e19b4183d9cbc5bd3296c1674704f5"},
{file = "uvloop-0.21.0b1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:84ddb89cacfefdc6226b87991cbc13bea193d2a0d9185d13108b0ef560dffc7c"},
{file = "uvloop-0.21.0b1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d363718fe643b222b4d4a05e19a529c513565672801cb4527131f95f9bd937ea"},
{file = "uvloop-0.21.0b1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:44d50ad4d7bfde8d28825bdaf851a08a519c61c1cfbc4ed630bb6e67ccf12d72"},
{file = "uvloop-0.21.0b1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c5478798cc80ca7c3f3463d02a5f038ab058a8cd4414a7f96afe6a35401bcc99"},
{file = "uvloop-0.21.0b1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7adf2f885a971c9ae9af885d1cfac0dfa015eaf0b5b9ac8d114d73027c807c88"},
{file = "uvloop-0.21.0b1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6b5947c12128459a94398c303a1404064f69649f1cc6c1262ff6fbf2be6c47a"},
{file = "uvloop-0.21.0b1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c9c887a6643238b45a8334a41a616c8c2ee7d69e2c8d804feeebdd50e8861808"},
{file = "uvloop-0.21.0b1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1bdb1603f6178f47fdc2ef260a352840794d1cc65d7340d7de61646b9c26000a"},
{file = "uvloop-0.21.0b1.tar.gz", hash = "sha256:5e12901bd67c5ba374741fc497adc44de14854895c416cd0672b2e5b676ca23c"},
]
[package.extras]
dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"]
docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"]
test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"]
[[package]]
name = "watchdog"
version = "5.0.3"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.9"
files = [
{file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"},
{file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"},
{file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"},
{file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"},
{file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"},
{file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"},
{file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"},
{file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"},
{file = "watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"},
{file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"},
{file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"},
{file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"},
{file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"},
{file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"},
{file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"},
{file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"},
{file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"},
{file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"},
{file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"},
{file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"},
{file = "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"},
{file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"},
{file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"},
{file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
optional = false
python-versions = "*"
files = [
{file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"},
{file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"},
]
[[package]]
name = "webcolors"
version = "24.8.0"
description = "A library for working with the color formats defined by HTML and CSS."
optional = false
python-versions = ">=3.8"
files = [
{file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"},
{file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"},
]
[package.extras]
docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"]
tests = ["coverage[toml]"]
[[package]]
name = "webencodings"
version = "0.5.1"
description = "Character encoding aliases for legacy web content"
optional = false
python-versions = "*"
files = [
{file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
[[package]]
name = "websocket-client"
version = "1.8.0"
description = "WebSocket client for Python with low level API options"
optional = false
python-versions = ">=3.8"
files = [
{file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"},
{file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"},
]
[package.extras]
docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"]
optional = ["python-socks", "wsaccel"]
test = ["websockets"]
[[package]]
name = "widgetsnbextension"
version = "4.0.13"
description = "Jupyter interactive widgets for Jupyter Notebook"
optional = false
python-versions = ">=3.7"
files = [
{file = "widgetsnbextension-4.0.13-py3-none-any.whl", hash = "sha256:74b2692e8500525cc38c2b877236ba51d34541e6385eeed5aec15a70f88a6c71"},
{file = "widgetsnbextension-4.0.13.tar.gz", hash = "sha256:ffcb67bc9febd10234a362795f643927f4e0c05d9342c727b65d2384f8feacb6"},
]
[[package]]
name = "zipp"
version = "3.20.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
{file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
{file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
]
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9.0,<4.0"
content-hash = "2df4d5d5e61917bdfff0ba430067a17662666eedee2858d841fa02e594cf69d0"
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/README.md | # 🦜🕸️LangGraph

[](https://pepy.tech/project/langgraph)
[](https://github.com/langchain-ai/langgraph/issues)
[](https://langchain-ai.github.io/langgraph/)
⚡ Building language agents as graphs ⚡
> [!NOTE]
> Looking for the JS version? Click [here](https://github.com/langchain-ai/langgraphjs) ([JS docs](https://langchain-ai.github.io/langgraphjs/)).
## Overview
[LangGraph](https://langchain-ai.github.io/langgraph/) is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM frameworks, it offers these core benefits: cycles, controllability, and persistence. LangGraph allows you to define flows that involve cycles, essential for most agentic architectures, differentiating it from DAG-based solutions. As a very low-level framework, it provides fine-grained control over both the flow and state of your application, crucial for creating reliable agents. Additionally, LangGraph includes built-in persistence, enabling advanced human-in-the-loop and memory features.
LangGraph is inspired by [Pregel](https://research.google/pubs/pub37252/) and [Apache Beam](https://beam.apache.org/). The public interface draws inspiration from [NetworkX](https://networkx.org/documentation/latest/). LangGraph is built by LangChain Inc, the creators of LangChain, but can be used without LangChain.
[LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/langgraph_platform) is infrastructure for deploying LangGraph agents. It is a commercial solution for deploying agentic applications to production, built on the open-source LangGraph framework. The LangGraph Platform consists of several components that work together to support the development, deployment, debugging, and monitoring of LangGraph applications: [LangGraph Server](https://langchain-ai.github.io/langgraph/concepts/langgraph_server) (APIs), [LangGraph SDKs](https://langchain-ai.github.io/langgraph/concepts/sdk) (clients for the APIs), [LangGraph CLI](https://langchain-ai.github.io/langgraph/concepts/langgraph_cli) (command line tool for building the server), and [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio) (UI/debugger).
To learn more about LangGraph, check out our first LangChain Academy course, *Introduction to LangGraph*, available for free [here](https://academy.langchain.com/courses/intro-to-langgraph).
### Key Features
- **Cycles and Branching**: Implement loops and conditionals in your apps.
- **Persistence**: Automatically save state after each step in the graph. Pause and resume the graph execution at any point to support error recovery, human-in-the-loop workflows, time travel and more.
- **Human-in-the-Loop**: Interrupt graph execution to approve or edit next action planned by the agent.
- **Streaming Support**: Stream outputs as they are produced by each node (including token streaming).
- **Integration with LangChain**: LangGraph integrates seamlessly with [LangChain](https://github.com/langchain-ai/langchain/) and [LangSmith](https://docs.smith.langchain.com/) (but does not require them).
### LangGraph Platform
LangGraph Platform is a commercial solution for deploying agentic applications to production, built on the open-source LangGraph framework.
Here are some common issues that arise in complex deployments, which LangGraph Platform addresses:
- **Streaming support**: LangGraph Server provides [multiple streaming modes](https://langchain-ai.github.io/langgraph/concepts/streaming) optimized for various application needs
- **Background runs**: Runs agents asynchronously in the background
- **Support for long running agents**: Infrastructure that can handle long running processes
- **[Double texting](https://langchain-ai.github.io/langgraph/concepts/double_texting)**: Handle the case where you get two messages from the user before the agent can respond
- **Handle burstiness**: Task queue for ensuring requests are handled consistently without loss, even under heavy loads
## Installation
```shell
pip install -U langgraph
```
## Example
One of the central concepts of LangGraph is state. Each graph execution creates a state that is passed between nodes in the graph as they execute, and each node updates this internal state with its return value after it executes. The way that the graph updates its internal state is defined by either the type of graph chosen or a custom function.
Let's take a look at a simple example of an agent that can use a search tool.
```shell
pip install langchain-anthropic
```
```shell
export ANTHROPIC_API_KEY=sk-...
```
Optionally, we can set up [LangSmith](https://docs.smith.langchain.com/) for best-in-class observability.
```shell
export LANGSMITH_TRACING=true
export LANGSMITH_API_KEY=lsv2_sk_...
```
```python
from typing import Annotated, Literal, TypedDict
from langchain_core.messages import HumanMessage
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode
# Define the tools for the agent to use
@tool
def search(query: str):
"""Call to surf the web."""
# This is a placeholder, but don't tell the LLM that...
if "sf" in query.lower() or "san francisco" in query.lower():
return "It's 60 degrees and foggy."
return "It's 90 degrees and sunny."
tools = [search]
tool_node = ToolNode(tools)
model = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0).bind_tools(tools)
# Define the function that determines whether to continue or not
def should_continue(state: MessagesState) -> Literal["tools", END]:
messages = state['messages']
last_message = messages[-1]
# If the LLM makes a tool call, then we route to the "tools" node
if last_message.tool_calls:
return "tools"
# Otherwise, we stop (reply to the user)
return END
# Define the function that calls the model
def call_model(state: MessagesState):
messages = state['messages']
response = model.invoke(messages)
# We return a list, because this will get added to the existing list
return {"messages": [response]}
# Define a new graph
workflow = StateGraph(MessagesState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.add_edge(START, "agent")
# We now add a conditional edge
workflow.add_conditional_edges(
# First, we define the start node. We use `agent`.
# This means these are the edges taken after the `agent` node is called.
"agent",
# Next, we pass in the function that will determine which node is called next.
should_continue,
)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("tools", 'agent')
# Initialize memory to persist state between graph runs
checkpointer = MemorySaver()
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable.
# Note that we're (optionally) passing the memory when compiling the graph
app = workflow.compile(checkpointer=checkpointer)
# Use the Runnable
final_state = app.invoke(
{"messages": [HumanMessage(content="what is the weather in sf")]},
config={"configurable": {"thread_id": 42}}
)
final_state["messages"][-1].content
```
```
"Based on the search results, I can tell you that the current weather in San Francisco is:\n\nTemperature: 60 degrees Fahrenheit\nConditions: Foggy\n\nSan Francisco is known for its microclimates and frequent fog, especially during the summer months. The temperature of 60°F (about 15.5°C) is quite typical for the city, which tends to have mild temperatures year-round. The fog, often referred to as "Karl the Fog" by locals, is a characteristic feature of San Francisco\'s weather, particularly in the mornings and evenings.\n\nIs there anything else you\'d like to know about the weather in San Francisco or any other location?"
```
Now when we pass the same `"thread_id"`, the conversation context is retained via the saved state (i.e. stored list of messages)
```python
final_state = app.invoke(
{"messages": [HumanMessage(content="what about ny")]},
config={"configurable": {"thread_id": 42}}
)
final_state["messages"][-1].content
```
```
"Based on the search results, I can tell you that the current weather in New York City is:\n\nTemperature: 90 degrees Fahrenheit (approximately 32.2 degrees Celsius)\nConditions: Sunny\n\nThis weather is quite different from what we just saw in San Francisco. New York is experiencing much warmer temperatures right now. Here are a few points to note:\n\n1. The temperature of 90°F is quite hot, typical of summer weather in New York City.\n2. The sunny conditions suggest clear skies, which is great for outdoor activities but also means it might feel even hotter due to direct sunlight.\n3. This kind of weather in New York often comes with high humidity, which can make it feel even warmer than the actual temperature suggests.\n\nIt's interesting to see the stark contrast between San Francisco's mild, foggy weather and New York's hot, sunny conditions. This difference illustrates how varied weather can be across different parts of the United States, even on the same day.\n\nIs there anything else you'd like to know about the weather in New York or any other location?"
```
### Step-by-step Breakdown
1. <details>
<summary>Initialize the model and tools.</summary>
- we use `ChatAnthropic` as our LLM. **NOTE:** we need to make sure the model knows that it has these tools available to call. We can do this by converting the LangChain tools into the model's tool-calling format using the `.bind_tools()` method.
- we define the tools we want to use - a search tool in our case. It is really easy to create your own tools - see documentation here on how to do that [here](https://python.langchain.com/docs/modules/agents/tools/custom_tools).
</details>
2. <details>
<summary>Initialize graph with state.</summary>
- we initialize graph (`StateGraph`) by passing state schema (in our case `MessagesState`)
- `MessagesState` is a prebuilt state schema that has one attribute -- a list of LangChain `Message` objects, as well as logic for merging the updates from each node into the state
</details>
3. <details>
<summary>Define graph nodes.</summary>
There are two main nodes we need:
- The `agent` node: responsible for deciding what (if any) actions to take.
- The `tools` node that invokes tools: if the agent decides to take an action, this node will then execute that action.
</details>
4. <details>
<summary>Define entry point and graph edges.</summary>
First, we need to set the entry point for graph execution - `agent` node.
Then we define one normal and one conditional edge. Conditional edge means that the destination depends on the contents of the graph's state (`MessagesState`). In our case, the destination is not known until the agent (LLM) decides.
- Conditional edge: after the agent is called, we should either:
- a. Run tools if the agent said to take an action, OR
- b. Finish (respond to the user) if the agent did not ask to run tools
- Normal edge: after the tools are invoked, the graph should always return to the agent to decide what to do next
</details>
5. <details>
<summary>Compile the graph.</summary>
- When we compile the graph, we turn it into a LangChain [Runnable](https://python.langchain.com/v0.2/docs/concepts/#runnable-interface), which automatically enables calling `.invoke()`, `.stream()` and `.batch()` with your inputs
- We can also optionally pass checkpointer object for persisting state between graph runs, and enabling memory, human-in-the-loop workflows, time travel and more. In our case we use `MemorySaver` - a simple in-memory checkpointer
</details>
6. <details>
<summary>Execute the graph.</summary>
1. LangGraph adds the input message to the internal state, then passes the state to the entrypoint node, `"agent"`.
2. The `"agent"` node executes, invoking the chat model.
3. The chat model returns an `AIMessage`. LangGraph adds this to the state.
4. Graph cycles the following steps until there are no more `tool_calls` on `AIMessage`:
- If `AIMessage` has `tool_calls`, `"tools"` node executes
- The `"agent"` node executes again and returns `AIMessage`
5. Execution progresses to the special `END` value and outputs the final state.
And as a result, we get a list of all our chat messages as output.
</details>
## Documentation
* [Tutorials](https://langchain-ai.github.io/langgraph/tutorials/): Learn to build with LangGraph through guided examples.
* [How-to Guides](https://langchain-ai.github.io/langgraph/how-tos/): Accomplish specific things within LangGraph, from streaming, to adding memory & persistence, to common design patterns (branching, subgraphs, etc.), these are the place to go if you want to copy and run a specific code snippet.
* [Conceptual Guides](https://langchain-ai.github.io/langgraph/concepts/high_level/): In-depth explanations of the key concepts and principles behind LangGraph, such as nodes, edges, state and more.
* [API Reference](https://langchain-ai.github.io/langgraph/reference/graphs/): Review important classes and methods, simple examples of how to use the graph and checkpointing APIs, higher-level prebuilt components and more.
* [LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform): LangGraph Platform is a commercial solution for deploying agentic applications in production, built on the open-source LangGraph framework.
## Contributing
For more information on how to contribute, see [here](https://github.com/langchain-ai/langgraph/blob/main/CONTRIBUTING.md).
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/pyproject.toml | [tool.poetry]
name = "langgraph"
version = "0.2.56"
description = "Building stateful, multi-actor applications with LLMs"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/langchain-ai/langgraph"
[tool.poetry.dependencies]
python = ">=3.9.0,<4.0"
langchain-core = ">=0.2.43,<0.4.0,!=0.3.0,!=0.3.1,!=0.3.2,!=0.3.3,!=0.3.4,!=0.3.5,!=0.3.6,!=0.3.7,!=0.3.8,!=0.3.9,!=0.3.10,!=0.3.11,!=0.3.12,!=0.3.13,!=0.3.14"
langgraph-checkpoint = "^2.0.4"
langgraph-sdk = "^0.1.42"
[tool.poetry.group.dev.dependencies]
pytest = "^8.3.2"
pytest-cov = "^4.0.0"
pytest-dotenv = "^0.5.2"
pytest-mock = "^3.10.0"
syrupy = "^4.0.2"
httpx = "^0.26.0"
pytest-watcher = "^0.4.1"
mypy = "^1.6.0"
ruff = "^0.6.2"
jupyter = "^1.0.0"
pytest-xdist = {extras = ["psutil"], version = "^3.6.1"}
pytest-repeat = "^0.9.3"
langgraph-checkpoint = {path = "../checkpoint", develop = true}
langgraph-checkpoint-duckdb = {path = "../checkpoint-duckdb", develop = true}
langgraph-checkpoint-sqlite = {path = "../checkpoint-sqlite", develop = true}
langgraph-checkpoint-postgres = {path = "../checkpoint-postgres", develop = true}
langgraph-sdk = {path = "../sdk-py", develop = true}
psycopg = {extras = ["binary"], version = ">=3.0.0", python = ">=3.10"}
uvloop = "0.21.0beta1"
pyperf = "^2.7.0"
py-spy = "^0.3.14"
types-requests = "^2.32.0.20240914"
[tool.ruff]
lint.select = [ "E", "F", "I" ]
lint.ignore = [ "E501" ]
line-length = 88
indent-width = 4
extend-include = ["*.ipynb"]
[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"
docstring-code-format = false
docstring-code-line-length = "dynamic"
[tool.mypy]
# https://mypy.readthedocs.io/en/stable/config_file.html
disallow_untyped_defs = "True"
explicit_package_bases = "True"
warn_no_return = "False"
warn_unused_ignores = "True"
warn_redundant_casts = "True"
allow_redefinition = "True"
disable_error_code = "typeddict-item, return-value, override, has-type"
[tool.coverage.run]
omit = ["tests/*"]
[tool.pytest-watcher]
now = true
delay = 0.1
patterns = ["*.py"]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
#
# https://github.com/tophat/syrupy
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
addopts = "--full-trace --strict-markers --strict-config --durations=5 --snapshot-warn-unused"
# Registering custom markers.
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/langgraph/poetry.toml | [virtualenvs]
in-project = true
[installer]
modern-installation = false
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_runnable.py | from __future__ import annotations
from typing import Any
import pytest
from langgraph.store.base import BaseStore
from langgraph.types import StreamWriter
from langgraph.utils.runnable import RunnableCallable
pytestmark = pytest.mark.anyio
def test_runnable_callable_func_accepts():
    """RunnableCallable should detect optional ``store``/``writer`` params on the wrapped callable."""

    def sync_func(x: Any) -> str:
        return f"{x}"

    async def async_func(x: Any) -> str:
        return f"{x}"

    def func_with_store(x: Any, store: BaseStore) -> str:
        return f"{x}"

    def func_with_writer(x: Any, writer: StreamWriter) -> str:
        return f"{x}"

    async def afunc_with_store(x: Any, store: BaseStore) -> str:
        return f"{x}"

    async def afunc_with_writer(x: Any, writer: StreamWriter) -> str:
        return f"{x}"

    # (runnable, expects store, expects writer) triples covering sync and async.
    cases = [
        (RunnableCallable(sync_func), False, False),
        (RunnableCallable(func=None, afunc=async_func), False, False),
        (RunnableCallable(func_with_store), True, False),
        (RunnableCallable(func_with_writer), False, True),
        (RunnableCallable(afunc_with_store), True, False),
        (RunnableCallable(afunc_with_writer), False, True),
    ]
    for runnable, accepts_store, accepts_writer in cases:
        assert runnable.func_accepts["writer"] == accepts_writer
        assert runnable.func_accepts["store"] == accepts_store
async def test_runnable_callable_basic():
    """invoke/ainvoke should return the wrapped function's result unchanged."""

    def echo(value: Any) -> str:
        return f"{value}"

    async def aecho(value: Any) -> str:
        return f"{value}"

    # Synchronous path.
    assert RunnableCallable(echo).invoke("test") == "test"
    # Asynchronous path.
    assert await RunnableCallable(func=None, afunc=aecho).ainvoke("test") == "test"
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/fake_tracer.py | from typing import Any, Optional
from uuid import UUID
from langchain_core.messages.base import BaseMessage
from langchain_core.outputs.chat_generation import ChatGeneration
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.tracers import BaseTracer, Run
class FakeTracer(BaseTracer):
    """Fake tracer that records LangChain execution.
    It replaces run ids with deterministic UUIDs for snapshotting."""
    def __init__(self) -> None:
        """Initialize the tracer."""
        super().__init__()
        # Completed (copied) runs, in the order they finished.
        self.runs: list[Run] = []
        # Original run id -> deterministic replacement, so repeated ids map consistently.
        self.uuids_map: dict[UUID, UUID] = {}
        # Sequential, deterministic v4-shaped UUIDs used as replacements.
        self.uuids_generator = (
            UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
        )
    def _replace_uuid(self, uuid: UUID) -> UUID:
        """Return the stable replacement for `uuid`, allocating one on first sight."""
        if uuid not in self.uuids_map:
            self.uuids_map[uuid] = next(self.uuids_generator)
        return self.uuids_map[uuid]
    def _replace_message_id(self, maybe_message: Any) -> Any:
        """Recursively rewrite message ids inside messages/generations/containers.

        Note: unlike run ids, message ids are NOT de-duplicated — every visit
        draws a fresh UUID from the generator.
        """
        if isinstance(maybe_message, BaseMessage):
            maybe_message.id = str(next(self.uuids_generator))
        if isinstance(maybe_message, ChatGeneration):
            maybe_message.message.id = str(next(self.uuids_generator))
        if isinstance(maybe_message, LLMResult):
            for i, gen_list in enumerate(maybe_message.generations):
                for j, gen in enumerate(gen_list):
                    maybe_message.generations[i][j] = self._replace_message_id(gen)
        if isinstance(maybe_message, dict):
            for k, v in maybe_message.items():
                maybe_message[k] = self._replace_message_id(v)
        if isinstance(maybe_message, list):
            for i, v in enumerate(maybe_message):
                maybe_message[i] = self._replace_message_id(v)
        return maybe_message
    def _copy_run(self, run: Run) -> Run:
        """Return a copy of `run` (and its children) with all ids made deterministic."""
        if run.dotted_order:
            # dotted_order is a "."-joined list of "<timestamp>Z<run_id>" segments;
            # rewrite the run id in each segment while keeping the timestamp part.
            levels = run.dotted_order.split(".")
            processed_levels = []
            for level in levels:
                timestamp, run_id = level.split("Z")
                new_run_id = self._replace_uuid(UUID(run_id))
                processed_level = f"{timestamp}Z{new_run_id}"
                processed_levels.append(processed_level)
            new_dotted_order = ".".join(processed_levels)
        else:
            new_dotted_order = None
        return run.copy(
            update={
                "id": self._replace_uuid(run.id),
                # NOTE(review): direct lookup assumes the parent run id was already
                # mapped (parents appear before children) — verify if KeyErrors show up.
                "parent_run_id": (
                    self.uuids_map[run.parent_run_id] if run.parent_run_id else None
                ),
                "child_runs": [self._copy_run(child) for child in run.child_runs],
                "trace_id": self._replace_uuid(run.trace_id) if run.trace_id else None,
                "dotted_order": new_dotted_order,
                "inputs": self._replace_message_id(run.inputs),
                "outputs": self._replace_message_id(run.outputs),
            }
        )
    def _persist_run(self, run: Run) -> None:
        """Persist a run."""
        self.runs.append(self._copy_run(run))
    def flattened_runs(self) -> list[Run]:
        """Return all recorded runs plus their descendants as one flat list."""
        q = [] + self.runs
        result = []
        while q:
            parent = q.pop()
            result.append(parent)
            if parent.child_runs:
                q.extend(parent.child_runs)
        return result
    @property
    def run_ids(self) -> list[Optional[UUID]]:
        """Original (pre-replacement) run ids for all flattened runs, where known."""
        runs = self.flattened_runs()
        # Invert the map to recover original ids from their replacements.
        uuids_map = {v: k for k, v in self.uuids_map.items()}
        return [uuids_map.get(r.id) for r in runs]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_channels.py | import operator
from typing import Sequence, Union
import pytest
from langgraph.channels.binop import BinaryOperatorAggregate
from langgraph.channels.last_value import LastValue
from langgraph.channels.topic import Topic
from langgraph.errors import EmptyChannelError, InvalidUpdateError
pytestmark = pytest.mark.anyio
def test_last_value() -> None:
    """LastValue keeps only the most recent update and round-trips via checkpoint."""
    channel = LastValue(int).from_checkpoint(None)
    assert channel.ValueType is int
    assert channel.UpdateType is int

    # Reading an empty channel fails until the first update arrives.
    with pytest.raises(EmptyChannelError):
        channel.get()
    # More than one value per step is invalid for LastValue.
    with pytest.raises(InvalidUpdateError):
        channel.update([5, 6])

    for value in (3, 4):
        channel.update([value])
        assert channel.get() == value

    # A checkpoint restores the last stored value.
    restored = LastValue(int).from_checkpoint(channel.checkpoint())
    assert restored.get() == 4
def test_topic() -> None:
    """Topic collects each step's values and clears when a step has no updates."""
    channel = Topic(str).from_checkpoint(None)
    assert channel.ValueType is Sequence[str]
    assert channel.UpdateType is Union[str, list[str]]

    # Scalar values are appended in order.
    assert channel.update(["a", "b"])
    assert channel.get() == ["a", "b"]
    # List updates are flattened alongside scalar updates.
    assert channel.update([["c", "d"], "d"])
    assert channel.get() == ["c", "d", "d"]

    # An empty update clears the topic...
    assert channel.update([])
    with pytest.raises(EmptyChannelError):
        channel.get()
    # ...and clearing an already-empty topic reports no change.
    assert not channel.update([]), "channel already empty"

    assert channel.update(["e"])
    assert channel.get() == ["e"]

    # Checkpoints restore state, and each restored copy is independent.
    checkpoint = channel.checkpoint()
    restored = Topic(str).from_checkpoint(checkpoint)
    assert restored.get() == ["e"]
    independent = Topic(str).from_checkpoint(checkpoint)
    independent.update(["f"])
    assert independent.get() == ["f"]
    assert restored.get() == ["e"]
def test_topic_accumulate() -> None:
    """With accumulate=True a Topic retains values across steps instead of clearing."""
    channel = Topic(str, accumulate=True).from_checkpoint(None)
    assert channel.ValueType is Sequence[str]
    assert channel.UpdateType is Union[str, list[str]]

    assert channel.update(["a", "b"])
    assert channel.get() == ["a", "b"]
    # New updates (scalars and nested lists) are appended; history is kept.
    assert channel.update(["b", ["c", "d"], "d"])
    assert channel.get() == ["a", "b", "b", "c", "d", "d"]
    # An empty update is a no-op rather than a reset.
    assert not channel.update([])
    assert channel.get() == ["a", "b", "b", "c", "d", "d"]

    # The accumulated history survives a checkpoint round-trip.
    restored = Topic(str, accumulate=True).from_checkpoint(channel.checkpoint())
    assert restored.get() == ["a", "b", "b", "c", "d", "d"]
    assert restored.update(["e"])
    assert restored.get() == ["a", "b", "b", "c", "d", "d", "e"]
def test_binop() -> None:
    """BinaryOperatorAggregate folds updates with the operator, starting from zero."""
    channel = BinaryOperatorAggregate(int, operator.add).from_checkpoint(None)
    assert channel.ValueType is int
    assert channel.UpdateType is int
    # Fresh channel starts at the type's identity value.
    assert channel.get() == 0

    channel.update([1, 2, 3])
    assert channel.get() == 6
    channel.update([4])
    assert channel.get() == 10

    # The running total survives a checkpoint round-trip.
    restored = BinaryOperatorAggregate(int, operator.add).from_checkpoint(
        channel.checkpoint()
    )
    assert restored.get() == 10
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_state.py | import inspect
import warnings
from dataclasses import dataclass, field
from typing import Annotated as Annotated2
from typing import Any, Optional
import pytest
from langchain_core.runnables import RunnableConfig, RunnableLambda
from pydantic.v1 import BaseModel
from typing_extensions import Annotated, NotRequired, Required, TypedDict
from langgraph.graph.state import StateGraph, _get_node_name, _warn_invalid_state_schema
from langgraph.managed.shared_value import SharedValue
class State(BaseModel):
    """Pydantic (v1) model fixture: a valid state schema for StateGraph."""
    foo: str
    bar: int
class State2(TypedDict):
    """TypedDict fixture: a valid state schema for StateGraph."""
    foo: str
    bar: int
@pytest.mark.parametrize(
    "schema",
    [
        {"foo": "bar"},
        ["hi", lambda x, y: x + y],
        State(foo="bar", bar=1),
        State2(foo="bar", bar=1),
    ],
)
def test_warns_invalid_schema(schema: Any):
    """Instances and plain containers are not valid schemas and should warn."""
    with pytest.warns(UserWarning):
        _warn_invalid_state_schema(schema)
@pytest.mark.parametrize(
    "schema",
    [
        Annotated[dict, lambda x, y: y],
        Annotated2[list, lambda x, y: y],
        dict,
        State,
        State2,
    ],
)
def test_doesnt_warn_valid_schema(schema: Any):
    """Types (optionally Annotated with a reducer) are valid schemas: no warning."""
    # Assert the function does not raise a warning
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        _warn_invalid_state_schema(schema)
def test_state_schema_with_type_hint():
    """Nodes with partial or missing type hints should still run and emit the
    updates implied by their declared (or fallback) schemas."""
    class InputState(TypedDict):
        question: str
    class OutputState(TypedDict):
        input_state: InputState
    class FooState(InputState):
        foo: str
    # Node callables covering every combination of annotated/unannotated params.
    def complete_hint(state: InputState) -> OutputState:
        return {"input_state": state}
    def miss_first_hint(state, config: RunnableConfig) -> OutputState:
        return {"input_state": state}
    def only_return_hint(state, config) -> OutputState:
        return {"input_state": state}
    def miss_all_hint(state, config):
        return {"input_state": state}
    def pre_foo(_) -> FooState:
        return {"foo": "bar"}
    class Foo:
        def __call__(self, state: FooState) -> OutputState:
            assert state.pop("foo") == "bar"
            return {"input_state": state}
    graph = StateGraph(InputState, output=OutputState)
    actions = [
        complete_hint,
        miss_first_hint,
        only_return_hint,
        miss_all_hint,
        pre_foo,
        Foo(),
    ]
    # add_node without an explicit name: the node name is inferred from the callable.
    for action in actions:
        graph.add_node(action)
    def get_name(action) -> str:
        # Mirrors the inference: function __name__, or the class name for instances.
        return getattr(action, "__name__", action.__class__.__name__)
    # Chain all actions linearly in declaration order.
    graph.set_entry_point(get_name(actions[0]))
    for i in range(len(actions) - 1):
        graph.add_edge(get_name(actions[i]), get_name(actions[i + 1]))
    graph.set_finish_point(get_name(actions[-1]))
    graph = graph.compile()
    input_state = InputState(question="Hello World!")
    output_state = OutputState(input_state=input_state)
    foo_state = FooState(foo="bar")
    # Each node's streamed update must match its declared output shape.
    for i, c in enumerate(graph.stream(input_state, stream_mode="updates")):
        node_name = get_name(actions[i])
        if node_name == get_name(pre_foo):
            assert c[node_name] == foo_state
        else:
            assert c[node_name] == output_state
@pytest.mark.parametrize("total_", [True, False])
def test_state_schema_optional_values(total_: bool):
    """Required/NotRequired annotations must drive the JSON schema's `required`
    set for both input and output schemas, regardless of TypedDict total=."""
    class SomeParentState(TypedDict):
        val0a: str
        val0b: Optional[str]
    class InputState(SomeParentState, total=total_): # type: ignore
        val1: str
        val2: Optional[str]
        val3: Required[str]
        val4: NotRequired[dict]
        val5: Annotated[Required[str], "foo"]
        val6: Annotated[NotRequired[str], "bar"]
    class OutputState(SomeParentState, total=total_): # type: ignore
        out_val1: str
        out_val2: Optional[str]
        out_val3: Required[str]
        out_val4: NotRequired[dict]
        out_val5: Annotated[Required[str], "foo"]
        out_val6: Annotated[NotRequired[str], "bar"]
    class State(InputState): # this would be ignored
        val4: dict
        some_shared_channel: Annotated[str, SharedValue.on("assistant_id")] = field(
            default="foo"
        )
    builder = StateGraph(State, input=InputState, output=OutputState)
    builder.add_node("n", lambda x: x)
    builder.add_edge("__start__", "n")
    graph = builder.compile()
    json_schema = graph.get_input_jsonschema()
    # With total=False plain fields become optional; note Optional[...] alone
    # does not affect required-ness — only (Not)Required does.
    if total_ is False:
        expected_required = set()
        expected_optional = {"val2", "val1"}
    else:
        expected_required = {"val1"}
        expected_optional = {"val2"}
    # The others should always have precedence based on the required annotation
    expected_required |= {"val0a", "val3", "val5"}
    expected_optional |= {"val0b", "val4", "val6"}
    assert set(json_schema.get("required", set())) == expected_required
    assert (
        set(json_schema["properties"].keys()) == expected_required | expected_optional
    )
    # Check output schema. Should be the same process
    output_schema = graph.get_output_jsonschema()
    if total_ is False:
        expected_required = set()
        expected_optional = {"out_val2", "out_val1"}
    else:
        expected_required = {"out_val1"}
        expected_optional = {"out_val2"}
    expected_required |= {"val0a", "out_val3", "out_val5"}
    expected_optional |= {"val0b", "out_val4", "out_val6"}
    assert set(output_schema.get("required", set())) == expected_required
    assert (
        set(output_schema["properties"].keys()) == expected_required | expected_optional
    )
@pytest.mark.parametrize("kw_only_", [False, True])
def test_state_schema_default_values(kw_only_: bool):
    """Dataclass fields with defaults (or default factories) should be optional
    in the graph's JSON schema; fields without defaults stay required."""
    kwargs = {}
    # dataclass(kw_only=...) only exists on Python >= 3.10; pass it conditionally.
    if "kw_only" in inspect.signature(dataclass).parameters:
        kwargs = {"kw_only": kw_only_}
    @dataclass(**kwargs)
    class InputState:
        val1: str
        val2: Optional[int]
        val3: Annotated[Optional[float], "optional annotated"]
        val4: Optional[str] = None
        val5: list[int] = field(default_factory=lambda: [1, 2, 3])
        val6: dict[str, int] = field(default_factory=lambda: {"a": 1})
        val7: str = field(default=...)
        val8: Annotated[int, "some metadata"] = 42
        val9: Annotated[str, "more metadata"] = field(default="some foo")
        val10: str = "default"
        val11: Annotated[list[str], "annotated list"] = field(
            default_factory=lambda: ["a", "b"]
        )
        some_shared_channel: Annotated[str, SharedValue.on("assistant_id")] = field(
            default="foo"
        )
    builder = StateGraph(InputState)
    builder.add_node("n", lambda x: x)
    builder.add_edge("__start__", "n")
    graph = builder.compile()
    # Input and output schema should be derived identically here.
    for json_schema in [graph.get_input_jsonschema(), graph.get_output_jsonschema()]:
        # val7's "default" is the Ellipsis sentinel, i.e. effectively no default.
        expected_required = {"val1", "val7"}
        expected_optional = {
            "val2",
            "val3",
            "val4",
            "val5",
            "val6",
            "val8",
            "val9",
            "val10",
            "val11",
        }
    assert set(json_schema.get("required", set())) == expected_required
    assert (
        set(json_schema["properties"].keys()) == expected_required | expected_optional
    )
def test_raises_invalid_managed():
    """Managed channels (e.g. SharedValue) are only allowed in the state schema;
    StateGraph must reject them in the input or output schema with a ValueError.

    Fix: the original repeated `StateGraph(State, input=InputState)` twice —
    the duplicated statement is removed.
    """
    class BadInputState(TypedDict):
        some_thing: str
        # Managed channel in an *input* schema — must be rejected.
        some_input_channel: Annotated[str, SharedValue.on("assistant_id")]
    class InputState(TypedDict):
        some_thing: str
        some_input_channel: str
    class BadOutputState(TypedDict):
        some_thing: str
        # Managed channel in an *output* schema — must be rejected.
        some_output_channel: Annotated[str, SharedValue.on("assistant_id")]
    class OutputState(TypedDict):
        some_thing: str
        some_output_channel: str
    class State(TypedDict):
        some_thing: str
        # Managed channel in the state schema itself is fine.
        some_channel: Annotated[str, SharedValue.on("assistant_id")]
    # All OK: managed channel lives only in the state schema.
    StateGraph(State, input=InputState, output=OutputState)
    StateGraph(State)
    StateGraph(State, input=State, output=State)
    StateGraph(State, input=InputState)
    # Any combination with a managed channel in the input schema must fail.
    bad_input_examples = [
        (State, BadInputState, OutputState),
        (State, BadInputState, BadOutputState),
        (State, BadInputState, State),
        (State, BadInputState, None),
    ]
    for _state, _inp, _outp in bad_input_examples:
        with pytest.raises(
            ValueError,
            match="Invalid managed channels detected in BadInputState: some_input_channel. Managed channels are not permitted in Input/Output schema.",
        ):
            StateGraph(_state, input=_inp, output=_outp)
    # Likewise for a managed channel in the output schema.
    bad_output_examples = [
        (State, InputState, BadOutputState),
        (State, None, BadOutputState),
    ]
    for _state, _inp, _outp in bad_output_examples:
        with pytest.raises(
            ValueError,
            match="Invalid managed channels detected in BadOutputState: some_output_channel. Managed channels are not permitted in Input/Output schema.",
        ):
            StateGraph(_state, input=_inp, output=_outp)
def test__get_node_name() -> None:
    """_get_node_name should derive a sensible name for every supported node kind."""

    def func(state):
        return

    class MyClass:
        def __call__(self, state):
            return

        def class_method(self, state):
            return

    cases = [
        # default runnable name
        (RunnableLambda(func=lambda x: x), "RunnableLambda"),
        # explicitly named runnable
        (RunnableLambda(name="my_runnable", func=lambda x: x), "my_runnable"),
        # bare lambda falls back to its __name__
        (lambda x: x, "<lambda>"),
        # plain function
        (func, "func"),
        # callable class instance uses the class name
        (MyClass(), "MyClass"),
        # bound method uses the method name
        (MyClass().class_method, "class_method"),
    ]
    for node, expected in cases:
        assert _get_node_name(node) == expected
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_prebuilt.py | import dataclasses
import json
from functools import partial
from typing import (
Annotated,
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
import pytest
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import (
AIMessage,
AnyMessage,
BaseMessage,
HumanMessage,
SystemMessage,
ToolCall,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import Runnable, RunnableLambda
from langchain_core.tools import BaseTool, ToolException
from langchain_core.tools import tool as dec_tool
from pydantic import BaseModel, ValidationError
from pydantic.v1 import BaseModel as BaseModelV1
from pydantic.v1 import ValidationError as ValidationErrorV1
from typing_extensions import TypedDict
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.memory import MemorySaver
from langgraph.errors import NodeInterrupt
from langgraph.graph import START, MessagesState, StateGraph, add_messages
from langgraph.prebuilt import (
ToolNode,
ValidationNode,
create_react_agent,
tools_condition,
)
from langgraph.prebuilt.chat_agent_executor import _validate_chat_history
from langgraph.prebuilt.tool_node import (
TOOL_CALL_ERROR_TEMPLATE,
InjectedState,
InjectedStore,
_get_state_args,
_infer_handled_types,
)
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
from langgraph.types import Interrupt
from tests.conftest import (
ALL_CHECKPOINTERS_ASYNC,
ALL_CHECKPOINTERS_SYNC,
IS_LANGCHAIN_CORE_030_OR_GREATER,
awith_checkpointer,
)
from tests.messages import _AnyIdHumanMessage, _AnyIdToolMessage
pytestmark = pytest.mark.anyio
class FakeToolCallingModel(BaseChatModel):
    """Deterministic chat-model stub: echoes the joined message contents and
    emits scripted tool calls, cycling through `tool_calls` per generation."""
    # Scripted tool calls; generation i uses tool_calls[i % len(tool_calls)].
    tool_calls: Optional[list[list[ToolCall]]] = None
    # Number of generations so far; also used as the AIMessage id.
    index: int = 0
    # Which simplified tool-spec format bind_tools should produce.
    tool_style: Literal["openai", "anthropic"] = "openai"
    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Top Level call"""
        # Echo all message contents joined by "-" so tests can assert the prompt.
        messages_string = "-".join([m.content for m in messages])
        tool_calls = (
            self.tool_calls[self.index % len(self.tool_calls)]
            if self.tool_calls
            else []
        )
        message = AIMessage(
            content=messages_string, id=str(self.index), tool_calls=tool_calls.copy()
        )
        self.index += 1
        return ChatResult(generations=[ChatGeneration(message=message)])
    @property
    def _llm_type(self) -> str:
        return "fake-tool-call-model"
    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tools using a minimal, name-only spec in the selected style."""
        if len(tools) == 0:
            raise ValueError("Must provide at least one tool")
        tool_dicts = []
        for tool in tools:
            if not isinstance(tool, BaseTool):
                raise TypeError(
                    "Only BaseTool is supported by FakeToolCallingModel.bind_tools"
                )
            # NOTE: this is a simplified tool spec for testing purposes only
            if self.tool_style == "openai":
                tool_dicts.append(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                        },
                    }
                )
            elif self.tool_style == "anthropic":
                tool_dicts.append(
                    {
                        "name": tool.name,
                    }
                )
        return self.bind(tools=tool_dicts)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_no_modifier(request: pytest.FixtureRequest, checkpointer_name: str) -> None:
    """Without a modifier, the agent echoes the input and persists state via the checkpointer."""
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        "checkpointer_" + checkpointer_name
    )
    model = FakeToolCallingModel()
    agent = create_react_agent(model, [], checkpointer=checkpointer)
    inputs = [HumanMessage("hi?")]
    thread = {"configurable": {"thread_id": "123"}}
    response = agent.invoke({"messages": inputs}, thread, debug=True)
    # FakeToolCallingModel echoes the joined message contents back.
    expected_response = {"messages": inputs + [AIMessage(content="hi?", id="0")]}
    assert response == expected_response
    if checkpointer:
        # The saved checkpoint must contain full message history and run metadata.
        saved = checkpointer.get_tuple(thread)
        assert saved is not None
        assert saved.checkpoint["channel_values"] == {
            "messages": [
                _AnyIdHumanMessage(content="hi?"),
                AIMessage(content="hi?", id="0"),
            ],
            "agent": "agent",
        }
        assert saved.metadata == {
            "parents": {},
            "source": "loop",
            "writes": {"agent": {"messages": [AIMessage(content="hi?", id="0")]}},
            "step": 1,
            "thread_id": "123",
        }
        assert saved.pending_writes == []
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_no_modifier_async(checkpointer_name: str) -> None:
    """Async variant: the agent echoes the input and persists state via the checkpointer."""
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        model = FakeToolCallingModel()
        agent = create_react_agent(model, [], checkpointer=checkpointer)
        inputs = [HumanMessage("hi?")]
        thread = {"configurable": {"thread_id": "123"}}
        response = await agent.ainvoke({"messages": inputs}, thread, debug=True)
        # FakeToolCallingModel echoes the joined message contents back.
        expected_response = {"messages": inputs + [AIMessage(content="hi?", id="0")]}
        assert response == expected_response
        if checkpointer:
            # The saved checkpoint must contain full message history and run metadata.
            saved = await checkpointer.aget_tuple(thread)
            assert saved is not None
            assert saved.checkpoint["channel_values"] == {
                "messages": [
                    _AnyIdHumanMessage(content="hi?"),
                    AIMessage(content="hi?", id="0"),
                ],
                "agent": "agent",
            }
            assert saved.metadata == {
                "parents": {},
                "source": "loop",
                "writes": {"agent": {"messages": [AIMessage(content="hi?", id="0")]}},
                "step": 1,
                "thread_id": "123",
            }
            assert saved.pending_writes == []
def test_passing_two_modifiers():
    """Supplying both messages_modifier and state_modifier must raise ValueError."""
    model = FakeToolCallingModel()
    with pytest.raises(ValueError):
        create_react_agent(model, [], messages_modifier="Foo", state_modifier="Bar")
def test_system_message_modifier():
    """A SystemMessage modifier is prepended identically via either modifier kwarg."""
    messages_modifier = SystemMessage(content="Foo")
    agent_1 = create_react_agent(
        FakeToolCallingModel(), [], messages_modifier=messages_modifier
    )
    agent_2 = create_react_agent(
        FakeToolCallingModel(), [], state_modifier=messages_modifier
    )
    for agent in [agent_1, agent_2]:
        inputs = [HumanMessage("hi?")]
        response = agent.invoke({"messages": inputs})
        # FakeToolCallingModel joins message contents with "-"
        expected_response = {
            "messages": inputs + [AIMessage(content="Foo-hi?", id="0", tool_calls=[])]
        }
        assert response == expected_response
def test_system_message_string_modifier():
    """A plain-string modifier behaves like a system message via either kwarg."""
    messages_modifier = "Foo"
    agent_1 = create_react_agent(
        FakeToolCallingModel(), [], messages_modifier=messages_modifier
    )
    agent_2 = create_react_agent(
        FakeToolCallingModel(), [], state_modifier=messages_modifier
    )
    for agent in [agent_1, agent_2]:
        inputs = [HumanMessage("hi?")]
        response = agent.invoke({"messages": inputs})
        expected_response = {
            "messages": inputs + [AIMessage(content="Foo-hi?", id="0", tool_calls=[])]
        }
        assert response == expected_response
def test_callable_messages_modifier():
    """A callable messages_modifier receives the message list and may replace it."""
    model = FakeToolCallingModel()

    def messages_modifier(messages):
        modified_message = f"Bar {messages[-1].content}"
        return [HumanMessage(content=modified_message)]

    agent = create_react_agent(model, [], messages_modifier=messages_modifier)
    inputs = [HumanMessage("hi?")]
    response = agent.invoke({"messages": inputs})
    expected_response = {"messages": inputs + [AIMessage(content="Bar hi?", id="0")]}
    assert response == expected_response
def test_callable_state_modifier():
    """A callable state_modifier receives the full state dict, not just messages."""
    model = FakeToolCallingModel()

    def state_modifier(state):
        modified_message = f"Bar {state['messages'][-1].content}"
        return [HumanMessage(content=modified_message)]

    agent = create_react_agent(model, [], state_modifier=state_modifier)
    inputs = [HumanMessage("hi?")]
    response = agent.invoke({"messages": inputs})
    expected_response = {"messages": inputs + [AIMessage(content="Bar hi?", id="0")]}
    assert response == expected_response
def test_runnable_messages_modifier():
    """A Runnable messages_modifier is invoked with the message list."""
    model = FakeToolCallingModel()
    messages_modifier = RunnableLambda(
        lambda messages: [HumanMessage(content=f"Baz {messages[-1].content}")]
    )
    agent = create_react_agent(model, [], messages_modifier=messages_modifier)
    inputs = [HumanMessage("hi?")]
    response = agent.invoke({"messages": inputs})
    expected_response = {"messages": inputs + [AIMessage(content="Baz hi?", id="0")]}
    assert response == expected_response
def test_runnable_state_modifier():
    """A Runnable state_modifier is invoked with the full state dict."""
    model = FakeToolCallingModel()
    state_modifier = RunnableLambda(
        lambda state: [HumanMessage(content=f"Baz {state['messages'][-1].content}")]
    )
    agent = create_react_agent(model, [], state_modifier=state_modifier)
    inputs = [HumanMessage("hi?")]
    response = agent.invoke({"messages": inputs})
    expected_response = {"messages": inputs + [AIMessage(content="Baz hi?", id="0")]}
    assert response == expected_response
def test_state_modifier_with_store():
    """state_modifier callables may optionally accept the injected store kwarg."""

    def add(a: int, b: int):
        """Adds a and b"""
        return a + b

    in_memory_store = InMemoryStore()
    in_memory_store.put(("memories", "1"), "user_name", {"data": "User name is Alice"})
    in_memory_store.put(("memories", "2"), "user_name", {"data": "User name is Bob"})

    def modify(state, config, *, store):
        # looks up a per-user memory from the injected store
        user_id = config["configurable"]["user_id"]
        system_str = store.get(("memories", user_id), "user_name").value["data"]
        return [SystemMessage(system_str)] + state["messages"]

    def modify_no_store(state, config):
        # NOTE(review): relies on BaseMessage.__add__ accepting a message list —
        # confirm this is intentional (vs. [SystemMessage("foo")] + ...)
        return SystemMessage("foo") + state["messages"]

    model = FakeToolCallingModel()
    # test state modifier that uses store works
    agent = create_react_agent(
        model, [add], state_modifier=modify, store=in_memory_store
    )
    response = agent.invoke(
        {"messages": [("user", "hi")]}, {"configurable": {"user_id": "1"}}
    )
    assert response["messages"][-1].content == "User name is Alice-hi"
    # test state modifier that doesn't use store works
    agent = create_react_agent(
        model, [add], state_modifier=modify_no_store, store=in_memory_store
    )
    response = agent.invoke(
        {"messages": [("user", "hi")]}, {"configurable": {"user_id": "2"}}
    )
    assert response["messages"][-1].content == "foo-hi"
@pytest.mark.parametrize("tool_style", ["openai", "anthropic"])
def test_model_with_tools(tool_style: str):
    """A pre-bound model must declare the same tools that are passed to the agent."""
    model = FakeToolCallingModel(tool_style=tool_style)

    @dec_tool
    def tool1(some_val: int) -> str:
        """Tool 1 docstring."""
        return f"Tool 1: {some_val}"

    @dec_tool
    def tool2(some_val: int) -> str:
        """Tool 2 docstring."""
        return f"Tool 2: {some_val}"

    # check valid agent constructor
    agent = create_react_agent(model.bind_tools([tool1, tool2]), [tool1, tool2])
    result = agent.nodes["tools"].invoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool1",
                            "args": {"some_val": 2},
                            "id": "some 1",
                        },
                        {
                            "name": "tool2",
                            "args": {"some_val": 2},
                            "id": "some 2",
                        },
                    ],
                )
            ]
        }
    )
    tool_messages: list[ToolMessage] = result["messages"][-2:]
    for tool_message in tool_messages:
        assert tool_message.type == "tool"
        assert tool_message.content in {"Tool 1: 2", "Tool 2: 2"}
        assert tool_message.tool_call_id in {"some 1", "some 2"}
    # test mismatching tool lengths
    with pytest.raises(ValueError):
        create_react_agent(model.bind_tools([tool1]), [tool1, tool2])
    # test missing bound tools
    with pytest.raises(ValueError):
        create_react_agent(model.bind_tools([tool1]), [tool2])
def test__validate_messages():
    """_validate_chat_history accepts answered histories, rejects dangling tool calls."""
    # empty input
    _validate_chat_history([])
    # single human message
    _validate_chat_history(
        [
            HumanMessage(content="What's the weather?"),
        ]
    )
    # human + AI
    _validate_chat_history(
        [
            HumanMessage(content="What's the weather?"),
            AIMessage(content="The weather is sunny and 75°F."),
        ]
    )
    # Answered tool calls
    _validate_chat_history(
        [
            HumanMessage(content="What's the weather?"),
            AIMessage(
                content="Let me check that for you.",
                tool_calls=[{"id": "call1", "name": "get_weather", "args": {}}],
            ),
            ToolMessage(content="Sunny, 75°F", tool_call_id="call1"),
            AIMessage(content="The weather is sunny and 75°F."),
        ]
    )
    # Unanswered tool calls
    with pytest.raises(ValueError):
        _validate_chat_history(
            [
                AIMessage(
                    content="I'll check that for you.",
                    tool_calls=[
                        {"id": "call1", "name": "get_weather", "args": {}},
                        {"id": "call2", "name": "get_time", "args": {}},
                    ],
                )
            ]
        )
    # partially answered tool calls (call2 has no ToolMessage) must also fail
    with pytest.raises(ValueError):
        _validate_chat_history(
            [
                HumanMessage(content="What's the weather and time?"),
                AIMessage(
                    content="I'll check that for you.",
                    tool_calls=[
                        {"id": "call1", "name": "get_weather", "args": {}},
                        {"id": "call2", "name": "get_time", "args": {}},
                    ],
                ),
                ToolMessage(content="Sunny, 75°F", tool_call_id="call1"),
                AIMessage(
                    content="The weather is sunny and 75°F. Let me check the time."
                ),
            ]
        )
def test__infer_handled_types() -> None:
    """_infer_handled_types derives exception tuples from handler signatures."""

    def handle(e):  # type: ignore
        return ""

    def handle2(e: Exception) -> str:
        return ""

    def handle3(e: Union[ValueError, ToolException]) -> str:
        return ""

    class Handler:
        def handle(self, e: ValueError) -> str:
            return ""

    handle4 = Handler().handle

    def handle5(e: Union[Union[TypeError, ValueError], ToolException]):
        return ""

    # unannotated handler defaults to catching Exception
    expected: tuple = (Exception,)
    actual = _infer_handled_types(handle)
    assert expected == actual
    expected = (Exception,)
    actual = _infer_handled_types(handle2)
    assert expected == actual
    expected = (ValueError, ToolException)
    actual = _infer_handled_types(handle3)
    assert expected == actual
    # bound methods: self is skipped, only the exception param counts
    expected = (ValueError,)
    actual = _infer_handled_types(handle4)
    assert expected == actual
    # nested unions are flattened
    expected = (TypeError, ValueError, ToolException)
    actual = _infer_handled_types(handle5)
    assert expected == actual
    # non-exception annotations are rejected
    with pytest.raises(ValueError):

        def handler(e: str):
            return ""

        _infer_handled_types(handler)
    with pytest.raises(ValueError):

        def handler(e: list[Exception]):
            return ""

        _infer_handled_types(handler)
    with pytest.raises(ValueError):

        def handler(e: Union[str, int]):
            return ""

        _infer_handled_types(handler)
# tools for testing ToolNode
def tool1(some_val: int, some_other_val: str) -> str:
    """Tool 1 docstring."""
    # a zero value triggers the error path exercised by the error-handling tests
    if not some_val:
        raise ValueError("Test error")
    return "{} - {}".format(some_val, some_other_val)
async def tool2(some_val: int, some_other_val: str) -> str:
    """Tool 2 docstring."""
    # zero triggers a ToolException (vs. tool1's ValueError) to test handler routing
    if some_val == 0:
        raise ToolException("Test error")
    return f"tool2: {some_val} - {some_other_val}"
async def tool3(some_val: int, some_other_val: str) -> list[dict[str, Any]]:
    """Tool 3 docstring."""
    # returns a list of dicts so tests can check JSON-serialized tool content
    return [
        {"key_1": some_val, "key_2": "foo"},
        {"key_1": some_other_val, "key_2": "baz"},
    ]
async def tool4(some_val: int, some_other_val: str) -> list[dict[str, Any]]:
    """Tool 4 docstring."""
    # returns content blocks (not plain text) to test pass-through of list content
    return [
        {"type": "image_url", "image_url": {"url": "abdc"}},
    ]
@dec_tool
def tool5(some_val: int):
    """Tool 5 docstring."""
    # always fails; per-tool handler below overrides ToolNode-level handling
    raise ToolException("Test error")


tool5.handle_tool_error = "foo"
async def test_tool_node():
    """ToolNode executes sync/async tools and formats string, JSON, and block content."""
    result = ToolNode([tool1]).invoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool1",
                            "args": {"some_val": 1, "some_other_val": "foo"},
                            "id": "some 0",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result["messages"][-1]
    assert tool_message.type == "tool"
    assert tool_message.content == "1 - foo"
    assert tool_message.tool_call_id == "some 0"
    result2 = await ToolNode([tool2]).ainvoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool2",
                            "args": {"some_val": 2, "some_other_val": "bar"},
                            "id": "some 1",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result2["messages"][-1]
    assert tool_message.type == "tool"
    assert tool_message.content == "tool2: 2 - bar"
    # list of dicts tool content is JSON-serialized into the message content
    result3 = await ToolNode([tool3]).ainvoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool3",
                            "args": {"some_val": 2, "some_other_val": "bar"},
                            "id": "some 2",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result3["messages"][-1]
    assert tool_message.type == "tool"
    assert (
        tool_message.content
        == '[{"key_1": 2, "key_2": "foo"}, {"key_1": "bar", "key_2": "baz"}]'
    )
    assert tool_message.tool_call_id == "some 2"
    # list of content blocks is passed through untouched
    result4 = await ToolNode([tool4]).ainvoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool4",
                            "args": {"some_val": 2, "some_other_val": "bar"},
                            "id": "some 3",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result4["messages"][-1]
    assert tool_message.type == "tool"
    assert tool_message.content == [{"type": "image_url", "image_url": {"url": "abdc"}}]
    assert tool_message.tool_call_id == "some 3"
async def test_tool_node_error_handling():
    """All three handle_tool_errors forms catch errors into error ToolMessages."""

    def handle_all(e: Union[ValueError, ToolException, ValidationError]):
        return TOOL_CALL_ERROR_TEMPLATE.format(error=repr(e))

    # test catching all exceptions, via:
    # - handle_tool_errors = True
    # - passing a tuple of all exceptions
    # - passing a callable with all exceptions in the signature
    for handle_tool_errors in (
        True,
        (ValueError, ToolException, ValidationError),
        handle_all,
    ):
        result_error = await ToolNode(
            [tool1, tool2, tool3], handle_tool_errors=handle_tool_errors
        ).ainvoke(
            {
                "messages": [
                    AIMessage(
                        "hi?",
                        tool_calls=[
                            {
                                "name": "tool1",
                                "args": {"some_val": 0, "some_other_val": "foo"},
                                "id": "some id",
                            },
                            {
                                "name": "tool2",
                                "args": {"some_val": 0, "some_other_val": "bar"},
                                "id": "some other id",
                            },
                            {
                                # missing some_other_val -> validation error
                                "name": "tool3",
                                "args": {"some_val": 0},
                                "id": "another id",
                            },
                        ],
                    )
                ]
            }
        )
        assert all(m.type == "tool" for m in result_error["messages"])
        assert all(m.status == "error" for m in result_error["messages"])
        assert (
            result_error["messages"][0].content
            == f"Error: {repr(ValueError('Test error'))}\n Please fix your mistakes."
        )
        assert (
            result_error["messages"][1].content
            == f"Error: {repr(ToolException('Test error'))}\n Please fix your mistakes."
        )
        assert (
            "ValidationError" in result_error["messages"][2].content
            or "validation error" in result_error["messages"][2].content
        )
        assert result_error["messages"][0].tool_call_id == "some id"
        assert result_error["messages"][1].tool_call_id == "some other id"
        assert result_error["messages"][2].tool_call_id == "another id"
async def test_tool_node_error_handling_callable():
    """Handlers catch only their declared exception types; others propagate."""

    def handle_value_error(e: ValueError):
        return "Value error"

    def handle_tool_exception(e: ToolException):
        return "Tool exception"

    for handle_tool_errors in ("Value error", handle_value_error):
        result_error = await ToolNode(
            [tool1], handle_tool_errors=handle_tool_errors
        ).ainvoke(
            {
                "messages": [
                    AIMessage(
                        "hi?",
                        tool_calls=[
                            {
                                "name": "tool1",
                                "args": {"some_val": 0, "some_other_val": "foo"},
                                "id": "some id",
                            },
                        ],
                    )
                ]
            }
        )
        tool_message: ToolMessage = result_error["messages"][-1]
        assert tool_message.type == "tool"
        assert tool_message.status == "error"
        assert tool_message.content == "Value error"
    # test raising for an unhandled exception, via:
    # - passing a tuple of all exceptions
    # - passing a callable with all exceptions in the signature
    for handle_tool_errors in ((ValueError,), handle_value_error):
        # tool2's ToolException is not covered, so it propagates
        with pytest.raises(ToolException) as exc_info:
            await ToolNode(
                [tool1, tool2], handle_tool_errors=handle_tool_errors
            ).ainvoke(
                {
                    "messages": [
                        AIMessage(
                            "hi?",
                            tool_calls=[
                                {
                                    "name": "tool1",
                                    "args": {"some_val": 0, "some_other_val": "foo"},
                                    "id": "some id",
                                },
                                {
                                    "name": "tool2",
                                    "args": {"some_val": 0, "some_other_val": "bar"},
                                    "id": "some other id",
                                },
                            ],
                        )
                    ]
                }
            )
        assert str(exc_info.value) == "Test error"
    for handle_tool_errors in ((ToolException,), handle_tool_exception):
        # tool1's ValueError is not covered, so it propagates
        with pytest.raises(ValueError) as exc_info:
            await ToolNode(
                [tool1, tool2], handle_tool_errors=handle_tool_errors
            ).ainvoke(
                {
                    "messages": [
                        AIMessage(
                            "hi?",
                            tool_calls=[
                                {
                                    "name": "tool1",
                                    "args": {"some_val": 0, "some_other_val": "foo"},
                                    "id": "some id",
                                },
                                {
                                    "name": "tool2",
                                    "args": {"some_val": 0, "some_other_val": "bar"},
                                    "id": "some other id",
                                },
                            ],
                        )
                    ]
                }
            )
        assert str(exc_info.value) == "Test error"
async def test_tool_node_handle_tool_errors_false():
    """With handle_tool_errors=False, tool exceptions propagate to the caller."""
    with pytest.raises(ValueError) as exc_info:
        ToolNode([tool1], handle_tool_errors=False).invoke(
            {
                "messages": [
                    AIMessage(
                        "hi?",
                        tool_calls=[
                            {
                                "name": "tool1",
                                "args": {"some_val": 0, "some_other_val": "foo"},
                                "id": "some id",
                            }
                        ],
                    )
                ]
            }
        )
    assert str(exc_info.value) == "Test error"
    # fix: capture the ToolException here; previously the assert below re-checked
    # the stale ValueError exc_info from the first context manager
    with pytest.raises(ToolException) as exc_info:
        await ToolNode([tool2], handle_tool_errors=False).ainvoke(
            {
                "messages": [
                    AIMessage(
                        "hi?",
                        tool_calls=[
                            {
                                "name": "tool2",
                                "args": {"some_val": 0, "some_other_val": "bar"},
                                "id": "some id",
                            }
                        ],
                    )
                ]
            }
        )
    assert str(exc_info.value) == "Test error"
    # test validation errors get raised if handle_tool_errors is False
    with pytest.raises((ValidationError, ValidationErrorV1)):
        ToolNode([tool1], handle_tool_errors=False).invoke(
            {
                "messages": [
                    AIMessage(
                        "hi?",
                        tool_calls=[
                            {
                                "name": "tool1",
                                "args": {"some_val": 0},
                                "id": "some id",
                            }
                        ],
                    )
                ]
            }
        )
def test_tool_node_individual_tool_error_handling():
    # test error handling on individual tools (and that it overrides overall error handling!)
    # tool5 declares handle_tool_error="foo", which must win over the node-level "bar"
    result_individual_tool_error_handler = ToolNode(
        [tool5], handle_tool_errors="bar"
    ).invoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool5",
                            "args": {"some_val": 0},
                            "id": "some 0",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result_individual_tool_error_handler["messages"][-1]
    assert tool_message.type == "tool"
    assert tool_message.status == "error"
    assert tool_message.content == "foo"
    assert tool_message.tool_call_id == "some 0"
def test_tool_node_incorrect_tool_name():
    """Calling a tool name the node does not know yields an error ToolMessage."""
    result_incorrect_name = ToolNode([tool1, tool2]).invoke(
        {
            "messages": [
                AIMessage(
                    "hi?",
                    tool_calls=[
                        {
                            "name": "tool3",
                            "args": {"some_val": 1, "some_other_val": "foo"},
                            "id": "some 0",
                        }
                    ],
                )
            ]
        }
    )
    tool_message: ToolMessage = result_incorrect_name["messages"][-1]
    assert tool_message.type == "tool"
    assert tool_message.status == "error"
    assert (
        tool_message.content
        == "Error: tool3 is not a valid tool, try one of [tool1, tool2]."
    )
    assert tool_message.tool_call_id == "some 0"
def test_tool_node_node_interrupt():
    """NodeInterrupt always propagates out of ToolNode, regardless of error handling."""

    def tool_normal(some_val: int) -> str:
        """Tool docstring."""
        return "normal"

    def tool_interrupt(some_val: int) -> str:
        """Tool docstring."""
        raise NodeInterrupt("foo")

    def handle(e: NodeInterrupt):
        return "handled"

    # NodeInterrupt must escape even when handlers nominally cover it
    for handle_tool_errors in (True, (NodeInterrupt,), "handled", handle, False):
        node = ToolNode([tool_interrupt], handle_tool_errors=handle_tool_errors)
        with pytest.raises(NodeInterrupt) as exc_info:
            node.invoke(
                {
                    "messages": [
                        AIMessage(
                            "hi?",
                            tool_calls=[
                                {
                                    "name": "tool_interrupt",
                                    "args": {"some_val": 0},
                                    "id": "some 0",
                                }
                            ],
                        )
                    ]
                }
            )
            # NOTE(review): comparing the exception object to "foo" — presumably
            # relies on NodeInterrupt equality semantics; confirm vs. str(exc_info.value)
            assert exc_info.value == "foo"
    # test inside react agent
    model = FakeToolCallingModel(
        tool_calls=[
            [
                ToolCall(name="tool_interrupt", args={"some_val": 0}, id="1"),
                ToolCall(name="tool_normal", args={"some_val": 1}, id="2"),
            ],
            [],
        ]
    )
    checkpointer = MemorySaver()
    config = {"configurable": {"thread_id": "1"}}
    agent = create_react_agent(
        model, [tool_interrupt, tool_normal], checkpointer=checkpointer
    )
    result = agent.invoke({"messages": [HumanMessage("hi?")]}, config)
    assert result["messages"] == [
        _AnyIdHumanMessage(
            content="hi?",
        ),
        AIMessage(
            content="hi?",
            id="0",
            tool_calls=[
                {
                    "name": "tool_interrupt",
                    "args": {"some_val": 0},
                    "id": "1",
                    "type": "tool_call",
                },
                {
                    "name": "tool_normal",
                    "args": {"some_val": 1},
                    "id": "2",
                    "type": "tool_call",
                },
            ],
        ),
    ]
    # the interrupted run leaves the graph paused at the tools node
    state = agent.get_state(config)
    assert state.next == ("tools",)
    task = state.tasks[0]
    assert task.name == "tools"
    assert task.interrupts == (Interrupt(value="foo", when="during"),)
def my_function(some_val: int, some_other_val: str) -> str:
    """Join the two values with a " - " separator."""
    return " - ".join((str(some_val), some_other_val))
class MyModel(BaseModel):
    # pydantic v2 schema used as a ValidationNode tool schema
    some_val: int
    some_other_val: str


class MyModelV1(BaseModelV1):
    # pydantic v1 counterpart of MyModel
    some_val: int
    some_other_val: str
@dec_tool
def my_tool(some_val: int, some_other_val: str) -> str:
    """Cool."""
    return f"{some_val} - {some_other_val}"
@pytest.mark.parametrize(
    "tool_schema",
    [
        my_function,
        MyModel,
        MyModelV1,
        my_tool,
    ],
)
@pytest.mark.parametrize("use_message_key", [True, False])
async def test_validation_node(tool_schema: Any, use_message_key: bool):
    """ValidationNode flags invalid tool args as errors, sync and async alike."""
    validation_node = ValidationNode([tool_schema])
    # schema may be a function (__name__) or a BaseTool (name)
    tool_name = getattr(tool_schema, "name", getattr(tool_schema, "__name__", None))
    inputs = [
        AIMessage(
            "hi?",
            tool_calls=[
                {
                    "name": tool_name,
                    "args": {"some_val": 1, "some_other_val": "foo"},
                    "id": "some 0",
                },
                {
                    "name": tool_name,
                    # Wrong type for some_val
                    "args": {"some_val": "bar", "some_other_val": "foo"},
                    "id": "some 1",
                },
            ],
        ),
    ]
    if use_message_key:
        inputs = {"messages": inputs}
    result = await validation_node.ainvoke(inputs)
    if use_message_key:
        result = result["messages"]

    def check_results(messages: list):
        assert len(messages) == 2
        assert all(m.type == "tool" for m in messages)
        assert not messages[0].additional_kwargs.get("is_error")
        assert messages[1].additional_kwargs.get("is_error")

    check_results(result)
    result_sync = validation_node.invoke(inputs)
    if use_message_key:
        result_sync = result_sync["messages"]
    check_results(result_sync)
# Four equivalent state schemas (TypedDict, pydantic v1/v2, dataclass) used to
# parametrize the InjectedState tests below.
class _InjectStateSchema(TypedDict):
    messages: list
    foo: str


class _InjectedStatePydanticSchema(BaseModelV1):
    messages: list
    foo: str


class _InjectedStatePydanticV2Schema(BaseModel):
    messages: list
    foo: str


@dataclasses.dataclass
class _InjectedStateDataclassSchema:
    messages: list
    foo: str


# generic state-schema type for the parametrized tests
T = TypeVar("T")
@pytest.mark.parametrize(
    "schema_",
    [
        _InjectStateSchema,
        _InjectedStatePydanticSchema,
        _InjectedStatePydanticV2Schema,
        _InjectedStateDataclassSchema,
    ],
)
def test_tool_node_inject_state(schema_: Type[T]) -> None:
    """InjectedState passes graph state (whole or per-field) into tool args."""

    def tool1(some_val: int, state: Annotated[T, InjectedState]) -> str:
        """Tool 1 docstring."""
        if isinstance(state, dict):
            return state["foo"]
        else:
            return getattr(state, "foo")

    def tool2(some_val: int, state: Annotated[T, InjectedState()]) -> str:
        """Tool 2 docstring."""
        if isinstance(state, dict):
            return state["foo"]
        else:
            return getattr(state, "foo")

    def tool3(
        some_val: int,
        foo: Annotated[str, InjectedState("foo")],
        msgs: Annotated[List[AnyMessage], InjectedState("messages")],
    ) -> str:
        """Tool 1 docstring."""
        return foo

    def tool4(
        some_val: int, msgs: Annotated[List[AnyMessage], InjectedState("messages")]
    ) -> str:
        """Tool 1 docstring."""
        return msgs[0].content

    node = ToolNode([tool1, tool2, tool3, tool4])
    for tool_name in ("tool1", "tool2", "tool3"):
        tool_call = {
            "name": tool_name,
            "args": {"some_val": 1},
            "id": "some 0",
            "type": "tool_call",
        }
        msg = AIMessage("hi?", tool_calls=[tool_call])
        result = node.invoke(schema_(**{"messages": [msg], "foo": "bar"}))
        tool_message = result["messages"][-1]
        assert tool_message.content == "bar", f"Failed for tool={tool_name}"
        if tool_name == "tool3":
            # tool3 requires the "foo" key: missing state must raise
            failure_input = None
            try:
                failure_input = schema_(**{"messages": [msg], "notfoo": "bar"})
            except Exception:
                pass
            if failure_input is not None:
                with pytest.raises(KeyError):
                    node.invoke(failure_input)
                with pytest.raises(ValueError):
                    node.invoke([msg])
        else:
            failure_input = None
            try:
                failure_input = schema_(**{"messages": [msg], "notfoo": "bar"})
            except Exception:
                # We'd get a validation error from pydantic state and wouldn't make it to the node
                # anyway
                pass
            if failure_input is not None:
                # tool1/tool2 surface the KeyError as an error message instead
                messages_ = node.invoke(failure_input)
                tool_message = messages_["messages"][-1]
                assert "KeyError" in tool_message.content
                tool_message = node.invoke([msg])[-1]
                assert "KeyError" in tool_message.content

    # tool4 only needs the messages list, so a bare message list also works
    tool_call = {
        "name": "tool4",
        "args": {"some_val": 1},
        "id": "some 0",
        "type": "tool_call",
    }
    msg = AIMessage("hi?", tool_calls=[tool_call])
    result = node.invoke(schema_(**{"messages": [msg], "foo": ""}))
    tool_message = result["messages"][-1]
    assert tool_message.content == "hi?"
    result = node.invoke([msg])
    tool_message = result[-1]
    assert tool_message.content == "hi?"
@pytest.mark.skipif(
    not IS_LANGCHAIN_CORE_030_OR_GREATER,
    reason="Langchain core 0.3.0 or greater is required",
)
def test_tool_node_inject_store() -> None:
    """InjectedStore passes the graph's store into tools, standalone and in a graph."""
    store = InMemoryStore()
    namespace = ("test",)

    def tool1(some_val: int, store: Annotated[BaseStore, InjectedStore()]) -> str:
        """Tool 1 docstring."""
        store_val = store.get(namespace, "test_key").value["foo"]
        return f"Some val: {some_val}, store val: {store_val}"

    def tool2(some_val: int, store: Annotated[BaseStore, InjectedStore()]) -> str:
        """Tool 2 docstring."""
        store_val = store.get(namespace, "test_key").value["foo"]
        return f"Some val: {some_val}, store val: {store_val}"

    def tool3(
        some_val: int,
        bar: Annotated[str, InjectedState("bar")],
        store: Annotated[BaseStore, InjectedStore()],
    ) -> str:
        """Tool 3 docstring."""
        store_val = store.get(namespace, "test_key").value["foo"]
        return f"Some val: {some_val}, store val: {store_val}, state val: {bar}"

    node = ToolNode([tool1, tool2, tool3], handle_tool_errors=True)
    store.put(namespace, "test_key", {"foo": "bar"})

    class State(MessagesState):
        bar: str

    builder = StateGraph(State)
    builder.add_node("tools", node)
    builder.add_edge(START, "tools")
    graph = builder.compile(store=store)
    for tool_name in ("tool1", "tool2"):
        tool_call = {
            "name": tool_name,
            "args": {"some_val": 1},
            "id": "some 0",
            "type": "tool_call",
        }
        msg = AIMessage("hi?", tool_calls=[tool_call])
        node_result = node.invoke({"messages": [msg]}, store=store)
        graph_result = graph.invoke({"messages": [msg]})
        for result in (node_result, graph_result):
            result["messages"][-1]
            tool_message = result["messages"][-1]
            assert (
                tool_message.content == "Some val: 1, store val: bar"
            ), f"Failed for tool={tool_name}"

    # tool3 combines InjectedStore with an InjectedState field
    tool_call = {
        "name": "tool3",
        "args": {"some_val": 1},
        "id": "some 0",
        "type": "tool_call",
    }
    msg = AIMessage("hi?", tool_calls=[tool_call])
    node_result = node.invoke({"messages": [msg], "bar": "baz"}, store=store)
    graph_result = graph.invoke({"messages": [msg], "bar": "baz"})
    for result in (node_result, graph_result):
        result["messages"][-1]
        tool_message = result["messages"][-1]
        assert (
            tool_message.content == "Some val: 1, store val: bar, state val: baz"
        ), f"Failed for tool={tool_name}"

    # test injected store without passing store to compiled graph
    failing_graph = builder.compile()
    with pytest.raises(ValueError):
        failing_graph.invoke({"messages": [msg], "bar": "baz"})
def test_tool_node_ensure_utf8() -> None:
    """Non-ASCII tool output is serialized without escaping (ensure_ascii=False)."""

    @dec_tool
    def get_day_list(days: list[str]) -> list[str]:
        """choose days"""
        return days

    data = ["星期一", "水曜日", "목요일", "Friday"]
    tools = [get_day_list]
    tool_calls = [ToolCall(name=get_day_list.name, args={"days": data}, id="test_id")]
    outputs: list[ToolMessage] = ToolNode(tools).invoke(
        [AIMessage(content="", tool_calls=tool_calls)]
    )
    assert outputs[0].content == json.dumps(data, ensure_ascii=False)
def test_tool_node_messages_key() -> None:
    """ToolNode and tools_condition can read/write a custom messages state key."""

    @dec_tool
    def add(a: int, b: int):
        """Adds a and b."""
        return a + b

    model = FakeToolCallingModel(
        tool_calls=[[ToolCall(name=add.name, args={"a": 1, "b": 2}, id="test_id")]]
    )

    class State(TypedDict):
        # messages stored under a non-default key
        subgraph_messages: Annotated[list[AnyMessage], add_messages]

    def call_model(state: State):
        response = model.invoke(state["subgraph_messages"])
        model.tool_calls = []
        return {"subgraph_messages": response}

    builder = StateGraph(State)
    builder.add_node("agent", call_model)
    builder.add_node("tools", ToolNode([add], messages_key="subgraph_messages"))
    builder.add_conditional_edges(
        "agent", partial(tools_condition, messages_key="subgraph_messages")
    )
    builder.add_edge(START, "agent")
    builder.add_edge("tools", "agent")
    graph = builder.compile()
    result = graph.invoke({"subgraph_messages": [HumanMessage(content="hi")]})
    assert result["subgraph_messages"] == [
        _AnyIdHumanMessage(content="hi"),
        AIMessage(
            content="hi",
            id="0",
            tool_calls=[ToolCall(name=add.name, args={"a": 1, "b": 2}, id="test_id")],
        ),
        _AnyIdToolMessage(content="3", name=add.name, tool_call_id="test_id"),
        AIMessage(content="hi-hi-3", id="1"),
    ]
async def test_return_direct() -> None:
    """A return_direct tool ends the run immediately; mixed calls still end it."""

    @dec_tool(return_direct=True)
    def tool_return_direct(input: str) -> str:
        """A tool that returns directly."""
        return f"Direct result: {input}"

    @dec_tool
    def tool_normal(input: str) -> str:
        """A normal tool."""
        return f"Normal result: {input}"

    first_tool_call = [
        ToolCall(
            name="tool_return_direct",
            args={"input": "Test direct"},
            id="1",
        ),
    ]
    expected_ai = AIMessage(
        content="Test direct",
        id="0",
        tool_calls=first_tool_call,
    )
    model = FakeToolCallingModel(tool_calls=[first_tool_call, []])
    agent = create_react_agent(model, [tool_return_direct, tool_normal])
    # Test direct return for tool_return_direct: no trailing AI message
    result = agent.invoke(
        {"messages": [HumanMessage(content="Test direct", id="hum0")]}
    )
    assert result["messages"] == [
        HumanMessage(content="Test direct", id="hum0"),
        expected_ai,
        ToolMessage(
            content="Direct result: Test direct",
            name="tool_return_direct",
            tool_call_id="1",
            id=result["messages"][2].id,
        ),
    ]
    second_tool_call = [
        ToolCall(
            name="tool_normal",
            args={"input": "Test normal"},
            id="2",
        ),
    ]
    model = FakeToolCallingModel(tool_calls=[second_tool_call, []])
    agent = create_react_agent(model, [tool_return_direct, tool_normal])
    # normal tool: the agent loops back to the model for a final AI message
    result = agent.invoke(
        {"messages": [HumanMessage(content="Test normal", id="hum1")]}
    )
    assert result["messages"] == [
        HumanMessage(content="Test normal", id="hum1"),
        AIMessage(content="Test normal", id="0", tool_calls=second_tool_call),
        ToolMessage(
            content="Normal result: Test normal",
            name="tool_normal",
            tool_call_id="2",
            id=result["messages"][2].id,
        ),
        AIMessage(content="Test normal-Test normal-Normal result: Test normal", id="1"),
    ]
    both_tool_calls = [
        ToolCall(
            name="tool_return_direct",
            args={"input": "Test both direct"},
            id="3",
        ),
        ToolCall(
            name="tool_normal",
            args={"input": "Test both normal"},
            id="4",
        ),
    ]
    model = FakeToolCallingModel(tool_calls=[both_tool_calls, []])
    agent = create_react_agent(model, [tool_return_direct, tool_normal])
    # mixed batch: any return_direct call ends the run after the tool messages
    result = agent.invoke({"messages": [HumanMessage(content="Test both", id="hum2")]})
    assert result["messages"] == [
        HumanMessage(content="Test both", id="hum2"),
        AIMessage(content="Test both", id="0", tool_calls=both_tool_calls),
        ToolMessage(
            content="Direct result: Test both direct",
            name="tool_return_direct",
            tool_call_id="3",
            id=result["messages"][2].id,
        ),
        ToolMessage(
            content="Normal result: Test both normal",
            name="tool_normal",
            tool_call_id="4",
            id=result["messages"][3].id,
        ),
    ]
def test__get_state_args() -> None:
    """_get_state_args maps InjectedState args (incl. inherited) to their state keys."""

    class Schema1(BaseModel):
        a: Annotated[str, InjectedState]

    class Schema2(Schema1):
        b: Annotated[int, InjectedState("bar")]

    @dec_tool(args_schema=Schema2)
    def foo(a: str, b: int) -> float:
        """return"""
        return 0.0

    # bare InjectedState -> whole state (None); InjectedState("bar") -> that key
    assert _get_state_args(foo) == {"a": None, "b": "bar"}
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/memory_assert.py | import asyncio
import os
import tempfile
from collections import defaultdict
from functools import partial
from typing import Any, Optional
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
SerializerProtocol,
copy_checkpoint,
)
from langgraph.checkpoint.memory import MemorySaver, PersistentDict
class NoopSerializer(SerializerProtocol):
    """Serializer that passes values through unchanged, tagging them as "type"."""

    def loads_typed(self, data: tuple[str, bytes]) -> Any:
        # ignore the tag; return the payload as-is
        return data[1]

    def dumps_typed(self, obj: Any) -> tuple[str, bytes]:
        # constant tag, untouched payload
        return "type", obj
class MemorySaverAssertImmutable(MemorySaver):
    """MemorySaver that asserts stored checkpoints are never mutated after put().

    On every put() it keeps a serialized copy of the checkpoint; the next put()
    for the same thread/namespace verifies the previously saved checkpoint still
    equals that copy.
    """

    # thread_id -> checkpoint_ns -> checkpoint id -> serialized copy
    storage_for_copies: defaultdict[str, dict[str, dict[str, Checkpoint]]]

    def __init__(
        self,
        *,
        serde: Optional[SerializerProtocol] = None,
        put_sleep: Optional[float] = None,
    ) -> None:
        # back the saver with a temp-file PersistentDict, removed on close
        _, filename = tempfile.mkstemp()
        super().__init__(
            serde=serde, factory=partial(PersistentDict, filename=filename)
        )
        self.storage_for_copies = defaultdict(lambda: defaultdict(dict))
        self.put_sleep = put_sleep
        self.stack.callback(os.remove, filename)

    def put(
        self,
        config: dict,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        # optional artificial latency, useful for concurrency tests
        if self.put_sleep:
            import time

            time.sleep(self.put_sleep)
        # assert checkpoint hasn't been modified since last written
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        if saved := super().get(config):
            assert (
                self.serde.loads_typed(
                    self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]]
                )
                == saved
            )
        # snapshot a deep copy so later mutation of `checkpoint` is detectable
        self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = (
            self.serde.dumps_typed(copy_checkpoint(checkpoint))
        )
        # call super to write checkpoint
        return super().put(config, checkpoint, metadata, new_versions)
class MemorySaverAssertCheckpointMetadata(MemorySaver):
    """This custom checkpointer is for verifying that a run's configurable
    fields are merged with the previous checkpoint config for each step in
    the run. This is the desired behavior. Because the checkpointer's (a)put()
    method is called for each step, the implementation of this checkpointer
    should produce a side effect that can be asserted.
    """

    def put(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """The implementation of put() merges config["configurable"] (a run's
        configurable fields) with the metadata field. The state of the
        checkpoint metadata can be asserted to confirm that the run's
        configurable fields were merged with the previous checkpoint config.
        """
        configurable = config["configurable"].copy()

        # remove checkpoint_id to make testing simpler
        checkpoint_id = configurable.pop("checkpoint_id", None)
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        self.storage[thread_id][checkpoint_ns].update(
            {
                checkpoint["id"]: (
                    self.serde.dumps_typed(checkpoint),
                    # merge configurable fields and metadata
                    self.serde.dumps_typed({**configurable, **metadata}),
                    checkpoint_id,
                )
            }
        )
        return {
            "configurable": {
                "thread_id": config["configurable"]["thread_id"],
                "checkpoint_id": checkpoint["id"],
            }
        }

    async def aput(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        # delegate to the sync put() on the default executor
        return await asyncio.get_running_loop().run_in_executor(
            None, self.put, config, checkpoint, metadata, new_versions
        )
class MemorySaverNoPending(MemorySaver):
    """MemorySaver variant that strips pending writes from returned tuples."""

    def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
        result = super().get_tuple(config)
        if result:
            # rebuild without parent_config/pending_writes fields
            return CheckpointTuple(result.config, result.checkpoint, result.metadata)
        return result
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_pregel.py | import enum
import json
import logging
import operator
import re
import time
import uuid
import warnings
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from random import randrange
from typing import (
Annotated,
Any,
Dict,
Generator,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
cast,
get_type_hints,
)
import httpx
import pytest
from langchain_core.runnables import (
RunnableConfig,
RunnableLambda,
RunnableMap,
RunnablePassthrough,
RunnablePick,
)
from langsmith import traceable
from pydantic import BaseModel
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langgraph.channels.base import BaseChannel
from langgraph.channels.binop import BinaryOperatorAggregate
from langgraph.channels.context import Context
from langgraph.channels.ephemeral_value import EphemeralValue
from langgraph.channels.last_value import LastValue
from langgraph.channels.topic import Topic
from langgraph.channels.untracked_value import UntrackedValue
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
)
from langgraph.checkpoint.memory import MemorySaver
from langgraph.constants import (
CONFIG_KEY_NODE_FINISHED,
ERROR,
FF_SEND_V2,
PULL,
PUSH,
START,
)
from langgraph.errors import InvalidUpdateError, MultipleSubgraphsError, NodeInterrupt
from langgraph.graph import END, Graph, StateGraph
from langgraph.graph.message import MessageGraph, MessagesState, add_messages
from langgraph.managed.shared_value import SharedValue
from langgraph.prebuilt.chat_agent_executor import create_tool_calling_executor
from langgraph.prebuilt.tool_node import ToolNode
from langgraph.pregel import Channel, GraphRecursionError, Pregel, StateSnapshot
from langgraph.pregel.retry import RetryPolicy
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
from langgraph.types import (
Command,
Interrupt,
PregelTask,
Send,
StreamWriter,
interrupt,
)
from tests.any_str import AnyDict, AnyStr, AnyVersion, FloatBetween, UnsortedSequence
from tests.conftest import (
ALL_CHECKPOINTERS_SYNC,
ALL_STORES_SYNC,
SHOULD_CHECK_SNAPSHOTS,
)
from tests.fake_chat import FakeChatModel
from tests.fake_tracer import FakeTracer
from tests.memory_assert import MemorySaverAssertCheckpointMetadata
from tests.messages import (
_AnyIdAIMessage,
_AnyIdAIMessageChunk,
_AnyIdHumanMessage,
_AnyIdToolMessage,
)
# Module-level logger for this test module.
logger = logging.getLogger(__name__)

# define these objects to avoid importing langchain_core.agents
# and therefore avoid relying on core Pydantic version
# Local stand-in for langchain_core.agents.AgentAction: a request by an agent
# to execute one tool call. No class docstring is added on purpose — pydantic
# folds docstrings into the generated JSON schema.
class AgentAction(BaseModel):
    tool: str  # name of the tool to execute
    tool_input: Union[str, dict]  # payload passed to the tool
    log: str  # free-form extra information about the action
    type: Literal["AgentAction"] = "AgentAction"  # discriminator tag

    model_config = {
        "json_schema_extra": {
            "description": (
                """Represents a request to execute an action by an agent.
The action consists of the name of the tool to execute and the input to pass
to the tool. The log is used to pass along extra information about the action."""
            )
        }
    }
# Local stand-in for langchain_core.agents.AgentFinish (docstring kept
# verbatim — pydantic folds it into the generated JSON schema).
class AgentFinish(BaseModel):
    """Final return value of an ActionAgent.
    Agents return an AgentFinish when they have reached a stopping condition.
    """

    return_values: dict  # final outputs keyed by name
    log: str  # free-form extra information about the finish
    type: Literal["AgentFinish"] = "AgentFinish"  # discriminator tag

    model_config = {
        "json_schema_extra": {
            "description": (
                """Final return value of an ActionAgent.
Agents return an AgentFinish when they have reached a stopping condition."""
            )
        }
    }
def test_graph_validation() -> None:
    """Exercise compile-time validation rules of Graph and StateGraph."""

    def logic(inp: str) -> str:
        return ""

    # Minimal valid graph: one node is both entry and finish point.
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.set_entry_point("agent")
    workflow.set_finish_point("agent")
    assert workflow.compile(), "valid graph"

    # Accept a dead-end
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.set_entry_point("agent")
    workflow.compile()

    # A finish point alone is not enough — an entrypoint is required.
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.set_finish_point("agent")
    with pytest.raises(ValueError, match="must have an entrypoint"):
        workflow.compile()

    # Conditional edges plus a cycle back to the source are valid.
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END})
    workflow.add_edge("tools", "agent")
    assert workflow.compile(), "valid graph"

    # The entrypoint may be any node, not only the conditional source.
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    workflow.set_entry_point("tools")
    workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END})
    workflow.add_edge("tools", "agent")
    assert workflow.compile(), "valid graph"

    # Declaration order does not matter: edges may precede node definitions.
    workflow = Graph()
    workflow.set_entry_point("tools")
    workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END})
    workflow.add_edge("tools", "agent")
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    assert workflow.compile(), "valid graph"

    # A conditional branch target that is never defined is rejected.
    workflow = Graph()
    workflow.set_entry_point("tools")
    workflow.add_conditional_edges(
        "agent", logic, {"continue": "tools", "exit": END, "hmm": "extra"}
    )
    workflow.add_edge("tools", "agent")
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    with pytest.raises(ValueError, match="unknown"):  # extra is not defined
        workflow.compile()

    # Same for a plain edge pointing at an undefined node.
    workflow = Graph()
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges("agent", logic, {"continue": "tools", "exit": END})
    workflow.add_edge("tools", "extra")
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    with pytest.raises(ValueError, match="unknown"):  # extra is not defined
        workflow.compile()

    # Conditional edges with no path map may reach any defined node.
    workflow = Graph()
    workflow.add_node("agent", logic)
    workflow.add_node("tools", logic)
    workflow.add_node("extra", logic)
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges("agent", logic)
    workflow.add_edge("tools", "agent")
    # Accept, even though extra is dead-end
    workflow.compile()

    class State(TypedDict):
        hello: str

    # StateGraph: an edge starting at an undefined node fails at compile().
    graph = StateGraph(State)
    graph.add_node("start", lambda x: x)
    graph.add_edge("__start__", "start")
    graph.add_edge("unknown", "start")
    graph.add_edge("start", "__end__")
    with pytest.raises(ValueError, match="Found edge starting at unknown node "):
        graph.compile()

    def bad_reducer(a): ...

    # A reducer must take two arguments; a unary one is rejected eagerly.
    class BadReducerState(TypedDict):
        hello: Annotated[str, bad_reducer]

    with pytest.raises(ValueError, match="Invalid reducer"):
        StateGraph(BadReducerState)

    def node_b(state: State) -> State:
        return {"hello": "world"}

    # Two parallel writers to the same plain key raise at runtime.
    builder = StateGraph(State)
    builder.add_node("a", node_b)
    builder.add_node("b", node_b)
    builder.add_node("c", node_b)
    builder.set_entry_point("a")
    builder.add_edge("a", "b")
    builder.add_edge("a", "c")
    graph = builder.compile()

    with pytest.raises(InvalidUpdateError, match="At key 'hello'"):
        graph.invoke({"hello": "there"})
def test_graph_validation_with_command() -> None:
    """Command(goto=...) targets resolve even without static edges to them."""

    class State(TypedDict):
        foo: str
        bar: str

    def node_a(state: State):
        return Command(goto="b", update={"foo": "bar"})

    def node_b(state: State):
        return Command(goto=END, update={"bar": "baz"})

    workflow = StateGraph(State)
    workflow.add_node("a", node_a)
    workflow.add_node("b", node_b)
    workflow.add_edge(START, "a")
    compiled = workflow.compile()

    # Both nodes ran, each contributing its update.
    assert compiled.invoke({"foo": ""}) == {"foo": "bar", "bar": "baz"}
def test_checkpoint_errors() -> None:
    """Errors raised inside a checkpointer must propagate out of invoke()."""

    class FaultyGetCheckpointer(MemorySaver):
        def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
            raise ValueError("Faulty get_tuple")

    class FaultyPutCheckpointer(MemorySaver):
        def put(
            self,
            config: RunnableConfig,
            checkpoint: Checkpoint,
            metadata: CheckpointMetadata,
            new_versions: Optional[dict[str, Union[str, int, float]]] = None,
        ) -> RunnableConfig:
            raise ValueError("Faulty put")

    class FaultyPutWritesCheckpointer(MemorySaver):
        def put_writes(
            self, config: RunnableConfig, writes: List[Tuple[str, Any]], task_id: str
        ) -> RunnableConfig:
            raise ValueError("Faulty put_writes")

    class FaultyVersionCheckpointer(MemorySaver):
        def get_next_version(self, current: Optional[int], channel: BaseChannel) -> int:
            raise ValueError("Faulty get_next_version")

    def logic(inp: str) -> str:
        return ""

    builder = StateGraph(Annotated[str, operator.add])
    builder.add_node("agent", logic)
    builder.add_edge(START, "agent")

    graph = builder.compile(checkpointer=FaultyGetCheckpointer())
    with pytest.raises(ValueError, match="Faulty get_tuple"):
        graph.invoke("", {"configurable": {"thread_id": "thread-1"}})

    graph = builder.compile(checkpointer=FaultyPutCheckpointer())
    with pytest.raises(ValueError, match="Faulty put"):
        graph.invoke("", {"configurable": {"thread_id": "thread-1"}})

    graph = builder.compile(checkpointer=FaultyVersionCheckpointer())
    with pytest.raises(ValueError, match="Faulty get_next_version"):
        graph.invoke("", {"configurable": {"thread_id": "thread-1"}})

    # add parallel node
    # put_writes is only exercised when multiple nodes run in the same step.
    builder.add_node("parallel", logic)
    builder.add_edge(START, "parallel")
    graph = builder.compile(checkpointer=FaultyPutWritesCheckpointer())
    with pytest.raises(ValueError, match="Faulty put_writes"):
        graph.invoke("", {"configurable": {"thread_id": "thread-1"}})
def test_node_schemas_custom_output() -> None:
    """Nodes may declare narrower input schemas than the graph state, and
    the graph-level output schema filters what invoke()/stream() return."""

    class State(TypedDict):
        hello: str
        bye: str
        messages: Annotated[list[str], add_messages]

    class Output(TypedDict):
        messages: list[str]

    class StateForA(TypedDict):
        hello: str
        messages: Annotated[list[str], add_messages]

    def node_a(state: StateForA) -> State:
        # Only keys declared on StateForA are visible to this node.
        assert state == {
            "hello": "there",
            "messages": [_AnyIdHumanMessage(content="hello")],
        }

    class StateForB(TypedDict):
        bye: str
        now: int

    def node_b(state: StateForB):
        # 'now' has not been written yet, so it is absent here.
        assert state == {
            "bye": "world",
        }
        return {
            "now": 123,
            "hello": "again",
        }

    class StateForC(TypedDict):
        hello: str
        now: int

    def node_c(state: StateForC) -> StateForC:
        # Sees exactly the values written by node_b.
        assert state == {
            "hello": "again",
            "now": 123,
        }

    builder = StateGraph(State, output=Output)
    builder.add_node("a", node_a)
    builder.add_node("b", node_b)
    builder.add_node("c", node_c)
    builder.add_edge(START, "a")
    builder.add_edge("a", "b")
    builder.add_edge("b", "c")
    graph = builder.compile()

    # Output schema restricts the result to 'messages' only.
    assert graph.invoke({"hello": "there", "bye": "world", "messages": "hello"}) == {
        "messages": [_AnyIdHumanMessage(content="hello")],
    }

    builder = StateGraph(State, output=Output)
    builder.add_node("a", node_a)
    builder.add_node("b", node_b)
    builder.add_node("c", node_c)
    builder.add_edge(START, "a")
    builder.add_edge("a", "b")
    builder.add_edge("b", "c")
    graph = builder.compile()

    assert graph.invoke(
        {
            "hello": "there",
            "bye": "world",
            "messages": "hello",
            "now": 345,  # ignored because not in input schema
        }
    ) == {
        "messages": [_AnyIdHumanMessage(content="hello")],
    }

    assert [
        c
        for c in graph.stream(
            {
                "hello": "there",
                "bye": "world",
                "messages": "hello",
                "now": 345,  # ignored because not in input schema
            }
        )
    ] == [
        {"a": None},
        {"b": {"hello": "again", "now": 123}},
        {"c": None},
    ]
def test_reducer_before_first_node() -> None:
    """The add_messages reducer is applied to the input before the first
    node runs, for every supported annotation spelling (list, List,
    Sequence). The three near-identical variants are deliberate."""

    # Variant 1: builtin list annotation.
    class State(TypedDict):
        hello: str
        messages: Annotated[list[str], add_messages]

    def node_a(state: State) -> State:
        # The raw string input has already been coerced to a message list.
        assert state == {
            "hello": "there",
            "messages": [_AnyIdHumanMessage(content="hello")],
        }

    builder = StateGraph(State)
    builder.add_node("a", node_a)
    builder.set_entry_point("a")
    builder.set_finish_point("a")
    graph = builder.compile()
    assert graph.invoke({"hello": "there", "messages": "hello"}) == {
        "hello": "there",
        "messages": [_AnyIdHumanMessage(content="hello")],
    }

    # Variant 2: typing.List annotation.
    class State(TypedDict):
        hello: str
        messages: Annotated[List[str], add_messages]

    def node_a(state: State) -> State:
        assert state == {
            "hello": "there",
            "messages": [_AnyIdHumanMessage(content="hello")],
        }

    builder = StateGraph(State)
    builder.add_node("a", node_a)
    builder.set_entry_point("a")
    builder.set_finish_point("a")
    graph = builder.compile()
    assert graph.invoke({"hello": "there", "messages": "hello"}) == {
        "hello": "there",
        "messages": [_AnyIdHumanMessage(content="hello")],
    }

    # Variant 3: typing.Sequence annotation.
    class State(TypedDict):
        hello: str
        messages: Annotated[Sequence[str], add_messages]

    def node_a(state: State) -> State:
        assert state == {
            "hello": "there",
            "messages": [_AnyIdHumanMessage(content="hello")],
        }

    builder = StateGraph(State)
    builder.add_node("a", node_a)
    builder.set_entry_point("a")
    builder.set_finish_point("a")
    graph = builder.compile()
    assert graph.invoke({"hello": "there", "messages": "hello"}) == {
        "hello": "there",
        "messages": [_AnyIdHumanMessage(content="hello")],
    }
def test_invoke_single_process_in_out(mocker: MockerFixture) -> None:
    """Single-node Pregel: scalar in, scalar out; both I/O schemas are
    plain integers and the config schema is empty."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output")

    app = Pregel(
        nodes={
            "one": chain,
        },
        channels={
            "input": LastValue(int),
            "output": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # Equivalent high-level Graph for comparison.
    graph = Graph()
    graph.add_node("add_one", add_one)
    graph.set_entry_point("add_one")
    graph.set_finish_point("add_one")
    gapp = graph.compile()

    if SHOULD_CHECK_SNAPSHOTS:
        assert app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "integer",
        }
        assert app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "integer",
        }
        with warnings.catch_warnings():
            warnings.simplefilter("error")  # raise warnings as errors
            assert app.config_schema().model_json_schema() == {
                "properties": {},
                "title": "LangGraphConfig",
                "type": "object",
            }
    assert app.invoke(2) == 3
    assert app.invoke(2, output_keys=["output"]) == {"output": 3}
    assert repr(app), "does not raise recursion error"

    assert gapp.invoke(2, debug=True) == 3
@pytest.mark.parametrize(
    "falsy_value",
    [None, False, 0, "", [], {}, set(), frozenset(), 0.0, 0j],
)
def test_invoke_single_process_in_out_falsy_values(falsy_value: Any) -> None:
    """A node returning any falsy constant must surface that exact value
    from invoke(), rather than being dropped or replaced."""
    wf = Graph()
    wf.add_node("return_falsy_const", lambda *args, **kwargs: falsy_value)
    wf.set_entry_point("return_falsy_const")
    wf.set_finish_point("return_falsy_const")
    compiled = wf.compile()
    assert compiled.invoke(1) == falsy_value
def test_invoke_single_process_in_write_kwargs(mocker: MockerFixture) -> None:
    """Channel.write_to kwargs write extra channels: a fixed value and a
    value computed (via a callable) from the node's output."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    chain = (
        Channel.subscribe_to("input")
        | add_one
        | Channel.write_to("output", fixed=5, output_plus_one=lambda x: x + 1)
    )

    app = Pregel(
        nodes={"one": chain},
        channels={
            "input": LastValue(int),
            "output": LastValue(int),
            "fixed": LastValue(int),
            "output_plus_one": LastValue(int),
        },
        output_channels=["output", "fixed", "output_plus_one"],
        input_channels="input",
    )

    if SHOULD_CHECK_SNAPSHOTS:
        assert app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "integer",
        }
        assert app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None},
                "fixed": {"title": "Fixed", "type": "integer", "default": None},
                "output_plus_one": {
                    "title": "Output Plus One",
                    "type": "integer",
                    "default": None,
                },
            },
        }
    assert app.invoke(2) == {"output": 3, "fixed": 5, "output_plus_one": 4}
def test_invoke_single_process_in_out_dict(mocker: MockerFixture) -> None:
    """With a *list* of output channels, invoke() returns a dict result."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    pipeline = Channel.subscribe_to("input") | add_one | Channel.write_to("output")

    pregel_app = Pregel(
        nodes={"one": pipeline},
        channels={"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels=["output"],
    )

    if SHOULD_CHECK_SNAPSHOTS:
        # Input stays scalar; output becomes an object schema.
        assert pregel_app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "integer",
        }
        assert pregel_app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None}
            },
        }
    assert pregel_app.invoke(2) == {"output": 3}
def test_invoke_single_process_in_dict_out_dict(mocker: MockerFixture) -> None:
    """With lists for both input and output channels, both the invoke()
    argument and result are dicts keyed by channel name."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    pipeline = Channel.subscribe_to("input") | add_one | Channel.write_to("output")

    pregel_app = Pregel(
        nodes={"one": pipeline},
        channels={"input": LastValue(int), "output": LastValue(int)},
        input_channels=["input"],
        output_channels=["output"],
    )

    if SHOULD_CHECK_SNAPSHOTS:
        # Both sides are object schemas now.
        assert pregel_app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "object",
            "properties": {
                "input": {"title": "Input", "type": "integer", "default": None}
            },
        }
        assert pregel_app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None}
            },
        }
    assert pregel_app.invoke({"input": 2}) == {"output": 3}
def test_invoke_two_processes_in_out(mocker: MockerFixture) -> None:
    """Two chained nodes each add 1; a recursion limit of 1 aborts before
    completion; the Graph equivalent streams one update per step."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output")

    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )

    assert app.invoke(2) == 4

    # Two steps are needed, so a limit of 1 must fail.
    with pytest.raises(GraphRecursionError):
        app.invoke(2, {"recursion_limit": 1}, debug=1)

    graph = Graph()
    graph.add_node("add_one", add_one)
    graph.add_node("add_one_more", add_one)
    graph.set_entry_point("add_one")
    graph.set_finish_point("add_one_more")
    graph.add_edge("add_one", "add_one_more")
    gapp = graph.compile()

    assert gapp.invoke(2) == 4

    # Stream yields exactly one {node: value} update per step.
    for step, values in enumerate(gapp.stream(2, debug=1), start=1):
        if step == 1:
            assert values == {
                "add_one": 3,
            }
        elif step == 2:
            assert values == {
                "add_one_more": 4,
            }
        else:
            assert 0, f"{step}:{values}"
    assert step == 2
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_invoke_two_processes_in_out_interrupt(
    request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
    """interrupt_after_nodes pauses after node 'one'; resuming with None
    continues, a new input restarts, update_state injects a value, and the
    full checkpoint history (newest first) records every step."""
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output")

    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
        checkpointer=checkpointer,
        interrupt_after_nodes=["one"],
    )
    thread1 = {"configurable": {"thread_id": "1"}}
    thread2 = {"configurable": {"thread_id": "2"}}

    # start execution, stop at inbox
    assert app.invoke(2, thread1) is None

    # inbox == 3
    checkpoint = checkpointer.get(thread1)
    assert checkpoint is not None
    assert checkpoint["channel_values"]["inbox"] == 3

    # resume execution, finish
    assert app.invoke(None, thread1) == 4

    # start execution again, stop at inbox
    assert app.invoke(20, thread1) is None

    # inbox == 21
    checkpoint = checkpointer.get(thread1)
    assert checkpoint is not None
    assert checkpoint["channel_values"]["inbox"] == 21

    # send a new value in, interrupting the previous execution
    assert app.invoke(3, thread1) is None
    assert app.invoke(None, thread1) == 5

    # start execution again, stopping at inbox
    assert app.invoke(20, thread2) is None

    # inbox == 21
    snapshot = app.get_state(thread2)
    assert snapshot.values["inbox"] == 21
    assert snapshot.next == ("two",)

    # update the state, resume
    app.update_state(thread2, 25, as_node="one")
    assert app.invoke(None, thread2) == 26

    # no pending tasks
    snapshot = app.get_state(thread2)
    assert snapshot.next == ()

    # list history
    history = [c for c in app.get_state_history(thread1)]
    assert history == [
        StateSnapshot(
            values={"inbox": 4, "output": 5, "input": 3},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 6,
                "writes": {"two": 5},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[1].config,
        ),
        StateSnapshot(
            values={"inbox": 4, "output": 4, "input": 3},
            tasks=(PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 5}),),
            next=("two",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 5,
                "writes": {"one": None},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[2].config,
        ),
        StateSnapshot(
            values={"inbox": 21, "output": 4, "input": 3},
            tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 4}),),
            next=("one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "step": 4,
                "writes": {"input": 3},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[3].config,
        ),
        StateSnapshot(
            values={"inbox": 21, "output": 4, "input": 20},
            tasks=(PregelTask(AnyStr(), "two", (PULL, "two")),),
            next=("two",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 3,
                "writes": {"one": None},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[4].config,
        ),
        StateSnapshot(
            values={"inbox": 3, "output": 4, "input": 20},
            tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 21}),),
            next=("one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "step": 2,
                "writes": {"input": 20},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[5].config,
        ),
        StateSnapshot(
            values={"inbox": 3, "output": 4, "input": 2},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {"two": 4},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[6].config,
        ),
        StateSnapshot(
            values={"inbox": 3, "input": 2},
            tasks=(PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 4}),),
            next=("two",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {"one": None},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[7].config,
        ),
        StateSnapshot(
            values={"input": 2},
            tasks=(PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 3}),),
            next=("one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"input": 2},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        ),
    ]

    # re-running from any previous checkpoint should re-run nodes
    assert [c for c in app.stream(None, history[0].config, stream_mode="updates")] == []
    assert [c for c in app.stream(None, history[1].config, stream_mode="updates")] == [
        {"two": {"output": 5}},
    ]
    assert [c for c in app.stream(None, history[2].config, stream_mode="updates")] == [
        {"one": {"inbox": 4}},
        {"__interrupt__": ()},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_fork_always_re_runs_nodes(
    request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
    """A self-looping counter runs until 6; forking from any historical
    checkpoint re-executes the pending node(s) from that point."""
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    # Each call contributes 1 regardless of input (reducer is operator.add).
    add_one = mocker.Mock(side_effect=lambda _: 1)

    builder = StateGraph(Annotated[int, operator.add])
    builder.add_node("add_one", add_one)
    builder.add_edge(START, "add_one")
    builder.add_conditional_edges("add_one", lambda cnt: "add_one" if cnt < 6 else END)
    graph = builder.compile(checkpointer=checkpointer)

    thread1 = {"configurable": {"thread_id": "1"}}

    # start execution, stop at inbox
    assert [*graph.stream(1, thread1, stream_mode=["values", "updates"])] == [
        ("values", 1),
        ("updates", {"add_one": 1}),
        ("values", 2),
        ("updates", {"add_one": 1}),
        ("values", 3),
        ("updates", {"add_one": 1}),
        ("values", 4),
        ("updates", {"add_one": 1}),
        ("values", 5),
        ("updates", {"add_one": 1}),
        ("values", 6),
    ]

    # list history (newest first: values 6 down to 0)
    history = [c for c in graph.get_state_history(thread1)]
    assert history == [
        StateSnapshot(
            values=6,
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 5,
                "writes": {"add_one": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[1].config,
        ),
        StateSnapshot(
            values=5,
            tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
            next=("add_one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 4,
                "writes": {"add_one": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[2].config,
        ),
        StateSnapshot(
            values=4,
            tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
            next=("add_one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 3,
                "writes": {"add_one": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[3].config,
        ),
        StateSnapshot(
            values=3,
            tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
            next=("add_one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 2,
                "writes": {"add_one": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[4].config,
        ),
        StateSnapshot(
            values=2,
            tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
            next=("add_one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {"add_one": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[5].config,
        ),
        StateSnapshot(
            values=1,
            tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
            next=("add_one",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=history[6].config,
        ),
        StateSnapshot(
            values=0,
            tasks=(PregelTask(AnyStr(), "__start__", (PULL, "__start__"), result=1),),
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"__start__": 1},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        ),
    ]

    # forking from any previous checkpoint should re-run nodes
    assert [
        c for c in graph.stream(None, history[0].config, stream_mode="updates")
    ] == []
    assert [
        c for c in graph.stream(None, history[1].config, stream_mode="updates")
    ] == [
        {"add_one": 1},
    ]
    assert [
        c for c in graph.stream(None, history[2].config, stream_mode="updates")
    ] == [
        {"add_one": 1},
        {"add_one": 1},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_run_from_checkpoint_id_retains_previous_writes(
    request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
    """Re-running with an older checkpoint_id forks the thread while
    keeping all writes recorded up to that checkpoint."""
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")

    class MyState(TypedDict):
        myval: Annotated[int, operator.add]
        otherval: bool

    class Anode:
        # Stateful callable shared by both nodes: flips `switch` on every
        # call, so successive invocations alternate adding 2 and 1.
        def __init__(self):
            self.switch = False

        def __call__(self, state: MyState):
            self.switch = not self.switch
            return {"myval": 2 if self.switch else 1, "otherval": self.switch}

    builder = StateGraph(MyState)
    thenode = Anode()  # Fun.
    builder.add_node("node_one", thenode)
    builder.add_node("node_two", thenode)
    builder.add_edge(START, "node_one")

    def _getedge(src: str):
        # Route to the *other* node when `otherval` is set, back to `src`
        # otherwise, and end once the accumulated total exceeds 3.
        swap = "node_one" if src == "node_two" else "node_two"

        def _edge(st: MyState) -> Literal["__end__", "node_one", "node_two"]:
            if st["myval"] > 3:
                return END
            if st["otherval"]:
                return swap
            return src

        return _edge

    builder.add_conditional_edges("node_one", _getedge("node_one"))
    builder.add_conditional_edges("node_two", _getedge("node_two"))
    graph = builder.compile(checkpointer=checkpointer)

    thread_id = uuid.uuid4()
    thread1 = {"configurable": {"thread_id": str(thread_id)}}

    result = graph.invoke({"myval": 1}, thread1)
    assert result["myval"] == 4
    history = [c for c in graph.get_state_history(thread1)]

    assert len(history) == 4
    assert history[-1].values == {"myval": 0}
    assert history[0].values == {"myval": 4, "otherval": False}

    # Fork: resume from the second-to-latest checkpoint of the first run.
    second_run_config = {
        **thread1,
        "configurable": {
            **thread1["configurable"],
            "checkpoint_id": history[1].config["configurable"]["checkpoint_id"],
        },
    }
    second_result = graph.invoke(None, second_run_config)
    assert second_result == {"myval": 5, "otherval": True}

    new_history = [
        c
        for c in graph.get_state_history(
            {"configurable": {"thread_id": str(thread_id), "checkpoint_ns": ""}}
        )
    ]

    # One extra checkpoint, and every shared entry is unchanged.
    assert len(new_history) == len(history) + 1
    for original, new in zip(history, new_history[1:]):
        assert original.values == new.values
        assert original.next == new.next
        assert original.metadata["step"] == new.metadata["step"]

    def _get_tasks(hist: list, start: int):
        return [h.tasks for h in hist[start:]]

    assert _get_tasks(new_history, 1) == _get_tasks(history, 0)
def test_invoke_two_processes_in_dict_out(mocker: MockerFixture) -> None:
    """Topic channel fan-in: node 'two' consumes its inbox as a batch, and
    debug stream mode reports task/task_result events for every step."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = (
        Channel.subscribe_to("inbox")
        | RunnableLambda(add_one).batch
        | RunnablePassthrough(lambda _: time.sleep(0.1))
        | Channel.write_to("output").batch
    )

    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": Topic(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels=["input", "inbox"],
        stream_channels=["output", "inbox"],
        output_channels=["output"],
    )

    # [12 + 1, 2 + 1 + 1]
    assert [
        *app.stream(
            {"input": 2, "inbox": 12}, output_keys="output", stream_mode="updates"
        )
    ] == [
        {"one": None},
        {"two": 13},
        {"two": 4},
    ]
    assert [*app.stream({"input": 2, "inbox": 12}, output_keys="output")] == [
        13,
        4,
    ]

    assert [*app.stream({"input": 2, "inbox": 12}, stream_mode="updates")] == [
        {"one": {"inbox": 3}},
        {"two": {"output": 13}},
        {"two": {"output": 4}},
    ]
    assert [*app.stream({"input": 2, "inbox": 12})] == [
        {"inbox": [3], "output": 13},
        {"output": 4},
    ]
    # Debug mode interleaves "task" (scheduled) and "task_result" events.
    assert [*app.stream({"input": 2, "inbox": 12}, stream_mode="debug")] == [
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "one",
                "input": 2,
                "triggers": ["input"],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "input": [12],
                "triggers": ["inbox"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "one",
                "result": [("inbox", 3)],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "result": [("output", 13)],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "input": [3],
                "triggers": ["inbox"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "result": [("output", 4)],
                "error": None,
                "interrupts": [],
            },
        },
    ]
def test_batch_two_processes_in_out() -> None:
    """Batching a two-node chain keeps per-input results in order, even
    though larger inputs sleep longer and finish later."""

    def add_one_with_delay(inp: int) -> int:
        # Sleep proportional to the input so completion order differs
        # from submission order.
        time.sleep(inp / 10)
        return inp + 1

    first = Channel.subscribe_to("input") | add_one_with_delay | Channel.write_to("one")
    second = (
        Channel.subscribe_to("one") | add_one_with_delay | Channel.write_to("output")
    )

    pregel_app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "one": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )

    assert pregel_app.batch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7]
    assert pregel_app.batch([3, 2, 1, 3, 5], output_keys=["output"]) == [
        {"output": 5},
        {"output": 4},
        {"output": 3},
        {"output": 5},
        {"output": 7},
    ]

    # Same behavior through the high-level Graph API.
    wf = Graph()
    wf.add_node("add_one", add_one_with_delay)
    wf.add_node("add_one_more", add_one_with_delay)
    wf.set_entry_point("add_one")
    wf.set_finish_point("add_one_more")
    wf.add_edge("add_one", "add_one_more")
    compiled = wf.compile()

    assert compiled.batch([3, 2, 1, 3, 5]) == [5, 4, 3, 5, 7]
def test_invoke_many_processes_in_out(mocker: MockerFixture) -> None:
    """Invoke a 100-node linear chain; each node adds 1 to its input.

    Verifies both sequential and concurrent (thread-pool) invocation give
    the same result: input + test_size.
    """
    test_size = 100
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # Channel names are stringified integers: "-1", "0", ..., "97".
    nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")}
    for i in range(test_size - 2):
        nodes[str(i)] = (
            Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i))
        )
    # NOTE: relies on the for-loop variable `i` leaking out of the loop above
    # (i == test_size - 3 here), wiring "last" to the final numbered channel.
    nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output")
    app = Pregel(
        nodes=nodes,
        channels={str(i): LastValue(int) for i in range(-1, test_size - 2)}
        | {"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    for _ in range(10):
        assert app.invoke(2, {"recursion_limit": test_size}) == 2 + test_size
    # Concurrent invocations must not interfere with each other.
    with ThreadPoolExecutor() as executor:
        assert [
            *executor.map(app.invoke, [2] * 10, [{"recursion_limit": test_size}] * 10)
        ] == [2 + test_size] * 10
def test_batch_many_processes_in_out(mocker: MockerFixture) -> None:
    """Batch-invoke a 100-node linear chain; each node adds 1.

    Same topology as test_invoke_many_processes_in_out but exercised via
    batch(), sequentially and from a thread pool.
    """
    test_size = 100
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")}
    for i in range(test_size - 2):
        nodes[str(i)] = (
            Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i))
        )
    # NOTE: relies on the for-loop variable `i` leaking out of the loop above,
    # wiring "last" to the final numbered channel.
    nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output")
    app = Pregel(
        nodes=nodes,
        channels={str(i): LastValue(int) for i in range(-1, test_size - 2)}
        | {"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    for _ in range(3):
        assert app.batch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) == [
            2 + test_size,
            1 + test_size,
            3 + test_size,
            4 + test_size,
            5 + test_size,
        ]
    # Concurrent batches must not interfere with each other.
    with ThreadPoolExecutor() as executor:
        assert [
            *executor.map(
                app.batch, [[2, 1, 3, 4, 5]] * 3, [{"recursion_limit": test_size}] * 3
            )
        ] == [
            [2 + test_size, 1 + test_size, 3 + test_size, 4 + test_size, 5 + test_size]
        ] * 3
def test_invoke_two_processes_two_in_two_out_invalid(mocker: MockerFixture) -> None:
    """Two nodes writing the same LastValue channel in one step must raise.

    Checks the error surfaces both at the Pregel level and via StateGraph
    (where two nodes update the same plain, non-reducer state key).
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # Both nodes write to the same single-value "output" channel.
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    two = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={"output": LastValue(int), "input": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    with pytest.raises(InvalidUpdateError):
        # LastValue channels can only be updated once per iteration
        app.invoke(2)
    class State(TypedDict):
        hello: str  # plain key without a reducer: concurrent updates conflict
    def my_node(input: State) -> State:
        return {"hello": "world"}
    builder = StateGraph(State)
    builder.add_node("one", my_node)
    builder.add_node("two", my_node)
    # Route to both nodes in the same superstep so they update "hello" at once.
    builder.set_conditional_entry_point(lambda _: ["one", "two"])
    graph = builder.compile()
    with pytest.raises(InvalidUpdateError, match="At key 'hello'"):
        graph.invoke({"hello": "there"}, debug=True)
def test_invoke_two_processes_two_in_two_out_valid(mocker: MockerFixture) -> None:
    """Two nodes may write the same channel when it is a Topic (accumulator).

    Unlike LastValue, a Topic channel collects all updates from one step
    into a sequence instead of raising InvalidUpdateError.
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    two = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "input": LastValue(int),
            "output": Topic(int),  # accepts multiple writes per step
        },
        input_channels="input",
        output_channels="output",
    )
    # An Inbox channel accumulates updates into a sequence
    assert app.invoke(2) == [3, 3]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_invoke_checkpoint_two(
    mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Checkpointed accumulation across invocations, with retry and errors.

    Verifies that: state persists per thread_id; a ConnectionError is
    retried (RetryPolicy) and succeeds on second attempt; a ValueError is
    recorded as a pending error write without advancing the checkpoint;
    and separate thread_ids keep independent state.
    """
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"])
    errored_once = False
    def raise_if_above_10(input: int) -> int:
        # Raise ConnectionError exactly once for inputs > 4 to exercise the
        # retry policy; always raise ValueError for inputs > 10.
        nonlocal errored_once
        if input > 4:
            if errored_once:
                pass
            else:
                errored_once = True
                raise ConnectionError("I will be retried")
        if input > 10:
            raise ValueError("Input is too large")
        return input
    one = (
        Channel.subscribe_to(["input"]).join(["total"])
        | add_one
        | Channel.write_to("output", "total")
        | raise_if_above_10
    )
    app = Pregel(
        nodes={"one": one},
        channels={
            "total": BinaryOperatorAggregate(int, operator.add),
            "input": LastValue(int),
            "output": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
        checkpointer=checkpointer,
        retry_policy=RetryPolicy(),
    )
    # total starts out as 0, so output is 0+2=2
    assert app.invoke(2, {"configurable": {"thread_id": "1"}}) == 2
    checkpoint = checkpointer.get({"configurable": {"thread_id": "1"}})
    assert checkpoint is not None
    assert checkpoint["channel_values"].get("total") == 2
    # total is now 2, so output is 2+3=5
    assert app.invoke(3, {"configurable": {"thread_id": "1"}}) == 5
    assert errored_once, "errored and retried"
    checkpoint_tup = checkpointer.get_tuple({"configurable": {"thread_id": "1"}})
    assert checkpoint_tup is not None
    assert checkpoint_tup.checkpoint["channel_values"].get("total") == 7
    # total is now 2+5=7, so output would be 7+4=11, but raises ValueError
    with pytest.raises(ValueError):
        app.invoke(4, {"configurable": {"thread_id": "1"}})
    # checkpoint is not updated, error is recorded
    checkpoint_tup = checkpointer.get_tuple({"configurable": {"thread_id": "1"}})
    assert checkpoint_tup is not None
    assert checkpoint_tup.checkpoint["channel_values"].get("total") == 7
    assert checkpoint_tup.pending_writes == [
        (AnyStr(), ERROR, "ValueError('Input is too large')")
    ]
    # on a new thread, total starts out as 0, so output is 0+5=5
    assert app.invoke(5, {"configurable": {"thread_id": "2"}}) == 5
    checkpoint = checkpointer.get({"configurable": {"thread_id": "1"}})
    assert checkpoint is not None
    assert checkpoint["channel_values"].get("total") == 7
    checkpoint = checkpointer.get({"configurable": {"thread_id": "2"}})
    assert checkpoint is not None
    assert checkpoint["channel_values"].get("total") == 5
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_pending_writes_resume(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Successful writes from a partially-failed step survive as pending
    writes and are reused on resume.

    Node "one" succeeds while node "two" fails (after retries) in the same
    superstep; on resume, "one" is not re-run — its saved write is applied —
    and once "two" is allowed to succeed the final state reflects all writes.
    Also asserts the exact shape of all three persisted checkpoints.
    """
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    class State(TypedDict):
        value: Annotated[int, operator.add]
    class AwhileMaker:
        # Callable node that sleeps, then returns a value or raises, and
        # counts how many times it was invoked.
        def __init__(self, sleep: float, rtn: Union[Dict, Exception]) -> None:
            self.sleep = sleep
            self.rtn = rtn
            self.reset()
        def __call__(self, input: State) -> Any:
            self.calls += 1
            time.sleep(self.sleep)
            if isinstance(self.rtn, Exception):
                raise self.rtn
            else:
                return self.rtn
        def reset(self):
            self.calls = 0
    one = AwhileMaker(0.1, {"value": 2})
    two = AwhileMaker(0.3, ConnectionError("I'm not good"))
    builder = StateGraph(State)
    builder.add_node("one", one)
    builder.add_node("two", two, retry=RetryPolicy(max_attempts=2))
    builder.add_edge(START, "one")
    builder.add_edge(START, "two")
    graph = builder.compile(checkpointer=checkpointer)
    thread1: RunnableConfig = {"configurable": {"thread_id": "1"}}
    with pytest.raises(ConnectionError, match="I'm not good"):
        graph.invoke({"value": 1}, thread1)
    # both nodes should have been called once
    assert one.calls == 1
    assert two.calls == 2  # two attempts
    # latest checkpoint should be before nodes "one", "two"
    # but we should have applied the write from "one"
    state = graph.get_state(thread1)
    assert state is not None
    assert state.values == {"value": 3}
    assert state.next == ("two",)
    assert state.tasks == (
        PregelTask(AnyStr(), "one", (PULL, "one"), result={"value": 2}),
        PregelTask(AnyStr(), "two", (PULL, "two"), 'ConnectionError("I\'m not good")'),
    )
    assert state.metadata == {
        "parents": {},
        "source": "loop",
        "step": 0,
        "writes": None,
        "thread_id": "1",
    }
    # get_state with checkpoint_id should not apply any pending writes
    state = graph.get_state(state.config)
    assert state is not None
    assert state.values == {"value": 1}
    assert state.next == ("one", "two")
    # should contain pending write of "one"
    checkpoint = checkpointer.get_tuple(thread1)
    assert checkpoint is not None
    # should contain error from "two"
    expected_writes = [
        (AnyStr(), "one", "one"),
        (AnyStr(), "value", 2),
        (AnyStr(), ERROR, 'ConnectionError("I\'m not good")'),
    ]
    assert len(checkpoint.pending_writes) == 3
    assert all(w in expected_writes for w in checkpoint.pending_writes)
    # both non-error pending writes come from same task
    non_error_writes = [w for w in checkpoint.pending_writes if w[1] != ERROR]
    assert non_error_writes[0][0] == non_error_writes[1][0]
    # error write is from the other task
    error_write = next(w for w in checkpoint.pending_writes if w[1] == ERROR)
    assert error_write[0] != non_error_writes[0][0]
    # resume execution
    with pytest.raises(ConnectionError, match="I'm not good"):
        graph.invoke(None, thread1)
    # node "one" succeeded previously, so shouldn't be called again
    assert one.calls == 1
    # node "two" should have been called once again
    assert two.calls == 4  # two attempts before + two attempts now
    # confirm no new checkpoints saved
    state_two = graph.get_state(thread1)
    assert state_two.metadata == state.metadata
    # resume execution, without exception
    two.rtn = {"value": 3}
    # both the pending write and the new write were applied, 1 + 2 + 3 = 6
    assert graph.invoke(None, thread1) == {"value": 6}
    # check all final checkpoints
    checkpoints = [c for c in checkpointer.list(thread1)]
    # we should have 3
    assert len(checkpoints) == 3
    # the last one not too interesting for this test
    assert checkpoints[0] == CheckpointTuple(
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        checkpoint={
            "v": 1,
            "id": AnyStr(),
            "ts": AnyStr(),
            "pending_sends": [],
            "versions_seen": {
                "one": {
                    "start:one": AnyVersion(),
                },
                "two": {
                    "start:two": AnyVersion(),
                },
                "__input__": {},
                "__start__": {
                    "__start__": AnyVersion(),
                },
                "__interrupt__": {
                    "value": AnyVersion(),
                    "__start__": AnyVersion(),
                    "start:one": AnyVersion(),
                    "start:two": AnyVersion(),
                },
            },
            "channel_versions": {
                "one": AnyVersion(),
                "two": AnyVersion(),
                "value": AnyVersion(),
                "__start__": AnyVersion(),
                "start:one": AnyVersion(),
                "start:two": AnyVersion(),
            },
            "channel_values": {"one": "one", "two": "two", "value": 6},
        },
        metadata={
            "parents": {},
            "step": 1,
            "source": "loop",
            "writes": {"one": {"value": 2}, "two": {"value": 3}},
            "thread_id": "1",
        },
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": checkpoints[1].config["configurable"]["checkpoint_id"],
            }
        },
        pending_writes=[],
    )
    # the previous one we assert that pending writes contains both
    # - original error
    # - successful writes from resuming after preventing error
    assert checkpoints[1] == CheckpointTuple(
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        checkpoint={
            "v": 1,
            "id": AnyStr(),
            "ts": AnyStr(),
            "pending_sends": [],
            "versions_seen": {
                "__input__": {},
                "__start__": {
                    "__start__": AnyVersion(),
                },
            },
            "channel_versions": {
                "value": AnyVersion(),
                "__start__": AnyVersion(),
                "start:one": AnyVersion(),
                "start:two": AnyVersion(),
            },
            "channel_values": {
                "value": 1,
                "start:one": "__start__",
                "start:two": "__start__",
            },
        },
        metadata={
            "parents": {},
            "step": 0,
            "source": "loop",
            "writes": None,
            "thread_id": "1",
        },
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": checkpoints[2].config["configurable"]["checkpoint_id"],
            }
        },
        pending_writes=UnsortedSequence(
            (AnyStr(), "one", "one"),
            (AnyStr(), "value", 2),
            (AnyStr(), "__error__", 'ConnectionError("I\'m not good")'),
            (AnyStr(), "two", "two"),
            (AnyStr(), "value", 3),
        ),
    )
    assert checkpoints[2] == CheckpointTuple(
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        checkpoint={
            "v": 1,
            "id": AnyStr(),
            "ts": AnyStr(),
            "pending_sends": [],
            "versions_seen": {"__input__": {}},
            "channel_versions": {
                "__start__": AnyVersion(),
            },
            "channel_values": {"__start__": {"value": 1}},
        },
        metadata={
            "parents": {},
            "step": -1,
            "source": "input",
            "writes": {"__start__": {"value": 1}},
            "thread_id": "1",
        },
        parent_config=None,
        pending_writes=UnsortedSequence(
            (AnyStr(), "value", 1),
            (AnyStr(), "start:one", "__start__"),
            (AnyStr(), "start:two", "__start__"),
        ),
    )
def test_cond_edge_after_send() -> None:
    """A conditional edge fires after Send-dispatched tasks complete.

    Node "1" fans out to node "2" twice via Send; node "2"'s conditional
    edge then routes (once) to "3", yielding the accumulated list below.
    """
    class Node:
        # Minimal callable node; __name__ is set so StateGraph can infer
        # the node name from the callable.
        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)
        def __call__(self, state):
            return [self.name]
    def send_for_fun(state):
        # Dispatch two parallel copies of node "2".
        return [Send("2", state), Send("2", state)]
    def route_to_three(state) -> Literal["3"]:
        return "3"
    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()
    assert graph.invoke(["0"]) == ["0", "1", "2", "2", "3"]
def test_concurrent_emit_sends() -> None:
    """Two nodes emitting Sends concurrently; ordering differs by Send
    protocol version (FF_SEND_V2 flag).

    Nodes "1" and "1.1" each emit Send packets targeting node "2"; node
    "2" then routes to "3". The expected accumulation order depends on
    whether Send V2 (eager) or V1 (deferred) semantics are active.
    """
    class Node:
        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)
        def __call__(self, state):
            # List input: plain node invocation; otherwise a Send payload,
            # recorded as "name|payload".
            return (
                [self.name]
                if isinstance(state, list)
                else ["|".join((self.name, str(state)))]
            )
    def send_for_fun(state):
        return [Send("2", 1), Send("2", 2), "3.1"]
    def send_for_profit(state):
        return [Send("2", 3), Send("2", 4)]
    def route_to_three(state) -> Literal["3"]:
        return "3"
    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("1.1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_edge(START, "1")
    builder.add_edge(START, "1.1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("1.1", send_for_profit)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()
    # Note the conditional is parenthesized so == applies to both branches.
    assert graph.invoke(["0"]) == (
        [
            "0",
            "1",
            "1.1",
            "2|1",
            "2|2",
            "2|3",
            "2|4",
            "3",
            "3.1",
        ]
        if FF_SEND_V2
        else [
            "0",
            "1",
            "1.1",
            "3.1",
            "2|1",
            "2|2",
            "2|3",
            "2|4",
            "3",
        ]
    )
def test_send_sequences() -> None:
    """Send payloads wrapping Command(goto=Send(...)) chain further sends.

    Node "2" receives Command inputs whose ``goto`` spawns additional Send
    tasks targeting node "2" again; the expected traversal order differs
    between Send V2 and V1 semantics.
    """
    class Node:
        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)
        def __call__(self, state):
            update = (
                [self.name]
                if isinstance(state, list)
                else ["|".join((self.name, str(state)))]
            )
            if isinstance(state, Command):
                # Propagate the incoming Command (carrying its goto Send)
                # alongside our own state update.
                return [state, Command(update=update)]
            else:
                return update
    def send_for_fun(state):
        return [
            Send("2", Command(goto=Send("2", 3))),
            Send("2", Command(goto=Send("2", 4))),
            "3.1",
        ]
    def route_to_three(state) -> Literal["3"]:
        return "3"
    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()
    # BUG FIX: this was `assert (graph.invoke(["0"]) == [...] if FF_SEND_V2
    # else [...])`, which parses as `(invoke == listA) if FF_SEND_V2 else
    # listB` because `==` binds tighter than the conditional expression.
    # With FF_SEND_V2 falsy the assert checked a non-empty list literal —
    # always truthy — so the V1 branch could never fail. Parenthesizing the
    # conditional applies the comparison in both branches (matching
    # test_concurrent_emit_sends above).
    assert graph.invoke(["0"]) == (
        [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "2|3",
            "2|4",
            "3",
            "3.1",
        ]
        if FF_SEND_V2
        else [
            "0",
            "1",
            "3.1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "3",
            "2|3",
            "2|4",
            "3",
        ]
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_send_dedupe_on_resume(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Resuming after an interrupt must not re-run Send tasks that already
    completed (Send V2 deduplication).

    The "flaky" node interrupts on its first call only. After resume,
    node "2"'s tick count is unchanged (its writes were recovered from the
    checkpoint) while "flaky" runs once more. Also pins the full state
    history, including the nested __pregel_push task paths.
    """
    if not FF_SEND_V2:
        pytest.skip("Send deduplication is only available in Send V2")
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    class InterruptOnce:
        # Raises NodeInterrupt on the first call, succeeds afterwards.
        ticks: int = 0
        def __call__(self, state):
            self.ticks += 1
            if self.ticks == 1:
                raise NodeInterrupt("Bahh")
            return ["|".join(("flaky", str(state)))]
    class Node:
        # Callable node that counts invocations and echoes Send payloads.
        def __init__(self, name: str):
            self.name = name
            self.ticks = 0
            setattr(self, "__name__", name)
        def __call__(self, state):
            self.ticks += 1
            update = (
                [self.name]
                if isinstance(state, list)
                else ["|".join((self.name, str(state)))]
            )
            if isinstance(state, Command):
                return replace(state, update=update)
            else:
                return update
    def send_for_fun(state):
        return [
            Send("2", Command(goto=Send("2", 3))),
            Send("2", Command(goto=Send("flaky", 4))),
            "3.1",
        ]
    def route_to_three(state) -> Literal["3"]:
        return "3"
    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_node("flaky", InterruptOnce())
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile(checkpointer=checkpointer)
    thread1 = {"configurable": {"thread_id": "1"}}
    # First run stops at the "flaky" interrupt; "flaky|4" is not yet applied.
    assert graph.invoke(["0"], thread1, debug=1) == [
        "0",
        "1",
        "2|Command(goto=Send(node='2', arg=3))",
        "2|Command(goto=Send(node='flaky', arg=4))",
        "2|3",
    ]
    assert builder.nodes["2"].runnable.func.ticks == 3
    assert builder.nodes["flaky"].runnable.func.ticks == 1
    # check state
    state = graph.get_state(thread1)
    assert state.next == ("flaky",)
    # check history
    history = [c for c in graph.get_state_history(thread1)]
    assert len(history) == 2
    # resume execution
    assert graph.invoke(None, thread1, debug=1) == [
        "0",
        "1",
        "2|Command(goto=Send(node='2', arg=3))",
        "2|Command(goto=Send(node='flaky', arg=4))",
        "2|3",
        "flaky|4",
        "3",
        "3.1",
    ]
    # node "2" doesn't get called again, as we recover writes saved before
    assert builder.nodes["2"].runnable.func.ticks == 3
    # node "flaky" gets called again, as it was interrupted
    assert builder.nodes["flaky"].runnable.func.ticks == 2
    # check state
    state = graph.get_state(thread1)
    assert state.next == ()
    # check history
    history = [c for c in graph.get_state_history(thread1)]
    assert (
        history[1]
        == [
            StateSnapshot(
                values=[
                    "0",
                    "1",
                    "2|Command(goto=Send(node='2', arg=3))",
                    "2|Command(goto=Send(node='flaky', arg=4))",
                    "2|3",
                    "flaky|4",
                    "3",
                    "3.1",
                ],
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": {"3": ["3"], "3.1": ["3.1"]},
                    "thread_id": "1",
                    "step": 2,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                tasks=(),
            ),
            StateSnapshot(
                values=[
                    "0",
                    "1",
                    "2|Command(goto=Send(node='2', arg=3))",
                    "2|Command(goto=Send(node='flaky', arg=4))",
                    "2|3",
                    "flaky|4",
                ],
                next=("3", "3.1"),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": {
                        "1": ["1"],
                        "2": [
                            ["2|Command(goto=Send(node='2', arg=3))"],
                            ["2|Command(goto=Send(node='flaky', arg=4))"],
                            ["2|3"],
                        ],
                        "flaky": ["flaky|4"],
                    },
                    "thread_id": "1",
                    "step": 1,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="3",
                        path=("__pregel_pull", "3"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["3"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="3.1",
                        path=("__pregel_pull", "3.1"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["3.1"],
                    ),
                ),
            ),
            StateSnapshot(
                values=["0"],
                next=("1", "2", "2", "2", "flaky"),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": None,
                    "thread_id": "1",
                    "step": 0,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="1",
                        path=("__pregel_pull", "1"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["1"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            ("__pregel_pull", "1"),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|Command(goto=Send(node='2', arg=3))"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            ("__pregel_pull", "1"),
                            3,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|Command(goto=Send(node='flaky', arg=4))"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            (
                                "__pregel_push",
                                ("__pregel_pull", "1"),
                                2,
                                AnyStr(),
                            ),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|3"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="flaky",
                        path=(
                            "__pregel_push",
                            (
                                "__pregel_push",
                                ("__pregel_pull", "1"),
                                3,
                                AnyStr(),
                            ),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(Interrupt(value="Bahh", when="during"),),
                        state=None,
                        result=["flaky|4"],
                    ),
                ),
            ),
            StateSnapshot(
                values=[],
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "input",
                    "writes": {"__start__": ["0"]},
                    "thread_id": "1",
                    "step": -1,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config=None,
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="__start__",
                        path=("__pregel_pull", "__start__"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["0"],
                    ),
                ),
            ),
        ][1]
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_send_react_interrupt(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """ReAct-style agent whose tool node is dispatched via Send, with
    interrupt_before on the tool.

    Exercises three flows: plain interrupt-resume; interrupt, then
    update_state removing the tool call (the pending Send task must be
    cleared and the tool never run); and interrupt, then update_state
    replacing the tool call (old Send cleared, a new one created and run
    on resume). The latter two flows require Send V2.
    """
    from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    ai_message = AIMessage(
        "",
        id="ai1",
        tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())],
    )
    def agent(state):
        return {"messages": ai_message}
    def route(state):
        # Dispatch one Send per tool call on the last AI message.
        if isinstance(state["messages"][-1], AIMessage):
            return [
                Send(call["name"], call) for call in state["messages"][-1].tool_calls
            ]
    foo_called = 0
    def foo(call: ToolCall):
        nonlocal foo_called
        foo_called += 1
        return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])}
    builder = StateGraph(MessagesState)
    builder.add_node(agent)
    builder.add_node(foo)
    builder.add_edge(START, "agent")
    builder.add_conditional_edges("agent", route)
    graph = builder.compile()
    # Without a checkpointer/interrupt the tool runs immediately.
    assert graph.invoke({"messages": [HumanMessage("hello")]}) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    # simple interrupt-resume flow
    foo_called = 0
    graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
    thread1 = {"configurable": {"thread_id": "1"}}
    assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
        ]
    }
    assert foo_called == 0
    assert graph.invoke(None, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    # interrupt-update-resume flow
    foo_called = 0
    graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
    thread1 = {"configurable": {"thread_id": "2"}}
    assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
        ]
    }
    assert foo_called == 0
    # The remaining flows depend on Send V2 pending-task semantics.
    if not FF_SEND_V2:
        return
    # get state should show the pending task
    state = graph.get_state(thread1)
    assert state == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        },
        next=("foo",),
        config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 0,
            "source": "loop",
            "writes": None,
            "parents": {},
            "thread_id": "2",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="agent",
                path=("__pregel_pull", "agent"),
                error=None,
                interrupts=(),
                state=None,
                result={
                    "messages": AIMessage(
                        content="",
                        additional_kwargs={},
                        response_metadata={},
                        id="ai1",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    )
                },
            ),
            PregelTask(
                id=AnyStr(),
                name="foo",
                path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                error=None,
                interrupts=(),
                state=None,
                result=None,
            ),
        ),
    )
    # remove the tool call, clearing the pending task
    graph.update_state(
        thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])}
    )
    # tool call no longer in pending tasks
    assert graph.get_state(thread1) == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="Bye now",
                    tool_calls=[],
                ),
            ]
        },
        next=(),
        config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 1,
            "source": "update",
            "writes": {
                "agent": {
                    "messages": _AnyIdAIMessage(
                        content="Bye now",
                        tool_calls=[],
                    )
                }
            },
            "parents": {},
            "thread_id": "2",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(),
    )
    # tool call not executed
    assert graph.invoke(None, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(content="Bye now"),
        ]
    }
    assert foo_called == 0
    # interrupt-update-resume flow, creating new Send in update call
    foo_called = 0
    graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
    thread1 = {"configurable": {"thread_id": "3"}}
    assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
        ]
    }
    assert foo_called == 0
    # get state should show the pending task
    state = graph.get_state(thread1)
    assert state == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        },
        next=("foo",),
        config={
            "configurable": {
                "thread_id": "3",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 0,
            "source": "loop",
            "writes": None,
            "parents": {},
            "thread_id": "3",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "3",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="agent",
                path=("__pregel_pull", "agent"),
                error=None,
                interrupts=(),
                state=None,
                result={
                    "messages": AIMessage(
                        "",
                        id="ai1",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    )
                },
            ),
            PregelTask(
                id=AnyStr(),
                name="foo",
                path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                error=None,
                interrupts=(),
                state=None,
                result=None,
            ),
        ),
    )
    # replace the tool call, should clear previous send, create new one
    graph.update_state(
        thread1,
        {
            "messages": AIMessage(
                "",
                id=ai_message.id,
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [4, 5, 6]},
                        "id": "tool1",
                        "type": "tool_call",
                    }
                ],
            )
        },
    )
    # prev tool call no longer in pending tasks, new tool call is
    assert graph.get_state(thread1) == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [4, 5, 6]},
                            "id": "tool1",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        },
        next=("foo",),
        config={
            "configurable": {
                "thread_id": "3",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 1,
            "source": "update",
            "writes": {
                "agent": {
                    "messages": _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [4, 5, 6]},
                                "id": "tool1",
                                "type": "tool_call",
                            }
                        ],
                    )
                }
            },
            "parents": {},
            "thread_id": "3",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "3",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="foo",
                path=("__pregel_push", (), 0, AnyStr()),
                error=None,
                interrupts=(),
                state=None,
                result=None,
            ),
        ),
    )
    # prev tool call not executed, new tool call is
    assert graph.invoke(None, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            AIMessage(
                "",
                id="ai1",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [4, 5, 6]},
                        "id": "tool1",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(content="{'hi': [4, 5, 6]}", tool_call_id="tool1"),
        ]
    }
    assert foo_called == 1
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_send_react_interrupt_control(
    request: pytest.FixtureRequest, checkpointer_name: str, snapshot: SnapshotAssertion
) -> None:
    """Same ReAct interrupt scenarios as test_send_react_interrupt, but the
    agent dispatches the tool via Command(goto=Send(...)) instead of a
    conditional edge.

    Covers: mermaid render snapshot, plain run, interrupt-resume, and
    (under Send V2) interrupt + update_state removing the tool call so the
    pending Send task is cleared and the tool never runs.
    """
    from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    ai_message = AIMessage(
        "",
        id="ai1",
        tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())],
    )
    def agent(state) -> Command[Literal["foo"]]:
        # Control-flow variant: update and routing in one Command.
        return Command(
            update={"messages": ai_message},
            goto=[Send(call["name"], call) for call in ai_message.tool_calls],
        )
    foo_called = 0
    def foo(call: ToolCall):
        nonlocal foo_called
        foo_called += 1
        return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])}
    builder = StateGraph(MessagesState)
    builder.add_node(agent)
    builder.add_node(foo)
    builder.add_edge(START, "agent")
    graph = builder.compile()
    assert graph.get_graph().draw_mermaid() == snapshot
    assert graph.invoke({"messages": [HumanMessage("hello")]}) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    # simple interrupt-resume flow
    foo_called = 0
    graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
    thread1 = {"configurable": {"thread_id": "1"}}
    assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
        ]
    }
    assert foo_called == 0
    assert graph.invoke(None, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    # The remaining flow depends on Send V2 pending-task semantics.
    if not FF_SEND_V2:
        return
    # interrupt-update-resume flow
    foo_called = 0
    graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
    thread1 = {"configurable": {"thread_id": "2"}}
    assert graph.invoke({"messages": [HumanMessage("hello")]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
        ]
    }
    assert foo_called == 0
    # get state should show the pending task
    state = graph.get_state(thread1)
    assert state == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        },
        next=("foo",),
        config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 0,
            "source": "loop",
            "writes": None,
            "parents": {},
            "thread_id": "2",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="agent",
                path=("__pregel_pull", "agent"),
                error=None,
                interrupts=(),
                state=None,
                result={
                    "messages": AIMessage(
                        content="",
                        additional_kwargs={},
                        response_metadata={},
                        id="ai1",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    )
                },
            ),
            PregelTask(
                id=AnyStr(),
                name="foo",
                path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                error=None,
                interrupts=(),
                state=None,
                result=None,
            ),
        ),
    )
    # remove the tool call, clearing the pending task
    graph.update_state(
        thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])}
    )
    # tool call no longer in pending tasks
    assert graph.get_state(thread1) == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="Bye now",
                    tool_calls=[],
                ),
            ]
        },
        next=(),
        config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "step": 1,
            "source": "update",
            "writes": {
                "agent": {
                    "messages": _AnyIdAIMessage(
                        content="Bye now",
                        tool_calls=[],
                    )
                }
            },
            "parents": {},
            "thread_id": "2",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "2",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(),
    )
    # tool call not executed
    assert graph.invoke(None, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(content="Bye now"),
        ]
    }
    assert foo_called == 0
    # interrupt-update-resume flow, creating new Send in update call
    # TODO add here test with invoke(Command())
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_invoke_checkpoint_three(
    mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Exercise checkpoint persistence end-to-end on a single-node Pregel app:
    state accumulation across invokes, recovery after a node error, thread
    isolation, history listing / cursor pagination, and update_state creating
    a new child checkpoint.

    Parametrized over every sync checkpointer fixture in ALL_CHECKPOINTERS_SYNC.
    """
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    # adder reads both joined channels and returns their sum
    adder = mocker.Mock(side_effect=lambda x: x["total"] + x["input"])
    def raise_if_above_10(input: int) -> int:
        if input > 10:
            raise ValueError("Input is too large")
        return input
    # single node: read input joined with total, sum them, write the sum to
    # both output and total, then fail the step if the sum exceeded 10
    one = (
        Channel.subscribe_to(["input"]).join(["total"])
        | adder
        | Channel.write_to("output", "total")
        | raise_if_above_10
    )
    app = Pregel(
        nodes={"one": one},
        channels={
            "total": BinaryOperatorAggregate(int, operator.add),
            "input": LastValue(int),
            "output": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
        checkpointer=checkpointer,
    )
    thread_1 = {"configurable": {"thread_id": "1"}}
    # total starts out as 0, so output is 0+2=2
    assert app.invoke(2, thread_1, debug=1) == 2
    state = app.get_state(thread_1)
    assert state is not None
    assert state.values.get("total") == 2
    assert state.next == ()
    assert (
        state.config["configurable"]["checkpoint_id"]
        == checkpointer.get(thread_1)["id"]
    )
    # total is now 2, so output is 2+3=5
    assert app.invoke(3, thread_1) == 5
    state = app.get_state(thread_1)
    assert state is not None
    assert state.values.get("total") == 7
    assert (
        state.config["configurable"]["checkpoint_id"]
        == checkpointer.get(thread_1)["id"]
    )
    # total is now 2+5=7, so output would be 7+4=11, but raises ValueError
    with pytest.raises(ValueError):
        app.invoke(4, thread_1)
    # checkpoint is updated with new input
    state = app.get_state(thread_1)
    assert state is not None
    assert state.values.get("total") == 7
    assert state.next == ("one",)
    """we checkpoint inputs and it failed on "one", so the next node is one"""
    # we can recover from error by sending new inputs
    assert app.invoke(2, thread_1) == 9
    state = app.get_state(thread_1)
    assert state is not None
    assert state.values.get("total") == 16, "total is now 7+9=16"
    assert state.next == ()
    thread_2 = {"configurable": {"thread_id": "2"}}
    # on a new thread, total starts out as 0, so output is 0+5=5
    assert app.invoke(5, thread_2, debug=True) == 5
    state = app.get_state({"configurable": {"thread_id": "1"}})
    assert state is not None
    assert state.values.get("total") == 16
    assert state.next == (), "checkpoint of other thread not touched"
    state = app.get_state(thread_2)
    assert state is not None
    assert state.values.get("total") == 5
    assert state.next == ()
    assert len(list(app.get_state_history(thread_1, limit=1))) == 1
    # list all checkpoints for thread 1
    thread_1_history = [c for c in app.get_state_history(thread_1)]
    # there are 7 checkpoints
    assert len(thread_1_history) == 7
    assert Counter(c.metadata["source"] for c in thread_1_history) == {
        "input": 4,
        "loop": 3,
    }
    # sorted descending
    assert (
        thread_1_history[0].config["configurable"]["checkpoint_id"]
        > thread_1_history[1].config["configurable"]["checkpoint_id"]
    )
    # cursor pagination
    cursored = list(
        app.get_state_history(thread_1, limit=1, before=thread_1_history[0].config)
    )
    assert len(cursored) == 1
    assert cursored[0].config == thread_1_history[1].config
    # the last checkpoint
    assert thread_1_history[0].values["total"] == 16
    # the first "loop" checkpoint
    assert thread_1_history[-2].values["total"] == 2
    # can get each checkpoint using aget with config
    assert (
        checkpointer.get(thread_1_history[0].config)["id"]
        == thread_1_history[0].config["configurable"]["checkpoint_id"]
    )
    assert (
        checkpointer.get(thread_1_history[1].config)["id"]
        == thread_1_history[1].config["configurable"]["checkpoint_id"]
    )
    thread_1_next_config = app.update_state(thread_1_history[1].config, 10)
    # update creates a new checkpoint
    assert (
        thread_1_next_config["configurable"]["checkpoint_id"]
        > thread_1_history[0].config["configurable"]["checkpoint_id"]
    )
    # update makes new checkpoint child of the previous one
    assert (
        app.get_state(thread_1_next_config).parent_config == thread_1_history[1].config
    )
    # 1 more checkpoint in history
    assert len(list(app.get_state_history(thread_1))) == 8
    assert Counter(c.metadata["source"] for c in app.get_state_history(thread_1)) == {
        "update": 1,
        "input": 4,
        "loop": 3,
    }
    # the latest checkpoint is the updated one
    assert app.get_state(thread_1) == app.get_state(thread_1_next_config)
def test_invoke_two_processes_two_in_join_two_out(mocker: MockerFixture) -> None:
    """Two publishers feed the same Topic channel; the joining node receives
    every published value as a single batch once both publishers finish."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    add_10_each = mocker.Mock(side_effect=lambda x: sorted(y + 10 for y in x))

    # Two identical publishers both read "input" and write into "inbox".
    publisher_a = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    publisher_b = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    # The joiner consumes the whole inbox topic at once.
    joiner = Channel.subscribe_to("inbox") | add_10_each | Channel.write_to("output")

    app = Pregel(
        nodes={
            "one": publisher_a,
            "chain_three": publisher_b,
            "chain_four": joiner,
        },
        channels={
            "inbox": Topic(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )

    # The joiner waits for all publishers to finish before operating on all
    # elements published to the topic as an array, so the result is one list.
    for _ in range(100):
        assert app.invoke(2) == [13, 13]
    with ThreadPoolExecutor() as executor:
        assert list(executor.map(app.invoke, [2] * 100)) == [[13, 13]] * 100
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_invoke_join_then_call_other_pregel(
    mocker: MockerFixture, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Compose Pregel apps: the outer graph maps an inner Pregel app over a
    Topic channel and sums the results; then verify that adding a checkpointer
    makes the double subgraph invocation (via .map()) raise unless inner
    checkpointing is disabled.

    Parametrized over every sync checkpointer fixture in ALL_CHECKPOINTERS_SYNC.
    """
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    add_10_each = mocker.Mock(side_effect=lambda x: [y + 10 for y in x])
    # inner app: single node incrementing its input
    inner_app = Pregel(
        nodes={
            "one": Channel.subscribe_to("input") | add_one | Channel.write_to("output")
        },
        channels={
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # fan out: add 10 to each input element, one inbox message per element
    one = (
        Channel.subscribe_to("input")
        | add_10_each
        | Channel.write_to("inbox_one").map()
    )
    # map the inner app over every inbox message, collect sorted results
    two = (
        Channel.subscribe_to("inbox_one")
        | inner_app.map()
        | sorted
        | Channel.write_to("outbox_one")
    )
    chain_three = Channel.subscribe_to("outbox_one") | sum | Channel.write_to("output")
    app = Pregel(
        nodes={
            "one": one,
            "two": two,
            "chain_three": chain_three,
        },
        channels={
            "inbox_one": Topic(int),
            "outbox_one": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # [2, 3] -> [12, 13] -> inner app -> [13, 14] -> sum -> 27
    for _ in range(10):
        assert app.invoke([2, 3]) == 27
    with ThreadPoolExecutor() as executor:
        assert [*executor.map(app.invoke, [[2, 3]] * 10)] == [27] * 10
    # add checkpointer
    app.checkpointer = checkpointer
    # subgraph is called twice in the same node, through .map(), so raises
    with pytest.raises(MultipleSubgraphsError):
        app.invoke([2, 3], {"configurable": {"thread_id": "1"}})
    # set inner graph checkpointer NeverCheckpoint
    inner_app.checkpointer = False
    # subgraph still called twice, but checkpointing for inner graph is disabled
    assert app.invoke([2, 3], {"configurable": {"thread_id": "1"}}) == 27
def test_invoke_two_processes_one_in_two_out(mocker: MockerFixture) -> None:
    """A node writing to two channels at once; stream modes surface both
    per-node updates and cumulative stream-channel values."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)

    first = (
        Channel.subscribe_to("input") | add_one | Channel.write_to("output", "between")
    )
    second = Channel.subscribe_to("between") | add_one | Channel.write_to("output")

    app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "input": LastValue(int),
            "between": LastValue(int),
            "output": LastValue(int),
        },
        stream_channels=["output", "between"],
        input_channels="input",
        output_channels="output",
    )

    # "updates" mode: one dict per node execution, keyed by node name
    assert list(app.stream(2, stream_mode="updates")) == [
        {"one": {"between": 3, "output": 3}},
        {"two": {"output": 4}},
    ]
    # default mode: values of the stream channels after each step
    assert list(app.stream(2)) == [
        {"between": 3, "output": 3},
        {"between": 3, "output": 4},
    ]
def test_invoke_two_processes_no_out(mocker: MockerFixture) -> None:
    """When no node ever writes to the output channel, invoke returns None."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)

    first = Channel.subscribe_to("input") | add_one | Channel.write_to("between")
    # second node increments but writes nowhere
    second = Channel.subscribe_to("between") | add_one

    app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "input": LastValue(int),
            "between": LastValue(int),
            "output": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )

    # It finishes executing (once no more messages being published)
    # but returns nothing, as nothing was published to the output channel
    assert app.invoke(2) is None
def test_invoke_two_processes_no_in(mocker: MockerFixture) -> None:
    """Constructing a Pregel app with no input channel raises TypeError."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    first = Channel.subscribe_to("between") | add_one | Channel.write_to("output")
    second = Channel.subscribe_to("between") | add_one
    # neither node subscribes to an input channel, so construction fails
    with pytest.raises(TypeError):
        Pregel(nodes={"one": first, "two": second})
def test_channel_enter_exit_timing(mocker: MockerFixture) -> None:
    """Verify lazy lifecycle of a Context channel: its context manager is
    entered only once streaming begins (setup called during iteration, not
    before) and exited exactly once after the stream is exhausted.
    """
    setup = mocker.Mock()
    cleanup = mocker.Mock()
    @contextmanager
    def an_int() -> Generator[int, None, None]:
        setup()
        try:
            yield 5
        finally:
            cleanup()
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = (
        Channel.subscribe_to("inbox")
        | RunnableLambda(add_one).batch
        | Channel.write_to("output").batch
    )
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": Topic(int),
            "ctx": Context(an_int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels=["inbox", "output"],
        stream_channels=["inbox", "output"],
    )
    # nothing entered before iteration starts
    assert setup.call_count == 0
    assert cleanup.call_count == 0
    # the setup/cleanup counts are asserted *during* iteration on purpose:
    # the context must be live while chunks are being produced
    for i, chunk in enumerate(app.stream(2)):
        assert setup.call_count == 1, "Expected setup to be called once"
        if i == 0:
            assert chunk == {"inbox": [3]}
        elif i == 1:
            assert chunk == {"output": 4}
        else:
            assert False, "Expected only two chunks"
    assert cleanup.call_count == 1, "Expected cleanup to be called once"
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_conditional_graph(
    snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """End-to-end agent/tools loop on a plain Graph: uninterrupted invoke and
    stream, then checkpointed runs with interrupt_after=["agent"] and
    interrupt_before=["tools"], including update_state edits and resuming by
    streaming None.

    Parametrized over every sync checkpointer fixture in ALL_CHECKPOINTERS_SYNC.
    """
    from langchain_core.language_models.fake import FakeStreamingListLLM
    from langchain_core.prompts import PromptTemplate
    from langchain_core.runnables import RunnablePassthrough
    from langchain_core.tools import tool
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    # Assemble the tools
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"
    tools = [search_api]
    # Construct the agent
    prompt = PromptTemplate.from_template("Hello!")
    # scripted responses: two tool calls, then a finish
    llm = FakeStreamingListLLM(
        responses=[
            "tool:search_api:query",
            "tool:search_api:another",
            "finish:answer",
        ]
    )
    # parse the scripted "tool:NAME:INPUT" / "finish:ANSWER" strings
    def agent_parser(input: str) -> Union[AgentAction, AgentFinish]:
        if input.startswith("finish"):
            _, answer = input.split(":")
            return AgentFinish(return_values={"answer": answer}, log=input)
        else:
            _, tool_name, tool_input = input.split(":")
            return AgentAction(tool=tool_name, tool_input=tool_input, log=input)
    agent = RunnablePassthrough.assign(agent_outcome=prompt | llm | agent_parser)
    # Define tool execution logic
    def execute_tools(data: dict) -> dict:
        data = data.copy()
        agent_action: AgentAction = data.pop("agent_outcome")
        observation = {t.name: t for t in tools}[agent_action.tool].invoke(
            agent_action.tool_input
        )
        if data.get("intermediate_steps") is None:
            data["intermediate_steps"] = []
        else:
            data["intermediate_steps"] = data["intermediate_steps"].copy()
        data["intermediate_steps"].append([agent_action, observation])
        return data
    # Define decision-making logic
    def should_continue(data: dict) -> str:
        # Logic to decide whether to continue in the loop or exit
        if isinstance(data["agent_outcome"], AgentFinish):
            return "exit"
        else:
            return "continue"
    # Define a new graph
    workflow = Graph()
    workflow.add_node("agent", agent)
    workflow.add_node(
        "tools",
        execute_tools,
        metadata={"parents": {}, "version": 2, "variant": "b"},
    )
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges(
        "agent", should_continue, {"continue": "tools", "exit": END}
    )
    workflow.add_edge("tools", "agent")
    app = workflow.compile()
    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
        assert app.get_graph().draw_mermaid() == snapshot
        assert json.dumps(app.get_graph(xray=True).to_json(), indent=2) == snapshot
        assert app.get_graph(xray=True).draw_mermaid(with_styles=False) == snapshot
    # full uninterrupted run: two tool rounds then finish
    assert app.invoke({"input": "what is weather in sf"}) == {
        "input": "what is weather in sf",
        "intermediate_steps": [
            [
                AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:query",
                ),
                "result for query",
            ],
            [
                AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
                "result for another",
            ],
        ],
        "agent_outcome": AgentFinish(
            return_values={"answer": "answer"}, log="finish:answer"
        ),
    }
    # streaming the same run yields one update per node execution
    assert [c for c in app.stream({"input": "what is weather in sf"})] == [
        {
            "agent": {
                "input": "what is weather in sf",
                "agent_outcome": AgentAction(
                    tool="search_api", tool_input="query", log="tool:search_api:query"
                ),
            }
        },
        {
            "tools": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ]
                ],
            }
        },
        {
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
            }
        },
        {
            "tools": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ],
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="another",
                            log="tool:search_api:another",
                        ),
                        "result for another",
                    ],
                ],
            }
        },
        {
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ],
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="another",
                            log="tool:search_api:another",
                        ),
                        "result for another",
                    ],
                ],
                "agent_outcome": AgentFinish(
                    return_values={"answer": "answer"}, log="finish:answer"
                ),
            }
        },
    ]
    # test state get/update methods with interrupt_after
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["agent"],
    )
    config = {"configurable": {"thread_id": "1"}}
    if SHOULD_CHECK_SNAPSHOTS:
        assert app_w_interrupt.get_graph().to_json() == snapshot
        assert app_w_interrupt.get_graph().draw_mermaid() == snapshot
    # execution pauses right after the first "agent" step
    assert [
        c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config)
    ] == [
        {
            "agent": {
                "input": "what is weather in sf",
                "agent_outcome": AgentAction(
                    tool="search_api", tool_input="query", log="tool:search_api:query"
                ),
            }
        }
    ]
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values={
            "agent": {
                "input": "what is weather in sf",
                "agent_outcome": AgentAction(
                    tool="search_api", tool_input="query", log="tool:search_api:query"
                ),
            },
        },
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        metadata={
            "parents": {},
            "source": "loop",
            "step": 0,
            "writes": {
                "agent": {
                    "agent": {
                        "input": "what is weather in sf",
                        "agent_outcome": AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                    }
                },
            },
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )
    assert (
        app_w_interrupt.checkpointer.get_tuple(config).config["configurable"][
            "checkpoint_id"
        ]
        is not None
    )
    # edit the paused state: replace the agent outcome before resuming
    app_w_interrupt.update_state(
        config,
        {
            "agent_outcome": AgentAction(
                tool="search_api",
                tool_input="query",
                log="tool:search_api:a different query",
            ),
            "input": "what is weather in sf",
        },
    )
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values={
            "agent": {
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:a different query",
                ),
                "input": "what is weather in sf",
            },
        },
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 1,
            "writes": {
                "agent": {
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "input": "what is weather in sf",
                },
            },
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )
    # resume with None: the edited outcome flows through tools and agent
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {
            "agent": {
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:a different query",
                ),
                "input": "what is weather in sf",
            }
        },
        {
            "tools": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "result for query",
                    ]
                ],
            }
        },
        {
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
            }
        },
    ]
    # overwrite the whole state with a finished outcome, ending the run
    app_w_interrupt.update_state(
        config,
        {
            "input": "what is weather in sf",
            "intermediate_steps": [
                [
                    AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "result for query",
                ]
            ],
            "agent_outcome": AgentFinish(
                return_values={"answer": "a really nice answer"},
                log="finish:a really nice answer",
            ),
        },
    )
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values={
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentFinish(
                    return_values={"answer": "a really nice answer"},
                    log="finish:a really nice answer",
                ),
            },
        },
        tasks=(),
        next=(),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 4,
            "writes": {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentFinish(
                        return_values={"answer": "a really nice answer"},
                        log="finish:a really nice answer",
                    ),
                }
            },
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    ) if False else True  # placeholder
def test_conditional_entrypoint_graph(snapshot: SnapshotAssertion) -> None:
    """A Graph whose entry node is picked by a router over the raw input."""

    def left(data: str) -> str:
        return data + "->left"

    def right(data: str) -> str:
        return data + "->right"

    def should_start(data: str) -> str:
        # Logic to decide where to start: long inputs go right, short go left.
        return "go-right" if len(data) > 10 else "go-left"

    # Define a new graph
    workflow = Graph()
    workflow.add_node("left", left)
    workflow.add_node("right", right)
    workflow.set_conditional_entry_point(
        should_start, {"go-left": "left", "go-right": "right"}
    )
    workflow.add_conditional_edges("left", lambda data: END, {END: END})
    workflow.add_edge("right", END)
    app = workflow.compile()

    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot

    # the input is longer than 10 chars, so routing goes right
    result = app.invoke("what is weather in sf", debug=True)
    assert result == "what is weather in sf->right"
    assert list(app.stream("what is weather in sf")) == [
        {"right": "what is weather in sf->right"},
    ]
def test_conditional_entrypoint_to_multiple_state_graph(
    snapshot: SnapshotAssertion,
) -> None:
    """A conditional entry point fanning out via Send to one node per item."""

    class OverallState(TypedDict):
        locations: list[str]
        results: Annotated[list[str], operator.add]

    def get_weather(state: OverallState) -> OverallState:
        # NOTE: each Send payload carries a singular "location" key rather
        # than the OverallState shape.
        location = state["location"]
        if len(location) > 2:
            weather = "sunny"
        else:
            weather = "cloudy"
        return {"results": [f"It's {weather} in {location}"]}

    def continue_to_weather(state: OverallState) -> list[Send]:
        sends = []
        for location in state["locations"]:
            sends.append(Send("get_weather", {"location": location}))
        return sends

    workflow = StateGraph(OverallState)
    workflow.add_node("get_weather", get_weather)
    workflow.add_edge("get_weather", END)
    workflow.set_conditional_entry_point(continue_to_weather)
    app = workflow.compile()

    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot

    expected = {
        "locations": ["sf", "nyc"],
        "results": ["It's cloudy in sf", "It's sunny in nyc"],
    }
    assert app.invoke({"locations": ["sf", "nyc"]}, debug=True) == expected
    # the final "values" chunk carries the accumulated results
    values = list(app.stream({"locations": ["sf", "nyc"]}, stream_mode="values"))
    assert values[-1] == expected
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_conditional_state_graph(
snapshot: SnapshotAssertion,
mocker: MockerFixture,
request: pytest.FixtureRequest,
checkpointer_name: str,
) -> None:
from langchain_core.language_models.fake import FakeStreamingListLLM
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool
checkpointer: BaseCheckpointSaver = request.getfixturevalue(
f"checkpointer_{checkpointer_name}"
)
setup = mocker.Mock()
teardown = mocker.Mock()
@contextmanager
def assert_ctx_once() -> Iterator[None]:
assert setup.call_count == 0
assert teardown.call_count == 0
try:
yield
finally:
assert setup.call_count == 1
assert teardown.call_count == 1
setup.reset_mock()
teardown.reset_mock()
@contextmanager
def make_httpx_client() -> Iterator[httpx.Client]:
setup()
with httpx.Client() as client:
try:
yield client
finally:
teardown()
class AgentState(TypedDict, total=False):
input: Annotated[str, UntrackedValue]
agent_outcome: Optional[Union[AgentAction, AgentFinish]]
intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
session: Annotated[httpx.Client, Context(make_httpx_client)]
class ToolState(TypedDict, total=False):
agent_outcome: Union[AgentAction, AgentFinish]
session: Annotated[httpx.Client, Context(make_httpx_client)]
# Assemble the tools
@tool()
def search_api(query: str) -> str:
"""Searches the API for the query."""
return f"result for {query}"
tools = [search_api]
# Construct the agent
prompt = PromptTemplate.from_template("Hello!")
llm = FakeStreamingListLLM(
responses=[
"tool:search_api:query",
"tool:search_api:another",
"finish:answer",
]
)
def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]:
if input.startswith("finish"):
_, answer = input.split(":")
return {
"agent_outcome": AgentFinish(
return_values={"answer": answer}, log=input
)
}
else:
_, tool_name, tool_input = input.split(":")
return {
"agent_outcome": AgentAction(
tool=tool_name, tool_input=tool_input, log=input
)
}
agent = prompt | llm | agent_parser
# Define tool execution logic
def execute_tools(data: ToolState) -> dict:
# check session in data
assert isinstance(data["session"], httpx.Client)
assert "input" not in data
assert "intermediate_steps" not in data
# execute the tool
agent_action: AgentAction = data.pop("agent_outcome")
observation = {t.name: t for t in tools}[agent_action.tool].invoke(
agent_action.tool_input
)
return {"intermediate_steps": [[agent_action, observation]]}
# Define decision-making logic
def should_continue(data: AgentState) -> str:
# check session in data
assert isinstance(data["session"], httpx.Client)
# Logic to decide whether to continue in the loop or exit
if isinstance(data["agent_outcome"], AgentFinish):
return "exit"
else:
return "continue"
# Define a new graph
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent)
workflow.add_node("tools", execute_tools, input=ToolState)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
"agent", should_continue, {"continue": "tools", "exit": END}
)
workflow.add_edge("tools", "agent")
app = workflow.compile()
if SHOULD_CHECK_SNAPSHOTS:
assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
with assert_ctx_once():
assert app.invoke({"input": "what is weather in sf"}) == {
"input": "what is weather in sf",
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
],
[
AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
"result for another",
],
],
"agent_outcome": AgentFinish(
return_values={"answer": "answer"}, log="finish:answer"
),
}
with assert_ctx_once():
assert [*app.stream({"input": "what is weather in sf"})] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
"result for another",
],
],
}
},
{
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "answer"}, log="finish:answer"
),
}
},
]
# test state get/update methods with interrupt_after
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_after=["agent"],
)
config = {"configurable": {"thread_id": "1"}}
with assert_ctx_once():
assert [
c
for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config)
] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
with assert_ctx_once():
app_w_interrupt.update_state(
config,
{
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
},
)
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
},
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
with assert_ctx_once():
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
with assert_ctx_once():
app_w_interrupt.update_state(
config,
{
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
},
)
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
},
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
}
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# test state get/update methods with interrupt_before
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_before=["tools"],
debug=True,
)
config = {"configurable": {"thread_id": "2"}}
llm.i = 0 # reset the llm
assert [
c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config)
] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt.update_state(
config,
{
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
},
)
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
}
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
app_w_interrupt.update_state(
config,
{
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
},
)
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
},
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
}
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# test w interrupt before all
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_before="*",
debug=True,
)
config = {"configurable": {"thread_id": "3"}}
llm.i = 0 # reset the llm
assert [
c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config)
] == [
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
next=("agent",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "3",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "3",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
},
tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
next=("agent",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 2,
"writes": {
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
"thread_id": "3",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
# test w interrupt after all
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_after="*",
)
config = {"configurable": {"thread_id": "4"}}
llm.i = 0 # reset the llm
assert [
c for c in app_w_interrupt.stream({"input": "what is weather in sf"}, config)
] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "4",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
},
tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
next=("agent",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 2,
"writes": {
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
"thread_id": "4",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
def test_conditional_state_graph_with_list_edge_inputs(snapshot: SnapshotAssertion):
    """Two nodes fanned out from START must both run and join on a list-edge to END."""

    class State(TypedDict):
        foo: Annotated[list[str], operator.add]

    builder = StateGraph(State)
    for name in ("A", "B"):
        # bind `name` as a default to avoid late-binding in the lambda
        builder.add_node(name, lambda x, name=name: {"foo": [name]})
        builder.add_edge(START, name)
    builder.add_edge(["A", "B"], END)

    compiled = builder.compile()

    assert compiled.invoke({"foo": []}) == {"foo": ["A", "B"]}
    assert json.dumps(compiled.get_graph().to_json(), indent=2) == snapshot
    assert compiled.get_graph().draw_mermaid(with_styles=False) == snapshot
def test_state_graph_w_config_inherited_state_keys(snapshot: SnapshotAssertion) -> None:
    """State keys inherited from a TypedDict base class must become graph channels."""
    from langchain_core.language_models.fake import FakeStreamingListLLM
    from langchain_core.prompts import PromptTemplate
    from langchain_core.tools import tool

    class BaseState(TypedDict):
        # required keys, inherited by AgentState below
        input: str
        agent_outcome: Optional[Union[AgentAction, AgentFinish]]

    class AgentState(BaseState, total=False):
        intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]

    # inherited + own keys are all visible via get_type_hints
    assert get_type_hints(AgentState).keys() == {
        "input",
        "agent_outcome",
        "intermediate_steps",
    }

    class Config(TypedDict, total=False):
        tools: list[str]

    # Assemble the tools
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"

    tools = [search_api]

    # Construct the agent
    prompt = PromptTemplate.from_template("Hello!")
    llm = FakeStreamingListLLM(
        responses=[
            "tool:search_api:query",
            "tool:search_api:another",
            "finish:answer",
        ]
    )

    def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]:
        # "finish:<answer>" -> AgentFinish; "tool:<name>:<input>" -> AgentAction
        if input.startswith("finish"):
            _, answer = input.split(":")
            return {
                "agent_outcome": AgentFinish(
                    return_values={"answer": answer}, log=input
                )
            }
        else:
            _, tool_name, tool_input = input.split(":")
            return {
                "agent_outcome": AgentAction(
                    tool=tool_name, tool_input=tool_input, log=input
                )
            }

    agent = prompt | llm | agent_parser

    # Define tool execution logic
    def execute_tools(data: AgentState) -> dict:
        agent_action: AgentAction = data.pop("agent_outcome")
        observation = {t.name: t for t in tools}[agent_action.tool].invoke(
            agent_action.tool_input
        )
        return {"intermediate_steps": [(agent_action, observation)]}

    # Define decision-making logic
    def should_continue(data: AgentState) -> str:
        # Logic to decide whether to continue in the loop or exit
        if isinstance(data["agent_outcome"], AgentFinish):
            return "exit"
        else:
            return "continue"

    # Define a new graph, with a config schema as the second argument
    builder = StateGraph(AgentState, Config)
    builder.add_node("agent", agent)
    builder.add_node("tools", execute_tools)
    builder.set_entry_point("agent")
    builder.add_conditional_edges(
        "agent", should_continue, {"continue": "tools", "exit": END}
    )
    builder.add_edge("tools", "agent")
    app = builder.compile()

    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.config_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot

    # channels must include the keys inherited from BaseState
    assert builder.channels.keys() == {"input", "agent_outcome", "intermediate_steps"}

    assert app.invoke({"input": "what is weather in sf"}) == {
        "agent_outcome": AgentFinish(
            return_values={"answer": "answer"}, log="finish:answer"
        ),
        "input": "what is weather in sf",
        "intermediate_steps": [
            (
                AgentAction(
                    tool="search_api", tool_input="query", log="tool:search_api:query"
                ),
                "result for query",
            ),
            (
                AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
                "result for another",
            ),
        ],
    }
def test_conditional_entrypoint_graph_state(snapshot: SnapshotAssertion) -> None:
    """A conditional entry point routes long inputs to `right` and short ones to `left`."""

    class AgentState(TypedDict, total=False):
        input: str
        output: str
        steps: Annotated[list[str], operator.add]

    def left(data: AgentState) -> AgentState:
        return {"output": f"{data['input']}->left"}

    def right(data: AgentState) -> AgentState:
        return {"output": f"{data['input']}->right"}

    def should_start(data: AgentState) -> str:
        assert data["steps"] == [], "Expected input to be read from the state"
        # route on the length of the input string
        return "go-right" if len(data["input"]) > 10 else "go-left"

    # Define a new graph
    workflow = StateGraph(AgentState)
    workflow.add_node("left", left)
    workflow.add_node("right", right)
    workflow.set_conditional_entry_point(
        should_start, {"go-left": "left", "go-right": "right"}
    )
    workflow.add_conditional_edges("left", lambda data: END, {END: END})
    workflow.add_edge("right", END)
    app = workflow.compile()

    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot

    # the 21-char input is > 10, so the run goes through `right`
    assert app.invoke({"input": "what is weather in sf"}) == {
        "input": "what is weather in sf",
        "output": "what is weather in sf->right",
        "steps": [],
    }
    assert list(app.stream({"input": "what is weather in sf"})) == [
        {"right": {"output": "what is weather in sf->right"}},
    ]
def test_prebuilt_tool_chat(snapshot: SnapshotAssertion) -> None:
    """Exercise the prebuilt tool-calling executor across stream modes.

    Covers: plain invoke, `messages` stream mode (chunk metadata included),
    recursion-limit fallback message, `updates` stream mode, and default stream.
    """
    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_core.tools import tool

    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"

    tools = [search_api]
    # scripted model: one tool call, then two parallel tool calls, then a final answer
    model = FakeChatModel(
        messages=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another"},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one"},
                    },
                ],
            ),
            AIMessage(content="answer"),
        ]
    )
    app = create_tool_calling_executor(model, tools)
    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    # full run: human -> ai(1 tool) -> tool -> ai(2 tools) -> 2 tools -> ai answer
    assert app.invoke(
        {"messages": [HumanMessage(content="what is weather in sf")]}
    ) == {
        "messages": [
            _AnyIdHumanMessage(content="what is weather in sf"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another"},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one"},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for another",
                name="search_api",
                tool_call_id="tool_call234",
            ),
            _AnyIdToolMessage(
                content="result for a third one",
                name="search_api",
                tool_call_id="tool_call567",
                id=AnyStr(),
            ),
            _AnyIdAIMessage(content="answer"),
        ]
    }
    # `messages` stream mode yields (message, metadata) pairs, including
    # langgraph_* metadata for every node that emitted the message
    assert [
        c
        for c in app.stream(
            {"messages": [HumanMessage(content="what is weather in sf")]},
            stream_mode="messages",
        )
    ] == [
        (
            _AnyIdAIMessageChunk(
                content="",
                tool_calls=[
                    {
                        "name": "search_api",
                        "args": {"query": "query"},
                        "id": "tool_call123",
                        "type": "tool_call",
                    }
                ],
                tool_call_chunks=[
                    {
                        "name": "search_api",
                        "args": '{"query": "query"}',
                        "id": "tool_call123",
                        "index": None,
                        "type": "tool_call_chunk",
                    }
                ],
            ),
            {
                "langgraph_step": 1,
                "langgraph_node": "agent",
                "langgraph_triggers": ["start:agent"],
                "langgraph_path": (PULL, "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
        (
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            {
                "langgraph_step": 2,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": (PULL, "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            _AnyIdAIMessageChunk(
                content="",
                tool_calls=[
                    {
                        "name": "search_api",
                        "args": {"query": "another"},
                        "id": "tool_call234",
                        "type": "tool_call",
                    },
                    {
                        "name": "search_api",
                        "args": {"query": "a third one"},
                        "id": "tool_call567",
                        "type": "tool_call",
                    },
                ],
                tool_call_chunks=[
                    {
                        "name": "search_api",
                        "args": '{"query": "another"}',
                        "id": "tool_call234",
                        "index": None,
                        "type": "tool_call_chunk",
                    },
                    {
                        "name": "search_api",
                        "args": '{"query": "a third one"}',
                        "id": "tool_call567",
                        "index": None,
                        "type": "tool_call_chunk",
                    },
                ],
            ),
            {
                "langgraph_step": 3,
                "langgraph_node": "agent",
                "langgraph_triggers": ["tools"],
                "langgraph_path": (PULL, "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
        (
            _AnyIdToolMessage(
                content="result for another",
                name="search_api",
                tool_call_id="tool_call234",
            ),
            {
                "langgraph_step": 4,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": (PULL, "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            _AnyIdToolMessage(
                content="result for a third one",
                name="search_api",
                tool_call_id="tool_call567",
            ),
            {
                "langgraph_step": 4,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": (PULL, "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            _AnyIdAIMessageChunk(
                content="answer",
            ),
            {
                "langgraph_step": 5,
                "langgraph_node": "agent",
                "langgraph_triggers": ["tools"],
                "langgraph_path": (PULL, "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
    ]
    # hitting the recursion limit yields the canned apology instead of raising
    assert app.invoke(
        {"messages": [HumanMessage(content="what is weather in sf")]},
        {"recursion_limit": 2},
        debug=True,
    ) == {
        "messages": [
            _AnyIdHumanMessage(content="what is weather in sf"),
            _AnyIdAIMessage(content="Sorry, need more steps to process this request."),
        ]
    }
    model.i = 0  # reset the model
    # NOTE(review): both sides index [0]["agent"]["messages"], so only the first
    # update's agent messages are actually compared here
    assert (
        app.invoke(
            {"messages": [HumanMessage(content="what is weather in sf")]},
            stream_mode="updates",
        )[0]["agent"]["messages"]
        == [
            {
                "agent": {
                    "messages": [
                        _AnyIdAIMessage(
                            content="",
                            tool_calls=[
                                {
                                    "id": "tool_call123",
                                    "name": "search_api",
                                    "args": {"query": "query"},
                                },
                            ],
                        )
                    ]
                }
            },
            {
                "tools": {
                    "messages": [
                        _AnyIdToolMessage(
                            content="result for query",
                            name="search_api",
                            tool_call_id="tool_call123",
                        )
                    ]
                }
            },
            {
                "agent": {
                    "messages": [
                        _AnyIdAIMessage(
                            content="",
                            tool_calls=[
                                {
                                    "id": "tool_call234",
                                    "name": "search_api",
                                    "args": {"query": "another"},
                                },
                                {
                                    "id": "tool_call567",
                                    "name": "search_api",
                                    "args": {"query": "a third one"},
                                },
                            ],
                        )
                    ]
                }
            },
            {
                "tools": {
                    "messages": [
                        _AnyIdToolMessage(
                            content="result for another",
                            name="search_api",
                            tool_call_id="tool_call234",
                        ),
                        _AnyIdToolMessage(
                            content="result for a third one",
                            name="search_api",
                            tool_call_id="tool_call567",
                        ),
                    ]
                }
            },
            {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}},
        ][0]["agent"]["messages"]
    )
    # default stream mode yields one update dict per node execution
    assert [
        *app.stream({"messages": [HumanMessage(content="what is weather in sf")]})
    ] == [
        {
            "agent": {
                "messages": [
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    )
                ]
            }
        },
        {
            "tools": {
                "messages": [
                    _AnyIdToolMessage(
                        content="result for query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    )
                ]
            }
        },
        {
            "agent": {
                "messages": [
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another"},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one"},
                            },
                        ],
                    )
                ]
            }
        },
        {
            "tools": {
                "messages": [
                    _AnyIdToolMessage(
                        content="result for another",
                        name="search_api",
                        tool_call_id="tool_call234",
                    ),
                    _AnyIdToolMessage(
                        content="result for a third one",
                        name="search_api",
                        tool_call_id="tool_call567",
                    ),
                ]
            }
        },
        {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_state_graph_packets(
request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
from langchain_core.language_models.fake_chat_models import (
FakeMessagesListChatModel,
)
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
ToolCall,
ToolMessage,
)
from langchain_core.tools import tool
checkpointer: BaseCheckpointSaver = request.getfixturevalue(
f"checkpointer_{checkpointer_name}"
)
class AgentState(TypedDict):
messages: Annotated[list[BaseMessage], add_messages]
session: Annotated[httpx.Client, Context(httpx.Client)]
@tool()
def search_api(query: str) -> str:
"""Searches the API for the query."""
return f"result for {query}"
tools = [search_api]
tools_by_name = {t.name: t for t in tools}
model = FakeMessagesListChatModel(
responses=[
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
),
AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
),
AIMessage(id="ai3", content="answer"),
]
)
    def agent(data: AgentState) -> AgentState:
        """Call the fake chat model; also emit a key not declared in AgentState."""
        assert isinstance(data["session"], httpx.Client)
        return {
            "messages": model.invoke(data["messages"]),
            # extra, non-state key: visible to conditional edges but never persisted
            "something_extra": "hi there",
        }
    # Define decision-making logic
    def should_continue(data: AgentState) -> str:
        """Fan out one Send per tool call, or end the run when there are none."""
        assert isinstance(data["session"], httpx.Client)
        assert (
            data["something_extra"] == "hi there"
        ), "nodes can pass extra data to their cond edges, which isn't saved in state"
        # Logic to decide whether to continue in the loop or exit
        if tool_calls := data["messages"][-1].tool_calls:
            return [Send("tools", tool_call) for tool_call in tool_calls]
        else:
            return END
    def tools_node(input: ToolCall, config: RunnableConfig) -> AgentState:
        """Execute a single tool call sent via Send."""
        # stagger parallel executions by their `idx` arg so outputs arrive in order
        time.sleep(input["args"].get("idx", 0) / 10)
        output = tools_by_name[input["name"]].invoke(input["args"], config)
        return {
            "messages": ToolMessage(
                content=output, name=input["name"], tool_call_id=input["id"]
            )
        }
# Define a new graph
workflow = StateGraph(AgentState)
# Define the two nodes we will cycle between
workflow.add_node("agent", agent)
workflow.add_node("tools", tools_node)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges("agent", should_continue)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("tools", "agent")
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
assert app.invoke({"messages": HumanMessage(content="what is weather in sf")}) == {
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
),
_AnyIdToolMessage(
content="result for query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
),
_AnyIdToolMessage(
content="result for another",
name="search_api",
tool_call_id="tool_call234",
),
_AnyIdToolMessage(
content="result for a third one",
name="search_api",
tool_call_id="tool_call567",
),
AIMessage(content="answer", id="ai3"),
]
}
assert [
c
for c in app.stream(
{"messages": [HumanMessage(content="what is weather in sf")]}
)
] == [
{
"agent": {
"messages": AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
)
},
},
{
"tools": {
"messages": _AnyIdToolMessage(
content="result for query",
name="search_api",
tool_call_id="tool_call123",
)
}
},
{
"agent": {
"messages": AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
)
}
},
{
"tools": {
"messages": _AnyIdToolMessage(
content="result for another",
name="search_api",
tool_call_id="tool_call234",
)
},
},
{
"tools": {
"messages": _AnyIdToolMessage(
content="result for a third one",
name="search_api",
tool_call_id="tool_call567",
),
},
},
{"agent": {"messages": AIMessage(content="answer", id="ai3")}},
]
# interrupt after agent
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_after=["agent"],
)
config = {"configurable": {"thread_id": "1"}}
assert [
c
for c in app_w_interrupt.stream(
{"messages": HumanMessage(content="what is weather in sf")}, config
)
] == [
{
"agent": {
"messages": AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
)
}
},
{"__interrupt__": ()},
]
if not FF_SEND_V2:
return
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
),
]
},
tasks=(
PregelTask(
id=AnyStr(),
name="agent",
path=("__pregel_pull", "agent"),
error=None,
interrupts=(),
state=None,
result={
"messages": AIMessage(
content="",
additional_kwargs={},
response_metadata={},
id="ai1",
tool_calls=[
{
"name": "search_api",
"args": {"query": "query"},
"id": "tool_call123",
"type": "tool_call",
}
],
)
},
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
),
),
next=("tools",),
config=(app_w_interrupt.checkpointer.get_tuple(config)).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# modify ai message
last_message = (app_w_interrupt.get_state(config)).values["messages"][-1]
last_message.tool_calls[0]["args"]["query"] = "a different query"
app_w_interrupt.update_state(
config, {"messages": last_message, "something_extra": "hi there"}
)
# message was replaced instead of appended
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
]
},
tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 1,
"writes": {
"agent": {
"messages": AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
"something_extra": "hi there",
}
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"messages": _AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
)
}
},
{
"agent": {
"messages": AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
)
},
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
),
]
},
tasks=(
PregelTask(
id=AnyStr(),
name="agent",
path=("__pregel_pull", "agent"),
error=None,
interrupts=(),
state=None,
result={
"messages": AIMessage(
"",
id="ai2",
tool_calls=[
{
"name": "search_api",
"args": {"query": "another", "idx": 0},
"id": "tool_call234",
"type": "tool_call",
},
{
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
"id": "tool_call567",
"type": "tool_call",
},
],
)
},
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr())
),
),
next=("tools", "tools"),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 2,
"writes": {
"tools": {
"messages": _AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
},
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt.update_state(
config,
{
"messages": AIMessage(content="answer", id="ai2"),
"something_extra": "hi there",
},
)
# replaces message even if object identity is different, as long as id is the same
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(content="answer", id="ai2"),
]
},
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 3,
"writes": {
"agent": {
"messages": AIMessage(content="answer", id="ai2"),
"something_extra": "hi there",
}
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# interrupt before tools
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_before=["tools"],
)
config = {"configurable": {"thread_id": "2"}}
model.i = 0
assert [
c
for c in app_w_interrupt.stream(
{"messages": HumanMessage(content="what is weather in sf")}, config
)
] == [
{
"agent": {
"messages": AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
)
}
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
},
],
),
]
},
tasks=(
PregelTask(
id=AnyStr(),
name="agent",
path=("__pregel_pull", "agent"),
error=None,
interrupts=(),
state=None,
result={
"messages": AIMessage(
"",
id="ai1",
tool_calls=[
{
"name": "search_api",
"args": {"query": "query"},
"id": "tool_call123",
"type": "tool_call",
}
],
)
},
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
),
),
next=("tools",),
config=(app_w_interrupt.checkpointer.get_tuple(config)).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# modify ai message
last_message = (app_w_interrupt.get_state(config)).values["messages"][-1]
last_message.tool_calls[0]["args"]["query"] = "a different query"
app_w_interrupt.update_state(
config, {"messages": last_message, "something_extra": "hi there"}
)
# message was replaced instead of appended
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
]
},
tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 1,
"writes": {
"agent": {
"messages": AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
"something_extra": "hi there",
}
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": {
"messages": _AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
)
}
},
{
"agent": {
"messages": AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
)
},
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(
id="ai2",
content="",
tool_calls=[
{
"id": "tool_call234",
"name": "search_api",
"args": {"query": "another", "idx": 0},
},
{
"id": "tool_call567",
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
},
],
),
]
},
tasks=(
PregelTask(
id=AnyStr(),
name="agent",
path=("__pregel_pull", "agent"),
error=None,
interrupts=(),
state=None,
result={
"messages": AIMessage(
"",
id="ai2",
tool_calls=[
{
"name": "search_api",
"args": {"query": "another", "idx": 0},
"id": "tool_call234",
"type": "tool_call",
},
{
"name": "search_api",
"args": {"query": "a third one", "idx": 1},
"id": "tool_call567",
"type": "tool_call",
},
],
)
},
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
),
PregelTask(
AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr())
),
),
next=("tools", "tools"),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 2,
"writes": {
"tools": {
"messages": _AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
},
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt.update_state(
config,
{
"messages": AIMessage(content="answer", id="ai2"),
"something_extra": "hi there",
},
)
# replaces message even if object identity is different, as long as id is the same
assert app_w_interrupt.get_state(config) == StateSnapshot(
values={
"messages": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
id="ai1",
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
},
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(content="answer", id="ai2"),
]
},
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=(app_w_interrupt.checkpointer.get_tuple(config)).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 3,
"writes": {
"agent": {
"messages": AIMessage(content="answer", id="ai2"),
"something_extra": "hi there",
}
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_message_graph(
    snapshot: SnapshotAssertion,
    deterministic_uuids: MockerFixture,
    request: pytest.FixtureRequest,
    checkpointer_name: str,
) -> None:
    """End-to-end test of a ReAct-style agent built on ``MessageGraph``.

    Exercises plain ``invoke``/``stream``, ``interrupt_after``/``interrupt_before``
    with a checkpointer, state inspection via ``get_state``, and the message
    replacement semantics of ``update_state`` (a message carrying an existing id
    replaces the stored message instead of being appended).
    """
    from copy import deepcopy

    from langchain_core.callbacks import CallbackManagerForLLMRun
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
    from langchain_core.outputs import ChatGeneration, ChatResult
    from langchain_core.tools import tool

    # Resolve the checkpointer fixture selected by the parametrization.
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )

    class FakeFuntionChatModel(FakeMessagesListChatModel):
        # Scripted fake model: replays `self.responses` in order via `self.i`.
        def bind_functions(self, functions: list):
            # Tool binding is a no-op for the fake model.
            return self

        def _generate(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> ChatResult:
            # Copy so the test can mutate the returned message without
            # corrupting the script for later runs.
            response = deepcopy(self.responses[self.i])
            # Advance through the script, wrapping back to the start.
            if self.i < len(self.responses) - 1:
                self.i += 1
            else:
                self.i = 0
            generation = ChatGeneration(message=response)
            return ChatResult(generations=[generation])

    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"

    tools = [search_api]

    # Script: two tool-calling turns, then a final plain answer.
    model = FakeFuntionChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            ),
            AIMessage(content="answer", id="ai3"),
        ]
    )

    # Define the function that determines whether to continue or not
    def should_continue(messages):
        last_message = messages[-1]
        # If there is no function call, then we finish
        if not last_message.tool_calls:
            return "end"
        # Otherwise if there is, we continue
        else:
            return "continue"

    # Define a new graph
    workflow = MessageGraph()

    # Define the two nodes we will cycle between
    workflow.add_node("agent", model)
    workflow.add_node("tools", ToolNode(tools))

    # Set the entrypoint as `agent`
    # This means that this node is the first one called
    workflow.set_entry_point("agent")

    # We now add a conditional edge
    workflow.add_conditional_edges(
        # First, we define the start node. We use `agent`.
        # This means these are the edges taken after the `agent` node is called.
        "agent",
        # Next, we pass in the function that will determine which node is called next.
        should_continue,
        # Finally we pass in a mapping.
        # The keys are strings, and the values are other nodes.
        # END is a special node marking that the graph should finish.
        # What will happen is we will call `should_continue`, and then the output of that
        # will be matched against the keys in this mapping.
        # Based on which one it matches, that node will then be called.
        {
            # If `tools`, then we call the tool node.
            "continue": "tools",
            # Otherwise we finish.
            "end": END,
        },
    )

    # We now add a normal edge from `tools` to `agent`.
    # This means that after `tools` is called, `agent` node is called next.
    workflow.add_edge("tools", "agent")

    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
    # meaning you can use it as you would any other runnable
    app = workflow.compile()

    # Pin the graph's schemas and rendered topology against snapshots.
    if SHOULD_CHECK_SNAPSHOTS:
        assert json.dumps(app.get_input_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_output_schema().model_json_schema()) == snapshot
        assert json.dumps(app.get_graph().to_json(), indent=2) == snapshot
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot

    # Full run without interrupts: two tool round-trips, then the final answer.
    assert app.invoke(HumanMessage(content="what is weather in sf")) == [
        _AnyIdHumanMessage(
            content="what is weather in sf",
        ),
        AIMessage(
            content="",
            tool_calls=[
                {
                    "id": "tool_call123",
                    "name": "search_api",
                    "args": {"query": "query"},
                }
            ],
            id="ai1",  # respects ids passed in
        ),
        _AnyIdToolMessage(
            content="result for query",
            name="search_api",
            tool_call_id="tool_call123",
        ),
        AIMessage(
            content="",
            tool_calls=[
                {
                    "id": "tool_call456",
                    "name": "search_api",
                    "args": {"query": "another"},
                }
            ],
            id="ai2",
        ),
        _AnyIdToolMessage(
            content="result for another",
            name="search_api",
            tool_call_id="tool_call456",
        ),
        AIMessage(content="answer", id="ai3"),
    ]

    # Streaming yields one {node_name: output} chunk per executed node.
    assert [*app.stream([HumanMessage(content="what is weather in sf")])] == [
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            )
        },
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for query",
                    name="search_api",
                    tool_call_id="tool_call123",
                )
            ]
        },
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            )
        },
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for another",
                    name="search_api",
                    tool_call_id="tool_call456",
                )
            ]
        },
        {"agent": AIMessage(content="answer", id="ai3")},
    ]

    # Recompile with a checkpointer, interrupting after the agent node.
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["agent"],
    )
    config = {"configurable": {"thread_id": "1"}}

    # First run on thread "1" stops right after the agent's first tool call.
    assert [
        c for c in app_w_interrupt.stream(("human", "what is weather in sf"), config)
    ] == [
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            )
        },
        {"__interrupt__": ()},
    ]

    # Checkpoint state: "tools" is pending next, agent's write is recorded.
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "query"},
                        }
                    ],
                    id="ai1",
                )
            },
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # modify ai message
    last_message = app_w_interrupt.get_state(config).values[-1]
    last_message.tool_calls[0]["args"] = {"query": "a different query"}
    next_config = app_w_interrupt.update_state(config, last_message)

    # message was replaced instead of appended
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=next_config,
        created_at=AnyStr(),
        metadata={
            "parents": {},
            "source": "update",
            "step": 2,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "a different query"},
                        }
                    ],
                    id="ai1",
                )
            },
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # Resume (input=None): tools run with the edited query, agent interrupts again.
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for a different query",
                    name="search_api",
                    tool_call_id="tool_call123",
                )
            ]
        },
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            )
        },
        {"__interrupt__": ()},
    ]

    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for a different query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 4,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call456",
                            "name": "search_api",
                            "args": {"query": "another"},
                        }
                    ],
                    id="ai2",
                )
            },
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    app_w_interrupt.update_state(
        config,
        AIMessage(content="answer", id="ai2"),  # replace existing message
    )

    # replaces message even if object identity is different, as long as id is the same
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for a different query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            AIMessage(content="answer", id="ai2"),
        ],
        tasks=(),
        next=(),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 5,
            "writes": {"agent": AIMessage(content="answer", id="ai2")},
            "thread_id": "1",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # Recompile interrupting BEFORE the tools node, on a fresh thread "2".
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_before=["tools"],
    )
    config = {"configurable": {"thread_id": "2"}}
    model.i = 0  # reset the llm

    assert [c for c in app_w_interrupt.stream("what is weather in sf", config)] == [
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            )
        },
        {"__interrupt__": ()},
    ]

    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "query"},
                        }
                    ],
                    id="ai1",
                )
            },
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # modify ai message
    last_message = app_w_interrupt.get_state(config).values[-1]
    last_message.tool_calls[0]["args"] = {"query": "a different query"}
    app_w_interrupt.update_state(config, last_message)

    # message was replaced instead of appended
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 2,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "a different query"},
                        }
                    ],
                    id="ai1",
                )
            },
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # Resume: tools execute with the edited query, then interrupt before tools again.
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for a different query",
                    name="search_api",
                    tool_call_id="tool_call123",
                )
            ]
        },
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            )
        },
        {"__interrupt__": ()},
    ]

    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for a different query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            ),
        ],
        tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
        next=("tools",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 4,
            "writes": {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call456",
                            "name": "search_api",
                            "args": {"query": "another"},
                        }
                    ],
                    id="ai2",
                )
            },
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    app_w_interrupt.update_state(
        config,
        AIMessage(content="answer", id="ai2"),
    )

    # replaces message even if object identity is different, as long as id is the same
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for a different query",
                name="search_api",
                tool_call_id="tool_call123",
                id=AnyStr(),
            ),
            AIMessage(content="answer", id="ai2"),
        ],
        tasks=(),
        next=(),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 5,
            "writes": {"agent": AIMessage(content="answer", id="ai2")},
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )

    # add an extra message as if it came from "tools" node
    app_w_interrupt.update_state(config, ("ai", "an extra message"), as_node="tools")

    # extra message is coerced to a BaseMessage and appended
    # now the next node is "agent" per the graph edges
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values=[
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                content="",
                id="ai1",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "a different query"},
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for a different query",
                name="search_api",
                tool_call_id="tool_call123",
                id=AnyStr(),
            ),
            AIMessage(content="answer", id="ai2"),
            _AnyIdAIMessage(content="an extra message"),
        ],
        tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
        next=("agent",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 6,
            "writes": {"tools": UnsortedSequence("ai", "an extra message")},
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_root_graph(
deterministic_uuids: MockerFixture,
request: pytest.FixtureRequest,
checkpointer_name: str,
) -> None:
from copy import deepcopy
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.fake_chat_models import (
FakeMessagesListChatModel,
)
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.tools import tool
checkpointer: BaseCheckpointSaver = request.getfixturevalue(
f"checkpointer_{checkpointer_name}"
)
class FakeFuntionChatModel(FakeMessagesListChatModel):
def bind_functions(self, functions: list):
return self
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
response = deepcopy(self.responses[self.i])
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
generation = ChatGeneration(message=response)
return ChatResult(generations=[generation])
@tool()
def search_api(query: str) -> str:
"""Searches the API for the query."""
return f"result for {query}"
tools = [search_api]
model = FakeFuntionChatModel(
responses=[
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
),
AIMessage(content="answer", id="ai3"),
]
)
# Define the function that determines whether to continue or not
def should_continue(messages):
last_message = messages[-1]
# If there is no function call, then we finish
if not last_message.tool_calls:
return "end"
# Otherwise if there is, we continue
else:
return "continue"
class State(TypedDict):
__root__: Annotated[list[BaseMessage], add_messages]
# Define a new graph
workflow = StateGraph(State)
# Define the two nodes we will cycle between
workflow.add_node("agent", model)
workflow.add_node("tools", ToolNode(tools))
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
# First, we define the start node. We use `agent`.
# This means these are the edges taken after the `agent` node is called.
"agent",
# Next, we pass in the function that will determine which node is called next.
should_continue,
# Finally we pass in a mapping.
# The keys are strings, and the values are other nodes.
# END is a special node marking that the graph should finish.
# What will happen is we will call `should_continue`, and then the output of that
# will be matched against the keys in this mapping.
# Based on which one it matches, that node will then be called.
{
# If `tools`, then we call the tool node.
"continue": "tools",
# Otherwise we finish.
"end": END,
},
)
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("tools", "agent")
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
assert app.invoke(HumanMessage(content="what is weather in sf")) == [
_AnyIdHumanMessage(
content="what is weather in sf",
),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1", # respects ids passed in
),
_AnyIdToolMessage(
content="result for query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
),
_AnyIdToolMessage(
content="result for another",
name="search_api",
tool_call_id="tool_call456",
),
AIMessage(content="answer", id="ai3"),
]
assert [*app.stream([HumanMessage(content="what is weather in sf")])] == [
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
)
},
{
"tools": [
ToolMessage(
content="result for query",
name="search_api",
tool_call_id="tool_call123",
id="00000000-0000-4000-8000-000000000033",
)
]
},
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
)
},
{
"tools": [
ToolMessage(
content="result for another",
name="search_api",
tool_call_id="tool_call456",
id="00000000-0000-4000-8000-000000000041",
)
]
},
{"agent": AIMessage(content="answer", id="ai3")},
]
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_after=["agent"],
)
config = {"configurable": {"thread_id": "1"}}
assert [
c for c in app_w_interrupt.stream(("human", "what is weather in sf"), config)
] == [
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
)
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
)
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# modify ai message
last_message = app_w_interrupt.get_state(config).values[-1]
last_message.tool_calls[0]["args"] = {"query": "a different query"}
next_config = app_w_interrupt.update_state(config, last_message)
# message was replaced instead of appended
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=next_config,
created_at=AnyStr(),
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
id="ai1",
)
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": [
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
)
]
},
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
)
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
id=AnyStr(),
),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 4,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
)
},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt.update_state(
config,
AIMessage(content="answer", id="ai2"), # replace existing message
)
# replaces message even if object identity is different, as long as id is the same
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
id=AnyStr(),
),
AIMessage(content="answer", id="ai2"),
],
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {"agent": AIMessage(content="answer", id="ai2")},
"thread_id": "1",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_before=["tools"],
)
config = {"configurable": {"thread_id": "2"}}
model.i = 0 # reset the llm
assert [c for c in app_w_interrupt.stream("what is weather in sf", config)] == [
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
)
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "query"},
}
],
id="ai1",
)
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# modify ai message
last_message = app_w_interrupt.get_state(config).values[-1]
last_message.tool_calls[0]["args"] = {"query": "a different query"}
app_w_interrupt.update_state(config, last_message)
# message was replaced instead of appended
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
id="ai1",
)
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
assert [c for c in app_w_interrupt.stream(None, config)] == [
{
"tools": [
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
)
]
},
{
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
)
},
{"__interrupt__": ()},
]
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
id=AnyStr(),
),
AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
),
],
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 4,
"writes": {
"agent": AIMessage(
content="",
tool_calls=[
{
"id": "tool_call456",
"name": "search_api",
"args": {"query": "another"},
}
],
id="ai2",
)
},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
app_w_interrupt.update_state(
config,
AIMessage(content="answer", id="ai2"),
)
# replaces message even if object identity is different, as long as id is the same
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(content="answer", id="ai2"),
],
tasks=(),
next=(),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {"agent": AIMessage(content="answer", id="ai2")},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# add an extra message as if it came from "tools" node
app_w_interrupt.update_state(config, ("ai", "an extra message"), as_node="tools")
    # extra message is coerced to a BaseMessage and appended
# now the next node is "agent" per the graph edges
assert app_w_interrupt.get_state(config) == StateSnapshot(
values=[
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
id=AnyStr(),
),
AIMessage(content="answer", id="ai2"),
_AnyIdAIMessage(content="an extra message"),
],
tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
next=("agent",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 6,
"writes": {"tools": UnsortedSequence("ai", "an extra message")},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# create new graph with one more state key, reuse previous thread history
def simple_add(left, right):
if not isinstance(right, list):
right = [right]
return left + right
class MoreState(TypedDict):
__root__: Annotated[list[BaseMessage], simple_add]
something_else: str
# Define a new graph
new_workflow = StateGraph(MoreState)
new_workflow.add_node(
"agent", RunnableMap(__root__=RunnablePick("__root__") | model)
)
new_workflow.add_node(
"tools", RunnableMap(__root__=RunnablePick("__root__") | ToolNode(tools))
)
new_workflow.set_entry_point("agent")
new_workflow.add_conditional_edges(
"agent",
RunnablePick("__root__") | should_continue,
{
# If `tools`, then we call the tool node.
"continue": "tools",
# Otherwise we finish.
"end": END,
},
)
new_workflow.add_edge("tools", "agent")
new_app = new_workflow.compile(checkpointer=checkpointer)
model.i = 0 # reset the llm
# previous state is converted to new schema
assert new_app.get_state(config) == StateSnapshot(
values={
"__root__": [
_AnyIdHumanMessage(content="what is weather in sf"),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"id": "tool_call123",
"name": "search_api",
"args": {"query": "a different query"},
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(content="answer", id="ai2"),
_AnyIdAIMessage(content="an extra message"),
]
},
tasks=(PregelTask(AnyStr(), "agent", (PULL, "agent")),),
next=("agent",),
config=app_w_interrupt.checkpointer.get_tuple(config).config,
created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 6,
"writes": {"tools": UnsortedSequence("ai", "an extra message")},
"thread_id": "2",
},
parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
)
# new input is merged to old state
assert new_app.invoke(
{
"__root__": [HumanMessage(content="what is weather in la")],
"something_else": "value",
},
config,
interrupt_before=["agent"],
) == {
"__root__": [
HumanMessage(
content="what is weather in sf",
id="00000000-0000-4000-8000-000000000070",
),
AIMessage(
content="",
id="ai1",
tool_calls=[
{
"name": "search_api",
"args": {"query": "a different query"},
"id": "tool_call123",
}
],
),
_AnyIdToolMessage(
content="result for a different query",
name="search_api",
tool_call_id="tool_call123",
),
AIMessage(content="answer", id="ai2"),
AIMessage(
content="an extra message", id="00000000-0000-4000-8000-000000000092"
),
HumanMessage(content="what is weather in la"),
],
"something_else": "value",
}
def test_in_one_fan_out_out_one_graph_state() -> None:
    """Fan-out then fan-in over a single shared state.

    Graph shape: rewrite_query -> (retriever_one, retriever_two) -> qa.
    The ``docs`` channel uses a custom reducer (``sorted_add``) so the two
    retrievers' concurrent writes merge deterministically regardless of
    which node finishes first. Verifies ``invoke`` output and ``stream``
    output in "updates", "values", and combined ["values", "updates",
    "debug"] modes, including the exact ordering of debug task /
    task_result events per superstep.
    """
    def sorted_add(x: list[str], y: list[str]) -> list[str]:
        # reducer for the `docs` channel: concatenation kept sorted so the
        # merged result is independent of node completion order
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def retriever_one(data: State) -> State:
        # timer ensures stream output order is stable
        # also, it confirms that the update order is not dependent on finishing order
        # instead being defined by the order of the nodes/edges in the graph definition
        # ie. stable between invocations
        time.sleep(0.1)
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    # wire the diamond: both retrievers triggered by rewrite_query, both
    # feeding qa, which is the finish point
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge("retriever_one", "qa")
    workflow.add_edge("retriever_two", "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    # final state: docs merged + sorted across both retrievers
    assert app.invoke({"query": "what is weather in sf"}) == {
        "query": "query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    # stream_mode="updates" (default): one dict per node result; retriever_two
    # appears before retriever_one because retriever_one sleeps
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    # stream_mode="values": full state after each superstep; the parallel
    # retriever step yields a single combined snapshot
    assert [*app.stream({"query": "what is weather in sf"}, stream_mode="values")] == [
        {"query": "what is weather in sf", "docs": []},
        {"query": "query: what is weather in sf", "docs": []},
        {
            "query": "query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4"],
        },
        {
            "query": "query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4"],
        },
        {
            "query": "query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4"],
            "answer": "doc1,doc2,doc3,doc4",
        },
    ]
    # combined stream modes: each chunk is a (mode, payload) tuple; debug
    # "task" events precede node execution, "task_result" events follow each
    # "updates" chunk, and "values" snapshots close out each superstep
    assert [
        *app.stream(
            {"query": "what is weather in sf"},
            stream_mode=["values", "updates", "debug"],
        )
    ] == [
        ("values", {"query": "what is weather in sf", "docs": []}),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 1,
                "payload": {
                    "id": AnyStr(),
                    "name": "rewrite_query",
                    "input": {"query": "what is weather in sf", "docs": []},
                    "triggers": ["start:rewrite_query"],
                },
            },
        ),
        ("updates", {"rewrite_query": {"query": "query: what is weather in sf"}}),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 1,
                "payload": {
                    "id": AnyStr(),
                    "name": "rewrite_query",
                    "result": [("query", "query: what is weather in sf")],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        ("values", {"query": "query: what is weather in sf", "docs": []}),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_one",
                    "input": {"query": "query: what is weather in sf", "docs": []},
                    "triggers": ["rewrite_query"],
                },
            },
        ),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_two",
                    "input": {"query": "query: what is weather in sf", "docs": []},
                    "triggers": ["rewrite_query"],
                },
            },
        ),
        (
            "updates",
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
        ),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_two",
                    "result": [("docs", ["doc3", "doc4"])],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "updates",
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
        ),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_one",
                    "result": [("docs", ["doc1", "doc2"])],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "values",
            {
                "query": "query: what is weather in sf",
                "docs": ["doc1", "doc2", "doc3", "doc4"],
            },
        ),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 3,
                "payload": {
                    "id": AnyStr(),
                    "name": "qa",
                    "input": {
                        "query": "query: what is weather in sf",
                        "docs": ["doc1", "doc2", "doc3", "doc4"],
                    },
                    "triggers": ["retriever_one", "retriever_two"],
                },
            },
        ),
        ("updates", {"qa": {"answer": "doc1,doc2,doc3,doc4"}}),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 3,
                "payload": {
                    "id": AnyStr(),
                    "name": "qa",
                    "result": [("answer", "doc1,doc2,doc3,doc4")],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "values",
            {
                "query": "query: what is weather in sf",
                "answer": "doc1,doc2,doc3,doc4",
                "docs": ["doc1", "doc2", "doc3", "doc4"],
            },
        ),
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_dynamic_interrupt(
request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
class State(TypedDict):
my_key: Annotated[str, operator.add]
market: str
tool_two_node_count = 0
def tool_two_node(s: State) -> State:
nonlocal tool_two_node_count
tool_two_node_count += 1
if s["market"] == "DE":
answer = interrupt("Just because...")
else:
answer = " all good"
return {"my_key": answer}
tool_two_graph = StateGraph(State)
tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy())
tool_two_graph.add_edge(START, "tool_two")
tool_two = tool_two_graph.compile()
tracer = FakeTracer()
assert tool_two.invoke(
{"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
) == {
"my_key": "value",
"market": "DE",
}
assert tool_two_node_count == 1, "interrupts aren't retried"
assert len(tracer.runs) == 1
run = tracer.runs[0]
assert run.end_time is not None
assert run.error is None
assert run.outputs == {"market": "DE", "my_key": "value"}
assert tool_two.invoke({"my_key": "value", "market": "US"}) == {
"my_key": "value all good",
"market": "US",
}
tool_two = tool_two_graph.compile(checkpointer=checkpointer)
# missing thread_id
with pytest.raises(ValueError, match="thread_id"):
tool_two.invoke({"my_key": "value", "market": "DE"})
# flow: interrupt -> resume with answer
thread2 = {"configurable": {"thread_id": "2"}}
# stop when about to enter node
assert [
c for c in tool_two.stream({"my_key": "value ⛰️", "market": "DE"}, thread2)
] == [
{
"__interrupt__": (
Interrupt(
value="Just because...",
resumable=True,
ns=[AnyStr("tool_two:")],
),
)
},
]
# resume with answer
assert [c for c in tool_two.stream(Command(resume=" my answer"), thread2)] == [
{"tool_two": {"my_key": " my answer"}},
]
# flow: interrupt -> clear tasks
thread1 = {"configurable": {"thread_id": "1"}}
# stop when about to enter node
assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == {
"my_key": "value ⛰️",
"market": "DE",
}
assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [
{
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "1",
},
{
"parents": {},
"source": "input",
"step": -1,
"writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
"thread_id": "1",
},
]
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️", "market": "DE"},
next=("tool_two",),
tasks=(
PregelTask(
AnyStr(),
"tool_two",
(PULL, "tool_two"),
interrupts=(
Interrupt(
value="Just because...",
resumable=True,
ns=[AnyStr("tool_two:")],
),
),
),
),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "1",
},
parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
)
# clear the interrupt and next tasks
tool_two.update_state(thread1, None, as_node=END)
# interrupt and next tasks are cleared
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️", "market": "DE"},
next=(),
tasks=(),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 1,
"writes": {},
"thread_id": "1",
},
parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
)
@pytest.mark.skipif(not FF_SEND_V2, reason="send v2 is not enabled")
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_copy_checkpoint(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Dynamic interrupt alongside a ``Send`` fan-out (requires send v2).

    A conditional entry point routes to both "tool_two" (by name) and
    "tool_one" (via ``Send``). tool_one completes while tool_two interrupts,
    so its write lands in the same step's checkpoint. Also verifies that
    ``update_state(thread, None)`` with no as_node clears the interrupt but
    KEEPS the pending next task (contrast with test_dynamic_interrupt where
    ``as_node=END`` clears both).
    """
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    def tool_one(s: State) -> State:
        return {"my_key": " one"}
    tool_two_node_count = 0
    def tool_two_node(s: State) -> State:
        # counts invocations to prove the interrupt does not trigger a retry
        nonlocal tool_two_node_count
        tool_two_node_count += 1
        if s["market"] == "DE":
            answer = interrupt("Just because...")
        else:
            answer = " all good"
        return {"my_key": answer}
    def start(state: State) -> list[Union[Send, str]]:
        # fan out: route to tool_two by name AND to tool_one via Send
        return ["tool_two", Send("tool_one", state)]
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy())
    tool_two_graph.add_node("tool_one", tool_one)
    tool_two_graph.set_conditional_entry_point(start)
    tool_two = tool_two_graph.compile()
    tracer = FakeTracer()
    # no checkpointer: tool_one's write survives, tool_two's interrupt discards its run
    assert tool_two.invoke(
        {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
    ) == {
        "my_key": "value one",
        "market": "DE",
    }
    assert tool_two_node_count == 1, "interrupts aren't retried"
    assert len(tracer.runs) == 1
    run = tracer.runs[0]
    assert run.end_time is not None
    assert run.error is None
    assert run.outputs == {"market": "DE", "my_key": "value one"}
    # non-DE market: both nodes contribute to my_key
    assert tool_two.invoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value one all good",
        "market": "US",
    }
    tool_two = tool_two_graph.compile(checkpointer=checkpointer)
    # missing thread_id
    with pytest.raises(ValueError, match="thread_id"):
        tool_two.invoke({"my_key": "value", "market": "DE"})
    # flow: interrupt -> resume with answer
    thread2 = {"configurable": {"thread_id": "2"}}
    # stop when about to enter node
    assert [
        c for c in tool_two.stream({"my_key": "value ⛰️", "market": "DE"}, thread2)
    ] == [
        {
            "tool_one": {"my_key": " one"},
        },
        {
            "__interrupt__": (
                Interrupt(
                    value="Just because...",
                    resumable=True,
                    ns=[AnyStr("tool_two:")],
                ),
            )
        },
    ]
    # resume with answer
    assert [c for c in tool_two.stream(Command(resume=" my answer"), thread2)] == [
        {"tool_two": {"my_key": " my answer"}},
    ]
    # flow: interrupt -> clear tasks
    thread1 = {"configurable": {"thread_id": "1"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == {
        "my_key": "value ⛰️ one",
        "market": "DE",
    }
    # step-0 checkpoint records tool_one's completed write
    assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [
        {
            "parents": {},
            "source": "loop",
            "step": 0,
            "writes": {"tool_one": {"my_key": " one"}},
            "thread_id": "1",
        },
        {
            "parents": {},
            "source": "input",
            "step": -1,
            "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
            "thread_id": "1",
        },
    ]
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value ⛰️ one", "market": "DE"},
        next=("tool_two",),
        tasks=(
            PregelTask(
                AnyStr(),
                "tool_two",
                (PULL, "tool_two"),
                interrupts=(
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:")],
                    ),
                ),
            ),
        ),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 0,
            "writes": {"tool_one": {"my_key": " one"}},
            "thread_id": "1",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    # clear the interrupt and next tasks
    tool_two.update_state(thread1, None)
    # interrupt is cleared, next task is kept
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value ⛰️ one", "market": "DE"},
        next=("tool_two",),
        tasks=(
            PregelTask(
                AnyStr(),
                "tool_two",
                (PULL, "tool_two"),
                interrupts=(),
            ),
        ),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 1,
            "writes": {},
            "thread_id": "1",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_dynamic_interrupt_subgraph(
request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
class SubgraphState(TypedDict):
my_key: str
market: str
tool_two_node_count = 0
def tool_two_node(s: SubgraphState) -> SubgraphState:
nonlocal tool_two_node_count
tool_two_node_count += 1
if s["market"] == "DE":
answer = interrupt("Just because...")
else:
answer = " all good"
return {"my_key": answer}
subgraph = StateGraph(SubgraphState)
subgraph.add_node("do", tool_two_node, retry=RetryPolicy())
subgraph.add_edge(START, "do")
class State(TypedDict):
my_key: Annotated[str, operator.add]
market: str
tool_two_graph = StateGraph(State)
tool_two_graph.add_node("tool_two", subgraph.compile())
tool_two_graph.add_edge(START, "tool_two")
tool_two = tool_two_graph.compile()
tracer = FakeTracer()
assert tool_two.invoke(
{"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
) == {
"my_key": "value",
"market": "DE",
}
assert tool_two_node_count == 1, "interrupts aren't retried"
assert len(tracer.runs) == 1
run = tracer.runs[0]
assert run.end_time is not None
assert run.error is None
assert run.outputs == {"market": "DE", "my_key": "value"}
assert tool_two.invoke({"my_key": "value", "market": "US"}) == {
"my_key": "value all good",
"market": "US",
}
tool_two = tool_two_graph.compile(checkpointer=checkpointer)
# missing thread_id
with pytest.raises(ValueError, match="thread_id"):
tool_two.invoke({"my_key": "value", "market": "DE"})
# flow: interrupt -> resume with answer
thread2 = {"configurable": {"thread_id": "2"}}
# stop when about to enter node
assert [
c for c in tool_two.stream({"my_key": "value ⛰️", "market": "DE"}, thread2)
] == [
{
"__interrupt__": (
Interrupt(
value="Just because...",
resumable=True,
ns=[AnyStr("tool_two:"), AnyStr("do:")],
),
)
},
]
# resume with answer
assert [c for c in tool_two.stream(Command(resume=" my answer"), thread2)] == [
{"tool_two": {"my_key": " my answer", "market": "DE"}},
]
# flow: interrupt -> clear tasks
thread1 = {"configurable": {"thread_id": "1"}}
# stop when about to enter node
assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == {
"my_key": "value ⛰️",
"market": "DE",
}
assert [
c.metadata
for c in tool_two.checkpointer.list(
{"configurable": {"thread_id": "1", "checkpoint_ns": ""}}
)
] == [
{
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "1",
},
{
"parents": {},
"source": "input",
"step": -1,
"writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
"thread_id": "1",
},
]
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️", "market": "DE"},
next=("tool_two",),
tasks=(
PregelTask(
AnyStr(),
"tool_two",
(PULL, "tool_two"),
interrupts=(
Interrupt(
value="Just because...",
resumable=True,
ns=[AnyStr("tool_two:"), AnyStr("do:")],
),
),
state={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("tool_two:"),
}
},
),
),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "1",
},
parent_config=[
*tool_two.checkpointer.list(
{"configurable": {"thread_id": "1", "checkpoint_ns": ""}}, limit=2
)
][-1].config,
)
# clear the interrupt and next tasks
tool_two.update_state(thread1, None, as_node=END)
# interrupt and next tasks are cleared
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️", "market": "DE"},
next=(),
tasks=(),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 1,
"writes": {},
"thread_id": "1",
},
parent_config=[
*tool_two.checkpointer.list(
{"configurable": {"thread_id": "1", "checkpoint_ns": ""}}, limit=2
)
][-1].config,
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_start_branch_then(
snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
class State(TypedDict):
my_key: Annotated[str, operator.add]
market: str
shared: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")]
def assert_shared_value(data: State, config: RunnableConfig) -> State:
assert "shared" in data
if thread_id := config["configurable"].get("thread_id"):
if thread_id == "1":
# this is the first thread, so should not see a value
assert data["shared"] == {}
return {"shared": {"1": {"hello": "world"}}}
elif thread_id == "2":
# this should get value saved by thread 1
assert data["shared"] == {"1": {"hello": "world"}}
elif thread_id == "3":
# this is a different assistant, so should not see previous value
assert data["shared"] == {}
return {}
def tool_two_slow(data: State, config: RunnableConfig) -> State:
return {"my_key": " slow", **assert_shared_value(data, config)}
def tool_two_fast(data: State, config: RunnableConfig) -> State:
return {"my_key": " fast", **assert_shared_value(data, config)}
tool_two_graph = StateGraph(State)
tool_two_graph.add_node("tool_two_slow", tool_two_slow)
tool_two_graph.add_node("tool_two_fast", tool_two_fast)
tool_two_graph.set_conditional_entry_point(
lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", then=END
)
tool_two = tool_two_graph.compile()
assert tool_two.get_graph().draw_mermaid() == snapshot
assert tool_two.invoke({"my_key": "value", "market": "DE"}) == {
"my_key": "value slow",
"market": "DE",
}
assert tool_two.invoke({"my_key": "value", "market": "US"}) == {
"my_key": "value fast",
"market": "US",
}
tool_two = tool_two_graph.compile(
store=InMemoryStore(),
checkpointer=checkpointer,
interrupt_before=["tool_two_fast", "tool_two_slow"],
)
# missing thread_id
with pytest.raises(ValueError, match="thread_id"):
tool_two.invoke({"my_key": "value", "market": "DE"})
thread1 = {"configurable": {"thread_id": "1", "assistant_id": "a"}}
# stop when about to enter node
assert tool_two.invoke({"my_key": "value ⛰️", "market": "DE"}, thread1) == {
"my_key": "value ⛰️",
"market": "DE",
}
assert [c.metadata for c in tool_two.checkpointer.list(thread1)] == [
{
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"assistant_id": "a",
"thread_id": "1",
},
{
"parents": {},
"source": "input",
"step": -1,
"writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
"assistant_id": "a",
"thread_id": "1",
},
]
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️", "market": "DE"},
tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
next=("tool_two_slow",),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"assistant_id": "a",
"thread_id": "1",
},
parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
)
# resume, for same result as above
assert tool_two.invoke(None, thread1, debug=1) == {
"my_key": "value ⛰️ slow",
"market": "DE",
}
assert tool_two.get_state(thread1) == StateSnapshot(
values={"my_key": "value ⛰️ slow", "market": "DE"},
tasks=(),
next=(),
config=tool_two.checkpointer.get_tuple(thread1).config,
created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"tool_two_slow": {"my_key": " slow"}},
"assistant_id": "a",
"thread_id": "1",
},
parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
)
thread2 = {"configurable": {"thread_id": "2", "assistant_id": "a"}}
# stop when about to enter node
assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == {
"my_key": "value",
"market": "US",
}
assert tool_two.get_state(thread2) == StateSnapshot(
values={"my_key": "value", "market": "US"},
tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
next=("tool_two_fast",),
config=tool_two.checkpointer.get_tuple(thread2).config,
created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"assistant_id": "a",
"thread_id": "2",
},
parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
)
# resume, for same result as above
assert tool_two.invoke(None, thread2, debug=1) == {
"my_key": "value fast",
"market": "US",
}
assert tool_two.get_state(thread2) == StateSnapshot(
values={"my_key": "value fast", "market": "US"},
tasks=(),
next=(),
config=tool_two.checkpointer.get_tuple(thread2).config,
created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"tool_two_fast": {"my_key": " fast"}},
"assistant_id": "a",
"thread_id": "2",
},
parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
)
thread3 = {"configurable": {"thread_id": "3", "assistant_id": "b"}}
# stop when about to enter node
assert tool_two.invoke({"my_key": "value", "market": "US"}, thread3) == {
"my_key": "value",
"market": "US",
}
assert tool_two.get_state(thread3) == StateSnapshot(
values={"my_key": "value", "market": "US"},
tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
next=("tool_two_fast",),
config=tool_two.checkpointer.get_tuple(thread3).config,
created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"assistant_id": "b",
"thread_id": "3",
},
parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config,
)
# update state
tool_two.update_state(thread3, {"my_key": "key"}) # appends to my_key
assert tool_two.get_state(thread3) == StateSnapshot(
values={"my_key": "valuekey", "market": "US"},
tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
next=("tool_two_fast",),
config=tool_two.checkpointer.get_tuple(thread3).config,
created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 1,
"writes": {START: {"my_key": "key"}},
"assistant_id": "b",
"thread_id": "3",
},
parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config,
)
# resume, for same result as above
assert tool_two.invoke(None, thread3, debug=1) == {
"my_key": "valuekey fast",
"market": "US",
}
assert tool_two.get_state(thread3) == StateSnapshot(
values={"my_key": "valuekey fast", "market": "US"},
tasks=(),
next=(),
config=tool_two.checkpointer.get_tuple(thread3).config,
created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 2,
"writes": {"tool_two_fast": {"my_key": " fast"}},
"assistant_id": "b",
"thread_id": "3",
},
parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config,
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_branch_then(
    snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Conditional branch with a shared ``then`` target.

    Verifies: graph rendering snapshots, plain invoke for both branch outcomes,
    the full stream_mode="debug" event sequence, interrupt_before /
    interrupt_after behavior, and update_state on interrupted and empty threads.
    """
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    # my_key accumulates via operator.add, i.e. string concatenation of node writes
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    tool_two_graph = StateGraph(State)
    tool_two_graph.set_entry_point("prepare")
    tool_two_graph.set_finish_point("finish")
    # market decides the branch; both branches converge on "finish" via then=
    tool_two_graph.add_conditional_edges(
        source="prepare",
        path=lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast",
        then="finish",
    )
    tool_two_graph.add_node("prepare", lambda s: {"my_key": " prepared"})
    tool_two_graph.add_node("tool_two_slow", lambda s: {"my_key": " slow"})
    tool_two_graph.add_node("tool_two_fast", lambda s: {"my_key": " fast"})
    tool_two_graph.add_node("finish", lambda s: {"my_key": " finished"})
    tool_two = tool_two_graph.compile()
    assert tool_two.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert tool_two.get_graph().draw_mermaid() == snapshot
    # DE market takes the slow branch, US the fast branch
    assert tool_two.invoke({"my_key": "value", "market": "DE"}, debug=1) == {
        "my_key": "value prepared slow finished",
        "market": "DE",
    }
    assert tool_two.invoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value prepared fast finished",
        "market": "US",
    }
    # test stream_mode=debug
    tool_two = tool_two_graph.compile(checkpointer=checkpointer)
    thread10 = {"configurable": {"thread_id": "10"}}
    res = [
        *tool_two.stream(
            {"my_key": "value", "market": "DE"}, thread10, stream_mode="debug"
        )
    ]
    # debug stream yields checkpoint / task / task_result events per step,
    # starting with the step=-1 input checkpoint
    assert res == [
        {
            "type": "checkpoint",
            "timestamp": AnyStr(),
            "step": -1,
            "payload": {
                "config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "values": {"my_key": ""},
                "metadata": {
                    "parents": {},
                    "source": "input",
                    "step": -1,
                    "writes": {"__start__": {"my_key": "value", "market": "DE"}},
                    "thread_id": "10",
                },
                "parent_config": None,
                "next": ["__start__"],
                "tasks": [
                    {
                        "id": AnyStr(),
                        "name": "__start__",
                        "interrupts": (),
                        "state": None,
                    }
                ],
            },
        },
        {
            "type": "checkpoint",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "values": {
                    "my_key": "value",
                    "market": "DE",
                },
                "metadata": {
                    "parents": {},
                    "source": "loop",
                    "step": 0,
                    "writes": None,
                    "thread_id": "10",
                },
                "parent_config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "next": ["prepare"],
                "tasks": [
                    {"id": AnyStr(), "name": "prepare", "interrupts": (), "state": None}
                ],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "prepare",
                "input": {"my_key": "value", "market": "DE"},
                "triggers": ["start:prepare"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "prepare",
                "result": [("my_key", " prepared")],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "checkpoint",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "values": {
                    "my_key": "value prepared",
                    "market": "DE",
                },
                "metadata": {
                    "parents": {},
                    "source": "loop",
                    "step": 1,
                    "writes": {"prepare": {"my_key": " prepared"}},
                    "thread_id": "10",
                },
                "parent_config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "next": ["tool_two_slow"],
                "tasks": [
                    {
                        "id": AnyStr(),
                        "name": "tool_two_slow",
                        "interrupts": (),
                        "state": None,
                    }
                ],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 2,
            "payload": {
                "id": AnyStr(),
                "name": "tool_two_slow",
                "input": {"my_key": "value prepared", "market": "DE"},
                "triggers": ["branch:prepare:condition:tool_two_slow"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 2,
            "payload": {
                "id": AnyStr(),
                "name": "tool_two_slow",
                "result": [("my_key", " slow")],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "checkpoint",
            "timestamp": AnyStr(),
            "step": 2,
            "payload": {
                "config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "values": {
                    "my_key": "value prepared slow",
                    "market": "DE",
                },
                "metadata": {
                    "parents": {},
                    "source": "loop",
                    "step": 2,
                    "writes": {"tool_two_slow": {"my_key": " slow"}},
                    "thread_id": "10",
                },
                "parent_config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "next": ["finish"],
                "tasks": [
                    {"id": AnyStr(), "name": "finish", "interrupts": (), "state": None}
                ],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 3,
            "payload": {
                "id": AnyStr(),
                "name": "finish",
                "input": {"my_key": "value prepared slow", "market": "DE"},
                "triggers": ["branch:prepare:condition::then"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 3,
            "payload": {
                "id": AnyStr(),
                "name": "finish",
                "result": [("my_key", " finished")],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "checkpoint",
            "timestamp": AnyStr(),
            "step": 3,
            "payload": {
                "config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "values": {
                    "my_key": "value prepared slow finished",
                    "market": "DE",
                },
                "metadata": {
                    "parents": {},
                    "source": "loop",
                    "step": 3,
                    "writes": {"finish": {"my_key": " finished"}},
                    "thread_id": "10",
                },
                "parent_config": {
                    "tags": [],
                    "metadata": {"thread_id": "10"},
                    "callbacks": None,
                    "recursion_limit": 25,
                    "configurable": {
                        "thread_id": "10",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    },
                },
                "next": [],
                "tasks": [],
            },
        },
    ]
    # interrupt before either branch node: run stops right after "prepare"
    tool_two = tool_two_graph.compile(
        checkpointer=checkpointer, interrupt_before=["tool_two_fast", "tool_two_slow"]
    )
    # missing thread_id
    with pytest.raises(ValueError, match="thread_id"):
        tool_two.invoke({"my_key": "value", "market": "DE"})
    thread1 = {"configurable": {"thread_id": "1"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == {
        "my_key": "value prepared",
        "market": "DE",
    }
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value prepared", "market": "DE"},
        tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
        next=("tool_two_slow",),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {"prepare": {"my_key": " prepared"}},
            "thread_id": "1",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    # resume, for same result as above
    assert tool_two.invoke(None, thread1, debug=1) == {
        "my_key": "value prepared slow finished",
        "market": "DE",
    }
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value prepared slow finished", "market": "DE"},
        tasks=(),
        next=(),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 3,
            "writes": {"finish": {"my_key": " finished"}},
            "thread_id": "1",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    thread2 = {"configurable": {"thread_id": "2"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == {
        "my_key": "value prepared",
        "market": "US",
    }
    assert tool_two.get_state(thread2) == StateSnapshot(
        values={"my_key": "value prepared", "market": "US"},
        tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
        next=("tool_two_fast",),
        config=tool_two.checkpointer.get_tuple(thread2).config,
        created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {"prepare": {"my_key": " prepared"}},
            "thread_id": "2",
        },
        parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
    )
    # resume, for same result as above
    assert tool_two.invoke(None, thread2, debug=1) == {
        "my_key": "value prepared fast finished",
        "market": "US",
    }
    assert tool_two.get_state(thread2) == StateSnapshot(
        values={"my_key": "value prepared fast finished", "market": "US"},
        tasks=(),
        next=(),
        config=tool_two.checkpointer.get_tuple(thread2).config,
        created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 3,
            "writes": {"finish": {"my_key": " finished"}},
            "thread_id": "2",
        },
        parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
    )
    # interrupt before "finish": run stops after the branch node completes
    tool_two = tool_two_graph.compile(
        checkpointer=checkpointer, interrupt_before=["finish"]
    )
    thread1 = {"configurable": {"thread_id": "11"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == {
        "my_key": "value prepared slow",
        "market": "DE",
    }
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={
            "my_key": "value prepared slow",
            "market": "DE",
        },
        tasks=(PregelTask(AnyStr(), "finish", (PULL, "finish")),),
        next=("finish",),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 2,
            "writes": {"tool_two_slow": {"my_key": " slow"}},
            "thread_id": "11",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    # update state
    tool_two.update_state(thread1, {"my_key": "er"})
    # update is attributed to the last node that ran (tool_two_slow) and
    # appended to my_key via the operator.add reducer
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={
            "my_key": "value prepared slower",
            "market": "DE",
        },
        tasks=(PregelTask(AnyStr(), "finish", (PULL, "finish")),),
        next=("finish",),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 3,
            "writes": {"tool_two_slow": {"my_key": "er"}},
            "thread_id": "11",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    # interrupt after "prepare": equivalent stopping point, different trigger
    tool_two = tool_two_graph.compile(
        checkpointer=checkpointer, interrupt_after=["prepare"]
    )
    # missing thread_id
    with pytest.raises(ValueError, match="thread_id"):
        tool_two.invoke({"my_key": "value", "market": "DE"})
    thread1 = {"configurable": {"thread_id": "21"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value", "market": "DE"}, thread1) == {
        "my_key": "value prepared",
        "market": "DE",
    }
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value prepared", "market": "DE"},
        tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
        next=("tool_two_slow",),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {"prepare": {"my_key": " prepared"}},
            "thread_id": "21",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    # resume, for same result as above
    assert tool_two.invoke(None, thread1, debug=1) == {
        "my_key": "value prepared slow finished",
        "market": "DE",
    }
    assert tool_two.get_state(thread1) == StateSnapshot(
        values={"my_key": "value prepared slow finished", "market": "DE"},
        tasks=(),
        next=(),
        config=tool_two.checkpointer.get_tuple(thread1).config,
        created_at=tool_two.checkpointer.get_tuple(thread1).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 3,
            "writes": {"finish": {"my_key": " finished"}},
            "thread_id": "21",
        },
        parent_config=[*tool_two.checkpointer.list(thread1, limit=2)][-1].config,
    )
    thread2 = {"configurable": {"thread_id": "22"}}
    # stop when about to enter node
    assert tool_two.invoke({"my_key": "value", "market": "US"}, thread2) == {
        "my_key": "value prepared",
        "market": "US",
    }
    assert tool_two.get_state(thread2) == StateSnapshot(
        values={"my_key": "value prepared", "market": "US"},
        tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
        next=("tool_two_fast",),
        config=tool_two.checkpointer.get_tuple(thread2).config,
        created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {"prepare": {"my_key": " prepared"}},
            "thread_id": "22",
        },
        parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
    )
    # resume, for same result as above
    assert tool_two.invoke(None, thread2, debug=1) == {
        "my_key": "value prepared fast finished",
        "market": "US",
    }
    assert tool_two.get_state(thread2) == StateSnapshot(
        values={"my_key": "value prepared fast finished", "market": "US"},
        tasks=(),
        next=(),
        config=tool_two.checkpointer.get_tuple(thread2).config,
        created_at=tool_two.checkpointer.get_tuple(thread2).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 3,
            "writes": {"finish": {"my_key": " finished"}},
            "thread_id": "22",
        },
        parent_config=[*tool_two.checkpointer.list(thread2, limit=2)][-1].config,
    )
    thread3 = {"configurable": {"thread_id": "23"}}
    # update an empty thread before first run
    uconfig = tool_two.update_state(thread3, {"my_key": "key", "market": "DE"})
    # check current state
    assert tool_two.get_state(thread3) == StateSnapshot(
        values={"my_key": "key", "market": "DE"},
        tasks=(PregelTask(AnyStr(), "prepare", (PULL, "prepare")),),
        next=("prepare",),
        config=uconfig,
        created_at=AnyStr(),
        metadata={
            "parents": {},
            "source": "update",
            "step": 0,
            "writes": {START: {"my_key": "key", "market": "DE"}},
            "thread_id": "23",
        },
        parent_config=None,
    )
    # run from this point
    assert tool_two.invoke(None, thread3) == {
        "my_key": "key prepared",
        "market": "DE",
    }
    # get state after first node
    assert tool_two.get_state(thread3) == StateSnapshot(
        values={"my_key": "key prepared", "market": "DE"},
        tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
        next=("tool_two_slow",),
        config=tool_two.checkpointer.get_tuple(thread3).config,
        created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 1,
            "writes": {"prepare": {"my_key": " prepared"}},
            "thread_id": "23",
        },
        parent_config=uconfig,
    )
    # resume, for same result as above
    assert tool_two.invoke(None, thread3, debug=1) == {
        "my_key": "key prepared slow finished",
        "market": "DE",
    }
    assert tool_two.get_state(thread3) == StateSnapshot(
        values={"my_key": "key prepared slow finished", "market": "DE"},
        tasks=(),
        next=(),
        config=tool_two.checkpointer.get_tuple(thread3).config,
        created_at=tool_two.checkpointer.get_tuple(thread3).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "loop",
            "step": 3,
            "writes": {"finish": {"my_key": " finished"}},
            "thread_id": "23",
        },
        parent_config=[*tool_two.checkpointer.list(thread3, limit=2)][-1].config,
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_in_one_fan_out_state_graph_waiting_edge(
    snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Fan-out from rewrite_query into two retriever paths that rejoin at "qa"
    through a waiting (multi-source) edge; also checks interrupt_after /
    interrupt_before around the join and update_state attribution."""
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    # reducer: when y holds (remove, add) tuples, drop the removed entries
    # from x first, then merge the additions and keep the list sorted
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    workflow = StateGraph(State)
    # add_node used as a decorator: node name is taken from the function name
    @workflow.add_node
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.1)  # to ensure stream order
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    workflow.add_node(analyzer_one)
    workflow.add_node(retriever_one)
    workflow.add_node(retriever_two)
    workflow.add_node(qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    # waiting edge: qa runs only after BOTH retrievers have completed
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert app.invoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["retriever_one"],
    )
    config = {"configurable": {"thread_id": "1"}}
    assert [
        c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config)
    ] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"__interrupt__": ()},
    ]
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_before=["qa"],
    )
    config = {"configurable": {"thread_id": "2"}}
    assert [
        c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config)
    ] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"__interrupt__": ()},
    ]
    # update while interrupted: attributed to retriever_one, merged by sorted_add
    app_w_interrupt.update_state(config, {"docs": ["doc5"]})
    assert app_w_interrupt.get_state(config) == StateSnapshot(
        values={
            "query": "analyzed: query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4", "doc5"],
        },
        tasks=(PregelTask(AnyStr(), "qa", (PULL, "qa")),),
        next=("qa",),
        config=app_w_interrupt.checkpointer.get_tuple(config).config,
        created_at=app_w_interrupt.checkpointer.get_tuple(config).checkpoint["ts"],
        metadata={
            "parents": {},
            "source": "update",
            "step": 4,
            "writes": {"retriever_one": {"docs": ["doc5"]}},
            "thread_id": "2",
        },
        parent_config=[*app_w_interrupt.checkpointer.list(config, limit=2)][-1].config,
    )
    assert [c for c in app_w_interrupt.stream(None, config, debug=1)] == [
        {"qa": {"answer": "doc1,doc2,doc3,doc4,doc5"}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_in_one_fan_out_state_graph_waiting_edge_via_branch(
    snapshot: SnapshotAssertion, request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Same fan-out/waiting-edge shape as the previous test, but the edge to
    retriever_two comes from a conditional branch (Literal return type) rather
    than a plain edge."""
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    # reducer: optionally remove (rem, add) pairs, then merge sorted
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.1)  # to ensure stream order
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    # branch target is inferred from the Literal return annotation
    def rewrite_query_then(data: State) -> Literal["retriever_two"]:
        return "retriever_two"
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_conditional_edges("rewrite_query", rewrite_query_then)
    # waiting edge: qa runs only after BOTH retrievers have completed
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert app.invoke({"query": "what is weather in sf"}, debug=True) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["retriever_one"],
    )
    config = {"configurable": {"thread_id": "1"}}
    assert [
        c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config)
    ] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"__interrupt__": ()},
    ]
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1(
    snapshot: SnapshotAssertion,
    mocker: MockerFixture,
    request: pytest.FixtureRequest,
    checkpointer_name: str,
) -> None:
    """Fan-out/waiting-edge graph with a pydantic v1 BaseModel state.

    Verifies input validation errors, separate Input/Output schemas, and that
    the Context-managed httpx client is set up and torn down exactly once per
    graph run (checked by assert_ctx_once around every invoke/stream/update).
    """
    from pydantic.v1 import BaseModel, ValidationError
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    setup = mocker.Mock()
    teardown = mocker.Mock()
    # wraps a single graph run: context must be entered and exited exactly once
    @contextmanager
    def assert_ctx_once() -> Iterator[None]:
        assert setup.call_count == 0
        assert teardown.call_count == 0
        try:
            yield
        finally:
            assert setup.call_count == 1
            assert teardown.call_count == 1
            setup.reset_mock()
            teardown.reset_mock()
    # the Context-managed resource: records setup/teardown via the mocks
    @contextmanager
    def make_httpx_client() -> Iterator[httpx.Client]:
        setup()
        with httpx.Client() as client:
            try:
                yield client
            finally:
                teardown()
    # reducer: optionally remove (rem, add) pairs, then merge sorted
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class InnerObject(BaseModel):
        yo: int
    class State(BaseModel):
        # pydantic v1 style config; required for the httpx.Client field
        class Config:
            arbitrary_types_allowed = True
        query: str
        inner: InnerObject
        answer: Optional[str] = None
        docs: Annotated[list[str], sorted_add]
        client: Annotated[httpx.Client, Context(make_httpx_client)]
    class Input(BaseModel):
        query: str
        inner: InnerObject
    class Output(BaseModel):
        answer: str
        docs: list[str]
    class StateUpdate(BaseModel):
        query: Optional[str] = None
        answer: Optional[str] = None
        docs: Optional[list[str]] = None
    def rewrite_query(data: State) -> State:
        return {"query": f"query: {data.query}"}
    # nodes may return either a dict or a BaseModel update
    def analyzer_one(data: State) -> State:
        return StateUpdate(query=f"analyzed: {data.query}")
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.1)  # to ensure stream order
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data.docs)}
    # nodes receive the state as a real State instance, not a dict
    def decider(data: State) -> str:
        assert isinstance(data, State)
        return "retriever_two"
    workflow = StateGraph(State, input=Input, output=Output)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_conditional_edges(
        "rewrite_query", decider, {"retriever_two": "retriever_two"}
    )
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert app.get_input_jsonschema() == snapshot
    assert app.get_output_jsonschema() == snapshot
    # invalid input raises ValidationError, but the context still cycles once
    with pytest.raises(ValidationError), assert_ctx_once():
        app.invoke({"query": {}})
    with assert_ctx_once():
        assert app.invoke({"query": "what is weather in sf", "inner": {"yo": 1}}) == {
            "docs": ["doc1", "doc2", "doc3", "doc4"],
            "answer": "doc1,doc2,doc3,doc4",
        }
    with assert_ctx_once():
        assert [
            *app.stream({"query": "what is weather in sf", "inner": {"yo": 1}})
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["retriever_one"],
    )
    config = {"configurable": {"thread_id": "1"}}
    with assert_ctx_once():
        assert [
            c
            for c in app_w_interrupt.stream(
                {"query": "what is weather in sf", "inner": {"yo": 1}}, config
            )
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
    with assert_ctx_once():
        assert [c for c in app_w_interrupt.stream(None, config)] == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
    with assert_ctx_once():
        assert app_w_interrupt.update_state(
            config, {"docs": ["doc5"]}, as_node="rewrite_query"
        ) == {
            "configurable": {
                "thread_id": "1",
                "checkpoint_id": AnyStr(),
                "checkpoint_ns": "",
            }
        }
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2(
    snapshot: SnapshotAssertion,
    mocker: MockerFixture,
    request: pytest.FixtureRequest,
    checkpointer_name: str,
) -> None:
    """Pydantic v2 variant of the pydantic1 test above: BaseModel state with
    ConfigDict, schema snapshots via model_json_schema, and once-per-run
    lifecycle of the Context-managed httpx client."""
    from pydantic import BaseModel, ConfigDict, ValidationError
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    setup = mocker.Mock()
    teardown = mocker.Mock()
    # wraps a single graph run: context must be entered and exited exactly once
    @contextmanager
    def assert_ctx_once() -> Iterator[None]:
        assert setup.call_count == 0
        assert teardown.call_count == 0
        try:
            yield
        finally:
            assert setup.call_count == 1
            assert teardown.call_count == 1
            setup.reset_mock()
            teardown.reset_mock()
    # the Context-managed resource: records setup/teardown via the mocks
    @contextmanager
    def make_httpx_client() -> Iterator[httpx.Client]:
        setup()
        with httpx.Client() as client:
            try:
                yield client
            finally:
                teardown()
    # reducer: optionally remove (rem, add) pairs, then merge sorted
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class InnerObject(BaseModel):
        yo: int
    class State(BaseModel):
        # pydantic v2 style config; required for the httpx.Client field
        model_config = ConfigDict(arbitrary_types_allowed=True)
        query: str
        inner: InnerObject
        answer: Optional[str] = None
        docs: Annotated[list[str], sorted_add]
        client: Annotated[httpx.Client, Context(make_httpx_client)]
    class StateUpdate(BaseModel):
        query: Optional[str] = None
        answer: Optional[str] = None
        docs: Optional[list[str]] = None
    class Input(BaseModel):
        query: str
        inner: InnerObject
    class Output(BaseModel):
        answer: str
        docs: list[str]
    def rewrite_query(data: State) -> State:
        return {"query": f"query: {data.query}"}
    # nodes may return either a dict or a BaseModel update
    def analyzer_one(data: State) -> State:
        return StateUpdate(query=f"analyzed: {data.query}")
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.1)  # to ensure stream order
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data.docs)}
    # nodes receive the state as a real State instance, not a dict
    def decider(data: State) -> str:
        assert isinstance(data, State)
        return "retriever_two"
    workflow = StateGraph(State, input=Input, output=Output)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_conditional_edges(
        "rewrite_query", decider, {"retriever_two": "retriever_two"}
    )
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    if SHOULD_CHECK_SNAPSHOTS:
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
        assert app.get_input_schema().model_json_schema() == snapshot
        assert app.get_output_schema().model_json_schema() == snapshot
    # invalid input raises ValidationError, but the context still cycles once
    with pytest.raises(ValidationError), assert_ctx_once():
        app.invoke({"query": {}})
    with assert_ctx_once():
        assert app.invoke({"query": "what is weather in sf", "inner": {"yo": 1}}) == {
            "docs": ["doc1", "doc2", "doc3", "doc4"],
            "answer": "doc1,doc2,doc3,doc4",
        }
    with assert_ctx_once():
        assert [
            *app.stream({"query": "what is weather in sf", "inner": {"yo": 1}})
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["retriever_one"],
    )
    config = {"configurable": {"thread_id": "1"}}
    with assert_ctx_once():
        assert [
            c
            for c in app_w_interrupt.stream(
                {"query": "what is weather in sf", "inner": {"yo": 1}}, config
            )
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
    with assert_ctx_once():
        assert [c for c in app_w_interrupt.stream(None, config)] == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
    with assert_ctx_once():
        assert app_w_interrupt.update_state(
            config, {"docs": ["doc5"]}, as_node="rewrite_query"
        ) == {
            "configurable": {
                "thread_id": "1",
                "checkpoint_id": AnyStr(),
                "checkpoint_ns": "",
            }
        }
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_in_one_fan_out_state_graph_waiting_edge_plus_regular(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Waiting-edge join plus an extra direct rewrite_query -> qa edge: qa
    fires once early (empty docs) from the plain edge, then again when the
    named barrier is satisfied — the early trigger must not break barrier
    semantics."""
    checkpointer: BaseCheckpointSaver = request.getfixturevalue(
        f"checkpointer_{checkpointer_name}"
    )
    # reducer: optionally remove (rem, add) pairs, then merge sorted
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def analyzer_one(data: State) -> State:
        time.sleep(0.1)  # to ensure stream order
        return {"query": f'analyzed: {data["query"]}'}
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.2)  # to ensure stream order
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    # silly edge, to make sure having been triggered before doesn't break
    # semantics of named barrier (== waiting edges)
    workflow.add_edge("rewrite_query", "qa")
    app = workflow.compile()
    assert app.invoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    # qa appears twice: once early with no docs, once after the barrier
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"qa": {"answer": ""}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer,
        interrupt_after=["retriever_one"],
    )
    config = {"configurable": {"thread_id": "1"}}
    assert [
        c for c in app_w_interrupt.stream({"query": "what is weather in sf"}, config)
    ] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"qa": {"answer": ""}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"__interrupt__": ()},
    ]
    assert [c for c in app_w_interrupt.stream(None, config)] == [
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
def test_in_one_fan_out_state_graph_waiting_edge_multiple() -> None:
    """Fan-out graph with a waiting edge into "decider", whose conditional
    edge loops back to "rewrite_query" until the query has been analyzed
    twice, then proceeds to "qa". Verifies two full passes accumulate docs.
    """
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer: either append new docs, or (remove, add) tuples; always sorted.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        # Sleep so retriever_two's output arrives in a deterministic order.
        time.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    def decider(data: State) -> None:
        return None
    def decider_cond(data: State) -> str:
        # Loop until the query has been through analyzer_one twice.
        if data["query"].count("analyzed") > 1:
            return "qa"
        else:
            return "rewrite_query"
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("decider", decider)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge(["retriever_one", "retriever_two"], "decider")
    workflow.add_conditional_edges("decider", decider_cond)
    workflow.set_finish_point("qa")
    app = workflow.compile()
    # Two passes through retrieval, so every doc appears twice.
    assert app.invoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: analyzed: query: what is weather in sf",
        "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4",
        "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"],
    }
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}},
        {
            "analyzer_one": {
                "query": "analyzed: query: analyzed: query: what is weather in sf"
            }
        },
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}},
    ]
def test_callable_in_conditional_edges_with_no_path_map() -> None:
    """A callable object (not a plain function) used as a conditional edge
    without an explicit path map routes by its returned node name."""
    class State(TypedDict, total=False):
        query: str
    def rewrite(state: State) -> State:
        return {"query": f'query: {state["query"]}'}
    def analyze(state: State) -> State:
        return {"query": f'analyzed: {state["query"]}'}
    class ChooseAnalyzer:
        # Callable router: always sends execution to the "analyzer" node.
        def __call__(self, state: State) -> str:
            return "analyzer"
    builder = StateGraph(State)
    builder.add_node("rewriter", rewrite)
    builder.add_node("analyzer", analyze)
    builder.add_conditional_edges("rewriter", ChooseAnalyzer())
    builder.set_entry_point("rewriter")
    compiled = builder.compile()
    expected = {"query": "analyzed: query: what is weather in sf"}
    assert compiled.invoke({"query": "what is weather in sf"}) == expected
def test_function_in_conditional_edges_with_no_path_map() -> None:
    """A plain function used as a conditional edge without an explicit
    path map routes by its returned node name."""
    class State(TypedDict, total=False):
        query: str
    def rewrite(state: State) -> State:
        return {"query": f'query: {state["query"]}'}
    def analyze(state: State) -> State:
        return {"query": f'analyzed: {state["query"]}'}
    def choose_analyzer(state: State) -> str:
        # Router: always send execution to the "analyzer" node.
        return "analyzer"
    builder = StateGraph(State)
    builder.add_node("rewriter", rewrite)
    builder.add_node("analyzer", analyze)
    builder.add_conditional_edges("rewriter", choose_analyzer)
    builder.set_entry_point("rewriter")
    compiled = builder.compile()
    expected = {"query": "analyzed: query: what is weather in sf"}
    assert compiled.invoke({"query": "what is weather in sf"}) == expected
def test_in_one_fan_out_state_graph_waiting_edge_multiple_cond_edge() -> None:
    """Like the "waiting_edge_multiple" test, but the fan-out itself is a
    conditional edge ("retriever_picker" returns a list of targets) instead
    of two static edges. Verifies identical two-pass accumulation.
    """
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer: either append new docs, or (remove, add) tuples; always sorted.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    def retriever_picker(data: State) -> list[str]:
        # Conditional fan-out: send to both branches at once.
        return ["analyzer_one", "retriever_two"]
    def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    def retriever_two(data: State) -> State:
        time.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    def decider(data: State) -> None:
        return None
    def decider_cond(data: State) -> str:
        # Loop until the query has been through analyzer_one twice.
        if data["query"].count("analyzed") > 1:
            return "qa"
        else:
            return "rewrite_query"
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("decider", decider)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_conditional_edges("rewrite_query", retriever_picker)
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge(["retriever_one", "retriever_two"], "decider")
    workflow.add_conditional_edges("decider", decider_cond)
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert app.invoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: analyzed: query: what is weather in sf",
        "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4",
        "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"],
    }
    assert [*app.stream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}},
        {
            "analyzer_one": {
                "query": "analyzed: query: analyzed: query: what is weather in sf"
            }
        },
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}},
    ]
def test_simple_multi_edge(snapshot: SnapshotAssertion) -> None:
    """A node ("up") may appear both as a direct edge source and as a member
    of a waiting-edge source list; stream order of the parallel middle nodes
    is nondeterministic, so both orderings are accepted.
    """
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    def up(state: State):
        pass
    def side(state: State):
        pass
    def other(state: State):
        return {"my_key": "_more"}
    def down(state: State):
        pass
    graph = StateGraph(State)
    graph.add_node("up", up)
    graph.add_node("side", side)
    graph.add_node("other", other)
    graph.add_node("down", down)
    graph.set_entry_point("up")
    graph.add_edge("up", "side")
    graph.add_edge("up", "other")
    # "up" is both a plain edge source above and part of this waiting edge.
    graph.add_edge(["up", "side"], "down")
    graph.set_finish_point("down")
    app = graph.compile()
    assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert app.invoke({"my_key": "my_value"}) == {"my_key": "my_value_more"}
    # "side" and "other" run in the same superstep; accept either order.
    assert [*app.stream({"my_key": "my_value"})] in (
        [
            {"up": None},
            {"side": None},
            {"other": {"my_key": "_more"}},
            {"down": None},
        ],
        [
            {"up": None},
            {"other": {"my_key": "_more"}},
            {"side": None},
            {"down": None},
        ],
    )
def test_nested_graph_xray(snapshot: SnapshotAssertion) -> None:
    """get_graph(xray=True) should expand a compiled subgraph node
    ("tool_two") into its internal nodes; verified against snapshots of
    both the JSON and mermaid renderings.
    """
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    def logic(state: State):
        pass
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two_slow", logic)
    tool_two_graph.add_node("tool_two_fast", logic)
    tool_two_graph.set_conditional_entry_point(
        lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast",
        then=END,
    )
    tool_two = tool_two_graph.compile()
    graph = StateGraph(State)
    graph.add_node("tool_one", logic)
    # Compiled subgraph embedded as a node; xray should reveal its internals.
    graph.add_node("tool_two", tool_two)
    graph.add_node("tool_three", logic)
    graph.set_conditional_entry_point(lambda s: "tool_one", then=END)
    app = graph.compile()
    assert app.get_graph(xray=True).to_json() == snapshot
    assert app.get_graph(xray=True).draw_mermaid() == snapshot
def test_nested_graph(snapshot: SnapshotAssertion) -> None:
    """A compiled subgraph with its own (narrower) state schema embedded as
    a node of an outer graph. Checks drawing, invoke, updates/values stream
    modes, and composition with another runnable in a chain.
    """
    def never_called_fn(state: Any):
        # Passed through state only — must never be executed as a node.
        assert 0, "This function should never be called"
    never_called = RunnableLambda(never_called_fn)
    class InnerState(TypedDict):
        my_key: str
        my_other_key: str
    def up(state: InnerState):
        return {"my_key": state["my_key"] + " there", "my_other_key": state["my_key"]}
    inner = StateGraph(InnerState)
    inner.add_node("up", up)
    inner.set_entry_point("up")
    inner.set_finish_point("up")
    class State(TypedDict):
        my_key: str
        never_called: Any
    def side(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    graph.add_node("inner", inner.compile())
    graph.add_node("side", side)
    graph.set_entry_point("inner")
    graph.add_edge("inner", "side")
    graph.set_finish_point("side")
    app = graph.compile()
    assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
    assert app.get_graph(xray=True).draw_mermaid() == snapshot
    # never_called is carried through the outer state untouched.
    assert app.invoke(
        {"my_key": "my value", "never_called": never_called}, debug=True
    ) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    assert [*app.stream({"my_key": "my value", "never_called": never_called})] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
    assert [
        *app.stream(
            {"my_key": "my value", "never_called": never_called}, stream_mode="values"
        )
    ] == [
        {
            "my_key": "my value",
            "never_called": never_called,
        },
        {
            "my_key": "my value there",
            "never_called": never_called,
        },
        {
            "my_key": "my value there and back again",
            "never_called": never_called,
        },
    ]
    # Composing the compiled graph into a chain preserves invoke/stream behavior.
    chain = app | RunnablePassthrough()
    assert chain.invoke({"my_key": "my value", "never_called": never_called}) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    assert [*chain.stream({"my_key": "my value", "never_called": never_called})] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_stream_subgraphs_during_execution(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Subgraph node updates must be streamed as they happen, interleaved
    with outer-graph updates — not buffered until the subgraph node
    finishes. Verified by timestamping each streamed chunk.
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class InnerState(TypedDict):
        my_key: Annotated[str, operator.add]
        my_other_key: str
    def inner_1(state: InnerState):
        return {"my_key": "got here", "my_other_key": state["my_key"]}
    def inner_2(state: InnerState):
        # Long sleep so outer_1 (0.2s) finishes while the subgraph is mid-flight.
        time.sleep(0.5)
        return {
            "my_key": " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    def outer_1(state: State):
        time.sleep(0.2)
        return {"my_key": " and parallel"}
    def outer_2(state: State):
        return {"my_key": " and back again"}
    graph = StateGraph(State)
    graph.add_node("inner", inner.compile())
    graph.add_node("outer_1", outer_1)
    graph.add_node("outer_2", outer_2)
    graph.add_edge(START, "inner")
    graph.add_edge(START, "outer_1")
    graph.add_edge(["inner", "outer_1"], "outer_2")
    graph.add_edge("outer_2", END)
    app = graph.compile(checkpointer=checkpointer)
    start = time.perf_counter()
    chunks: list[tuple[float, Any]] = []
    config = {"configurable": {"thread_id": "2"}}
    for c in app.stream({"my_key": ""}, config, subgraphs=True):
        chunks.append((round(time.perf_counter() - start, 1), c))
    # Normalize timestamps relative to the first chunk's arrival.
    for idx in range(len(chunks)):
        elapsed, c = chunks[idx]
        chunks[idx] = (round(elapsed - chunks[0][0], 1), c)
    assert chunks == [
        # arrives before "inner" finishes
        (
            FloatBetween(0.0, 0.1),
            (
                (AnyStr("inner:"),),
                {"inner_1": {"my_key": "got here", "my_other_key": ""}},
            ),
        ),
        (FloatBetween(0.2, 0.3), ((), {"outer_1": {"my_key": " and parallel"}})),
        (
            FloatBetween(0.5, 0.8),
            (
                (AnyStr("inner:"),),
                {"inner_2": {"my_key": " and there", "my_other_key": "got here"}},
            ),
        ),
        (FloatBetween(0.5, 0.8), ((), {"inner": {"my_key": "got here and there"}})),
        (FloatBetween(0.5, 0.8), ((), {"outer_2": {"my_key": " and back again"}})),
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_stream_buffering_single_node(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Custom stream-mode writes emitted inside a node must reach the
    consumer immediately (not after the node returns). Verified by the
    elapsed time between the two writer() calls around a sleep.
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    def node(state: State, writer: StreamWriter):
        writer("Before sleep")
        time.sleep(0.2)
        writer("After sleep")
        return {"my_key": "got here"}
    builder = StateGraph(State)
    builder.add_node("node", node)
    builder.add_edge(START, "node")
    builder.add_edge("node", END)
    graph = builder.compile(checkpointer=checkpointer)
    start = time.perf_counter()
    chunks: list[tuple[float, Any]] = []
    config = {"configurable": {"thread_id": "2"}}
    for c in graph.stream({"my_key": ""}, config, stream_mode="custom"):
        chunks.append((round(time.perf_counter() - start, 1), c))
    # First write arrives before the sleep elapses -> no buffering.
    assert chunks == [
        (FloatBetween(0.0, 0.1), "Before sleep"),
        (FloatBetween(0.2, 0.3), "After sleep"),
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_nested_graph_interrupts_parallel(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """A subgraph with an internal interrupt runs in parallel with an outer
    node. Checks that the parallel node's writes are persisted across the
    interrupt and not re-executed on resume, plus interrupt_before/after
    placed on the parallel node itself (fresh thread_id per scenario).
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class InnerState(TypedDict):
        my_key: Annotated[str, operator.add]
        my_other_key: str
    def inner_1(state: InnerState):
        time.sleep(0.1)
        return {"my_key": "got here", "my_other_key": state["my_key"]}
    def inner_2(state: InnerState):
        return {
            "my_key": " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    def outer_1(state: State):
        return {"my_key": " and parallel"}
    def outer_2(state: State):
        return {"my_key": " and back again"}
    graph = StateGraph(State)
    # Subgraph interrupts internally, before its second node.
    graph.add_node("inner", inner.compile(interrupt_before=["inner_2"]))
    graph.add_node("outer_1", outer_1)
    graph.add_node("outer_2", outer_2)
    graph.add_edge(START, "inner")
    graph.add_edge(START, "outer_1")
    graph.add_edge(["inner", "outer_1"], "outer_2")
    graph.set_finish_point("outer_2")
    app = graph.compile(checkpointer=checkpointer)
    # test invoke w/ nested interrupt
    config = {"configurable": {"thread_id": "1"}}
    assert app.invoke({"my_key": ""}, config, debug=True) == {
        "my_key": " and parallel",
    }
    assert app.invoke(None, config, debug=True) == {
        "my_key": "got here and there and parallel and back again",
    }
    # below combo of assertions is asserting two things
    # - outer_1 finishes before inner interrupts (because we see its output in stream, which only happens after node finishes)
    # - the writes of outer are persisted in 1st call and used in 2nd call, ie outer isn't called again (because we dont see outer_1 output again in 2nd stream)
    # test stream updates w/ nested interrupt
    config = {"configurable": {"thread_id": "2"}}
    assert [*app.stream({"my_key": ""}, config, subgraphs=True)] == [
        # we got to parallel node first
        ((), {"outer_1": {"my_key": " and parallel"}}),
        ((AnyStr("inner:"),), {"inner_1": {"my_key": "got here", "my_other_key": ""}}),
        ((), {"__interrupt__": ()}),
    ]
    assert [*app.stream(None, config)] == [
        {"outer_1": {"my_key": " and parallel"}, "__metadata__": {"cached": True}},
        {"inner": {"my_key": "got here and there"}},
        {"outer_2": {"my_key": " and back again"}},
    ]
    # test stream values w/ nested interrupt
    config = {"configurable": {"thread_id": "3"}}
    assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": " and parallel"},
    ]
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": "got here and there and parallel"},
        {"my_key": "got here and there and parallel and back again"},
    ]
    # test interrupts BEFORE the parallel node
    app = graph.compile(checkpointer=checkpointer, interrupt_before=["outer_1"])
    config = {"configurable": {"thread_id": "4"}}
    assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [
        {"my_key": ""}
    ]
    # while we're waiting for the node w/ interrupt inside to finish
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": " and parallel"},
    ]
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": "got here and there and parallel"},
        {"my_key": "got here and there and parallel and back again"},
    ]
    # test interrupts AFTER the parallel node
    app = graph.compile(checkpointer=checkpointer, interrupt_after=["outer_1"])
    config = {"configurable": {"thread_id": "5"}}
    assert [*app.stream({"my_key": ""}, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": " and parallel"},
    ]
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": ""},
        {"my_key": "got here and there and parallel"},
    ]
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": "got here and there and parallel"},
        {"my_key": "got here and there and parallel and back again"},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_doubly_nested_graph_interrupts(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Grandchild graph interrupts two levels down (parent -> child ->
    grandchild). Checks invoke/stream resume semantics and that the
    CONFIG_KEY_NODE_FINISHED callback fires for nodes at every level.
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class State(TypedDict):
        my_key: str
    class ChildState(TypedDict):
        my_key: str
    class GrandChildState(TypedDict):
        my_key: str
    def grandchild_1(state: ChildState):
        return {"my_key": state["my_key"] + " here"}
    def grandchild_2(state: ChildState):
        return {
            "my_key": state["my_key"] + " and there",
        }
    grandchild = StateGraph(GrandChildState)
    grandchild.add_node("grandchild_1", grandchild_1)
    grandchild.add_node("grandchild_2", grandchild_2)
    grandchild.add_edge("grandchild_1", "grandchild_2")
    grandchild.set_entry_point("grandchild_1")
    grandchild.set_finish_point("grandchild_2")
    child = StateGraph(ChildState)
    # Interrupt is declared at the innermost (grandchild) level.
    child.add_node(
        "child_1",
        grandchild.compile(interrupt_before=["grandchild_2"]),
    )
    child.set_entry_point("child_1")
    child.set_finish_point("child_1")
    def parent_1(state: State):
        return {"my_key": "hi " + state["my_key"]}
    def parent_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    graph.add_node("parent_1", parent_1)
    graph.add_node("child", child.compile())
    graph.add_node("parent_2", parent_2)
    graph.set_entry_point("parent_1")
    graph.add_edge("parent_1", "child")
    graph.add_edge("child", "parent_2")
    graph.set_finish_point("parent_2")
    app = graph.compile(checkpointer=checkpointer)
    # test invoke w/ nested interrupt
    config = {"configurable": {"thread_id": "1"}}
    assert app.invoke({"my_key": "my value"}, config, debug=True) == {
        "my_key": "hi my value",
    }
    assert app.invoke(None, config, debug=True) == {
        "my_key": "hi my value here and there and back again",
    }
    # test stream updates w/ nested interrupt
    nodes: list[str] = []
    config = {
        "configurable": {"thread_id": "2", CONFIG_KEY_NODE_FINISHED: nodes.append}
    }
    assert [*app.stream({"my_key": "my value"}, config)] == [
        {"parent_1": {"my_key": "hi my value"}},
        {"__interrupt__": ()},
    ]
    # Only nodes that fully completed before the interrupt are reported.
    assert nodes == ["parent_1", "grandchild_1"]
    assert [*app.stream(None, config)] == [
        {"child": {"my_key": "hi my value here and there"}},
        {"parent_2": {"my_key": "hi my value here and there and back again"}},
    ]
    # On resume, completions at all three nesting levels are appended.
    assert nodes == [
        "parent_1",
        "grandchild_1",
        "grandchild_2",
        "child_1",
        "child",
        "parent_2",
    ]
    # test stream values w/ nested interrupt
    config = {"configurable": {"thread_id": "3"}}
    assert [*app.stream({"my_key": "my value"}, config, stream_mode="values")] == [
        {"my_key": "my value"},
        {"my_key": "hi my value"},
    ]
    assert [*app.stream(None, config, stream_mode="values")] == [
        {"my_key": "hi my value"},
        {"my_key": "hi my value here and there"},
        {"my_key": "hi my value here and there and back again"},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_nested_graph_state(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Inspect checkpointed state around a subgraph interrupt: get_state
    with and without subgraphs=True, get_state_history for both the outer
    graph and the subgraph, then resume and verify the full history,
    including lookup of each historical snapshot by its checkpoint config.
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class InnerState(TypedDict):
        my_key: str
        my_other_key: str
    def inner_1(state: InnerState):
        return {
            "my_key": state["my_key"] + " here",
            "my_other_key": state["my_key"],
        }
    def inner_2(state: InnerState):
        return {
            "my_key": state["my_key"] + " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: str
        other_parent_key: str
    def outer_1(state: State):
        return {"my_key": "hi " + state["my_key"]}
    def outer_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    graph.add_node("outer_1", outer_1)
    # Subgraph interrupts before its second node, leaving a pending task.
    graph.add_node(
        "inner",
        inner.compile(interrupt_before=["inner_2"]),
    )
    graph.add_node("outer_2", outer_2)
    graph.set_entry_point("outer_1")
    graph.add_edge("outer_1", "inner")
    graph.add_edge("inner", "outer_2")
    graph.set_finish_point("outer_2")
    app = graph.compile(checkpointer=checkpointer)
    config = {"configurable": {"thread_id": "1"}}
    app.invoke({"my_key": "my value"}, config, debug=True)
    # test state w/ nested subgraph state (right after interrupt)
    # first get_state without subgraph state
    assert app.get_state(config) == StateSnapshot(
        values={"my_key": "hi my value"},
        tasks=(
            PregelTask(
                AnyStr(),
                "inner",
                (PULL, "inner"),
                # Without subgraphs=True, the pending subgraph task exposes
                # only the config pointing at its namespace, not its values.
                state={"configurable": {"thread_id": "1", "checkpoint_ns": AnyStr()}},
            ),
        ),
        next=("inner",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {"outer_1": {"my_key": "hi my value"}},
            "step": 1,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    # now, get_state with subgraphs state
    assert app.get_state(config, subgraphs=True) == StateSnapshot(
        values={"my_key": "hi my value"},
        tasks=(
            PregelTask(
                AnyStr(),
                "inner",
                (PULL, "inner"),
                # With subgraphs=True, the task carries a full nested snapshot.
                state=StateSnapshot(
                    values={
                        "my_key": "hi my value here",
                        "my_other_key": "hi my value",
                    },
                    tasks=(
                        PregelTask(
                            AnyStr(),
                            "inner_2",
                            (PULL, "inner_2"),
                        ),
                    ),
                    next=("inner_2",),
                    config={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("inner:"),
                            "checkpoint_id": AnyStr(),
                            # NOTE(review): "child:" key prefix looks copy-pasted
                            # from the doubly-nested test (subgraph node here is
                            # "inner"); passes presumably because AnyDict
                            # comparison is lenient — confirm.
                            "checkpoint_map": AnyDict(
                                {"": AnyStr(), AnyStr("child:"): AnyStr()}
                            ),
                        }
                    },
                    metadata={
                        "parents": {
                            "": AnyStr(),
                        },
                        "source": "loop",
                        "writes": {
                            "inner_1": {
                                "my_key": "hi my value here",
                                "my_other_key": "hi my value",
                            }
                        },
                        "step": 1,
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "langgraph_node": "inner",
                        "langgraph_path": [PULL, "inner"],
                        "langgraph_step": 2,
                        "langgraph_triggers": ["outer_1"],
                        "langgraph_checkpoint_ns": AnyStr("inner:"),
                    },
                    created_at=AnyStr(),
                    parent_config={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("inner:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {"": AnyStr(), AnyStr("child:"): AnyStr()}
                            ),
                        }
                    },
                ),
            ),
        ),
        next=("inner",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {"outer_1": {"my_key": "hi my value"}},
            "step": 1,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    # get_state_history returns outer graph checkpoints
    history = list(app.get_state_history(config))
    assert history == [
        StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "inner",
                    (PULL, "inner"),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("inner:"),
                        }
                    },
                ),
            ),
            next=("inner",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"outer_1": {"my_key": "hi my value"}},
                "step": 1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "outer_1",
                    (PULL, "outer_1"),
                    result={"my_key": "hi my value"},
                ),
            ),
            next=("outer_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": None,
                "step": 0,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "__start__",
                    (PULL, "__start__"),
                    result={"my_key": "my value"},
                ),
            ),
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "writes": {"__start__": {"my_key": "my value"}},
                "step": -1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        ),
    ]
    # get_state_history for a subgraph returns its checkpoints
    child_history = [*app.get_state_history(history[0].tasks[0].state)]
    assert child_history == [
        StateSnapshot(
            values={"my_key": "hi my value here", "my_other_key": "hi my value"},
            next=("inner_2",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": {
                    "inner_1": {
                        "my_key": "hi my value here",
                        "my_other_key": "hi my value",
                    }
                },
                "step": 1,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("inner:"),
                "langgraph_node": "inner",
                "langgraph_path": [PULL, "inner"],
                "langgraph_step": 2,
                "langgraph_triggers": ["outer_1"],
                "langgraph_checkpoint_ns": AnyStr("inner:"),
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            tasks=(PregelTask(AnyStr(), "inner_2", (PULL, "inner_2")),),
        ),
        StateSnapshot(
            values={"my_key": "hi my value"},
            next=("inner_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": None,
                "step": 0,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("inner:"),
                "langgraph_node": "inner",
                "langgraph_path": [PULL, "inner"],
                "langgraph_step": 2,
                "langgraph_triggers": ["outer_1"],
                "langgraph_checkpoint_ns": AnyStr("inner:"),
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            tasks=(
                PregelTask(
                    AnyStr(),
                    "inner_1",
                    (PULL, "inner_1"),
                    result={
                        "my_key": "hi my value here",
                        "my_other_key": "hi my value",
                    },
                ),
            ),
        ),
        StateSnapshot(
            values={},
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "input",
                "writes": {"__start__": {"my_key": "hi my value"}},
                "step": -1,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("inner:"),
                "langgraph_node": "inner",
                "langgraph_path": [PULL, "inner"],
                "langgraph_step": 2,
                "langgraph_triggers": ["outer_1"],
                "langgraph_checkpoint_ns": AnyStr("inner:"),
            },
            created_at=AnyStr(),
            parent_config=None,
            tasks=(
                PregelTask(
                    AnyStr(),
                    "__start__",
                    (PULL, "__start__"),
                    result={"my_key": "hi my value"},
                ),
            ),
        ),
    ]
    # resume
    app.invoke(None, config, debug=True)
    # test state w/ nested subgraph state (after resuming from interrupt)
    assert app.get_state(config) == StateSnapshot(
        values={"my_key": "hi my value here and there and back again"},
        tasks=(),
        next=(),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {
                "outer_2": {"my_key": "hi my value here and there and back again"}
            },
            "step": 3,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    # test full history at the end
    actual_history = list(app.get_state_history(config))
    expected_history = [
        StateSnapshot(
            values={"my_key": "hi my value here and there and back again"},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "outer_2": {"my_key": "hi my value here and there and back again"}
                },
                "step": 3,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "hi my value here and there"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "outer_2",
                    (PULL, "outer_2"),
                    result={"my_key": "hi my value here and there and back again"},
                ),
            ),
            next=("outer_2",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"inner": {"my_key": "hi my value here and there"}},
                "step": 2,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "inner",
                    (PULL, "inner"),
                    state={
                        "configurable": {"thread_id": "1", "checkpoint_ns": AnyStr()}
                    },
                    result={"my_key": "hi my value here and there"},
                ),
            ),
            next=("inner",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"outer_1": {"my_key": "hi my value"}},
                "step": 1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "outer_1",
                    (PULL, "outer_1"),
                    result={"my_key": "hi my value"},
                ),
            ),
            next=("outer_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": None,
                "step": 0,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "__start__",
                    (PULL, "__start__"),
                    result={"my_key": "my value"},
                ),
            ),
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "writes": {"__start__": {"my_key": "my value"}},
                "step": -1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        ),
    ]
    assert actual_history == expected_history
    # test looking up parent state by checkpoint ID
    for actual_snapshot, expected_snapshot in zip(actual_history, expected_history):
        assert app.get_state(actual_snapshot.config) == expected_snapshot
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_doubly_nested_graph_state(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Exercise state inspection/history across a three-level nested graph.

    Builds parent -> child -> grandchild StateGraphs (the grandchild is
    compiled with ``interrupt_before=["grandchild_2"]``), then verifies:

    * streaming pauses at the nested interrupt and resumes correctly,
    * ``get_state`` with and without ``subgraphs=True`` returns matching
      ``StateSnapshot`` trees at every nesting level,
    * ``get_state_history`` for the outer, child, and grandchild graphs
      returns the expected checkpoint sequences, and
    * replaying from a historical grandchild checkpoint re-runs from there.

    Runs once per checkpointer in ``ALL_CHECKPOINTERS_SYNC`` (the fixture is
    resolved dynamically via ``request.getfixturevalue``).
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    # All three levels share the same single-key state schema.
    class State(TypedDict):
        my_key: str
    class ChildState(TypedDict):
        my_key: str
    class GrandChildState(TypedDict):
        my_key: str
    def grandchild_1(state: ChildState):
        return {"my_key": state["my_key"] + " here"}
    def grandchild_2(state: ChildState):
        return {
            "my_key": state["my_key"] + " and there",
        }
    # Grandchild graph: grandchild_1 -> grandchild_2.
    grandchild = StateGraph(GrandChildState)
    grandchild.add_node("grandchild_1", grandchild_1)
    grandchild.add_node("grandchild_2", grandchild_2)
    grandchild.add_edge("grandchild_1", "grandchild_2")
    grandchild.set_entry_point("grandchild_1")
    grandchild.set_finish_point("grandchild_2")
    # Child graph wraps the grandchild as its only node, interrupting
    # before grandchild_2 so the run pauses two levels deep.
    child = StateGraph(ChildState)
    child.add_node(
        "child_1",
        grandchild.compile(interrupt_before=["grandchild_2"]),
    )
    child.set_entry_point("child_1")
    child.set_finish_point("child_1")
    def parent_1(state: State):
        return {"my_key": "hi " + state["my_key"]}
    def parent_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    # Parent graph: parent_1 -> child (subgraph) -> parent_2.
    graph = StateGraph(State)
    graph.add_node("parent_1", parent_1)
    graph.add_node("child", child.compile())
    graph.add_node("parent_2", parent_2)
    graph.set_entry_point("parent_1")
    graph.add_edge("parent_1", "child")
    graph.add_edge("child", "parent_2")
    graph.set_finish_point("parent_2")
    app = graph.compile(checkpointer=checkpointer)
    # test invoke w/ nested interrupt
    config = {"configurable": {"thread_id": "1"}}
    assert [c for c in app.stream({"my_key": "my value"}, config, subgraphs=True)] == [
        ((), {"parent_1": {"my_key": "hi my value"}}),
        (
            (AnyStr("child:"), AnyStr("child_1:")),
            {"grandchild_1": {"my_key": "hi my value here"}},
        ),
        ((), {"__interrupt__": ()}),
    ]
    # get state without subgraphs
    outer_state = app.get_state(config)
    assert outer_state == StateSnapshot(
        values={"my_key": "hi my value"},
        tasks=(
            PregelTask(
                AnyStr(),
                "child",
                (PULL, "child"),
                state={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("child"),
                    }
                },
            ),
        ),
        next=("child",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {"parent_1": {"my_key": "hi my value"}},
            "step": 1,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    # Drill one level down: the child graph's state while interrupted.
    child_state = app.get_state(outer_state.tasks[0].state)
    assert (
        child_state.tasks[0]
        == StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "child_1",
                    (PULL, "child_1"),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr(),
                        }
                    },
                ),
            ),
            next=("child_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {"": AnyStr()},
                "source": "loop",
                "writes": None,
                "step": 0,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                }
            },
        ).tasks[0]
    )
    # Two levels down: the grandchild graph paused before grandchild_2.
    grandchild_state = app.get_state(child_state.tasks[0].state)
    assert grandchild_state == StateSnapshot(
        values={"my_key": "hi my value here"},
        tasks=(
            PregelTask(
                AnyStr(),
                "grandchild_2",
                (PULL, "grandchild_2"),
            ),
        ),
        next=("grandchild_2",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr(),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                        AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                    }
                ),
            }
        },
        metadata={
            "parents": AnyDict(
                {
                    "": AnyStr(),
                    AnyStr("child:"): AnyStr(),
                }
            ),
            "source": "loop",
            "writes": {"grandchild_1": {"my_key": "hi my value here"}},
            "step": 1,
            "thread_id": "1",
            "checkpoint_ns": AnyStr("child:"),
            "langgraph_checkpoint_ns": AnyStr("child:"),
            "langgraph_node": "child_1",
            "langgraph_path": [PULL, AnyStr("child_1")],
            "langgraph_step": 1,
            "langgraph_triggers": [AnyStr("start:child_1")],
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr(),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                        AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                    }
                ),
            }
        },
    )
    # get state with subgraphs
    assert app.get_state(config, subgraphs=True) == StateSnapshot(
        values={"my_key": "hi my value"},
        tasks=(
            PregelTask(
                AnyStr(),
                "child",
                (PULL, "child"),
                state=StateSnapshot(
                    values={"my_key": "hi my value"},
                    tasks=(
                        PregelTask(
                            AnyStr(),
                            "child_1",
                            (PULL, "child_1"),
                            state=StateSnapshot(
                                values={"my_key": "hi my value here"},
                                tasks=(
                                    PregelTask(
                                        AnyStr(),
                                        "grandchild_2",
                                        (PULL, "grandchild_2"),
                                    ),
                                ),
                                next=("grandchild_2",),
                                config={
                                    "configurable": {
                                        "thread_id": "1",
                                        "checkpoint_ns": AnyStr(),
                                        "checkpoint_id": AnyStr(),
                                        "checkpoint_map": AnyDict(
                                            {
                                                "": AnyStr(),
                                                AnyStr("child:"): AnyStr(),
                                                AnyStr(
                                                    re.compile(r"child:.+|child1:")
                                                ): AnyStr(),
                                            }
                                        ),
                                    }
                                },
                                metadata={
                                    "parents": AnyDict(
                                        {
                                            "": AnyStr(),
                                            AnyStr("child:"): AnyStr(),
                                        }
                                    ),
                                    "source": "loop",
                                    "writes": {
                                        "grandchild_1": {"my_key": "hi my value here"}
                                    },
                                    "step": 1,
                                    "thread_id": "1",
                                    "checkpoint_ns": AnyStr("child:"),
                                    "langgraph_checkpoint_ns": AnyStr("child:"),
                                    "langgraph_node": "child_1",
                                    "langgraph_path": [
                                        PULL,
                                        AnyStr("child_1"),
                                    ],
                                    "langgraph_step": 1,
                                    "langgraph_triggers": [AnyStr("start:child_1")],
                                },
                                created_at=AnyStr(),
                                parent_config={
                                    "configurable": {
                                        "thread_id": "1",
                                        "checkpoint_ns": AnyStr(),
                                        "checkpoint_id": AnyStr(),
                                        "checkpoint_map": AnyDict(
                                            {
                                                "": AnyStr(),
                                                AnyStr("child:"): AnyStr(),
                                                AnyStr(
                                                    re.compile(r"child:.+|child1:")
                                                ): AnyStr(),
                                            }
                                        ),
                                    }
                                },
                            ),
                        ),
                    ),
                    next=("child_1",),
                    config={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("child:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {"": AnyStr(), AnyStr("child:"): AnyStr()}
                            ),
                        }
                    },
                    metadata={
                        "parents": {"": AnyStr()},
                        "source": "loop",
                        "writes": None,
                        "step": 0,
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("child:"),
                        "langgraph_node": "child",
                        "langgraph_path": [PULL, AnyStr("child")],
                        "langgraph_step": 2,
                        "langgraph_triggers": [AnyStr("parent_1")],
                        "langgraph_checkpoint_ns": AnyStr("child:"),
                    },
                    created_at=AnyStr(),
                    parent_config={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("child:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {"": AnyStr(), AnyStr("child:"): AnyStr()}
                            ),
                        }
                    },
                ),
            ),
        ),
        next=("child",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {"parent_1": {"my_key": "hi my value"}},
            "step": 1,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    # # resume
    assert [c for c in app.stream(None, config, subgraphs=True)] == [
        (
            (AnyStr("child:"), AnyStr("child_1:")),
            {"grandchild_2": {"my_key": "hi my value here and there"}},
        ),
        ((AnyStr("child:"),), {"child_1": {"my_key": "hi my value here and there"}}),
        ((), {"child": {"my_key": "hi my value here and there"}}),
        ((), {"parent_2": {"my_key": "hi my value here and there and back again"}}),
    ]
    # get state with and without subgraphs
    assert (
        app.get_state(config)
        == app.get_state(config, subgraphs=True)
        == StateSnapshot(
            values={"my_key": "hi my value here and there and back again"},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "parent_2": {"my_key": "hi my value here and there and back again"}
                },
                "step": 3,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
    )
    # get outer graph history
    outer_history = list(app.get_state_history(config))
    assert outer_history == [
        StateSnapshot(
            values={"my_key": "hi my value here and there and back again"},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "parent_2": {"my_key": "hi my value here and there and back again"}
                },
                "step": 3,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "hi my value here and there"},
            next=("parent_2",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"child": {"my_key": "hi my value here and there"}},
                "step": 2,
                "parents": {},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="parent_2",
                    path=(PULL, "parent_2"),
                    result={"my_key": "hi my value here and there and back again"},
                ),
            ),
        ),
        StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "child",
                    (PULL, "child"),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("child"),
                        }
                    },
                    result={"my_key": "hi my value here and there"},
                ),
            ),
            next=("child",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"parent_1": {"my_key": "hi my value"}},
                "step": 1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"my_key": "my value"},
            next=("parent_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": None,
                "step": 0,
                "parents": {},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="parent_1",
                    path=(PULL, "parent_1"),
                    result={"my_key": "hi my value"},
                ),
            ),
        ),
        StateSnapshot(
            values={},
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "input",
                "writes": {"__start__": {"my_key": "my value"}},
                "step": -1,
                "parents": {},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="__start__",
                    path=(PULL, "__start__"),
                    result={"my_key": "my value"},
                ),
            ),
        ),
    ]
    # get child graph history
    child_history = list(app.get_state_history(outer_history[2].tasks[0].state))
    assert child_history == [
        StateSnapshot(
            values={"my_key": "hi my value here and there"},
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"child_1": {"my_key": "hi my value here and there"}},
                "step": 1,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child",
                "langgraph_path": [PULL, AnyStr("child")],
                "langgraph_step": 2,
                "langgraph_triggers": [AnyStr("parent_1")],
                "langgraph_checkpoint_ns": AnyStr("child:"),
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            tasks=(),
        ),
        StateSnapshot(
            values={"my_key": "hi my value"},
            next=("child_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": None,
                "step": 0,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child",
                "langgraph_path": [PULL, AnyStr("child")],
                "langgraph_step": 2,
                "langgraph_triggers": [AnyStr("parent_1")],
                "langgraph_checkpoint_ns": AnyStr("child:"),
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="child_1",
                    path=(PULL, "child_1"),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("child:"),
                        }
                    },
                    result={"my_key": "hi my value here and there"},
                ),
            ),
        ),
        StateSnapshot(
            values={},
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("child:"),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {"": AnyStr(), AnyStr("child:"): AnyStr()}
                    ),
                }
            },
            metadata={
                "source": "input",
                "writes": {"__start__": {"my_key": "hi my value"}},
                "step": -1,
                "parents": {"": AnyStr()},
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child",
                "langgraph_path": [PULL, AnyStr("child")],
                "langgraph_step": 2,
                "langgraph_triggers": [AnyStr("parent_1")],
                "langgraph_checkpoint_ns": AnyStr("child:"),
            },
            created_at=AnyStr(),
            parent_config=None,
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="__start__",
                    path=(PULL, "__start__"),
                    result={"my_key": "hi my value"},
                ),
            ),
        ),
    ]
    # get grandchild graph history
    grandchild_history = list(app.get_state_history(child_history[1].tasks[0].state))
    assert grandchild_history == [
        StateSnapshot(
            values={"my_key": "hi my value here and there"},
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"grandchild_2": {"my_key": "hi my value here and there"}},
                "step": 2,
                "parents": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                    }
                ),
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child_1",
                "langgraph_path": [
                    PULL,
                    AnyStr("child_1"),
                ],
                "langgraph_step": 1,
                "langgraph_triggers": [AnyStr("start:child_1")],
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            tasks=(),
        ),
        StateSnapshot(
            values={"my_key": "hi my value here"},
            next=("grandchild_2",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"grandchild_1": {"my_key": "hi my value here"}},
                "step": 1,
                "parents": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                    }
                ),
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child_1",
                "langgraph_path": [
                    PULL,
                    AnyStr("child_1"),
                ],
                "langgraph_step": 1,
                "langgraph_triggers": [AnyStr("start:child_1")],
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="grandchild_2",
                    path=(PULL, "grandchild_2"),
                    result={"my_key": "hi my value here and there"},
                ),
            ),
        ),
        StateSnapshot(
            values={"my_key": "hi my value"},
            next=("grandchild_1",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            metadata={
                "source": "loop",
                "writes": None,
                "step": 0,
                "parents": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                    }
                ),
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child_1",
                "langgraph_path": [
                    PULL,
                    AnyStr("child_1"),
                ],
                "langgraph_step": 1,
                "langgraph_triggers": [AnyStr("start:child_1")],
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="grandchild_1",
                    path=(PULL, "grandchild_1"),
                    result={"my_key": "hi my value here"},
                ),
            ),
        ),
        StateSnapshot(
            values={},
            next=("__start__",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr(),
                    "checkpoint_id": AnyStr(),
                    "checkpoint_map": AnyDict(
                        {
                            "": AnyStr(),
                            AnyStr("child:"): AnyStr(),
                            AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
                        }
                    ),
                }
            },
            metadata={
                "source": "input",
                "writes": {"__start__": {"my_key": "hi my value"}},
                "step": -1,
                "parents": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("child:"): AnyStr(),
                    }
                ),
                "thread_id": "1",
                "checkpoint_ns": AnyStr("child:"),
                "langgraph_checkpoint_ns": AnyStr("child:"),
                "langgraph_node": "child_1",
                "langgraph_path": [
                    PULL,
                    AnyStr("child_1"),
                ],
                "langgraph_step": 1,
                "langgraph_triggers": [AnyStr("start:child_1")],
            },
            created_at=AnyStr(),
            parent_config=None,
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="__start__",
                    path=(PULL, "__start__"),
                    result={"my_key": "hi my value"},
                ),
            ),
        ),
    ]
    # replay grandchild checkpoint
    assert [
        c for c in app.stream(None, grandchild_history[2].config, subgraphs=True)
    ] == [
        (
            (AnyStr("child:"), AnyStr("child_1:")),
            {"grandchild_1": {"my_key": "hi my value here"}},
        ),
        ((), {"__interrupt__": ()}),
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_send_to_nested_graphs(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Fan out via ``Send`` into multiple instances of an interrupted subgraph.

    A parent graph maps each subject to its own ``generate_joke`` subgraph run
    (the subgraph is compiled with ``interrupt_before=["generate"]``).  The
    test pauses at the nested interrupts, inspects/updates the state of the
    individual subgraph tasks, resumes, and checks the final state and full
    history.  Assertions branch on ``FF_SEND_V2`` because task layout differs
    between the Send v1 and v2 code paths.
    """
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    class OverallState(TypedDict):
        subjects: list[str]
        jokes: Annotated[list[str], operator.add]
    def continue_to_jokes(state: OverallState):
        # One Send per subject -> one subgraph task per subject.
        return [Send("generate_joke", {"subject": s}) for s in state["subjects"]]
    class JokeState(TypedDict):
        subject: str
    def edit(state: JokeState):
        subject = state["subject"]
        return {"subject": f"{subject} - hohoho"}
    # subgraph
    subgraph = StateGraph(JokeState, output=OverallState)
    subgraph.add_node("edit", edit)
    subgraph.add_node(
        "generate", lambda state: {"jokes": [f"Joke about {state['subject']}"]}
    )
    subgraph.set_entry_point("edit")
    subgraph.add_edge("edit", "generate")
    subgraph.set_finish_point("generate")
    # parent graph
    builder = StateGraph(OverallState)
    builder.add_node(
        "generate_joke",
        subgraph.compile(interrupt_before=["generate"]),
    )
    builder.add_conditional_edges(START, continue_to_jokes)
    builder.add_edge("generate_joke", END)
    graph = builder.compile(checkpointer=checkpointer)
    config = {"configurable": {"thread_id": "1"}}
    tracer = FakeTracer()
    # invoke and pause at nested interrupt
    assert graph.invoke(
        {"subjects": ["cats", "dogs"]}, config={**config, "callbacks": [tracer]}
    ) == {
        "subjects": ["cats", "dogs"],
        "jokes": [],
    }
    assert len(tracer.runs) == 1, "Should produce exactly 1 root run"
    # check state
    outer_state = graph.get_state(config)
    if not FF_SEND_V2:
        # update state of dogs joke graph
        graph.update_state(outer_state.tasks[1].state, {"subject": "turtles - hohoho"})
        # continue past interrupt
        assert sorted(
            graph.stream(None, config=config),
            key=lambda d: d["generate_joke"]["jokes"][0],
        ) == [
            {"generate_joke": {"jokes": ["Joke about cats - hohoho"]}},
            {"generate_joke": {"jokes": ["Joke about turtles - hohoho"]}},
        ]
        # Send v1 path exercised; the remaining assertions are v2-only.
        return
    assert outer_state == StateSnapshot(
        values={"subjects": ["cats", "dogs"], "jokes": []},
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="__start__",
                path=("__pregel_pull", "__start__"),
                error=None,
                interrupts=(),
                state=None,
                result={"subjects": ["cats", "dogs"]},
            ),
            PregelTask(
                AnyStr(),
                "generate_joke",
                (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()),
                state={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("generate_joke:"),
                    }
                },
            ),
            PregelTask(
                AnyStr(),
                "generate_joke",
                (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()),
                state={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("generate_joke:"),
                    }
                },
            ),
        ),
        next=("generate_joke", "generate_joke"),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "input",
            "writes": {"__start__": {"subjects": ["cats", "dogs"]}},
            "step": -1,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config=None,
    )
    # check state of each of the inner tasks
    assert graph.get_state(outer_state.tasks[1].state) == StateSnapshot(
        values={"subject": "cats - hohoho", "jokes": []},
        next=("generate",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr("generate_joke:"),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("generate_joke:"): AnyStr(),
                    }
                ),
            }
        },
        metadata={
            "step": 1,
            "source": "loop",
            "writes": {"edit": None},
            "parents": {"": AnyStr()},
            "thread_id": "1",
            "checkpoint_ns": AnyStr("generate_joke:"),
            "langgraph_checkpoint_ns": AnyStr("generate_joke:"),
            "langgraph_node": "generate_joke",
            "langgraph_path": [PUSH, ["__pregel_pull", "__start__"], 1, AnyStr()],
            "langgraph_step": 0,
            "langgraph_triggers": [PUSH],
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr("generate_joke:"),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("generate_joke:"): AnyStr(),
                    }
                ),
            }
        },
        tasks=(PregelTask(id=AnyStr(""), name="generate", path=(PULL, "generate")),),
    )
    assert graph.get_state(outer_state.tasks[2].state) == StateSnapshot(
        values={"subject": "dogs - hohoho", "jokes": []},
        next=("generate",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr("generate_joke:"),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("generate_joke:"): AnyStr(),
                    }
                ),
            }
        },
        metadata={
            "step": 1,
            "source": "loop",
            "writes": {"edit": None},
            "parents": {"": AnyStr()},
            "thread_id": "1",
            "checkpoint_ns": AnyStr("generate_joke:"),
            "langgraph_checkpoint_ns": AnyStr("generate_joke:"),
            "langgraph_node": "generate_joke",
            "langgraph_path": [PUSH, ["__pregel_pull", "__start__"], 2, AnyStr()],
            "langgraph_step": 0,
            "langgraph_triggers": [PUSH],
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": AnyStr("generate_joke:"),
                "checkpoint_id": AnyStr(),
                "checkpoint_map": AnyDict(
                    {
                        "": AnyStr(),
                        AnyStr("generate_joke:"): AnyStr(),
                    }
                ),
            }
        },
        tasks=(PregelTask(id=AnyStr(""), name="generate", path=(PULL, "generate")),),
    )
    # update state of dogs joke graph
    graph.update_state(
        outer_state.tasks[2 if FF_SEND_V2 else 1].state, {"subject": "turtles - hohoho"}
    )
    # continue past interrupt
    assert sorted(
        graph.stream(None, config=config), key=lambda d: d["generate_joke"]["jokes"][0]
    ) == [
        {"generate_joke": {"jokes": ["Joke about cats - hohoho"]}},
        {"generate_joke": {"jokes": ["Joke about turtles - hohoho"]}},
    ]
    actual_snapshot = graph.get_state(config)
    expected_snapshot = StateSnapshot(
        values={
            "subjects": ["cats", "dogs"],
            "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"],
        },
        tasks=(),
        next=(),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "parents": {},
            "source": "loop",
            "writes": {
                "generate_joke": [
                    {"jokes": ["Joke about cats - hohoho"]},
                    {"jokes": ["Joke about turtles - hohoho"]},
                ]
            },
            "step": 0,
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
    )
    assert actual_snapshot == expected_snapshot
    # test full history
    actual_history = list(graph.get_state_history(config))
    # get subgraph node state for expected history
    expected_history = [
        StateSnapshot(
            values={
                "subjects": ["cats", "dogs"],
                "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"],
            },
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "generate_joke": [
                        {"jokes": ["Joke about cats - hohoho"]},
                        {"jokes": ["Joke about turtles - hohoho"]},
                    ]
                },
                "step": 0,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        ),
        StateSnapshot(
            values={"jokes": []},
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="__start__",
                    path=("__pregel_pull", "__start__"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={"subjects": ["cats", "dogs"]},
                ),
                PregelTask(
                    AnyStr(),
                    "generate_joke",
                    (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("generate_joke:"),
                        }
                    },
                    result={"jokes": ["Joke about cats - hohoho"]},
                ),
                PregelTask(
                    AnyStr(),
                    "generate_joke",
                    (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("generate_joke:"),
                        }
                    },
                    result={"jokes": ["Joke about turtles - hohoho"]},
                ),
            ),
            next=("__start__", "generate_joke", "generate_joke"),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "writes": {"__start__": {"subjects": ["cats", "dogs"]}},
                "step": -1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        ),
    ]
    assert actual_history == expected_history
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_weather_subgraph(
    request: pytest.FixtureRequest, checkpointer_name: str, snapshot: SnapshotAssertion
) -> None:
    """Route to a weather subgraph, pause at its interrupt, and update state.

    A router graph dispatches weather questions to a subgraph that is
    interrupted before its ``weather_node``.  The test covers:

    * custom-mode streaming (``StreamWriter``) across graph and subgraph,
    * inspecting the paused subgraph via ``get_state(..., subgraphs=True)``,
    * ``update_state`` on the subgraph task (including ``as_node`` to write
      as if the interrupted node had run), and
    * resuming to completion after each kind of update.

    Fix: the fake tool output previously read "I'ts sunny in ..." — corrected
    to "It's sunny in ..." here and in every assertion that checks it.
    """
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, ToolCall
    from langchain_core.tools import tool
    from langgraph.graph import MessagesState
    checkpointer = request.getfixturevalue("checkpointer_" + checkpointer_name)
    # setup subgraph
    @tool
    def get_weather(city: str):
        """Get the weather for a specific city"""
        return f"It's sunny in {city}!"
    # Fake model always emits a single get_weather tool call for SF.
    weather_model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    ToolCall(
                        id="tool_call123",
                        name="get_weather",
                        args={"city": "San Francisco"},
                    )
                ],
            )
        ]
    )
    class SubGraphState(MessagesState):
        city: str
    def model_node(state: SubGraphState, writer: StreamWriter):
        # Emits a custom-mode chunk, then records the requested city.
        writer(" very")
        result = weather_model.invoke(state["messages"])
        return {"city": cast(AIMessage, result).tool_calls[0]["args"]["city"]}
    def weather_node(state: SubGraphState, writer: StreamWriter):
        writer(" good")
        result = get_weather.invoke({"city": state["city"]})
        return {"messages": [{"role": "assistant", "content": result}]}
    subgraph = StateGraph(SubGraphState)
    subgraph.add_node(model_node)
    subgraph.add_node(weather_node)
    subgraph.add_edge(START, "model_node")
    subgraph.add_edge("model_node", "weather_node")
    subgraph.add_edge("weather_node", END)
    # Interrupt before weather_node so the run pauses inside the subgraph.
    subgraph = subgraph.compile(interrupt_before=["weather_node"])
    # setup main graph
    class RouterState(MessagesState):
        route: Literal["weather", "other"]
    router_model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    ToolCall(
                        id="tool_call123",
                        name="router",
                        args={"dest": "weather"},
                    )
                ],
            )
        ]
    )
    def router_node(state: RouterState, writer: StreamWriter):
        writer("I'm")
        system_message = "Classify the incoming query as either about weather or not."
        messages = [{"role": "system", "content": system_message}] + state["messages"]
        route = router_model.invoke(messages)
        return {"route": cast(AIMessage, route).tool_calls[0]["args"]["dest"]}
    def normal_llm_node(state: RouterState):
        return {"messages": [AIMessage("Hello!")]}
    def route_after_prediction(state: RouterState):
        if state["route"] == "weather":
            return "weather_graph"
        else:
            return "normal_llm_node"
    def weather_graph(state: RouterState):
        # Invoked as a plain node so the subgraph shares the parent thread.
        return subgraph.invoke(state)
    graph = StateGraph(RouterState)
    graph.add_node(router_node)
    graph.add_node(normal_llm_node)
    graph.add_node("weather_graph", weather_graph)
    graph.add_edge(START, "router_node")
    graph.add_conditional_edges("router_node", route_after_prediction)
    graph.add_edge("normal_llm_node", END)
    graph.add_edge("weather_graph", END)
    graph = graph.compile(checkpointer=checkpointer)
    assert graph.get_graph(xray=1).draw_mermaid() == snapshot
    config = {"configurable": {"thread_id": "1"}}
    thread2 = {"configurable": {"thread_id": "2"}}
    inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
    # run with custom output
    assert [c for c in graph.stream(inputs, thread2, stream_mode="custom")] == [
        "I'm",
        " very",
    ]
    assert [c for c in graph.stream(None, thread2, stream_mode="custom")] == [
        " good",
    ]
    # run until interrupt
    assert [
        c
        for c in graph.stream(
            inputs, config=config, stream_mode="updates", subgraphs=True
        )
    ] == [
        ((), {"router_node": {"route": "weather"}}),
        ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}),
        ((), {"__interrupt__": ()}),
    ]
    # check current state
    state = graph.get_state(config)
    assert state == StateSnapshot(
        values={
            "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
            "route": "weather",
        },
        next=("weather_graph",),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "source": "loop",
            "writes": {"router_node": {"route": "weather"}},
            "step": 1,
            "parents": {},
            "thread_id": "1",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="weather_graph",
                path=(PULL, "weather_graph"),
                state={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("weather_graph:"),
                    }
                },
            ),
        ),
    )
    # update
    graph.update_state(state.tasks[0].state, {"city": "la"})
    # run after update
    assert [
        c
        for c in graph.stream(
            None, config=config, stream_mode="updates", subgraphs=True
        )
    ] == [
        (
            (AnyStr("weather_graph:"),),
            {
                "weather_node": {
                    "messages": [{"role": "assistant", "content": "It's sunny in la!"}]
                }
            },
        ),
        (
            (),
            {
                "weather_graph": {
                    "messages": [
                        _AnyIdHumanMessage(content="what's the weather in sf"),
                        _AnyIdAIMessage(content="It's sunny in la!"),
                    ]
                }
            },
        ),
    ]
    # try updating acting as weather node
    config = {"configurable": {"thread_id": "14"}}
    inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
    assert [
        c
        for c in graph.stream(
            inputs, config=config, stream_mode="updates", subgraphs=True
        )
    ] == [
        ((), {"router_node": {"route": "weather"}}),
        ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}),
        ((), {"__interrupt__": ()}),
    ]
    state = graph.get_state(config, subgraphs=True)
    assert state == StateSnapshot(
        values={
            "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
            "route": "weather",
        },
        next=("weather_graph",),
        config={
            "configurable": {
                "thread_id": "14",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "source": "loop",
            "writes": {"router_node": {"route": "weather"}},
            "step": 1,
            "parents": {},
            "thread_id": "14",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "14",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="weather_graph",
                path=(PULL, "weather_graph"),
                state=StateSnapshot(
                    values={
                        "messages": [
                            _AnyIdHumanMessage(content="what's the weather in sf")
                        ],
                        "city": "San Francisco",
                    },
                    next=("weather_node",),
                    config={
                        "configurable": {
                            "thread_id": "14",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {
                                    "": AnyStr(),
                                    AnyStr("weather_graph:"): AnyStr(),
                                }
                            ),
                        }
                    },
                    metadata={
                        "source": "loop",
                        "writes": {"model_node": {"city": "San Francisco"}},
                        "step": 1,
                        "parents": {"": AnyStr()},
                        "thread_id": "14",
                        "checkpoint_ns": AnyStr("weather_graph:"),
                        "langgraph_node": "weather_graph",
                        "langgraph_path": [PULL, "weather_graph"],
                        "langgraph_step": 2,
                        "langgraph_triggers": [
                            "branch:router_node:route_after_prediction:weather_graph"
                        ],
                        "langgraph_checkpoint_ns": AnyStr("weather_graph:"),
                    },
                    created_at=AnyStr(),
                    parent_config={
                        "configurable": {
                            "thread_id": "14",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {
                                    "": AnyStr(),
                                    AnyStr("weather_graph:"): AnyStr(),
                                }
                            ),
                        }
                    },
                    tasks=(
                        PregelTask(
                            id=AnyStr(),
                            name="weather_node",
                            path=(PULL, "weather_node"),
                        ),
                    ),
                ),
            ),
        ),
    )
    # Write as if weather_node itself had run; its task is then consumed.
    graph.update_state(
        state.tasks[0].state.config,
        {"messages": [{"role": "assistant", "content": "rainy"}]},
        as_node="weather_node",
    )
    state = graph.get_state(config, subgraphs=True)
    assert state == StateSnapshot(
        values={
            "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
            "route": "weather",
        },
        next=("weather_graph",),
        config={
            "configurable": {
                "thread_id": "14",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "source": "loop",
            "writes": {"router_node": {"route": "weather"}},
            "step": 1,
            "parents": {},
            "thread_id": "14",
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "14",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(
            PregelTask(
                id=AnyStr(),
                name="weather_graph",
                path=(PULL, "weather_graph"),
                state=StateSnapshot(
                    values={
                        "messages": [
                            _AnyIdHumanMessage(content="what's the weather in sf"),
                            _AnyIdAIMessage(content="rainy"),
                        ],
                        "city": "San Francisco",
                    },
                    next=(),
                    config={
                        "configurable": {
                            "thread_id": "14",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {
                                    "": AnyStr(),
                                    AnyStr("weather_graph:"): AnyStr(),
                                }
                            ),
                        }
                    },
                    metadata={
                        "step": 2,
                        "source": "update",
                        "writes": {
                            "weather_node": {
                                "messages": [{"role": "assistant", "content": "rainy"}]
                            }
                        },
                        "parents": {"": AnyStr()},
                        "thread_id": "14",
                        "checkpoint_id": AnyStr(),
                        "checkpoint_ns": AnyStr("weather_graph:"),
                        "langgraph_node": "weather_graph",
                        "langgraph_path": [PULL, "weather_graph"],
                        "langgraph_step": 2,
                        "langgraph_triggers": [
                            "branch:router_node:route_after_prediction:weather_graph"
                        ],
                        "langgraph_checkpoint_ns": AnyStr("weather_graph:"),
                    },
                    created_at=AnyStr(),
                    parent_config={
                        "configurable": {
                            "thread_id": "14",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "checkpoint_id": AnyStr(),
                            "checkpoint_map": AnyDict(
                                {
                                    "": AnyStr(),
                                    AnyStr("weather_graph:"): AnyStr(),
                                }
                            ),
                        }
                    },
                    tasks=(),
                ),
            ),
        ),
    )
    assert [
        c
        for c in graph.stream(
            None, config=config, stream_mode="updates", subgraphs=True
        )
    ] == [
        (
            (),
            {
                "weather_graph": {
                    "messages": [
                        _AnyIdHumanMessage(content="what's the weather in sf"),
                        _AnyIdAIMessage(content="rainy"),
                    ]
                }
            },
        ),
    ]
def test_repeat_condition(snapshot: SnapshotAssertion) -> None:
    """A graph whose conditional edges can route back to their own source
    renders to the expected mermaid diagram (structure only, no styles)."""

    class AgentState(TypedDict):
        hello: str

    def decide(state: AgentState) -> str:
        # Never executed here — only the declared edge structure matters
        # for rendering.
        return "hmm"

    builder = StateGraph(AgentState)
    for node_name in ("Researcher", "Chart Generator", "Call Tool"):
        builder.add_node(node_name, lambda x: x)
    builder.add_conditional_edges(
        "Researcher",
        decide,
        {
            "redo": "Researcher",
            "continue": "Chart Generator",
            "call_tool": "Call Tool",
            "end": END,
        },
    )
    builder.add_conditional_edges(
        "Chart Generator",
        decide,
        {"continue": "Researcher", "call_tool": "Call Tool", "end": END},
    )
    builder.add_conditional_edges(
        "Call Tool",
        # Each agent node updates the 'sender' field; the tool-calling node
        # does not, so this edge routes back to the agent that invoked the tool.
        lambda x: x["sender"],
        {
            "Researcher": "Researcher",
            "Chart Generator": "Chart Generator",
        },
    )
    builder.set_entry_point("Researcher")
    compiled = builder.compile()
    assert compiled.get_graph().draw_mermaid(with_styles=False) == snapshot
def test_checkpoint_metadata() -> None:
    """This test verifies that a run's configurable fields are merged with the
    previous checkpoint config for each step in the run.

    Runs the same tool-calling workflow twice: once uninterrupted (thread "1")
    and once with interrupt_before=["tools"] plus a resume (thread "2"), and
    checks that every persisted checkpoint's metadata carries the run's custom
    configurable keys.
    """
    # set up test
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, AnyMessage
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.tools import tool
    # graph state
    class BaseState(TypedDict):
        messages: Annotated[list[AnyMessage], add_messages]
    # initialize graph nodes
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"
    tools = [search_api]
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a nice assistant."),
            ("placeholder", "{messages}"),
        ]
    )
    # Scripted model: first response issues a tool call, second is the answer.
    model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            AIMessage(content="answer"),
        ]
    )
    @traceable(run_type="llm")
    def agent(state: BaseState) -> BaseState:
        formatted = prompt.invoke(state)
        response = model.invoke(formatted)
        # NOTE(review): "usage_metadata" is not declared in BaseState —
        # presumably tolerated/stripped by the runtime; confirm intended.
        return {"messages": response, "usage_metadata": {"total_tokens": 123}}
    def should_continue(data: BaseState) -> str:
        # Logic to decide whether to continue in the loop or exit
        if not data["messages"][-1].tool_calls:
            return "exit"
        else:
            return "continue"
    # define graphs w/ and w/o interrupt
    workflow = StateGraph(BaseState)
    workflow.add_node("agent", agent)
    workflow.add_node("tools", ToolNode(tools))
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges(
        "agent", should_continue, {"continue": "tools", "exit": END}
    )
    workflow.add_edge("tools", "agent")
    # graph w/o interrupt
    checkpointer_1 = MemorySaverAssertCheckpointMetadata()
    app = workflow.compile(checkpointer=checkpointer_1)
    # graph w/ interrupt
    checkpointer_2 = MemorySaverAssertCheckpointMetadata()
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer_2, interrupt_before=["tools"]
    )
    # assertions
    # invoke graph w/o interrupt
    assert app.invoke(
        {"messages": ["what is weather in sf"]},
        {
            "configurable": {
                "thread_id": "1",
                "test_config_1": "foo",
                "test_config_2": "bar",
            },
        },
    ) == {
        "messages": [
            _AnyIdHumanMessage(content="what is weather in sf"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "search_api",
                        "args": {"query": "query"},
                        "id": "tool_call123",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            _AnyIdAIMessage(content="answer"),
        ]
    }
    config = {"configurable": {"thread_id": "1"}}
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_1 = checkpointer_1.get_tuple(config).metadata
    assert chkpnt_metadata_1["thread_id"] == "1"
    assert chkpnt_metadata_1["test_config_1"] == "foo"
    assert chkpnt_metadata_1["test_config_2"] == "bar"
    # Verify that all checkpoint metadata have the expected keys. This check
    # is needed because a run may have an arbitrary number of steps depending
    # on how the graph is constructed.
    chkpnt_tuples_1 = checkpointer_1.list(config)
    for chkpnt_tuple in chkpnt_tuples_1:
        assert chkpnt_tuple.metadata["thread_id"] == "1"
        assert chkpnt_tuple.metadata["test_config_1"] == "foo"
        assert chkpnt_tuple.metadata["test_config_2"] == "bar"
    # invoke graph, but interrupt before tool call
    app_w_interrupt.invoke(
        {"messages": ["what is weather in sf"]},
        {
            "configurable": {
                "thread_id": "2",
                "test_config_3": "foo",
                "test_config_4": "bar",
            },
        },
    )
    config = {"configurable": {"thread_id": "2"}}
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_2 = checkpointer_2.get_tuple(config).metadata
    assert chkpnt_metadata_2["thread_id"] == "2"
    assert chkpnt_metadata_2["test_config_3"] == "foo"
    assert chkpnt_metadata_2["test_config_4"] == "bar"
    # resume graph execution
    app_w_interrupt.invoke(
        input=None,
        config={
            "configurable": {
                "thread_id": "2",
                "test_config_3": "foo",
                "test_config_4": "bar",
            }
        },
    )
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_3 = checkpointer_2.get_tuple(config).metadata
    assert chkpnt_metadata_3["thread_id"] == "2"
    assert chkpnt_metadata_3["test_config_3"] == "foo"
    assert chkpnt_metadata_3["test_config_4"] == "bar"
    # Verify that all checkpoint metadata have the expected keys. This check
    # is needed because a run may have an arbitrary number of steps depending
    # on how the graph is constructed.
    chkpnt_tuples_2 = checkpointer_2.list(config)
    for chkpnt_tuple in chkpnt_tuples_2:
        assert chkpnt_tuple.metadata["thread_id"] == "2"
        assert chkpnt_tuple.metadata["test_config_3"] == "foo"
        assert chkpnt_tuple.metadata["test_config_4"] == "bar"
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_remove_message_via_state_update(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """A RemoveMessage applied via update_state deletes the targeted message."""
    from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage

    builder = MessageGraph()
    builder.add_node(
        "chatbot",
        lambda state: [
            AIMessage(
                content="Hello! How can I help you",
            )
        ],
    )
    builder.set_entry_point("chatbot")
    builder.add_edge("chatbot", END)
    saver = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    graph = builder.compile(checkpointer=saver)
    thread = {"configurable": {"thread_id": "1"}}
    result = graph.invoke([HumanMessage(content="Hi")], config=thread)
    # Remove the AI reply (the last message) through a state update.
    graph.update_state(thread, values=[RemoveMessage(id=result[-1].id)])
    remaining = graph.get_state(thread)
    assert len(remaining.values) == 1
    assert remaining.values[-1].content == "Hi"
def test_remove_message_from_node():
    """A node can emit RemoveMessage to delete an earlier message from state."""
    from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage

    builder = MessageGraph()
    builder.add_node(
        "chatbot",
        lambda state: [
            AIMessage(
                content="Hello!",
            ),
            AIMessage(
                content="How can I help you?",
            ),
        ],
    )
    # Deletes the second-to-last message, i.e. the first AI reply.
    builder.add_node("delete_messages", lambda state: [RemoveMessage(id=state[-2].id)])
    builder.set_entry_point("chatbot")
    builder.add_edge("chatbot", "delete_messages")
    builder.add_edge("delete_messages", END)
    graph = builder.compile()
    result = graph.invoke([HumanMessage(content="Hi")])
    assert len(result) == 2
    assert result[-1].content == "How can I help you?"
def test_xray_lance(snapshot: SnapshotAssertion):
    """Snapshot test: JSON rendering of a research workflow (with and without
    xray) whose map step fans out into a compiled interview subgraph via Send.

    The graphs are only rendered, never invoked, so node bodies may be stubs.
    """
    from langchain_core.messages import AnyMessage, HumanMessage
    from pydantic import BaseModel, Field
    class Analyst(BaseModel):
        affiliation: str = Field(
            description="Primary affiliation of the investment analyst.",
        )
        name: str = Field(
            description="Name of the investment analyst.",
            pattern=r"^[a-zA-Z0-9_-]{1,64}$",
        )
        role: str = Field(
            description="Role of the investment analyst in the context of the topic.",
        )
        description: str = Field(
            description="Description of the investment analyst focus, concerns, and motives.",
        )
        @property
        def persona(self) -> str:
            return f"Name: {self.name}\nRole: {self.role}\nAffiliation: {self.affiliation}\nDescription: {self.description}\n"
    class Perspectives(BaseModel):
        analysts: List[Analyst] = Field(
            description="Comprehensive list of investment analysts with their roles and affiliations.",
        )
    class Section(BaseModel):
        section_title: str = Field(..., title="Title of the section")
        context: str = Field(
            ..., title="Provide a clear summary of the focus area that you researched."
        )
        findings: str = Field(
            ...,
            title="Give a clear and detailed overview of your findings based upon the expert interview.",
        )
        thesis: str = Field(
            ...,
            title="Give a clear and specific investment thesis based upon these findings.",
        )
    # State for the inner interview subgraph.
    class InterviewState(TypedDict):
        messages: Annotated[List[AnyMessage], add_messages]
        analyst: Analyst
        section: Section
    # State for the outer research graph.
    class ResearchGraphState(TypedDict):
        analysts: List[Analyst]
        topic: str
        max_analysts: int
        sections: List[Section]
        interviews: Annotated[list, operator.add]
    # Conditional edge
    def route_messages(state):
        return "ask_question"
    def generate_question(state):
        return ...
    def generate_answer(state):
        return ...
    # Add nodes and edges
    interview_builder = StateGraph(InterviewState)
    interview_builder.add_node("ask_question", generate_question)
    interview_builder.add_node("answer_question", generate_answer)
    # Flow
    interview_builder.add_edge(START, "ask_question")
    interview_builder.add_edge("ask_question", "answer_question")
    interview_builder.add_conditional_edges("answer_question", route_messages)
    # Set up memory
    memory = MemorySaver()
    # Interview
    interview_graph = interview_builder.compile(checkpointer=memory).with_config(
        run_name="Conduct Interviews"
    )
    # View
    assert interview_graph.get_graph().to_json() == snapshot
    def run_all_interviews(state: ResearchGraphState):
        """Edge to run the interview sub-graph using Send"""
        # NOTE(review): Analyst() would fail validation (required fields),
        # but this edge is never executed — the graph is only rendered.
        return [
            Send(
                "conduct_interview",
                {
                    "analyst": Analyst(),
                    "messages": [
                        HumanMessage(
                            content="So you said you were writing an article on ...?"
                        )
                    ],
                },
            )
            for s in state["analysts"]
        ]
    def generate_sections(state: ResearchGraphState):
        return ...
    def generate_analysts(state: ResearchGraphState):
        return ...
    builder = StateGraph(ResearchGraphState)
    builder.add_node("generate_analysts", generate_analysts)
    builder.add_node("conduct_interview", interview_builder.compile())
    builder.add_node("generate_sections", generate_sections)
    builder.add_edge(START, "generate_analysts")
    builder.add_conditional_edges(
        "generate_analysts", run_all_interviews, ["conduct_interview"]
    )
    builder.add_edge("conduct_interview", "generate_sections")
    builder.add_edge("generate_sections", END)
    graph = builder.compile()
    # View
    assert graph.get_graph().to_json() == snapshot
    assert graph.get_graph(xray=1).to_json() == snapshot
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_channel_values(request: pytest.FixtureRequest, checkpointer_name: str) -> None:
    """Values written to an EphemeralValue channel are not persisted in the
    checkpoint's channel_values, while LastValue channels are."""
    saver = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    thread = {"configurable": {"thread_id": "1"}}
    pregel = Pregel(
        nodes={
            "one": Channel.subscribe_to("input") | Channel.write_to("output"),
        },
        channels={
            "ephemeral": EphemeralValue(Any),
            "input": LastValue(int),
            "output": LastValue(int),
        },
        input_channels=["input", "ephemeral"],
        output_channels="output",
        checkpointer=saver,
    )
    pregel.invoke({"input": 1, "ephemeral": "meow"}, thread)
    # "ephemeral" must be absent from the saved checkpoint.
    assert saver.get(thread)["channel_values"] == {"input": 1, "output": 1}
def test_xray_issue(snapshot: SnapshotAssertion) -> None:
    """Mermaid rendering with xray=True expands a nested compiled subgraph."""

    class State(TypedDict):
        messages: Annotated[list, add_messages]

    def make_node(label):
        # Each node simply records that it was entered.
        def _node(state: State):
            return {"messages": [("human", f"entered {label} node")]}

        return _node

    child = StateGraph(State)
    child.add_node("c_one", make_node("c_one"))
    child.add_node("c_two", make_node("c_two"))
    child.add_edge("__start__", "c_one")
    child.add_edge("c_two", "c_one")
    child.add_conditional_edges(
        "c_one", lambda x: str(randrange(0, 2)), {"0": "c_two", "1": "__end__"}
    )
    parent = StateGraph(State)
    parent.add_node("p_one", make_node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge("__start__", "p_one")
    parent.add_edge("p_two", "p_one")
    parent.add_conditional_edges(
        "p_one", lambda x: str(randrange(0, 2)), {"0": "p_two", "1": "__end__"}
    )
    compiled = parent.compile()
    assert compiled.get_graph(xray=True).draw_mermaid() == snapshot
def test_xray_bool(snapshot: SnapshotAssertion) -> None:
    """Mermaid rendering with xray=True expands two levels of nested subgraphs."""

    class State(TypedDict):
        messages: Annotated[list, add_messages]

    def make_node(label):
        # Each node simply records that it was entered.
        def _node(state: State):
            return {"messages": [("human", f"entered {label} node")]}

        return _node

    child = StateGraph(State)
    child.add_node("c_one", make_node("c_one"))
    child.add_node("c_two", make_node("c_two"))
    child.add_edge("__start__", "c_one")
    child.add_edge("c_two", "c_one")
    child.add_conditional_edges(
        "c_one", lambda x: str(randrange(0, 2)), {"0": "c_two", "1": "__end__"}
    )
    parent = StateGraph(State)
    parent.add_node("p_one", make_node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge("__start__", "p_one")
    parent.add_edge("p_two", "p_one")
    parent.add_conditional_edges(
        "p_one", lambda x: str(randrange(0, 2)), {"0": "p_two", "1": "__end__"}
    )
    grand_parent = StateGraph(State)
    grand_parent.add_node("gp_one", make_node("gp_one"))
    grand_parent.add_node("gp_two", parent.compile())
    grand_parent.add_edge("__start__", "gp_one")
    grand_parent.add_edge("gp_two", "gp_one")
    grand_parent.add_conditional_edges(
        "gp_one", lambda x: str(randrange(0, 2)), {"0": "gp_two", "1": "__end__"}
    )
    compiled = grand_parent.compile()
    assert compiled.get_graph(xray=True).draw_mermaid() == snapshot
def test_multiple_sinks_subgraphs(snapshot: SnapshotAssertion) -> None:
    """Mermaid rendering where a conditional edge targets both a plain node
    and a compiled subgraph as possible sinks."""

    class State(TypedDict):
        messages: Annotated[list, add_messages]

    inner = StateGraph(State)
    for name in ("one", "two", "three"):
        inner.add_node(name, lambda x: x)
    inner.add_edge("__start__", "one")
    inner.add_conditional_edges("one", lambda x: "two", ["two", "three"])

    outer = StateGraph(State)
    outer.add_node("uno", lambda x: x)
    outer.add_node("dos", lambda x: x)
    outer.add_node("subgraph", inner.compile())
    outer.add_edge("__start__", "uno")
    outer.add_conditional_edges("uno", lambda x: "dos", ["dos", "subgraph"])

    compiled = outer.compile()
    assert compiled.get_graph(xray=True).draw_mermaid() == snapshot
def test_subgraph_retries():
    """A RetryPolicy on a subgraph node retries only the failing subgraph step.

    The subgraph's first node asserts it runs exactly once: retries of the
    failing second node must resume from the subgraph checkpoint rather than
    re-running the whole subgraph.
    """
    class State(TypedDict):
        count: int
    class ChildState(State):
        some_list: Annotated[list, operator.add]
    # Counts executions of child_node_a across retries.
    called_times = 0
    class RandomError(ValueError):
        """This will be retried on."""
    def parent_node(state: State):
        return {"count": state["count"] + 1}
    def child_node_a(state: ChildState):
        nonlocal called_times
        # We want it to retry only on node_b
        # NOT re-compute the whole graph.
        assert not called_times
        called_times += 1
        return {"some_list": ["val"]}
    def child_node_b(state: ChildState):
        # Always fails; RetryPolicy retries it up to max_attempts times.
        raise RandomError("First attempt fails")
    child = StateGraph(ChildState)
    child.add_node(child_node_a)
    child.add_node(child_node_b)
    child.add_edge("__start__", "child_node_a")
    child.add_edge("child_node_a", "child_node_b")
    parent = StateGraph(State)
    parent.add_node("parent_node", parent_node)
    parent.add_node(
        "child_graph",
        child.compile(),
        retry=RetryPolicy(
            max_attempts=3,
            retry_on=(RandomError,),
            backoff_factor=0.0001,
            initial_interval=0.0001,
        ),
    )
    parent.add_edge("parent_node", "child_graph")
    parent.set_entry_point("parent_node")
    checkpointer = MemorySaver()
    app = parent.compile(checkpointer=checkpointer)
    # All retries exhausted -> the error propagates to the caller.
    with pytest.raises(RandomError):
        app.invoke({"count": 0}, {"configurable": {"thread_id": "foo"}})
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
@pytest.mark.parametrize("store_name", ALL_STORES_SYNC)
def test_store_injected(
    request: pytest.FixtureRequest, checkpointer_name: str, store_name: str
) -> None:
    """The compiled graph injects its BaseStore into nodes, and concurrent
    writes to the same (namespace, key) leave exactly one document whose
    content reflects the last write."""
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    the_store = request.getfixturevalue(f"store_{store_name}")
    class State(TypedDict):
        count: Annotated[int, operator.add]
    doc_id = str(uuid.uuid4())
    doc = {"some-key": "this-is-a-val"}
    uid = uuid.uuid4().hex
    namespace = (f"foo-{uid}", "bar")
    thread_1 = str(uuid.uuid4())
    thread_2 = str(uuid.uuid4())
    class Node:
        # i: index of this node instance; None for the base "node".
        def __init__(self, i: Optional[int] = None):
            self.i = i
        def __call__(self, inputs: State, config: RunnableConfig, store: BaseStore):
            assert isinstance(store, BaseStore)
            # Indexed nodes on the known threads all write the shared
            # namespace (same doc_id, last write wins); otherwise each node
            # writes its own per-index namespace.
            store.put(
                namespace
                if self.i is not None
                and config["configurable"]["thread_id"] in (thread_1, thread_2)
                else (f"foo_{self.i}", "bar"),
                doc_id,
                {
                    **doc,
                    "from_thread": config["configurable"]["thread_id"],
                    "some_val": inputs["count"],
                },
            )
            return {"count": 1}
    builder = StateGraph(State)
    builder.add_node("node", Node())
    builder.add_edge("__start__", "node")
    N = 500
    M = 1
    if "duckdb" in store_name:
        logger.warning(
            "DuckDB store implementation has a known issue that does not"
            " support concurrent writes, so we're reducing the test scope"
        )
        N = M = 1
    # N parallel nodes all triggered from __start__.
    for i in range(N):
        builder.add_node(f"node_{i}", Node(i))
        builder.add_edge("__start__", f"node_{i}")
    graph = builder.compile(store=the_store, checkpointer=checkpointer)
    # M concurrent runs; only the last one uses thread_1.
    results = graph.batch(
        [{"count": 0}] * M,
        ([{"configurable": {"thread_id": str(uuid.uuid4())}}] * (M - 1))
        + [{"configurable": {"thread_id": thread_1}}],
    )
    result = results[-1]
    assert result == {"count": N + 1}
    returned_doc = the_store.get(namespace, doc_id).value
    assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 0}
    assert len(the_store.search(namespace)) == 1
    # Check results after another turn of the same thread
    result = graph.invoke({"count": 0}, {"configurable": {"thread_id": thread_1}})
    assert result == {"count": (N + 1) * 2}
    returned_doc = the_store.get(namespace, doc_id).value
    assert returned_doc == {**doc, "from_thread": thread_1, "some_val": N + 1}
    assert len(the_store.search(namespace)) == 1
    result = graph.invoke({"count": 0}, {"configurable": {"thread_id": thread_2}})
    assert result == {"count": N + 1}
    returned_doc = the_store.get(namespace, doc_id).value
    assert returned_doc == {
        **doc,
        "from_thread": thread_2,
        "some_val": 0,
    }  # Overwrites the whole doc
    assert len(the_store.search(namespace)) == 1  # still overwriting the same one
def test_enum_node_names():
    """Enum members whose values are strings are accepted as node names."""

    class NodeName(str, enum.Enum):
        BAZ = "baz"

    class State(TypedDict):
        foo: str
        bar: str

    def baz(state: State):
        return {"bar": state["foo"] + "!"}

    builder = StateGraph(State)
    builder.add_node(NodeName.BAZ, baz)
    builder.add_edge(START, NodeName.BAZ)
    builder.add_edge(NodeName.BAZ, END)
    compiled = builder.compile()
    assert compiled.invoke({"foo": "hello"}) == {"foo": "hello", "bar": "hello!"}
def test_debug_retry():
    """Checkpoints streamed in debug mode during a re-run of step 1 match the
    checkpoints the checkpointer itself reports via get_state_history."""
    class State(TypedDict):
        messages: Annotated[list[str], operator.add]
    def node(name):
        def _node(state: State):
            return {"messages": [f"entered {name} node"]}
        return _node
    builder = StateGraph(State)
    builder.add_node("one", node("one"))
    builder.add_node("two", node("two"))
    builder.add_edge(START, "one")
    builder.add_edge("one", "two")
    builder.add_edge("two", END)
    saver = MemorySaver()
    graph = builder.compile(checkpointer=saver)
    config = {"configurable": {"thread_id": "1"}}
    graph.invoke({"messages": []}, config=config)
    # re-run step: 1
    target_config = next(
        c.parent_config for c in saver.list(config) if c.metadata["step"] == 1
    )
    # Forking the state at that checkpoint yields a config to resume from.
    update_config = graph.update_state(target_config, values=None)
    events = [*graph.stream(None, config=update_config, stream_mode="debug")]
    # Oldest-first list of streamed checkpoint payloads.
    checkpoint_events = list(
        reversed([e["payload"] for e in events if e["type"] == "checkpoint"])
    )
    checkpoint_history = {
        c.config["configurable"]["checkpoint_id"]: c
        for c in graph.get_state_history(config)
    }
    def lax_normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Compare configs by their "configurable" payload only.
        if config is None:
            return None
        return config["configurable"]
    for stream in checkpoint_events:
        stream_conf = lax_normalize_config(stream["config"])
        stream_parent_conf = lax_normalize_config(stream["parent_config"])
        assert stream_conf != stream_parent_conf
        # ensure the streamed checkpoint == checkpoint from checkpointer.list()
        history = checkpoint_history[stream["config"]["configurable"]["checkpoint_id"]]
        history_conf = lax_normalize_config(history.config)
        assert stream_conf == history_conf
        history_parent_conf = lax_normalize_config(history.parent_config)
        assert stream_parent_conf == history_parent_conf
def test_debug_subgraphs():
    """Debug-mode checkpoint events for a graph containing a subgraph agree
    field-by-field with the snapshots returned by get_state_history."""
    class State(TypedDict):
        messages: Annotated[list[str], operator.add]
    def node(name):
        def _node(state: State):
            return {"messages": [f"entered {name} node"]}
        return _node
    parent = StateGraph(State)
    child = StateGraph(State)
    child.add_node("c_one", node("c_one"))
    child.add_node("c_two", node("c_two"))
    child.add_edge(START, "c_one")
    child.add_edge("c_one", "c_two")
    child.add_edge("c_two", END)
    parent.add_node("p_one", node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge(START, "p_one")
    parent.add_edge("p_one", "p_two")
    parent.add_edge("p_two", END)
    graph = parent.compile(checkpointer=MemorySaver())
    config = {"configurable": {"thread_id": "1"}}
    events = [
        *graph.stream(
            {"messages": []},
            config=config,
            stream_mode="debug",
        )
    ]
    # Streamed newest-last; history is newest-first, so reverse to align.
    checkpoint_events = list(
        reversed([e["payload"] for e in events if e["type"] == "checkpoint"])
    )
    checkpoint_history = list(graph.get_state_history(config))
    assert len(checkpoint_events) == len(checkpoint_history)
    def lax_normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Compare configs by their "configurable" payload only.
        if config is None:
            return None
        return config["configurable"]
    for stream, history in zip(checkpoint_events, checkpoint_history):
        assert stream["values"] == history.values
        assert stream["next"] == list(history.next)
        assert lax_normalize_config(stream["config"]) == lax_normalize_config(
            history.config
        )
        assert lax_normalize_config(stream["parent_config"]) == lax_normalize_config(
            history.parent_config
        )
        assert len(stream["tasks"]) == len(history.tasks)
        for stream_task, history_task in zip(stream["tasks"], history.tasks):
            assert stream_task["id"] == history_task.id
            assert stream_task["name"] == history_task.name
            assert stream_task["interrupts"] == history_task.interrupts
            assert stream_task.get("error") == history_task.error
            assert stream_task.get("state") == history_task.state
def test_debug_nested_subgraphs():
    """Debug-mode checkpoint events, streamed with subgraphs=True across two
    levels of nesting, match get_state_history for each namespace."""
    from collections import defaultdict
    class State(TypedDict):
        messages: Annotated[list[str], operator.add]
    def node(name):
        def _node(state: State):
            return {"messages": [f"entered {name} node"]}
        return _node
    grand_parent = StateGraph(State)
    parent = StateGraph(State)
    child = StateGraph(State)
    child.add_node("c_one", node("c_one"))
    child.add_node("c_two", node("c_two"))
    child.add_edge(START, "c_one")
    child.add_edge("c_one", "c_two")
    child.add_edge("c_two", END)
    parent.add_node("p_one", node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge(START, "p_one")
    parent.add_edge("p_one", "p_two")
    parent.add_edge("p_two", END)
    grand_parent.add_node("gp_one", node("gp_one"))
    grand_parent.add_node("gp_two", parent.compile())
    grand_parent.add_edge(START, "gp_one")
    grand_parent.add_edge("gp_one", "gp_two")
    grand_parent.add_edge("gp_two", END)
    graph = grand_parent.compile(checkpointer=MemorySaver())
    config = {"configurable": {"thread_id": "1"}}
    events = [
        *graph.stream(
            {"messages": []},
            config=config,
            stream_mode="debug",
            subgraphs=True,
        )
    ]
    # Checkpoint payloads grouped by namespace tuple, in stream order.
    stream_ns: dict[tuple, list[dict]] = defaultdict(list)
    for ns, e in events:
        if e["type"] == "checkpoint":
            stream_ns[ns].append(e["payload"])
    assert list(stream_ns.keys()) == [
        (),
        (AnyStr("gp_two:"),),
        (AnyStr("gp_two:"), AnyStr("p_two:")),
    ]
    # History per namespace, reversed to oldest-first to align with stream.
    history_ns = {
        ns: list(
            graph.get_state_history(
                {"configurable": {"thread_id": "1", "checkpoint_ns": "|".join(ns)}}
            )
        )[::-1]
        for ns in stream_ns.keys()
    }
    def normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Keep only the fields relevant for comparison.
        if config is None:
            return None
        clean_config = {}
        clean_config["thread_id"] = config["configurable"]["thread_id"]
        clean_config["checkpoint_id"] = config["configurable"]["checkpoint_id"]
        clean_config["checkpoint_ns"] = config["configurable"]["checkpoint_ns"]
        if "checkpoint_map" in config["configurable"]:
            clean_config["checkpoint_map"] = config["configurable"]["checkpoint_map"]
        return clean_config
    for checkpoint_events, checkpoint_history in zip(
        stream_ns.values(), history_ns.values()
    ):
        for stream, history in zip(checkpoint_events, checkpoint_history):
            assert stream["values"] == history.values
            assert stream["next"] == list(history.next)
            assert normalize_config(stream["config"]) == normalize_config(
                history.config
            )
            assert normalize_config(stream["parent_config"]) == normalize_config(
                history.parent_config
            )
            assert len(stream["tasks"]) == len(history.tasks)
            for stream_task, history_task in zip(stream["tasks"], history.tasks):
                assert stream_task["id"] == history_task.id
                assert stream_task["name"] == history_task.name
                assert stream_task["interrupts"] == history_task.interrupts
                assert stream_task.get("error") == history_task.error
                assert stream_task.get("state") == history_task.state
def test_add_sequence():
    """Behavior of StateGraph.add_sequence: validation, unnamed and named
    steps, multiple sequences, and mixing sequences with nodes and
    conditional edges."""
    class State(TypedDict):
        foo: Annotated[list[str], operator.add]
        bar: str
    def step1(state: State):
        return {"foo": ["step1"], "bar": "baz"}
    def step2(state: State):
        return {"foo": ["step2"]}
    # test raising if less than 1 steps
    with pytest.raises(ValueError):
        StateGraph(State).add_sequence([])
    # test raising if duplicate step names
    with pytest.raises(ValueError):
        StateGraph(State).add_sequence([step1, step1])
    with pytest.raises(ValueError):
        StateGraph(State).add_sequence([("foo", step1), ("foo", step1)])
    # test unnamed steps
    builder = StateGraph(State)
    builder.add_sequence([step1, step2])
    builder.add_edge(START, "step1")
    graph = builder.compile()
    result = graph.invoke({"foo": []})
    assert result == {"foo": ["step1", "step2"], "bar": "baz"}
    stream_chunks = list(graph.stream({"foo": []}))
    assert stream_chunks == [
        {"step1": {"foo": ["step1"], "bar": "baz"}},
        {"step2": {"foo": ["step2"]}},
    ]
    # test named steps
    builder_named_steps = StateGraph(State)
    builder_named_steps.add_sequence([("meow1", step1), ("meow2", step2)])
    builder_named_steps.add_edge(START, "meow1")
    graph_named_steps = builder_named_steps.compile()
    result = graph_named_steps.invoke({"foo": []})
    stream_chunks = list(graph_named_steps.stream({"foo": []}))
    assert result == {"foo": ["step1", "step2"], "bar": "baz"}
    assert stream_chunks == [
        {"meow1": {"foo": ["step1"], "bar": "baz"}},
        {"meow2": {"foo": ["step2"]}},
    ]
    builder_named_steps = StateGraph(State)
    builder_named_steps.add_sequence(
        [
            ("meow1", lambda state: {"foo": ["foo"]}),
            ("meow2", lambda state: {"bar": state["foo"][0] + "bar"}),
        ],
    )
    builder_named_steps.add_edge(START, "meow1")
    graph_named_steps = builder_named_steps.compile()
    result = graph_named_steps.invoke({"foo": []})
    stream_chunks = list(graph_named_steps.stream({"foo": []}))
    # filtered by output schema
    assert result == {"bar": "foobar", "foo": ["foo"]}
    assert stream_chunks == [
        {"meow1": {"foo": ["foo"]}},
        {"meow2": {"bar": "foobar"}},
    ]
    # test two sequences
    def a(state: State):
        return {"foo": ["a"]}
    def b(state: State):
        return {"foo": ["b"]}
    builder_two_sequences = StateGraph(State)
    builder_two_sequences.add_sequence([a])
    builder_two_sequences.add_sequence([b])
    builder_two_sequences.add_edge(START, "a")
    builder_two_sequences.add_edge("a", "b")
    graph_two_sequences = builder_two_sequences.compile()
    result = graph_two_sequences.invoke({"foo": []})
    assert result == {"foo": ["a", "b"]}
    stream_chunks = list(graph_two_sequences.stream({"foo": []}))
    assert stream_chunks == [
        {"a": {"foo": ["a"]}},
        {"b": {"foo": ["b"]}},
    ]
    # test mixed nodes and sequences
    def c(state: State):
        return {"foo": ["c"]}
    def d(state: State):
        return {"foo": ["d"]}
    def e(state: State):
        return {"foo": ["e"]}
    def foo(state: State):
        # Routes to "d" only when the accumulated list starts with "a".
        if state["foo"][0] == "a":
            return "d"
        else:
            return "c"
    builder_complex = StateGraph(State)
    builder_complex.add_sequence([a, b])
    builder_complex.add_conditional_edges("b", foo)
    builder_complex.add_node(c)
    builder_complex.add_sequence([d, e])
    builder_complex.add_edge(START, "a")
    graph_complex = builder_complex.compile()
    result = graph_complex.invoke({"foo": []})
    assert result == {"foo": ["a", "b", "d", "e"]}
    result = graph_complex.invoke({"foo": ["start"]})
    assert result == {"foo": ["start", "a", "b", "c"]}
    stream_chunks = list(graph_complex.stream({"foo": []}))
    assert stream_chunks == [
        {"a": {"foo": ["a"]}},
        {"b": {"foo": ["b"]}},
        {"d": {"foo": ["d"]}},
        {"e": {"foo": ["e"]}},
    ]
def test_runnable_passthrough_node_graph() -> None:
    """A node built from a runnable sequence renders identically with and
    without xray (the sequence is not expanded as a subgraph)."""

    class State(TypedDict):
        changeme: str

    async def dummy(state):
        return state

    agent = dummy | RunnablePassthrough.assign(prediction=RunnableLambda(lambda x: x))
    builder = StateGraph(State)
    builder.add_node("agent", agent)
    builder.add_edge(START, "agent")
    compiled = builder.compile()
    with_xray = compiled.get_graph(xray=True).to_json()
    without_xray = compiled.get_graph(xray=False).to_json()
    assert with_xray == without_xray
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_parent_command(request: pytest.FixtureRequest, checkpointer_name: str) -> None:
    """A Command(graph=Command.PARENT) returned by a subgraph tool updates a
    parent-state key that is not part of the subgraph's own schema."""
    from langchain_core.messages import BaseMessage
    from langchain_core.tools import tool
    @tool(return_direct=True)
    def get_user_name() -> Command:
        """Retrieve user name"""
        # Directs the update at the parent graph's state.
        return Command(update={"user_name": "Meow"}, graph=Command.PARENT)
    subgraph_builder = StateGraph(MessagesState)
    subgraph_builder.add_node("tool", get_user_name)
    subgraph_builder.add_edge(START, "tool")
    subgraph = subgraph_builder.compile()
    class CustomParentState(TypedDict):
        messages: Annotated[list[BaseMessage], add_messages]
        # this key is not available to the child graph
        user_name: str
    builder = StateGraph(CustomParentState)
    builder.add_node("alice", subgraph)
    builder.add_edge(START, "alice")
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    graph = builder.compile(checkpointer=checkpointer)
    config = {"configurable": {"thread_id": "1"}}
    assert graph.invoke({"messages": [("user", "get user name")]}, config) == {
        "messages": [
            _AnyIdHumanMessage(
                content="get user name", additional_kwargs={}, response_metadata={}
            ),
        ],
        "user_name": "Meow",
    }
    # The persisted snapshot records the subgraph node as the writer of
    # the parent-level "user_name" update.
    assert graph.get_state(config) == StateSnapshot(
        values={
            "messages": [
                _AnyIdHumanMessage(
                    content="get user name", additional_kwargs={}, response_metadata={}
                ),
            ],
            "user_name": "Meow",
        },
        next=(),
        config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        metadata={
            "source": "loop",
            "writes": {
                "alice": {
                    "user_name": "Meow",
                }
            },
            "thread_id": "1",
            "step": 1,
            "parents": {},
        },
        created_at=AnyStr(),
        parent_config={
            "configurable": {
                "thread_id": "1",
                "checkpoint_ns": "",
                "checkpoint_id": AnyStr(),
            }
        },
        tasks=(),
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_interrupt_subgraph(request: pytest.FixtureRequest, checkpointer_name: str):
    """interrupt() raised inside a subgraph node pauses the run, and a
    Command(resume=...) on the same thread completes it."""
    saver = request.getfixturevalue(f"checkpointer_{checkpointer_name}")

    class State(TypedDict):
        baz: str

    def foo(state):
        return {"baz": "foo"}

    def bar(state):
        # Suspends until the caller resumes with a value.
        value = interrupt("Please provide baz value:")
        return {"baz": value}

    child_builder = StateGraph(State)
    child_builder.add_node(bar)
    child_builder.add_edge(START, "bar")

    outer = StateGraph(State)
    outer.add_node(foo)
    outer.add_node("bar", child_builder.compile())
    outer.add_edge(START, "foo")
    outer.add_edge("foo", "bar")
    graph = outer.compile(checkpointer=saver)

    thread1 = {"configurable": {"thread_id": "1"}}
    # First run, interrupted inside the subgraph at bar
    assert graph.invoke({"baz": ""}, thread1)
    # Resume with answer
    assert graph.invoke(Command(resume="bar"), thread1)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_interrupt_multiple(request: pytest.FixtureRequest, checkpointer_name: str):
    """Two interrupt() calls in one node surface as two separate pauses, each
    resumed independently; a state update alongside the first resume is
    applied before the node finishes."""
    checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    def node(s: State) -> State:
        answer = interrupt({"value": 1})
        answer2 = interrupt({"value": 2})
        return {"my_key": answer + " " + answer2}
    builder = StateGraph(State)
    builder.add_node("node", node)
    builder.add_edge(START, "node")
    graph = builder.compile(checkpointer=checkpointer)
    thread1 = {"configurable": {"thread_id": "1"}}
    # NOTE(review): "market" is not declared in State — presumably ignored as
    # input; confirm it is intentional here.
    assert [e for e in graph.stream({"my_key": "DE", "market": "DE"}, thread1)] == [
        {
            "__interrupt__": (
                Interrupt(
                    value={"value": 1},
                    resumable=True,
                    ns=[AnyStr("node:")],
                    when="during",
                ),
            )
        }
    ]
    assert [
        event
        for event in graph.stream(
            Command(resume="answer 1", update={"my_key": "foofoo"}), thread1
        )
    ] == [
        {
            "__interrupt__": (
                Interrupt(
                    value={"value": 2},
                    resumable=True,
                    ns=[AnyStr("node:")],
                    when="during",
                ),
            )
        }
    ]
    # Second resume lets the node run to completion with both answers.
    assert [event for event in graph.stream(Command(resume="answer 2"), thread1)] == [
        {"node": {"my_key": "answer 1 answer 2"}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_interrupt_loop(request: pytest.FixtureRequest, checkpointer_name: str):
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
class State(TypedDict):
age: int
other: str
def ask_age(s: State):
"""Ask an expert for help."""
question = "How old are you?"
value = None
for _ in range(10):
value: str = interrupt(question)
if not value.isdigit() or int(value) < 18:
question = "invalid response"
value = None
else:
break
return {"age": int(value)}
builder = StateGraph(State)
builder.add_node("node", ask_age)
builder.add_edge(START, "node")
graph = builder.compile(checkpointer=checkpointer)
thread1 = {"configurable": {"thread_id": "1"}}
assert [e for e in graph.stream({"other": ""}, thread1)] == [
{
"__interrupt__": (
Interrupt(
value="How old are you?",
resumable=True,
ns=[AnyStr("node:")],
when="during",
),
)
}
]
assert [
event
for event in graph.stream(
Command(resume="13"),
thread1,
)
] == [
{
"__interrupt__": (
Interrupt(
value="invalid response",
resumable=True,
ns=[AnyStr("node:")],
when="during",
),
)
}
]
assert [
event
for event in graph.stream(
Command(resume="15"),
thread1,
)
] == [
{
"__interrupt__": (
Interrupt(
value="invalid response",
resumable=True,
ns=[AnyStr("node:")],
when="during",
),
)
}
]
assert [event for event in graph.stream(Command(resume="19"), thread1)] == [
{"node": {"age": 19}},
]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_messages_state.py | from typing import Annotated
from uuid import UUID
import pytest
from langchain_core.messages import (
AIMessage,
AnyMessage,
HumanMessage,
RemoveMessage,
SystemMessage,
)
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1
from langgraph.graph import add_messages
from langgraph.graph.message import MessagesState
from langgraph.graph.state import END, START, StateGraph
from tests.conftest import IS_LANGCHAIN_CORE_030_OR_GREATER
from tests.messages import _AnyIdHumanMessage
def test_add_single_message():
    """Appending a single message to an existing list keeps both messages."""
    existing = [HumanMessage(content="Hello", id="1")]
    incoming = AIMessage(content="Hi there!", id="2")
    assert add_messages(existing, incoming) == [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
def test_add_multiple_messages():
    """A list of new messages is appended in order."""
    base = [HumanMessage(content="Hello", id="1")]
    additions = [
        AIMessage(content="Hi there!", id="2"),
        SystemMessage(content="System message", id="3"),
    ]
    assert add_messages(base, additions) == [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
        SystemMessage(content="System message", id="3"),
    ]
def test_update_existing_message():
    """A message whose id already exists replaces the original in place."""
    history = [HumanMessage(content="Hello", id="1")]
    replacement = HumanMessage(content="Hello again", id="1")
    assert add_messages(history, replacement) == [
        HumanMessage(content="Hello again", id="1")
    ]
def test_missing_ids():
    """Messages without ids get random UUID4 ids assigned."""
    merged = add_messages(
        [HumanMessage(content="Hello")], [AIMessage(content="Hi there!")]
    )
    assert len(merged) == 2
    for message in merged:
        assert isinstance(message.id, str)
        # UUID() raises ValueError if the id is not a valid UUID4.
        assert UUID(message.id, version=4)
def test_remove_message():
    """RemoveMessage deletes the message with the matching id."""
    history = [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
    assert add_messages(history, RemoveMessage(id="2")) == [
        HumanMessage(content="Hello", id="1")
    ]
def test_duplicate_remove_message():
    """Removing the same id twice in one update is tolerated."""
    history = [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
    deletions = [RemoveMessage(id="2"), RemoveMessage(id="2")]
    assert add_messages(history, deletions) == [HumanMessage(content="Hello", id="1")]
def test_remove_nonexistent_message():
    """Deleting an unknown id raises a ValueError."""
    with pytest.raises(
        ValueError, match="Attempting to delete a message with an ID that doesn't exist"
    ):
        add_messages([HumanMessage(content="Hello", id="1")], RemoveMessage(id="2"))
def test_mixed_operations():
    """Update, removal, and insertion can be combined in a single call."""
    history = [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
    updates = [
        HumanMessage(content="Updated hello", id="1"),  # replaces id "1"
        RemoveMessage(id="2"),  # deletes id "2"
        SystemMessage(content="New message", id="3"),  # appended
    ]
    assert add_messages(history, updates) == [
        HumanMessage(content="Updated hello", id="1"),
        SystemMessage(content="New message", id="3"),
    ]
def test_empty_inputs():
    """Empty operands act as identity on either side."""
    def hello() -> list:
        # Fresh list each call so no test case can observe mutation by another.
        return [HumanMessage(content="Hello", id="1")]
    assert add_messages([], []) == []
    assert add_messages([], hello()) == hello()
    assert add_messages(hello(), []) == hello()
def test_non_list_inputs():
    """Bare (non-list) operands are promoted to single-element lists."""
    merged = add_messages(
        HumanMessage(content="Hello", id="1"), AIMessage(content="Hi there!", id="2")
    )
    assert merged == [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
def test_delete_all():
    """Removing every id yields an empty list."""
    history = [
        HumanMessage(content="Hello", id="1"),
        AIMessage(content="Hi there!", id="2"),
    ]
    deletions = [RemoveMessage(id="1"), RemoveMessage(id="2")]
    assert add_messages(history, deletions) == []
# State schemas exercised by test_messages_state: the TypedDict-based
# MessagesState always, plus a pydantic model variant matching the installed
# langchain-core major version (pydantic v2 model on core>=0.3, else v1).
MESSAGES_STATE_SCHEMAS = [MessagesState]
if IS_LANGCHAIN_CORE_030_OR_GREATER:
    class MessagesStatePydantic(BaseModel):
        messages: Annotated[list[AnyMessage], add_messages]
    MESSAGES_STATE_SCHEMAS.append(MessagesStatePydantic)
else:
    class MessagesStatePydanticV1(BaseModelV1):
        messages: Annotated[list[AnyMessage], add_messages]
    MESSAGES_STATE_SCHEMAS.append(MessagesStatePydanticV1)
@pytest.mark.parametrize("state_schema", MESSAGES_STATE_SCHEMAS)
def test_messages_state(state_schema):
def foo(state):
return {"messages": [HumanMessage("foo")]}
graph = StateGraph(state_schema)
graph.add_edge(START, "foo")
graph.add_edge("foo", END)
graph.add_node(foo)
app = graph.compile()
assert app.invoke({"messages": [("user", "meow")]}) == {
"messages": [
_AnyIdHumanMessage(content="meow"),
_AnyIdHumanMessage(content="foo"),
]
}
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_algo.py | from langgraph.checkpoint.base import empty_checkpoint
from langgraph.pregel.algo import prepare_next_tasks
from langgraph.pregel.manager import ChannelsManager
def test_prepare_next_tasks() -> None:
    # Smoke test: with no processes and an empty checkpoint, prepare_next_tasks
    # yields no tasks in either planning or execution mode.
    config = {}
    processes = {}
    checkpoint = empty_checkpoint()
    with ChannelsManager({}, checkpoint, config) as (channels, managed):
        # for_execution=False: planning mode produces no tasks
        assert (
            prepare_next_tasks(
                checkpoint,
                {},
                processes,
                channels,
                managed,
                config,
                0,
                for_execution=False,
            )
            == {}
        )
        # for_execution=True: execution mode also produces no tasks
        assert (
            prepare_next_tasks(
                checkpoint,
                {},
                processes,
                channels,
                managed,
                config,
                0,
                for_execution=True,
                checkpointer=None,
                store=None,
                manager=None,
            )
            == {}
        )
# TODO: add more tests
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_interruption.py | from typing import TypedDict
import pytest
from pytest_mock import MockerFixture
from langgraph.graph import END, START, StateGraph
from tests.conftest import (
ALL_CHECKPOINTERS_ASYNC,
ALL_CHECKPOINTERS_SYNC,
awith_checkpointer,
)
pytestmark = pytest.mark.anyio
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_SYNC)
def test_interruption_without_state_updates(
request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
"""Test interruption without state updates. This test confirms that
interrupting doesn't require a state key having been updated in the prev step"""
class State(TypedDict):
input: str
def noop(_state):
pass
builder = StateGraph(State)
builder.add_node("step_1", noop)
builder.add_node("step_2", noop)
builder.add_node("step_3", noop)
builder.add_edge(START, "step_1")
builder.add_edge("step_1", "step_2")
builder.add_edge("step_2", "step_3")
builder.add_edge("step_3", END)
checkpointer = request.getfixturevalue(f"checkpointer_{checkpointer_name}")
graph = builder.compile(checkpointer=checkpointer, interrupt_after="*")
initial_input = {"input": "hello world"}
thread = {"configurable": {"thread_id": "1"}}
graph.invoke(initial_input, thread, debug=True)
assert graph.get_state(thread).next == ("step_2",)
graph.invoke(None, thread, debug=True)
assert graph.get_state(thread).next == ("step_3",)
graph.invoke(None, thread, debug=True)
assert graph.get_state(thread).next == ()
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_interruption_without_state_updates_async(
checkpointer_name: str, mocker: MockerFixture
):
"""Test interruption without state updates. This test confirms that
interrupting doesn't require a state key having been updated in the prev step"""
class State(TypedDict):
input: str
async def noop(_state):
pass
builder = StateGraph(State)
builder.add_node("step_1", noop)
builder.add_node("step_2", noop)
builder.add_node("step_3", noop)
builder.add_edge(START, "step_1")
builder.add_edge("step_1", "step_2")
builder.add_edge("step_2", "step_3")
builder.add_edge("step_3", END)
async with awith_checkpointer(checkpointer_name) as checkpointer:
graph = builder.compile(checkpointer=checkpointer, interrupt_after="*")
initial_input = {"input": "hello world"}
thread = {"configurable": {"thread_id": "1"}}
await graph.ainvoke(initial_input, thread, debug=True)
assert (await graph.aget_state(thread)).next == ("step_2",)
await graph.ainvoke(None, thread, debug=True)
assert (await graph.aget_state(thread)).next == ("step_3",)
await graph.ainvoke(None, thread, debug=True)
assert (await graph.aget_state(thread)).next == ()
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/compose-postgres.yml | name: langgraph-tests
services:
postgres-test:
image: postgres:16
ports:
- "5442:5432"
environment:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
healthcheck:
test: pg_isready -U postgres
start_period: 10s
timeout: 1s
retries: 5
interval: 60s
start_interval: 1s
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_pregel_async.py | import asyncio
import logging
import operator
import random
import re
import sys
import uuid
from collections import Counter
from contextlib import asynccontextmanager, contextmanager
from dataclasses import replace
from time import perf_counter
from typing import (
Annotated,
Any,
AsyncGenerator,
AsyncIterator,
Dict,
Generator,
List,
Literal,
Optional,
Tuple,
TypedDict,
Union,
cast,
)
from uuid import UUID
import httpx
import pytest
from langchain_core.messages import ToolCall
from langchain_core.runnables import (
RunnableConfig,
RunnableLambda,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.utils.aiter import aclosing
from pydantic import BaseModel
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langgraph.channels.base import BaseChannel
from langgraph.channels.binop import BinaryOperatorAggregate
from langgraph.channels.context import Context
from langgraph.channels.last_value import LastValue
from langgraph.channels.topic import Topic
from langgraph.channels.untracked_value import UntrackedValue
from langgraph.checkpoint.base import (
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
)
from langgraph.checkpoint.memory import MemorySaver
from langgraph.constants import (
CONFIG_KEY_NODE_FINISHED,
ERROR,
FF_SEND_V2,
PULL,
PUSH,
START,
)
from langgraph.errors import InvalidUpdateError, MultipleSubgraphsError, NodeInterrupt
from langgraph.graph import END, Graph, StateGraph
from langgraph.graph.message import MessageGraph, MessagesState, add_messages
from langgraph.managed.shared_value import SharedValue
from langgraph.prebuilt.chat_agent_executor import create_tool_calling_executor
from langgraph.prebuilt.tool_node import ToolNode
from langgraph.pregel import Channel, GraphRecursionError, Pregel, StateSnapshot
from langgraph.pregel.retry import RetryPolicy
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
from langgraph.types import (
Command,
Interrupt,
PregelTask,
Send,
StreamWriter,
interrupt,
)
from tests.any_str import AnyDict, AnyStr, AnyVersion, FloatBetween, UnsortedSequence
from tests.conftest import (
ALL_CHECKPOINTERS_ASYNC,
ALL_CHECKPOINTERS_ASYNC_PLUS_NONE,
ALL_STORES_ASYNC,
SHOULD_CHECK_SNAPSHOTS,
awith_checkpointer,
awith_store,
)
from tests.fake_chat import FakeChatModel
from tests.fake_tracer import FakeTracer
from tests.memory_assert import (
MemorySaverAssertCheckpointMetadata,
MemorySaverNoPending,
)
from tests.messages import (
_AnyIdAIMessage,
_AnyIdAIMessageChunk,
_AnyIdHumanMessage,
_AnyIdToolMessage,
)
logger = logging.getLogger(__name__)
pytestmark = pytest.mark.anyio
async def test_checkpoint_errors() -> None:
    # Errors raised by any checkpointer method must propagate out of
    # ainvoke/astream/astream_events rather than being swallowed.
    # Checkpointer doubles, each faulty in exactly one method:
    class FaultyGetCheckpointer(MemorySaver):
        async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
            raise ValueError("Faulty get_tuple")
    class FaultyPutCheckpointer(MemorySaver):
        async def aput(
            self,
            config: RunnableConfig,
            checkpoint: Checkpoint,
            metadata: CheckpointMetadata,
            new_versions: ChannelVersions,
        ) -> RunnableConfig:
            raise ValueError("Faulty put")
    class FaultyPutWritesCheckpointer(MemorySaver):
        async def aput_writes(
            self, config: RunnableConfig, writes: List[Tuple[str, Any]], task_id: str
        ) -> RunnableConfig:
            raise ValueError("Faulty put_writes")
    class FaultyVersionCheckpointer(MemorySaver):
        def get_next_version(self, current: Optional[int], channel: BaseChannel) -> int:
            raise ValueError("Faulty get_next_version")
    def logic(inp: str) -> str:
        return ""
    builder = StateGraph(Annotated[str, operator.add])
    builder.add_node("agent", logic)
    builder.add_edge(START, "agent")
    # get_tuple failure surfaces in all three entry points.
    graph = builder.compile(checkpointer=FaultyGetCheckpointer())
    with pytest.raises(ValueError, match="Faulty get_tuple"):
        await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}})
    with pytest.raises(ValueError, match="Faulty get_tuple"):
        async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}):
            pass
    with pytest.raises(ValueError, match="Faulty get_tuple"):
        async for _ in graph.astream_events(
            "", {"configurable": {"thread_id": "thread-3"}}, version="v2"
        ):
            pass
    # put failure surfaces in all three entry points.
    graph = builder.compile(checkpointer=FaultyPutCheckpointer())
    with pytest.raises(ValueError, match="Faulty put"):
        await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}})
    with pytest.raises(ValueError, match="Faulty put"):
        async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}):
            pass
    with pytest.raises(ValueError, match="Faulty put"):
        async for _ in graph.astream_events(
            "", {"configurable": {"thread_id": "thread-3"}}, version="v2"
        ):
            pass
    # get_next_version failure surfaces in all three entry points.
    graph = builder.compile(checkpointer=FaultyVersionCheckpointer())
    with pytest.raises(ValueError, match="Faulty get_next_version"):
        await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}})
    with pytest.raises(ValueError, match="Faulty get_next_version"):
        async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}):
            pass
    with pytest.raises(ValueError, match="Faulty get_next_version"):
        async for _ in graph.astream_events(
            "", {"configurable": {"thread_id": "thread-3"}}, version="v2"
        ):
            pass
    # add a parallel node
    # (put_writes is only exercised when more than one node writes in a step)
    builder.add_node("parallel", logic)
    builder.add_edge(START, "parallel")
    graph = builder.compile(checkpointer=FaultyPutWritesCheckpointer())
    with pytest.raises(ValueError, match="Faulty put_writes"):
        await graph.ainvoke("", {"configurable": {"thread_id": "thread-1"}})
    with pytest.raises(ValueError, match="Faulty put_writes"):
        async for _ in graph.astream("", {"configurable": {"thread_id": "thread-2"}}):
            pass
    with pytest.raises(ValueError, match="Faulty put_writes"):
        async for _ in graph.astream_events(
            "", {"configurable": {"thread_id": "thread-3"}}, version="v2"
        ):
            pass
async def test_node_cancellation_on_external_cancel() -> None:
    """When the caller cancels the run (here via wait_for timeout), the
    in-flight node task receives the CancelledError."""
    cancelled = False

    async def slow_node(input: Any) -> None:
        nonlocal cancelled
        try:
            await asyncio.sleep(1)
        except asyncio.CancelledError:
            cancelled = True
            raise

    builder = Graph()
    builder.add_node("agent", slow_node)
    builder.set_entry_point("agent")
    builder.set_finish_point("agent")
    graph = builder.compile()
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(graph.ainvoke(1), 0.5)
    assert cancelled
async def test_node_cancellation_on_other_node_exception() -> None:
    """An exception in one parallel node cancels its still-running siblings."""
    sibling_cancelled = False

    async def long_running(input: Any) -> None:
        nonlocal sibling_cancelled
        try:
            await asyncio.sleep(1)
        except asyncio.CancelledError:
            sibling_cancelled = True
            raise

    async def failing(input: Any) -> None:
        raise ValueError("I am bad")

    builder = Graph()
    builder.add_node("agent", long_running)
    builder.add_node("bad", failing)
    builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END)
    graph = builder.compile()
    with pytest.raises(ValueError, match="I am bad"):
        # This will raise ValueError, not TimeoutError
        await asyncio.wait_for(graph.ainvoke(1), 0.5)
    assert sibling_cancelled
async def test_node_cancellation_on_other_node_exception_two() -> None:
    """The original node error propagates even without an external timeout."""
    async def long_running(input: Any) -> None:
        await asyncio.sleep(1)

    async def failing(input: Any) -> None:
        raise ValueError("I am bad")

    builder = Graph()
    builder.add_node("agent", long_running)
    builder.add_node("bad", failing)
    builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END)
    # ValueError (not CancelledError) must surface to the caller.
    with pytest.raises(ValueError, match="I am bad"):
        await builder.compile().ainvoke(1)
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_dynamic_interrupt(checkpointer_name: str) -> None:
    # Covers dynamic interrupt() behavior for an async node: without a
    # checkpointer the run ends at the interrupt (input passes through
    # unchanged); with one, the run can be resumed via Command(resume=...)
    # or cleared via aupdate_state(..., as_node=END).
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    tool_two_node_count = 0
    async def tool_two_node(s: State) -> State:
        nonlocal tool_two_node_count
        tool_two_node_count += 1
        if s["market"] == "DE":
            answer = interrupt("Just because...")
        else:
            answer = " all good"
        return {"my_key": answer}
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy())
    tool_two_graph.add_edge(START, "tool_two")
    tool_two = tool_two_graph.compile()
    tracer = FakeTracer()
    # No checkpointer: the interrupt ends the run and the output is the input.
    assert await tool_two.ainvoke(
        {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
    ) == {
        "my_key": "value",
        "market": "DE",
    }
    assert tool_two_node_count == 1, "interrupts aren't retried"
    assert len(tracer.runs) == 1
    run = tracer.runs[0]
    assert run.end_time is not None
    assert run.error is None
    assert run.outputs == {"market": "DE", "my_key": "value"}
    # Non-DE market takes the no-interrupt branch.
    assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value all good",
        "market": "US",
    }
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        tool_two = tool_two_graph.compile(checkpointer=checkpointer)
        # missing thread_id
        with pytest.raises(ValueError, match="thread_id"):
            await tool_two.ainvoke({"my_key": "value", "market": "DE"})
        # flow: interrupt -> resume with answer
        thread2 = {"configurable": {"thread_id": "2"}}
        # stop when about to enter node
        assert [
            c
            async for c in tool_two.astream(
                {"my_key": "value ⛰️", "market": "DE"}, thread2
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:")],
                    ),
                )
            },
        ]
        # resume with answer
        assert [
            c async for c in tool_two.astream(Command(resume=" my answer"), thread2)
        ] == [
            {"tool_two": {"my_key": " my answer"}},
        ]
        # flow: interrupt -> clear
        thread1 = {"configurable": {"thread_id": "1"}}
        # stop when about to enter node
        assert [
            c
            async for c in tool_two.astream(
                {"my_key": "value ⛰️", "market": "DE"}, thread1
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:")],
                    ),
                )
            },
        ]
        # Two checkpoints so far: the input checkpoint (step -1) and loop step 0.
        assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == [
            {
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            {
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
                "thread_id": "1",
            },
        ]
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        # The pending task carries the interrupt payload in the state snapshot.
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️", "market": "DE"},
            next=("tool_two",),
            tasks=(
                PregelTask(
                    AnyStr(),
                    "tool_two",
                    (PULL, "tool_two"),
                    interrupts=(
                        Interrupt(
                            value="Just because...",
                            resumable=True,
                            ns=[AnyStr("tool_two:")],
                        ),
                    ),
                ),
            ),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
        # clear the interrupt and next tasks
        await tool_two.aupdate_state(thread1, None, as_node=END)
        # interrupt is cleared, as well as the next tasks
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️", "market": "DE"},
            next=(),
            tasks=(),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {},
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_dynamic_interrupt_subgraph(checkpointer_name: str) -> None:
    # Same scenario as test_dynamic_interrupt, but the interrupting node lives
    # inside a compiled subgraph; interrupt namespaces include both levels
    # ("tool_two:" then "do:").
    class SubgraphState(TypedDict):
        my_key: str
        market: str
    tool_two_node_count = 0
    def tool_two_node(s: SubgraphState) -> SubgraphState:
        nonlocal tool_two_node_count
        tool_two_node_count += 1
        if s["market"] == "DE":
            answer = interrupt("Just because...")
        else:
            answer = " all good"
        return {"my_key": answer}
    subgraph = StateGraph(SubgraphState)
    subgraph.add_node("do", tool_two_node, retry=RetryPolicy())
    subgraph.add_edge(START, "do")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two", subgraph.compile())
    tool_two_graph.add_edge(START, "tool_two")
    tool_two = tool_two_graph.compile()
    tracer = FakeTracer()
    # No checkpointer: the interrupt ends the run and the output is the input.
    assert await tool_two.ainvoke(
        {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
    ) == {
        "my_key": "value",
        "market": "DE",
    }
    assert tool_two_node_count == 1, "interrupts aren't retried"
    assert len(tracer.runs) == 1
    run = tracer.runs[0]
    assert run.end_time is not None
    assert run.error is None
    assert run.outputs == {"market": "DE", "my_key": "value"}
    # Non-DE market takes the no-interrupt branch.
    assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value all good",
        "market": "US",
    }
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        tool_two = tool_two_graph.compile(checkpointer=checkpointer)
        # missing thread_id
        with pytest.raises(ValueError, match="thread_id"):
            await tool_two.ainvoke({"my_key": "value", "market": "DE"})
        # flow: interrupt -> resume with answer
        thread2 = {"configurable": {"thread_id": "2"}}
        # stop when about to enter node
        assert [
            c
            async for c in tool_two.astream(
                {"my_key": "value ⛰️", "market": "DE"}, thread2
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:"), AnyStr("do:")],
                    ),
                )
            },
        ]
        # resume with answer
        assert [
            c async for c in tool_two.astream(Command(resume=" my answer"), thread2)
        ] == [
            {"tool_two": {"my_key": " my answer", "market": "DE"}},
        ]
        # flow: interrupt -> clear
        thread1 = {"configurable": {"thread_id": "1"}}
        # root namespace config; subgraph checkpoints live under "tool_two:..."
        thread1root = {"configurable": {"thread_id": "1", "checkpoint_ns": ""}}
        # stop when about to enter node
        assert [
            c
            async for c in tool_two.astream(
                {"my_key": "value ⛰️", "market": "DE"}, thread1
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:"), AnyStr("do:")],
                    ),
                )
            },
        ]
        assert [c.metadata async for c in tool_two.checkpointer.alist(thread1root)] == [
            {
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            {
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
                "thread_id": "1",
            },
        ]
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        # The pending task exposes the subgraph's state config as well.
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️", "market": "DE"},
            next=("tool_two",),
            tasks=(
                PregelTask(
                    AnyStr(),
                    "tool_two",
                    (PULL, "tool_two"),
                    interrupts=(
                        Interrupt(
                            value="Just because...",
                            resumable=True,
                            ns=[AnyStr("tool_two:"), AnyStr("do:")],
                        ),
                    ),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("tool_two:"),
                        }
                    },
                ),
            ),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1root, limit=2)
            ][-1].config,
        )
        # clear the interrupt and next tasks
        await tool_two.aupdate_state(thread1, None, as_node=END)
        # interrupt is cleared, as well as the next tasks
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️", "market": "DE"},
            next=(),
            tasks=(),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {},
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1root, limit=2)
            ][-1].config,
        )
@pytest.mark.skipif(not FF_SEND_V2, reason="send v2 is not enabled")
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_copy_checkpoint(checkpointer_name: str) -> None:
    # Exercises interrupt handling when the entry point fans out to two nodes
    # (one addressed by name, one via Send): the non-interrupting node's writes
    # are kept, and aupdate_state(thread, None) clears the interrupt while
    # keeping the pending task.
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
    def tool_one(s: State) -> State:
        return {"my_key": " one"}
    tool_two_node_count = 0
    def tool_two_node(s: State) -> State:
        nonlocal tool_two_node_count
        tool_two_node_count += 1
        if s["market"] == "DE":
            answer = interrupt("Just because...")
        else:
            answer = " all good"
        return {"my_key": answer}
    def start(state: State) -> list[Union[Send, str]]:
        # Fan out: "tool_two" by node name plus a Send packet to "tool_one".
        return ["tool_two", Send("tool_one", state)]
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two", tool_two_node, retry=RetryPolicy())
    tool_two_graph.add_node("tool_one", tool_one)
    tool_two_graph.set_conditional_entry_point(start)
    tool_two = tool_two_graph.compile()
    tracer = FakeTracer()
    # No checkpointer: tool_one's write lands, tool_two's interrupt ends the run.
    assert await tool_two.ainvoke(
        {"my_key": "value", "market": "DE"}, {"callbacks": [tracer]}
    ) == {
        "my_key": "value one",
        "market": "DE",
    }
    assert tool_two_node_count == 1, "interrupts aren't retried"
    assert len(tracer.runs) == 1
    run = tracer.runs[0]
    assert run.end_time is not None
    assert run.error is None
    assert run.outputs == {"market": "DE", "my_key": "value one"}
    assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value one all good",
        "market": "US",
    }
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        tool_two = tool_two_graph.compile(checkpointer=checkpointer)
        # missing thread_id
        with pytest.raises(ValueError, match="thread_id"):
            await tool_two.ainvoke({"my_key": "value", "market": "DE"})
        # flow: interrupt -> resume with answer
        thread2 = {"configurable": {"thread_id": "2"}}
        # stop when about to enter node
        assert [
            c
            async for c in tool_two.astream(
                {"my_key": "value ⛰️", "market": "DE"}, thread2
            )
        ] == [
            {
                "tool_one": {"my_key": " one"},
            },
            {
                "__interrupt__": (
                    Interrupt(
                        value="Just because...",
                        resumable=True,
                        ns=[AnyStr("tool_two:")],
                    ),
                )
            },
        ]
        # resume with answer
        assert [
            c async for c in tool_two.astream(Command(resume=" my answer"), thread2)
        ] == [
            {"tool_two": {"my_key": " my answer"}},
        ]
        # flow: interrupt -> clear tasks
        thread1 = {"configurable": {"thread_id": "1"}}
        # stop when about to enter node
        assert await tool_two.ainvoke(
            {"my_key": "value ⛰️", "market": "DE"}, thread1
        ) == {
            "my_key": "value ⛰️ one",
            "market": "DE",
        }
        assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == [
            {
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {"tool_one": {"my_key": " one"}},
                "thread_id": "1",
            },
            {
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"__start__": {"my_key": "value ⛰️", "market": "DE"}},
                "thread_id": "1",
            },
        ]
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        # Pending "tool_two" task still carries the interrupt payload.
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️ one", "market": "DE"},
            next=("tool_two",),
            tasks=(
                PregelTask(
                    AnyStr(),
                    "tool_two",
                    (PULL, "tool_two"),
                    interrupts=(
                        Interrupt(
                            value="Just because...",
                            resumable=True,
                            ns=[AnyStr("tool_two:")],
                        ),
                    ),
                ),
            ),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {"tool_one": {"my_key": " one"}},
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
        # clear the interrupt and next tasks
        await tool_two.aupdate_state(thread1, None)
        # interrupt is cleared, next task is kept
        tup = await tool_two.checkpointer.aget_tuple(thread1)
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value ⛰️ one", "market": "DE"},
            next=("tool_two",),
            tasks=(
                PregelTask(
                    AnyStr(),
                    "tool_two",
                    (PULL, "tool_two"),
                    interrupts=(),
                ),
            ),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {},
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_node_not_cancelled_on_other_node_interrupted(
    checkpointer_name: str,
) -> None:
    # An interrupt() in one parallel node must NOT cancel a sibling node that
    # is still running; the sibling's write survives across resumes and the
    # sibling is executed exactly once.
    class State(TypedDict):
        hello: Annotated[str, operator.add]
    awhiles = 0
    inner_task_cancelled = False
    async def awhile(input: State) -> None:
        nonlocal awhiles
        awhiles += 1
        try:
            await asyncio.sleep(1)
            return {"hello": " again"}
        except asyncio.CancelledError:
            nonlocal inner_task_cancelled
            inner_task_cancelled = True
            raise
    async def iambad(input: State) -> None:
        return {"hello": interrupt("I am bad")}
    builder = StateGraph(State)
    builder.add_node("agent", awhile)
    builder.add_node("bad", iambad)
    builder.set_conditional_entry_point(lambda _: ["agent", "bad"], then=END)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread = {"configurable": {"thread_id": "1"}}
        # writes from "awhile" are applied to last chunk
        assert await graph.ainvoke({"hello": "world"}, thread) == {
            "hello": "world again"
        }
        assert not inner_task_cancelled
        assert awhiles == 1
        # Re-invoking with None does not re-run the completed "agent" node.
        assert await graph.ainvoke(None, thread, debug=True) == {"hello": "world again"}
        assert not inner_task_cancelled
        assert awhiles == 1
        # resume with answer
        assert await graph.ainvoke(Command(resume=" okay"), thread) == {
            "hello": "world again okay"
        }
        assert not inner_task_cancelled
        assert awhiles == 1
async def test_step_timeout_on_stream_hang() -> None:
    """graph.step_timeout cancels a step that outlives the timeout while streaming.

    "awhile" sleeps past the 1s step timeout while "alittlewhile" finishes
    first; consuming the stream must raise asyncio.TimeoutError and the
    hung node must be cancelled.
    """
    inner_task_cancelled = False
    async def awhile(input: Any) -> None:
        try:
            # sleeps past the 1s step_timeout configured below
            await asyncio.sleep(1.5)
        except asyncio.CancelledError:
            nonlocal inner_task_cancelled
            inner_task_cancelled = True
            raise
    async def alittlewhile(input: Any) -> str:
        await asyncio.sleep(0.6)
        return "1"
    builder = Graph()
    builder.add_node(awhile)
    builder.add_node(alittlewhile)
    builder.set_conditional_entry_point(lambda _: ["awhile", "alittlewhile"], then=END)
    graph = builder.compile()
    graph.step_timeout = 1
    with pytest.raises(asyncio.TimeoutError):
        async for chunk in graph.astream(1, stream_mode="updates"):
            assert chunk == {"alittlewhile": {"alittlewhile": "1"}}
            await asyncio.sleep(0.6)
    assert inner_task_cancelled
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC_PLUS_NONE)
async def test_cancel_graph_astream(checkpointer_name: str) -> None:
    """Breaking out of astream cancels in-flight nodes and keeps pending writes.

    After consuming the first chunk and breaking, the still-running parallel
    node must have been started and cancelled, the downstream node must never
    start, and (when a checkpointer is present) the successful node's writes
    must be applied as pending writes to the saved state.
    """
    class State(TypedDict):
        value: Annotated[int, operator.add]
    class AwhileMaker:
        # callable node that records whether it started / was cancelled
        def __init__(self) -> None:
            self.reset()
        async def __call__(self, input: State) -> Any:
            self.started = True
            try:
                await asyncio.sleep(1.5)
            except asyncio.CancelledError:
                self.cancelled = True
                raise
        def reset(self):
            self.started = False
            self.cancelled = False
    async def alittlewhile(input: State) -> dict:
        await asyncio.sleep(0.6)
        return {"value": 2}
    awhile = AwhileMaker()
    aparallelwhile = AwhileMaker()
    builder = StateGraph(State)
    builder.add_node("awhile", awhile)
    builder.add_node("aparallelwhile", aparallelwhile)
    builder.add_node(alittlewhile)
    builder.add_edge(START, "alittlewhile")
    builder.add_edge(START, "aparallelwhile")
    builder.add_edge("alittlewhile", "awhile")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        # test interrupting astream
        got_event = False
        thread1: RunnableConfig = {"configurable": {"thread_id": "1"}}
        async with aclosing(graph.astream({"value": 1}, thread1)) as stream:
            async for chunk in stream:
                assert chunk == {"alittlewhile": {"value": 2}}
                got_event = True
                break
        assert got_event
        # node aparallelwhile should start, but be cancelled
        assert aparallelwhile.started is True
        assert aparallelwhile.cancelled is True
        # node "awhile" should never start
        assert awhile.started is False
        # checkpoint with output of "alittlewhile" should not be saved
        # but we should have applied pending writes
        if checkpointer is not None:
            state = await graph.aget_state(thread1)
            assert state is not None
            assert state.values == {"value": 3}  # 1 + 2
            assert state.next == ("aparallelwhile",)
            assert state.metadata == {
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            }
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC_PLUS_NONE)
async def test_cancel_graph_astream_events_v2(checkpointer_name: Optional[str]) -> None:
    """Breaking out of astream_events (v2) cancels in-flight nodes cleanly.

    Sequential chain alittlewhile -> awhile -> anotherwhile; the test breaks
    after the first top-level stream chunk. Any node that already started must
    be cancelled, later nodes must never start, and the checkpoint reflects
    only the completed step.
    """
    class State(TypedDict):
        value: int
    class AwhileMaker:
        # callable node that records whether it started / was cancelled
        def __init__(self) -> None:
            self.reset()
        async def __call__(self, input: State) -> Any:
            self.started = True
            try:
                await asyncio.sleep(1.5)
            except asyncio.CancelledError:
                self.cancelled = True
                raise
        def reset(self):
            self.started = False
            self.cancelled = False
    async def alittlewhile(input: State) -> dict:
        await asyncio.sleep(0.6)
        return {"value": 2}
    awhile = AwhileMaker()
    anotherwhile = AwhileMaker()
    builder = StateGraph(State)
    builder.add_node(alittlewhile)
    builder.add_node("awhile", awhile)
    builder.add_node("anotherwhile", anotherwhile)
    builder.add_edge(START, "alittlewhile")
    builder.add_edge("alittlewhile", "awhile")
    builder.add_edge("awhile", "anotherwhile")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        # test interrupting astream_events v2
        got_event = False
        thread2: RunnableConfig = {"configurable": {"thread_id": "2"}}
        async with aclosing(
            graph.astream_events({"value": 1}, thread2, version="v2")
        ) as stream:
            async for chunk in stream:
                if chunk["event"] == "on_chain_stream" and not chunk["parent_ids"]:
                    got_event = True
                    assert chunk["data"]["chunk"] == {"alittlewhile": {"value": 2}}
                    await asyncio.sleep(0.1)
                    break
        # did break
        assert got_event
        # node "awhile" maybe starts (impl detail of astream_events)
        # if it does start, it must be cancelled
        if awhile.started:
            assert awhile.cancelled is True
        # node "anotherwhile" should never start
        assert anotherwhile.started is False
        # checkpoint with output of "alittlewhile" should not be saved
        if checkpointer is not None:
            state = await graph.aget_state(thread2)
            assert state is not None
            assert state.values == {"value": 2}
            assert state.next == ("awhile",)
            assert state.metadata == {
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {"alittlewhile": {"value": 2}},
                "thread_id": "2",
            }
async def test_node_schemas_custom_output() -> None:
    """Per-node input schemas filter state; the Output schema filters results.

    Each node declares its own TypedDict input schema and only sees those
    keys; the graph-level Output schema restricts ainvoke/astream output to
    "messages". The nodes assert on the exact state they receive.
    """
    class State(TypedDict):
        hello: str
        bye: str
        messages: Annotated[list[str], add_messages]
    class Output(TypedDict):
        messages: list[str]
    class StateForA(TypedDict):
        hello: str
        messages: Annotated[list[str], add_messages]
    async def node_a(state: StateForA):
        assert state == {
            "hello": "there",
            "messages": [_AnyIdHumanMessage(content="hello")],
        }
    class StateForB(TypedDict):
        bye: str
        now: int
    async def node_b(state: StateForB):
        assert state == {
            "bye": "world",
        }
        # "now" is not a State key, but node_c can see it via its own schema
        return {
            "now": 123,
            "hello": "again",
        }
    class StateForC(TypedDict):
        hello: str
        now: int
    async def node_c(state: StateForC):
        assert state == {
            "hello": "again",
            "now": 123,
        }
    builder = StateGraph(State, output=Output)
    builder.add_node("a", node_a)
    builder.add_node("b", node_b)
    builder.add_node("c", node_c)
    builder.add_edge(START, "a")
    builder.add_edge("a", "b")
    builder.add_edge("b", "c")
    graph = builder.compile()
    assert await graph.ainvoke(
        {"hello": "there", "bye": "world", "messages": "hello"}
    ) == {
        "messages": [_AnyIdHumanMessage(content="hello")],
    }
    builder = StateGraph(State, output=Output)
    builder.add_node("a", node_a)
    builder.add_node("b", node_b)
    builder.add_node("c", node_c)
    builder.add_edge(START, "a")
    builder.add_edge("a", "b")
    builder.add_edge("b", "c")
    graph = builder.compile()
    assert await graph.ainvoke(
        {
            "hello": "there",
            "bye": "world",
            "messages": "hello",
            "now": 345,  # ignored because not in input schema
        }
    ) == {
        "messages": [_AnyIdHumanMessage(content="hello")],
    }
    assert [
        c
        async for c in graph.astream(
            {
                "hello": "there",
                "bye": "world",
                "messages": "hello",
                "now": 345,  # ignored because not in input schema
            }
        )
    ] == [
        {"a": None},
        {"b": {"hello": "again", "now": 123}},
        {"c": None},
    ]
async def test_invoke_single_process_in_out(mocker: MockerFixture) -> None:
    """A single add-one node: int in, int out, via both Pregel and Graph APIs."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # Pregel wiring: read "input", apply add_one, write "output"
    node = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": node},
        channels={"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    # equivalent wiring through the higher-level Graph builder
    builder = Graph()
    builder.add_node("add_one", add_one)
    builder.set_entry_point("add_one")
    builder.set_finish_point("add_one")
    compiled = builder.compile()
    if SHOULD_CHECK_SNAPSHOTS:
        expected_input_schema = {"title": "LangGraphInput", "type": "integer"}
        expected_output_schema = {"title": "LangGraphOutput", "type": "integer"}
        assert app.input_schema.model_json_schema() == expected_input_schema
        assert app.output_schema.model_json_schema() == expected_output_schema
    assert await app.ainvoke(2) == 3
    assert await app.ainvoke(2, output_keys=["output"]) == {"output": 3}
    assert await compiled.ainvoke(2) == 3
@pytest.mark.parametrize(
    "falsy_value",
    [None, False, 0, "", [], {}, set(), frozenset(), 0.0, 0j],
)
async def test_invoke_single_process_in_out_falsy_values(falsy_value: Any) -> None:
    """Falsy node outputs must round-trip unchanged rather than be dropped."""
    builder = Graph()
    builder.add_node("return_falsy_const", lambda *args, **kwargs: falsy_value)
    builder.set_entry_point("return_falsy_const")
    builder.set_finish_point("return_falsy_const")
    app = builder.compile()
    assert falsy_value == await app.ainvoke(1)
async def test_invoke_single_process_in_write_kwargs(mocker: MockerFixture) -> None:
    """Channel.write_to kwargs fan writes out to extra channels.

    `fixed=5` always writes the constant 5; `output_plus_one` writes a
    function of the node's output. All three channels appear in the output
    dict and the generated output schema.
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    chain = (
        Channel.subscribe_to("input")
        | add_one
        | Channel.write_to("output", fixed=5, output_plus_one=lambda x: x + 1)
    )
    app = Pregel(
        nodes={"one": chain},
        channels={
            "input": LastValue(int),
            "output": LastValue(int),
            "fixed": LastValue(int),
            "output_plus_one": LastValue(int),
        },
        output_channels=["output", "fixed", "output_plus_one"],
        input_channels="input",
    )
    if SHOULD_CHECK_SNAPSHOTS:
        assert app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "integer",
        }
        assert app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None},
                "fixed": {"title": "Fixed", "type": "integer", "default": None},
                "output_plus_one": {
                    "title": "Output Plus One",
                    "type": "integer",
                    "default": None,
                },
            },
        }
    assert await app.ainvoke(2) == {"output": 3, "fixed": 5, "output_plus_one": 4}
async def test_invoke_single_process_in_out_dict(mocker: MockerFixture) -> None:
    """A list of output channels makes the result come back as a dict."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    pipeline = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": pipeline},
        channels={"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels=["output"],
    )
    if SHOULD_CHECK_SNAPSHOTS:
        input_schema = app.input_schema.model_json_schema()
        assert input_schema == {"title": "LangGraphInput", "type": "integer"}
        output_schema = app.output_schema.model_json_schema()
        # object-shaped because output_channels is a list
        assert output_schema == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None}
            },
        }
    assert await app.ainvoke(2) == {"output": 3}
async def test_invoke_single_process_in_dict_out_dict(mocker: MockerFixture) -> None:
    """List-valued input AND output channels give dict-shaped I/O and schemas."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": chain},
        channels={"input": LastValue(int), "output": LastValue(int)},
        input_channels=["input"],
        output_channels=["output"],
    )
    if SHOULD_CHECK_SNAPSHOTS:
        assert app.input_schema.model_json_schema() == {
            "title": "LangGraphInput",
            "type": "object",
            "properties": {
                "input": {"title": "Input", "type": "integer", "default": None}
            },
        }
        assert app.output_schema.model_json_schema() == {
            "title": "LangGraphOutput",
            "type": "object",
            "properties": {
                "output": {"title": "Output", "type": "integer", "default": None}
            },
        }
    assert await app.ainvoke({"input": 2}) == {"output": 3}
async def test_invoke_two_processes_in_out(mocker: MockerFixture) -> None:
    """Two chained add-one nodes: invoke, recursion limit, stepwise streaming.

    Checks the final result, that a too-small recursion limit raises, and the
    per-superstep values emitted by astream for both Pregel and Graph APIs.
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
        stream_channels=["inbox", "output"],
    )
    assert await app.ainvoke(2) == 4
    # two supersteps are required, so a recursion limit of 1 must fail
    with pytest.raises(GraphRecursionError):
        await app.ainvoke(2, {"recursion_limit": 1})
    step = 0
    async for values in app.astream(2):
        step += 1
        if step == 1:
            assert values == {
                "inbox": 3,
            }
        elif step == 2:
            assert values == {
                "inbox": 3,
                "output": 4,
            }
    assert step == 2
    graph = Graph()
    graph.add_node("add_one", add_one)
    graph.add_node("add_one_more", add_one)
    graph.set_entry_point("add_one")
    graph.set_finish_point("add_one_more")
    graph.add_edge("add_one", "add_one_more")
    gapp = graph.compile()
    assert await gapp.ainvoke(2) == 4
    step = 0
    async for values in gapp.astream(2):
        step += 1
        if step == 1:
            assert values == {
                "add_one": 3,
            }
        elif step == 2:
            assert values == {
                "add_one_more": 4,
            }
    assert step == 2
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_invoke_two_processes_in_out_interrupt(
    checkpointer_name: str, mocker: MockerFixture
) -> None:
    """interrupt_after_nodes pauses between "one" and "two".

    Exercises pausing/resuming with a checkpointer, restarting with new
    input, aupdate_state, the exact checkpoint history for the thread
    (newest first, steps 6 down to -1), and re-running from any historical
    checkpoint.
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = Channel.subscribe_to("inbox") | add_one | Channel.write_to("output")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = Pregel(
            nodes={"one": one, "two": two},
            channels={
                "inbox": LastValue(int),
                "output": LastValue(int),
                "input": LastValue(int),
            },
            input_channels="input",
            output_channels="output",
            checkpointer=checkpointer,
            interrupt_after_nodes=["one"],
        )
        thread1 = {"configurable": {"thread_id": "1"}}
        thread2 = {"configurable": {"thread_id": "2"}}
        # start execution, stop at inbox
        assert await app.ainvoke(2, thread1) is None
        # inbox == 3
        checkpoint = await checkpointer.aget(thread1)
        assert checkpoint is not None
        assert checkpoint["channel_values"]["inbox"] == 3
        # resume execution, finish
        assert await app.ainvoke(None, thread1) == 4
        # start execution again, stop at inbox
        assert await app.ainvoke(20, thread1) is None
        # inbox == 21
        checkpoint = await checkpointer.aget(thread1)
        assert checkpoint is not None
        assert checkpoint["channel_values"]["inbox"] == 21
        # send a new value in, interrupting the previous execution
        assert await app.ainvoke(3, thread1) is None
        assert await app.ainvoke(None, thread1) == 5
        # start execution again, stopping at inbox
        assert await app.ainvoke(20, thread2) is None
        # inbox == 21
        snapshot = await app.aget_state(thread2)
        assert snapshot.values["inbox"] == 21
        assert snapshot.next == ("two",)
        # update the state, resume
        await app.aupdate_state(thread2, 25, as_node="one")
        assert await app.ainvoke(None, thread2) == 26
        # no pending tasks
        snapshot = await app.aget_state(thread2)
        assert snapshot.next == ()
        # list history (newest checkpoint first)
        history = [c async for c in app.aget_state_history(thread1)]
        assert history == [
            StateSnapshot(
                values={"inbox": 4, "output": 5, "input": 3},
                tasks=(),
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 6,
                    "writes": {"two": 5},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[1].config,
            ),
            StateSnapshot(
                values={"inbox": 4, "output": 4, "input": 3},
                tasks=(
                    PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 5}),
                ),
                next=("two",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 5,
                    "writes": {"one": None},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[2].config,
            ),
            StateSnapshot(
                values={"inbox": 21, "output": 4, "input": 3},
                tasks=(
                    PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 4}),
                ),
                next=("one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "step": 4,
                    "writes": {"input": 3},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[3].config,
            ),
            StateSnapshot(
                values={"inbox": 21, "output": 4, "input": 20},
                tasks=(PregelTask(AnyStr(), "two", (PULL, "two")),),
                next=("two",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 3,
                    "writes": {"one": None},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[4].config,
            ),
            StateSnapshot(
                values={"inbox": 3, "output": 4, "input": 20},
                tasks=(
                    PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 21}),
                ),
                next=("one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "step": 2,
                    "writes": {"input": 20},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[5].config,
            ),
            StateSnapshot(
                values={"inbox": 3, "output": 4, "input": 2},
                tasks=(),
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 1,
                    "writes": {"two": 4},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[6].config,
            ),
            StateSnapshot(
                values={"inbox": 3, "input": 2},
                tasks=(
                    PregelTask(AnyStr(), "two", (PULL, "two"), result={"output": 4}),
                ),
                next=("two",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 0,
                    "writes": {"one": None},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[7].config,
            ),
            StateSnapshot(
                values={"input": 2},
                tasks=(
                    PregelTask(AnyStr(), "one", (PULL, "one"), result={"inbox": 3}),
                ),
                next=("one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "step": -1,
                    "writes": {"input": 2},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=None,
            ),
        ]
        # forking from any previous checkpoint should re-run nodes
        assert [
            c async for c in app.astream(None, history[0].config, stream_mode="updates")
        ] == []
        assert [
            c async for c in app.astream(None, history[1].config, stream_mode="updates")
        ] == [
            {"two": {"output": 5}},
        ]
        assert [
            c async for c in app.astream(None, history[2].config, stream_mode="updates")
        ] == [
            {"one": {"inbox": 4}},
            {"__interrupt__": ()},
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_fork_always_re_runs_nodes(
    checkpointer_name: str, mocker: MockerFixture
) -> None:
    """Resuming from a historical checkpoint always re-executes its tasks.

    A single "add_one" node loops until the accumulated count reaches 6; the
    test pins the full checkpoint history (newest first, steps 5 down to -1)
    and then re-streams from checkpoints 0, 1 and 2.
    """
    add_one = mocker.Mock(side_effect=lambda _: 1)
    builder = StateGraph(Annotated[int, operator.add])
    builder.add_node("add_one", add_one)
    builder.add_edge(START, "add_one")
    builder.add_conditional_edges("add_one", lambda cnt: "add_one" if cnt < 6 else END)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread1 = {"configurable": {"thread_id": "1"}}
        # start execution, stop at inbox
        assert [
            c
            async for c in graph.astream(1, thread1, stream_mode=["values", "updates"])
        ] == [
            ("values", 1),
            ("updates", {"add_one": 1}),
            ("values", 2),
            ("updates", {"add_one": 1}),
            ("values", 3),
            ("updates", {"add_one": 1}),
            ("values", 4),
            ("updates", {"add_one": 1}),
            ("values", 5),
            ("updates", {"add_one": 1}),
            ("values", 6),
        ]
        # list history (newest checkpoint first)
        history = [c async for c in graph.aget_state_history(thread1)]
        assert history == [
            StateSnapshot(
                values=6,
                next=(),
                tasks=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 5,
                    "writes": {"add_one": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[1].config,
            ),
            StateSnapshot(
                values=5,
                tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
                next=("add_one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 4,
                    "writes": {"add_one": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[2].config,
            ),
            StateSnapshot(
                values=4,
                tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
                next=("add_one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 3,
                    "writes": {"add_one": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[3].config,
            ),
            StateSnapshot(
                values=3,
                tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
                next=("add_one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 2,
                    "writes": {"add_one": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[4].config,
            ),
            StateSnapshot(
                values=2,
                tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
                next=("add_one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 1,
                    "writes": {"add_one": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[5].config,
            ),
            StateSnapshot(
                values=1,
                tasks=(PregelTask(AnyStr(), "add_one", (PULL, "add_one"), result=1),),
                next=("add_one",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "step": 0,
                    "writes": None,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=history[6].config,
            ),
            StateSnapshot(
                values=0,
                tasks=(
                    PregelTask(AnyStr(), "__start__", (PULL, "__start__"), result=1),
                ),
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "step": -1,
                    "writes": {"__start__": 1},
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=None,
            ),
        ]
        # forking from any previous checkpoint should re-run nodes
        assert [
            c
            async for c in graph.astream(None, history[0].config, stream_mode="updates")
        ] == []
        assert [
            c
            async for c in graph.astream(None, history[1].config, stream_mode="updates")
        ] == [
            {"add_one": 1},
        ]
        assert [
            c
            async for c in graph.astream(None, history[2].config, stream_mode="updates")
        ] == [
            {"add_one": 1},
            {"add_one": 1},
        ]
async def test_invoke_two_processes_in_dict_out(mocker: MockerFixture) -> None:
    """A Topic channel feeds a batching consumer; checks every stream mode.

    "inbox" is a Topic, so node "two" receives a list per superstep and
    processes it with .abatch. The test pins updates, values and debug
    stream outputs for the same invocation.
    """
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = (
        Channel.subscribe_to("inbox")
        | RunnableLambda(add_one).abatch
        | Channel.write_to("output").abatch
    )
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "inbox": Topic(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels=["input", "inbox"],
        stream_channels=["output", "inbox"],
        output_channels=["output"],
    )
    # [12 + 1, 2 + 1 + 1]
    assert [
        c
        async for c in app.astream(
            {"input": 2, "inbox": 12}, output_keys="output", stream_mode="updates"
        )
    ] == [
        {"one": None},
        {"two": 13},
        {"two": 4},
    ]
    assert [
        c async for c in app.astream({"input": 2, "inbox": 12}, output_keys="output")
    ] == [13, 4]
    assert [
        c async for c in app.astream({"input": 2, "inbox": 12}, stream_mode="updates")
    ] == [
        {"one": {"inbox": 3}},
        {"two": {"output": 13}},
        {"two": {"output": 4}},
    ]
    assert [c async for c in app.astream({"input": 2, "inbox": 12})] == [
        {"inbox": [3], "output": 13},
        {"output": 4},
    ]
    # debug mode emits task / task_result events per superstep
    assert [
        c async for c in app.astream({"input": 2, "inbox": 12}, stream_mode="debug")
    ] == [
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "one",
                "input": 2,
                "triggers": ["input"],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "input": [12],
                "triggers": ["inbox"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "one",
                "result": [("inbox", 3)],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 0,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "result": [("output", 13)],
                "error": None,
                "interrupts": [],
            },
        },
        {
            "type": "task",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "input": [3],
                "triggers": ["inbox"],
            },
        },
        {
            "type": "task_result",
            "timestamp": AnyStr(),
            "step": 1,
            "payload": {
                "id": AnyStr(),
                "name": "two",
                "result": [("output", 4)],
                "error": None,
                "interrupts": [],
            },
        },
    ]
async def test_batch_two_processes_in_out() -> None:
    """abatch over a two-step pipeline keeps per-input order despite delays."""
    async def add_one_with_delay(inp: int) -> int:
        # larger inputs sleep longer, so completion order differs from input order
        await asyncio.sleep(inp / 10)
        return inp + 1
    first = Channel.subscribe_to("input") | add_one_with_delay | Channel.write_to("one")
    second = Channel.subscribe_to("one") | add_one_with_delay | Channel.write_to("output")
    app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "one": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    inputs = [3, 2, 1, 3, 5]
    expected = [x + 2 for x in inputs]  # each input passes through add-one twice
    assert await app.abatch(inputs) == expected
    assert await app.abatch(inputs, output_keys=["output"]) == [
        {"output": v} for v in expected
    ]
    builder = Graph()
    builder.add_node("add_one", add_one_with_delay)
    builder.add_node("add_one_more", add_one_with_delay)
    builder.set_entry_point("add_one")
    builder.set_finish_point("add_one_more")
    builder.add_edge("add_one", "add_one_more")
    compiled = builder.compile()
    assert await compiled.abatch(inputs) == expected
async def test_invoke_many_processes_in_out(mocker: MockerFixture) -> None:
    """A 100-node chain adds 1 per node; repeat and concurrent runs are isolated."""
    test_size = 100
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # chain: input -> "-1" -> "0" -> ... -> str(test_size - 3) -> output
    nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")}
    for i in range(test_size - 2):
        nodes[str(i)] = (
            Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i))
        )
    # deliberately reuses the leaked loop variable `i` (the last channel name)
    nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output")
    app = Pregel(
        nodes=nodes,
        channels={str(i): LastValue(int) for i in range(-1, test_size - 2)}
        | {"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    # No state is left over from previous invocations
    for _ in range(10):
        assert await app.ainvoke(2, {"recursion_limit": test_size}) == 2 + test_size
    # Concurrent invocations do not interfere with each other
    assert await asyncio.gather(
        *(app.ainvoke(2, {"recursion_limit": test_size}) for _ in range(10))
    ) == [2 + test_size for _ in range(10)]
async def test_batch_many_processes_in_out(mocker: MockerFixture) -> None:
    """abatch over a 100-node chain: sequential and concurrent batches are isolated."""
    test_size = 100
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # chain: input -> "-1" -> "0" -> ... -> str(test_size - 3) -> output
    nodes = {"-1": Channel.subscribe_to("input") | add_one | Channel.write_to("-1")}
    for i in range(test_size - 2):
        nodes[str(i)] = (
            Channel.subscribe_to(str(i - 1)) | add_one | Channel.write_to(str(i))
        )
    # deliberately reuses the leaked loop variable `i` (the last channel name)
    nodes["last"] = Channel.subscribe_to(str(i)) | add_one | Channel.write_to("output")
    app = Pregel(
        nodes=nodes,
        channels={str(i): LastValue(int) for i in range(-1, test_size - 2)}
        | {"input": LastValue(int), "output": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    # No state is left over from previous invocations
    for _ in range(3):
        # Then invoke pubsub
        assert await app.abatch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) == [
            2 + test_size,
            1 + test_size,
            3 + test_size,
            4 + test_size,
            5 + test_size,
        ]
    # Concurrent invocations do not interfere with each other
    assert await asyncio.gather(
        *(app.abatch([2, 1, 3, 4, 5], {"recursion_limit": test_size}) for _ in range(3))
    ) == [
        [2 + test_size, 1 + test_size, 3 + test_size, 4 + test_size, 5 + test_size]
        for _ in range(3)
    ]
async def test_invoke_two_processes_two_in_two_out_invalid(
    mocker: MockerFixture,
) -> None:
    """Two nodes writing the same LastValue channel in one superstep must error."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    # both nodes read "input" and write "output" in the same superstep
    writers = {
        name: Channel.subscribe_to("input") | add_one | Channel.write_to("output")
        for name in ("one", "two")
    }
    app = Pregel(
        nodes=writers,
        channels={"output": LastValue(int), "input": LastValue(int)},
        input_channels="input",
        output_channels="output",
    )
    # LastValue channels can only be updated once per iteration
    with pytest.raises(InvalidUpdateError):
        await app.ainvoke(2)
async def test_invoke_two_processes_two_in_two_out_valid(mocker: MockerFixture) -> None:
    """Two nodes may write the same channel when it is a Topic (accumulating)."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    two = Channel.subscribe_to("input") | add_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "input": LastValue(int),
            "output": Topic(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # A Topic channel accumulates updates into a sequence
    assert await app.ainvoke(2) == [3, 3]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_invoke_checkpoint(mocker: MockerFixture, checkpointer_name: str) -> None:
    """The checkpointer persists the running "total" per thread.

    Covers: accumulation across invokes, a retried transient error (via
    RetryPolicy), a hard failure that leaves the checkpoint untouched, and
    isolation between threads.
    """
    add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"])
    errored_once = False
    def raise_if_above_10(input: int) -> int:
        # The FIRST call with input > 4 raises a retriable ConnectionError to
        # exercise the retry policy; subsequent calls only fail for input > 10.
        nonlocal errored_once
        if input > 4:
            if errored_once:
                pass
            else:
                errored_once = True
                raise ConnectionError("I will be retried")
        if input > 10:
            raise ValueError("Input is too large")
        return input
    one = (
        Channel.subscribe_to(["input"]).join(["total"])
        | add_one
        | Channel.write_to("output", "total")
        | raise_if_above_10
    )
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = Pregel(
            nodes={"one": one},
            channels={
                "total": BinaryOperatorAggregate(int, operator.add),
                "input": LastValue(int),
                "output": LastValue(int),
            },
            input_channels="input",
            output_channels="output",
            checkpointer=checkpointer,
            retry_policy=RetryPolicy(),
        )
        # total starts out as 0, so output is 0+2=2
        assert await app.ainvoke(2, {"configurable": {"thread_id": "1"}}) == 2
        checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}})
        assert checkpoint is not None
        assert checkpoint["channel_values"].get("total") == 2
        # total is now 2, so output is 2+3=5
        assert await app.ainvoke(3, {"configurable": {"thread_id": "1"}}) == 5
        assert errored_once, "errored and retried"
        checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}})
        assert checkpoint is not None
        assert checkpoint["channel_values"].get("total") == 7
        # total is now 2+5=7, so output would be 7+4=11, but raises ValueError
        with pytest.raises(ValueError):
            await app.ainvoke(4, {"configurable": {"thread_id": "1"}})
        # checkpoint is not updated
        checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}})
        assert checkpoint is not None
        assert checkpoint["channel_values"].get("total") == 7
        # on a new thread, total starts out as 0, so output is 0+5=5
        assert await app.ainvoke(5, {"configurable": {"thread_id": "2"}}) == 5
        checkpoint = await checkpointer.aget({"configurable": {"thread_id": "1"}})
        assert checkpoint is not None
        assert checkpoint["channel_values"].get("total") == 7
        checkpoint = await checkpointer.aget({"configurable": {"thread_id": "2"}})
        assert checkpoint is not None
        assert checkpoint["channel_values"].get("total") == 5
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_pending_writes_resume(
    request: pytest.FixtureRequest, checkpointer_name: str
) -> None:
    """Successful writes from a partially-failed superstep are kept as pending
    writes and re-applied on resume, so the successful node is not re-run."""

    class State(TypedDict):
        value: Annotated[int, operator.add]

    class AwhileMaker:
        # Node that sleeps, then either returns a dict update or raises.
        def __init__(self, sleep: float, rtn: Union[Dict, Exception]) -> None:
            self.sleep = sleep
            self.rtn = rtn
            self.reset()

        async def __call__(self, input: State) -> Any:
            self.calls += 1
            await asyncio.sleep(self.sleep)
            if isinstance(self.rtn, Exception):
                raise self.rtn
            else:
                return self.rtn

        def reset(self):
            # Reset the call counter (used to detect re-execution on resume).
            self.calls = 0

    one = AwhileMaker(0.1, {"value": 2})
    two = AwhileMaker(0.3, ConnectionError("I'm not good"))
    builder = StateGraph(State)
    builder.add_node("one", one)
    builder.add_node("two", two, retry=RetryPolicy(max_attempts=2))
    builder.add_edge(START, "one")
    builder.add_edge(START, "two")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread1: RunnableConfig = {"configurable": {"thread_id": "1"}}
        with pytest.raises(ConnectionError, match="I'm not good"):
            await graph.ainvoke({"value": 1}, thread1)
        # both nodes should have been called once
        assert one.calls == 1
        # "two" has max_attempts=2, so it is retried once before failing
        assert two.calls == 2
        # latest checkpoint should be before nodes "one", "two"
        # but we should have applied pending writes from "one"
        state = await graph.aget_state(thread1)
        assert state is not None
        assert state.values == {"value": 3}
        assert state.next == ("two",)
        assert state.tasks == (
            PregelTask(AnyStr(), "one", (PULL, "one"), result={"value": 2}),
            PregelTask(
                AnyStr(),
                "two",
                (PULL, "two"),
                'ConnectionError("I\'m not good")',
            ),
        )
        assert state.metadata == {
            "parents": {},
            "source": "loop",
            "step": 0,
            "writes": None,
            "thread_id": "1",
        }
        # get_state with checkpoint_id should not apply any pending writes
        state = await graph.aget_state(state.config)
        assert state is not None
        assert state.values == {"value": 1}
        assert state.next == ("one", "two")
        # should contain pending write of "one"
        checkpoint = await checkpointer.aget_tuple(thread1)
        assert checkpoint is not None
        # should contain error from "two"
        expected_writes = [
            (AnyStr(), "one", "one"),
            (AnyStr(), "value", 2),
            (AnyStr(), ERROR, 'ConnectionError("I\'m not good")'),
        ]
        assert len(checkpoint.pending_writes) == 3
        assert all(w in expected_writes for w in checkpoint.pending_writes)
        # both non-error pending writes come from same task
        non_error_writes = [w for w in checkpoint.pending_writes if w[1] != ERROR]
        assert non_error_writes[0][0] == non_error_writes[1][0]
        # error write is from the other task
        error_write = next(w for w in checkpoint.pending_writes if w[1] == ERROR)
        assert error_write[0] != non_error_writes[0][0]
        # resume execution
        with pytest.raises(ConnectionError, match="I'm not good"):
            await graph.ainvoke(None, thread1)
        # node "one" succeeded previously, so shouldn't be called again
        assert one.calls == 1
        # node "two" should have been called once again
        assert two.calls == 4
        # confirm no new checkpoints saved
        state_two = await graph.aget_state(thread1)
        assert state_two.metadata == state.metadata
        # resume execution, without exception
        two.rtn = {"value": 3}
        # both the pending write and the new write were applied, 1 + 2 + 3 = 6
        assert await graph.ainvoke(None, thread1) == {"value": 6}
        # check all final checkpoints
        checkpoints = [c async for c in checkpointer.alist(thread1)]
        # we should have 3
        assert len(checkpoints) == 3
        # the last one not too interesting for this test
        assert checkpoints[0] == CheckpointTuple(
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            checkpoint={
                "v": 1,
                "id": AnyStr(),
                "ts": AnyStr(),
                "pending_sends": [],
                "versions_seen": {
                    "one": {
                        "start:one": AnyVersion(),
                    },
                    "two": {
                        "start:two": AnyVersion(),
                    },
                    "__input__": {},
                    "__start__": {
                        "__start__": AnyVersion(),
                    },
                    "__interrupt__": {
                        "value": AnyVersion(),
                        "__start__": AnyVersion(),
                        "start:one": AnyVersion(),
                        "start:two": AnyVersion(),
                    },
                },
                "channel_versions": {
                    "one": AnyVersion(),
                    "two": AnyVersion(),
                    "value": AnyVersion(),
                    "__start__": AnyVersion(),
                    "start:one": AnyVersion(),
                    "start:two": AnyVersion(),
                },
                "channel_values": {"one": "one", "two": "two", "value": 6},
            },
            metadata={
                "parents": {},
                "step": 1,
                "source": "loop",
                "writes": {"one": {"value": 2}, "two": {"value": 3}},
                "thread_id": "1",
            },
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": checkpoints[1].config["configurable"][
                        "checkpoint_id"
                    ],
                }
            },
            pending_writes=[],
        )
        # the previous one we assert that pending writes contains both
        # - original error
        # - successful writes from resuming after preventing error
        assert checkpoints[1] == CheckpointTuple(
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            checkpoint={
                "v": 1,
                "id": AnyStr(),
                "ts": AnyStr(),
                "pending_sends": [],
                "versions_seen": {
                    "__input__": {},
                    "__start__": {
                        "__start__": AnyVersion(),
                    },
                },
                "channel_versions": {
                    "value": AnyVersion(),
                    "__start__": AnyVersion(),
                    "start:one": AnyVersion(),
                    "start:two": AnyVersion(),
                },
                "channel_values": {
                    "value": 1,
                    "start:one": "__start__",
                    "start:two": "__start__",
                },
            },
            metadata={
                "parents": {},
                "step": 0,
                "source": "loop",
                "writes": None,
                "thread_id": "1",
            },
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": checkpoints[2].config["configurable"][
                        "checkpoint_id"
                    ],
                }
            },
            pending_writes=UnsortedSequence(
                (AnyStr(), "one", "one"),
                (AnyStr(), "value", 2),
                (AnyStr(), "__error__", 'ConnectionError("I\'m not good")'),
                (AnyStr(), "two", "two"),
                (AnyStr(), "value", 3),
            ),
        )
        assert checkpoints[2] == CheckpointTuple(
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            checkpoint={
                "v": 1,
                "id": AnyStr(),
                "ts": AnyStr(),
                "pending_sends": [],
                "versions_seen": {"__input__": {}},
                "channel_versions": {
                    "__start__": AnyVersion(),
                },
                "channel_values": {"__start__": {"value": 1}},
            },
            metadata={
                "parents": {},
                "step": -1,
                "source": "input",
                "writes": {"__start__": {"value": 1}},
                "thread_id": "1",
            },
            parent_config=None,
            pending_writes=UnsortedSequence(
                (AnyStr(), "value", 1),
                (AnyStr(), "start:one", "__start__"),
                (AnyStr(), "start:two", "__start__"),
            ),
        )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_run_from_checkpoint_id_retains_previous_writes(
    request: pytest.FixtureRequest, checkpointer_name: str, mocker: MockerFixture
) -> None:
    """Invoking from an earlier checkpoint_id forks history: the new branch
    re-executes from that point while all prior checkpoints remain intact."""

    class MyState(TypedDict):
        myval: Annotated[int, operator.add]
        otherval: bool

    class Anode:
        # Alternating node: contributes 2 when the switch flips on, 1 when off.
        def __init__(self):
            self.switch = False

        async def __call__(self, state: MyState):
            self.switch = not self.switch
            return {"myval": 2 if self.switch else 1, "otherval": self.switch}

    builder = StateGraph(MyState)
    thenode = Anode()  # Fun.
    builder.add_node("node_one", thenode)
    builder.add_node("node_two", thenode)
    builder.add_edge(START, "node_one")

    def _getedge(src: str):
        # Conditional edge factory: end when myval > 3, swap to the other node
        # when otherval is set, otherwise loop back to the same node.
        swap = "node_one" if src == "node_two" else "node_two"

        def _edge(st: MyState) -> Literal["__end__", "node_one", "node_two"]:
            if st["myval"] > 3:
                return END
            if st["otherval"]:
                return swap
            return src

        return _edge

    builder.add_conditional_edges("node_one", _getedge("node_one"))
    builder.add_conditional_edges("node_two", _getedge("node_two"))
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread_id = uuid.uuid4()
        thread1 = {"configurable": {"thread_id": str(thread_id)}}
        result = await graph.ainvoke({"myval": 1}, thread1)
        assert result["myval"] == 4
        history = [c async for c in graph.aget_state_history(thread1)]
        assert len(history) == 4
        assert history[-1].values == {"myval": 0}
        assert history[0].values == {"myval": 4, "otherval": False}
        # re-run from the second-to-last checkpoint, forking the thread
        second_run_config = {
            **thread1,
            "configurable": {
                **thread1["configurable"],
                "checkpoint_id": history[1].config["configurable"]["checkpoint_id"],
            },
        }
        second_result = await graph.ainvoke(None, second_run_config)
        assert second_result == {"myval": 5, "otherval": True}
        new_history = [
            c
            async for c in graph.aget_state_history(
                {"configurable": {"thread_id": str(thread_id), "checkpoint_ns": ""}}
            )
        ]
        # only one new checkpoint added; all previous ones are retained
        assert len(new_history) == len(history) + 1
        for original, new in zip(history, new_history[1:]):
            assert original.values == new.values
            assert original.next == new.next
            assert original.metadata["step"] == new.metadata["step"]

        def _get_tasks(hist: list, start: int):
            return [h.tasks for h in hist[start:]]

        assert _get_tasks(new_history, 1) == _get_tasks(history, 0)
async def test_cond_edge_after_send() -> None:
    """A conditional edge declared on the target of a Send fan-out still
    routes each parallel invocation onward correctly."""

    class EchoNode:
        # Appends its own name to the accumulated list state.
        def __init__(self, name: str):
            self.name = name
            self.__name__ = name  # graph derives the node key from __name__

        async def __call__(self, state):
            return [self.name]

    async def send_for_fun(state):
        # Fan out: node "1" triggers two parallel runs of node "2".
        return [Send("2", state), Send("2", state)]

    async def route_to_three(state) -> Literal["3"]:
        return "3"

    builder = StateGraph(Annotated[list, operator.add])
    for label in ("1", "2", "3"):
        builder.add_node(EchoNode(label))
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    compiled = builder.compile()
    # "2" runs twice (once per Send), then both fan back into a single "3".
    assert await compiled.ainvoke(["0"]) == ["0", "1", "2", "2", "3"]
async def test_concurrent_emit_sends() -> None:
    """Two nodes emitting Sends concurrently in the same superstep: execution
    order of the resulting tasks differs between Send V1 and V2 semantics."""

    class Node:
        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)

        async def __call__(self, state):
            # List input -> plain name; scalar input (from a Send) -> "name|arg".
            return (
                [self.name]
                if isinstance(state, list)
                else ["|".join((self.name, str(state)))]
            )

    async def send_for_fun(state):
        return [Send("2", 1), Send("2", 2), "3.1"]

    async def send_for_profit(state):
        return [Send("2", 3), Send("2", 4)]

    async def route_to_three(state) -> Literal["3"]:
        return "3"

    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("1.1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_edge(START, "1")
    builder.add_edge(START, "1.1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("1.1", send_for_profit)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()
    # V2 runs Send tasks in the same superstep as their origin, so "3.1"
    # comes last; V1 defers Sends, so "3.1" runs before the "2|..." tasks.
    assert await graph.ainvoke(["0"]) == (
        [
            "0",
            "1",
            "1.1",
            "2|1",
            "2|2",
            "2|3",
            "2|4",
            "3",
            "3.1",
        ]
        if FF_SEND_V2
        else [
            "0",
            "1",
            "1.1",
            "3.1",
            "2|1",
            "2|2",
            "2|3",
            "2|4",
            "3",
        ]
    )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_send_sequences(checkpointer_name: str) -> None:
    """Sends carrying Command(goto=Send(...)) payloads chain into further
    Sends; also verifies interrupt/resume around such chains (V2 only)."""

    class Node:
        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)

        async def __call__(self, state):
            update = (
                [self.name]
                if isinstance(state, list)  # or isinstance(state, Control)
                else ["|".join((self.name, str(state)))]
            )
            # When the input is a Command, forward it with our update attached
            # so its goto=Send(...) is still processed by the engine.
            if isinstance(state, Command):
                return replace(state, update=update)
            else:
                return update

    async def send_for_fun(state):
        return [
            Send("2", Command(goto=Send("2", 3))),
            Send("2", Command(goto=Send("2", 4))),
            "3.1",
        ]

    async def route_to_three(state) -> Literal["3"]:
        return "3"

    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()
    assert (
        await graph.ainvoke(["0"])
        == [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "2|3",
            "2|4",
            "3",
            "3.1",
        ]
        if FF_SEND_V2
        else [
            "0",
            "1",
            "3.1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "3",
            "2|3",
            "2|4",
            "3",
        ]
    )
    # interrupt/resume semantics below only apply to Send V2
    if not FF_SEND_V2:
        return
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["3.1"])
        thread1 = {"configurable": {"thread_id": "1"}}
        # first run stops before "3.1"; the chained Sends have already executed
        assert await graph.ainvoke(["0"], thread1) == [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "2|3",
            "2|4",
        ]
        # resume completes "3" and "3.1"
        assert await graph.ainvoke(None, thread1) == [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='2', arg=4))",
            "2|3",
            "2|4",
            "3",
            "3.1",
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_send_dedupe_on_resume(checkpointer_name: str) -> None:
    """On resume after an interrupt, Send tasks whose writes were already
    saved are deduplicated (not re-executed); only the interrupted node runs."""
    if not FF_SEND_V2:
        pytest.skip("Send deduplication is only available in Send V2")

    class InterruptOnce:
        # Raises NodeInterrupt on the first call only, succeeds afterwards.
        ticks: int = 0

        def __call__(self, state):
            self.ticks += 1
            if self.ticks == 1:
                raise NodeInterrupt("Bahh")
            return ["|".join(("flaky", str(state)))]

    class Node:
        def __init__(self, name: str):
            self.name = name
            self.ticks = 0  # counts executions, used to assert deduplication
            setattr(self, "__name__", name)

        def __call__(self, state):
            self.ticks += 1
            update = (
                [self.name]
                if isinstance(state, list)
                else ["|".join((self.name, str(state)))]
            )
            # Forward Command inputs so their goto=Send(...) keeps chaining.
            if isinstance(state, Command):
                return replace(state, update=update)
            else:
                return update

    def send_for_fun(state):
        return [
            Send("2", Command(goto=Send("2", 3))),
            Send("2", Command(goto=Send("flaky", 4))),
            "3.1",
        ]

    def route_to_three(state) -> Literal["3"]:
        return "3"

    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node(Node("1"))
    builder.add_node(Node("2"))
    builder.add_node(Node("3"))
    builder.add_node(Node("3.1"))
    builder.add_node("flaky", InterruptOnce())
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_for_fun)
    builder.add_conditional_edges("2", route_to_three)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread1 = {"configurable": {"thread_id": "1"}}
        # first run: "flaky" interrupts, so its output is missing
        assert await graph.ainvoke(["0"], thread1, debug=1) == [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='flaky', arg=4))",
            "2|3",
        ]
        assert builder.nodes["2"].runnable.func.ticks == 3
        assert builder.nodes["flaky"].runnable.func.ticks == 1
        # resume execution
        assert await graph.ainvoke(None, thread1, debug=1) == [
            "0",
            "1",
            "2|Command(goto=Send(node='2', arg=3))",
            "2|Command(goto=Send(node='flaky', arg=4))",
            "2|3",
            "flaky|4",
            "3",
            "3.1",
        ]
        # node "2" doesn't get called again, as we recover writes saved before
        assert builder.nodes["2"].runnable.func.ticks == 3
        # node "flaky" gets called again, as it was interrupted
        assert builder.nodes["flaky"].runnable.func.ticks == 2
        # check history
        history = [c async for c in graph.aget_state_history(thread1)]
        assert history == [
            StateSnapshot(
                values=[
                    "0",
                    "1",
                    "2|Command(goto=Send(node='2', arg=3))",
                    "2|Command(goto=Send(node='flaky', arg=4))",
                    "2|3",
                    "flaky|4",
                    "3",
                    "3.1",
                ],
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": {"3": ["3"], "3.1": ["3.1"]},
                    "thread_id": "1",
                    "step": 2,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                tasks=(),
            ),
            StateSnapshot(
                values=[
                    "0",
                    "1",
                    "2|Command(goto=Send(node='2', arg=3))",
                    "2|Command(goto=Send(node='flaky', arg=4))",
                    "2|3",
                    "flaky|4",
                ],
                next=("3", "3.1"),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": {
                        "1": ["1"],
                        "2": [
                            ["2|Command(goto=Send(node='2', arg=3))"],
                            ["2|Command(goto=Send(node='flaky', arg=4))"],
                            ["2|3"],
                        ],
                        "flaky": ["flaky|4"],
                    },
                    "thread_id": "1",
                    "step": 1,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="3",
                        path=("__pregel_pull", "3"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["3"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="3.1",
                        path=("__pregel_pull", "3.1"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["3.1"],
                    ),
                ),
            ),
            StateSnapshot(
                values=["0"],
                next=("1", "2", "2", "2", "flaky"),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": None,
                    "thread_id": "1",
                    "step": 0,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                # push-task paths encode the chain of Sends that spawned them
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="1",
                        path=("__pregel_pull", "1"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["1"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            ("__pregel_pull", "1"),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|Command(goto=Send(node='2', arg=3))"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            ("__pregel_pull", "1"),
                            3,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|Command(goto=Send(node='flaky', arg=4))"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="2",
                        path=(
                            "__pregel_push",
                            (
                                "__pregel_push",
                                ("__pregel_pull", "1"),
                                2,
                                AnyStr(),
                            ),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["2|3"],
                    ),
                    PregelTask(
                        id=AnyStr(),
                        name="flaky",
                        path=(
                            "__pregel_push",
                            (
                                "__pregel_push",
                                ("__pregel_pull", "1"),
                                3,
                                AnyStr(),
                            ),
                            2,
                            AnyStr(),
                        ),
                        error=None,
                        interrupts=(Interrupt(value="Bahh", when="during"),),
                        state=None,
                        result=["flaky|4"],
                    ),
                ),
            ),
            StateSnapshot(
                values=[],
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "source": "input",
                    "writes": {"__start__": ["0"]},
                    "thread_id": "1",
                    "step": -1,
                    "parents": {},
                },
                created_at=AnyStr(),
                parent_config=None,
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="__start__",
                        path=("__pregel_pull", "__start__"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result=["0"],
                    ),
                ),
            ),
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_send_react_interrupt(checkpointer_name: str) -> None:
    """ReAct-style agent fanning out tool calls via Send: covers plain
    interrupt-resume, interrupt-update-resume (cancelling the pending tool
    call), and update that replaces the tool call with a new Send."""
    from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage

    ai_message = AIMessage(
        "",
        id="ai1",
        tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())],
    )

    async def agent(state):
        return {"messages": ai_message}

    def route(state):
        # Fan out one Send per tool call on the latest AI message.
        if isinstance(state["messages"][-1], AIMessage):
            return [
                Send(call["name"], call) for call in state["messages"][-1].tool_calls
            ]

    foo_called = 0

    async def foo(call: ToolCall):
        nonlocal foo_called
        foo_called += 1
        return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])}

    builder = StateGraph(MessagesState)
    builder.add_node(agent)
    builder.add_node(foo)
    builder.add_edge(START, "agent")
    builder.add_conditional_edges("agent", route)
    graph = builder.compile()
    # without a checkpointer the tool runs straight through
    assert await graph.ainvoke({"messages": [HumanMessage("hello")]}) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # simple interrupt-resume flow
        foo_called = 0
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
        thread1 = {"configurable": {"thread_id": "1"}}
        assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        }
        assert foo_called == 0
        assert await graph.ainvoke(None, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
                _AnyIdToolMessage(
                    content="{'hi': [1, 2, 3]}",
                    tool_call_id=AnyStr(),
                ),
            ]
        }
        assert foo_called == 1
        # interrupt-update-resume flow
        foo_called = 0
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
        thread1 = {"configurable": {"thread_id": "2"}}
        assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        }
        assert foo_called == 0
        # remaining assertions rely on Send V2 task semantics
        if not FF_SEND_V2:
            return
        # get state should show the pending task
        state = await graph.aget_state(thread1)
        assert state == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    ),
                ]
            },
            next=("foo",),
            config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 0,
                "source": "loop",
                "writes": None,
                "parents": {},
                "thread_id": "2",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            content="",
                            additional_kwargs={},
                            response_metadata={},
                            id="ai1",
                            tool_calls=[
                                {
                                    "name": "foo",
                                    "args": {"hi": [1, 2, 3]},
                                    "id": "",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    },
                ),
                PregelTask(
                    id=AnyStr(),
                    name="foo",
                    path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                    error=None,
                    interrupts=(),
                    state=None,
                    result=None,
                ),
            ),
        )
        # remove the tool call, clearing the pending task
        await graph.aupdate_state(
            thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])}
        )
        # tool call no longer in pending tasks
        assert await graph.aget_state(thread1) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="Bye now",
                        tool_calls=[],
                    ),
                ]
            },
            next=(),
            config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 1,
                "source": "update",
                "writes": {
                    "agent": {
                        "messages": _AnyIdAIMessage(
                            content="Bye now",
                            tool_calls=[],
                        )
                    }
                },
                "parents": {},
                "thread_id": "2",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(),
        )
        # tool call not executed
        assert await graph.ainvoke(None, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(content="Bye now"),
            ]
        }
        assert foo_called == 0
        # interrupt-update-resume flow, creating new Send in update call
        foo_called = 0
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
        thread1 = {"configurable": {"thread_id": "3"}}
        assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        }
        assert foo_called == 0
        # get state should show the pending task
        state = await graph.aget_state(thread1)
        assert state == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    ),
                ]
            },
            next=("foo",),
            config={
                "configurable": {
                    "thread_id": "3",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 0,
                "source": "loop",
                "writes": None,
                "parents": {},
                "thread_id": "3",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "3",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            "",
                            id="ai1",
                            tool_calls=[
                                {
                                    "name": "foo",
                                    "args": {"hi": [1, 2, 3]},
                                    "id": "",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    },
                ),
                PregelTask(
                    id=AnyStr(),
                    name="foo",
                    path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                    error=None,
                    interrupts=(),
                    state=None,
                    result=None,
                ),
            ),
        )
        # replace the tool call, should clear previous send, create new one
        await graph.aupdate_state(
            thread1,
            {
                "messages": AIMessage(
                    "",
                    id=ai_message.id,
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [4, 5, 6]},
                            "id": "tool1",
                            "type": "tool_call",
                        }
                    ],
                )
            },
        )
        # prev tool call no longer in pending tasks, new tool call is
        assert await graph.aget_state(thread1) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [4, 5, 6]},
                                "id": "tool1",
                                "type": "tool_call",
                            }
                        ],
                    ),
                ]
            },
            next=("foo",),
            config={
                "configurable": {
                    "thread_id": "3",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 1,
                "source": "update",
                "writes": {
                    "agent": {
                        "messages": _AnyIdAIMessage(
                            content="",
                            tool_calls=[
                                {
                                    "name": "foo",
                                    "args": {"hi": [4, 5, 6]},
                                    "id": "tool1",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    }
                },
                "parents": {},
                "thread_id": "3",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "3",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="foo",
                    path=("__pregel_push", (), 0, AnyStr()),
                    error=None,
                    interrupts=(),
                    state=None,
                    result=None,
                ),
            ),
        )
        # prev tool call not executed, new tool call is
        assert await graph.ainvoke(None, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                AIMessage(
                    "",
                    id="ai1",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [4, 5, 6]},
                            "id": "tool1",
                            "type": "tool_call",
                        }
                    ],
                ),
                _AnyIdToolMessage(content="{'hi': [4, 5, 6]}", tool_call_id="tool1"),
            ]
        }
        assert foo_called == 1
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_send_react_interrupt_control(
    checkpointer_name: str, snapshot: SnapshotAssertion
) -> None:
    """Same ReAct interrupt scenarios as test_send_react_interrupt, but the
    agent fans out tool calls via Command(goto=[Send(...)]) instead of a
    conditional edge."""
    from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage

    ai_message = AIMessage(
        "",
        id="ai1",
        tool_calls=[ToolCall(name="foo", args={"hi": [1, 2, 3]}, id=AnyStr())],
    )

    async def agent(state) -> Command[Literal["foo"]]:
        # Update state and route to the tool node(s) in a single Command.
        return Command(
            update={"messages": ai_message},
            goto=[Send(call["name"], call) for call in ai_message.tool_calls],
        )

    foo_called = 0

    async def foo(call: ToolCall):
        nonlocal foo_called
        foo_called += 1
        return {"messages": ToolMessage(str(call["args"]), tool_call_id=call["id"])}

    builder = StateGraph(MessagesState)
    builder.add_node(agent)
    builder.add_node(foo)
    builder.add_edge(START, "agent")
    graph = builder.compile()
    assert graph.get_graph().draw_mermaid() == snapshot
    # without a checkpointer the tool runs straight through
    assert await graph.ainvoke({"messages": [HumanMessage("hello")]}) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "name": "foo",
                        "args": {"hi": [1, 2, 3]},
                        "id": "",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(
                content="{'hi': [1, 2, 3]}",
                tool_call_id=AnyStr(),
            ),
        ]
    }
    assert foo_called == 1
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # simple interrupt-resume flow
        foo_called = 0
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
        thread1 = {"configurable": {"thread_id": "1"}}
        assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        }
        assert foo_called == 0
        assert await graph.ainvoke(None, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
                _AnyIdToolMessage(
                    content="{'hi': [1, 2, 3]}",
                    tool_call_id=AnyStr(),
                ),
            ]
        }
        assert foo_called == 1
        # interrupt-update-resume flow
        foo_called = 0
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["foo"])
        thread1 = {"configurable": {"thread_id": "2"}}
        assert await graph.ainvoke({"messages": [HumanMessage("hello")]}, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "foo",
                            "args": {"hi": [1, 2, 3]},
                            "id": "",
                            "type": "tool_call",
                        }
                    ],
                ),
            ]
        }
        assert foo_called == 0
        # remaining assertions rely on Send V2 task semantics
        if not FF_SEND_V2:
            return
        # get state should show the pending task
        state = await graph.aget_state(thread1)
        assert state == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "name": "foo",
                                "args": {"hi": [1, 2, 3]},
                                "id": "",
                                "type": "tool_call",
                            }
                        ],
                    ),
                ]
            },
            next=("foo",),
            config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 0,
                "source": "loop",
                "writes": None,
                "parents": {},
                "thread_id": "2",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            content="",
                            additional_kwargs={},
                            response_metadata={},
                            id="ai1",
                            tool_calls=[
                                {
                                    "name": "foo",
                                    "args": {"hi": [1, 2, 3]},
                                    "id": "",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    },
                ),
                PregelTask(
                    id=AnyStr(),
                    name="foo",
                    path=("__pregel_push", ("__pregel_pull", "agent"), 2, AnyStr()),
                    error=None,
                    interrupts=(),
                    state=None,
                    result=None,
                ),
            ),
        )
        # remove the tool call, clearing the pending task
        await graph.aupdate_state(
            thread1, {"messages": AIMessage("Bye now", id=ai_message.id, tool_calls=[])}
        )
        # tool call no longer in pending tasks
        assert await graph.aget_state(thread1) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="hello"),
                    _AnyIdAIMessage(
                        content="Bye now",
                        tool_calls=[],
                    ),
                ]
            },
            next=(),
            config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "step": 1,
                "source": "update",
                "writes": {
                    "agent": {
                        "messages": _AnyIdAIMessage(
                            content="Bye now",
                            tool_calls=[],
                        )
                    }
                },
                "parents": {},
                "thread_id": "2",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "2",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(),
        )
        # tool call not executed
        assert await graph.ainvoke(None, thread1) == {
            "messages": [
                _AnyIdHumanMessage(content="hello"),
                _AnyIdAIMessage(content="Bye now"),
            ]
        }
        assert foo_called == 0
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_max_concurrency(checkpointer_name: str) -> None:
    """The `max_concurrency` config caps how many Send tasks of a node run
    simultaneously; unbounded runs execute all 100 at once."""

    class ConcurrencyProbe:
        # Node that records the peak number of simultaneous invocations.
        def __init__(self, name: str):
            self.name = name
            self.__name__ = name  # graph derives the node key from __name__
            self.currently = 0
            self.max_currently = 0

        async def __call__(self, state):
            self.currently += 1
            self.max_currently = max(self.max_currently, self.currently)
            await asyncio.sleep(random.random() / 10)
            self.currently -= 1
            return [state]

    def one(state):
        return ["1"]

    def three(state):
        return ["3"]

    async def send_to_many(state):
        # Fan out 100 parallel invocations of node "2".
        return [Send("2", idx) for idx in range(100)]

    async def route_to_three(state) -> Literal["3"]:
        return "3"

    node2 = ConcurrencyProbe("2")
    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node("1", one)
    builder.add_node(node2)
    builder.add_node("3", three)
    builder.add_edge(START, "1")
    builder.add_conditional_edges("1", send_to_many)
    builder.add_conditional_edges("2", route_to_three)
    graph = builder.compile()

    # unbounded: all 100 tasks overlap
    assert await graph.ainvoke(["0"]) == ["0", "1", *range(100), "3"]
    assert node2.max_currently == 100
    assert node2.currently == 0

    # bounded: never more than 10 at a time, same final output
    node2.max_currently = 0
    assert await graph.ainvoke(["0"], {"max_concurrency": 10}) == [
        "0",
        "1",
        *range(100),
        "3",
    ]
    assert node2.max_currently == 10
    assert node2.currently == 0

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # interrupt before "2", then resume: the cap still applies on resume
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["2"])
        thread1 = {"max_concurrency": 10, "configurable": {"thread_id": "1"}}
        assert await graph.ainvoke(["0"], thread1, debug=True) == ["0", "1"]
        state = await graph.aget_state(thread1)
        assert state.values == ["0", "1"]
        assert await graph.ainvoke(None, thread1) == ["0", "1", *range(100), "3"]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_max_concurrency_control(checkpointer_name: str) -> None:
    """`max_concurrency` also caps Send tasks emitted via Command(goto=...)."""

    async def node1(state) -> Command[Literal["2"]]:
        # Fan out 100 parallel invocations of node "2" via Command.goto.
        return Command(update=["1"], goto=[Send("2", idx) for idx in range(100)])

    # Track peak simultaneous executions of node2 to assert the cap.
    node2_currently = 0
    node2_max_currently = 0

    async def node2(state) -> Command[Literal["3"]]:
        nonlocal node2_currently, node2_max_currently
        node2_currently += 1
        if node2_currently > node2_max_currently:
            node2_max_currently = node2_currently
        await asyncio.sleep(0.1)
        node2_currently -= 1
        return Command(update=[state], goto="3")

    # Fix: node3 is a plain node returning a state update (a list), not a
    # router, so the previous `-> Literal["3"]` annotation was incorrect.
    async def node3(state) -> list:
        return ["3"]

    builder = StateGraph(Annotated[list, operator.add])
    builder.add_node("1", node1)
    builder.add_node("2", node2)
    builder.add_node("3", node3)
    builder.add_edge(START, "1")
    graph = builder.compile()
    # edges 1 -.-> 2 and 2 -.-> 3 are inferred from the Command annotations
    assert (
        graph.get_graph().draw_mermaid()
        == """%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
	__start__([<p>__start__</p>]):::first
	1(1)
	2(2)
	3([3]):::last
	__start__ --> 1;
	1 -.-> 2;
	2 -.-> 3;
	classDef default fill:#f2f0ff,line-height:1.2
	classDef first fill-opacity:0
	classDef last fill:#bfb6fc
"""
    )
    # unbounded: all 100 tasks overlap
    assert await graph.ainvoke(["0"], debug=True) == ["0", "1", *range(100), "3"]
    assert node2_max_currently == 100
    assert node2_currently == 0
    # bounded: never more than 10 at a time, same final output
    node2_max_currently = 0
    assert await graph.ainvoke(["0"], {"max_concurrency": 10}) == [
        "0",
        "1",
        *range(100),
        "3",
    ]
    assert node2_max_currently == 10
    assert node2_currently == 0
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # interrupt before "2", then resume with the cap still in effect
        graph = builder.compile(checkpointer=checkpointer, interrupt_before=["2"])
        thread1 = {"max_concurrency": 10, "configurable": {"thread_id": "1"}}
        assert await graph.ainvoke(["0"], thread1) == ["0", "1"]
        assert await graph.ainvoke(None, thread1) == ["0", "1", *range(100), "3"]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_invoke_checkpoint_three(
    mocker: MockerFixture, checkpointer_name: str
) -> None:
    """Exercise checkpoint persistence end to end: state accumulation across
    invocations, recovery after a node error, per-thread isolation, history
    listing with cursor pagination, and aupdate_state creating a new checkpoint."""
    add_one = mocker.Mock(side_effect=lambda x: x["total"] + x["input"])
    def raise_if_above_10(input: int) -> int:
        # Fails the node AFTER "output"/"total" writes were produced, so the
        # test can verify the checkpoint is not updated on error.
        if input > 10:
            raise ValueError("Input is too large")
        return input
    one = (
        Channel.subscribe_to(["input"]).join(["total"])
        | add_one
        | Channel.write_to("output", "total")
        | raise_if_above_10
    )
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = Pregel(
            nodes={"one": one},
            channels={
                "total": BinaryOperatorAggregate(int, operator.add),
                "input": LastValue(int),
                "output": LastValue(int),
            },
            input_channels="input",
            output_channels="output",
            checkpointer=checkpointer,
            debug=True,
        )
        thread_1 = {"configurable": {"thread_id": "1"}}
        # total starts out as 0, so output is 0+2=2
        assert await app.ainvoke(2, thread_1) == 2
        state = await app.aget_state(thread_1)
        assert state is not None
        assert state.values.get("total") == 2
        # aget_state reflects the latest checkpoint id
        assert (
            state.config["configurable"]["checkpoint_id"]
            == (await checkpointer.aget(thread_1))["id"]
        )
        # total is now 2, so output is 2+3=5
        assert await app.ainvoke(3, thread_1) == 5
        state = await app.aget_state(thread_1)
        assert state is not None
        assert state.values.get("total") == 7
        assert (
            state.config["configurable"]["checkpoint_id"]
            == (await checkpointer.aget(thread_1))["id"]
        )
        # total is now 2+5=7, so output would be 7+4=11, but raises ValueError
        with pytest.raises(ValueError):
            await app.ainvoke(4, thread_1)
        # checkpoint is not updated
        state = await app.aget_state(thread_1)
        assert state is not None
        assert state.values.get("total") == 7
        assert state.next == ("one",)
        """we checkpoint inputs and it failed on "one", so the next node is one"""
        # we can recover from error by sending new inputs
        assert await app.ainvoke(2, thread_1) == 9
        state = await app.aget_state(thread_1)
        assert state is not None
        assert state.values.get("total") == 16, "total is now 7+9=16"
        assert state.next == ()
        thread_2 = {"configurable": {"thread_id": "2"}}
        # on a new thread, total starts out as 0, so output is 0+5=5
        assert await app.ainvoke(5, thread_2) == 5
        # thread 1 state is untouched by thread 2's invocation
        state = await app.aget_state({"configurable": {"thread_id": "1"}})
        assert state is not None
        assert state.values.get("total") == 16
        assert state.next == ()
        state = await app.aget_state(thread_2)
        assert state is not None
        assert state.values.get("total") == 5
        assert state.next == ()
        assert len([c async for c in app.aget_state_history(thread_1, limit=1)]) == 1
        # list all checkpoints for thread 1
        thread_1_history = [c async for c in app.aget_state_history(thread_1)]
        # there are 7 checkpoints
        assert len(thread_1_history) == 7
        assert Counter(c.metadata["source"] for c in thread_1_history) == {
            "input": 4,
            "loop": 3,
        }
        # sorted descending
        assert (
            thread_1_history[0].config["configurable"]["checkpoint_id"]
            > thread_1_history[1].config["configurable"]["checkpoint_id"]
        )
        # cursor pagination
        cursored = [
            c
            async for c in app.aget_state_history(
                thread_1, limit=1, before=thread_1_history[0].config
            )
        ]
        assert len(cursored) == 1
        assert cursored[0].config == thread_1_history[1].config
        # the last checkpoint
        assert thread_1_history[0].values["total"] == 16
        # the first "loop" checkpoint
        assert thread_1_history[-2].values["total"] == 2
        # can get each checkpoint using aget with config
        assert (await checkpointer.aget(thread_1_history[0].config))[
            "id"
        ] == thread_1_history[0].config["configurable"]["checkpoint_id"]
        assert (await checkpointer.aget(thread_1_history[1].config))[
            "id"
        ] == thread_1_history[1].config["configurable"]["checkpoint_id"]
        thread_1_next_config = await app.aupdate_state(thread_1_history[1].config, 10)
        # update creates a new checkpoint
        assert (
            thread_1_next_config["configurable"]["checkpoint_id"]
            > thread_1_history[0].config["configurable"]["checkpoint_id"]
        )
        # 1 more checkpoint in history
        assert len([c async for c in app.aget_state_history(thread_1)]) == 8
        assert Counter(
            [c.metadata["source"] async for c in app.aget_state_history(thread_1)]
        ) == {
            "update": 1,
            "input": 4,
            "loop": 3,
        }
        # the latest checkpoint is the updated one
        assert await app.aget_state(thread_1) == await app.aget_state(
            thread_1_next_config
        )
async def test_invoke_two_processes_two_in_join_two_out(mocker: MockerFixture) -> None:
    """Two producers publish to one topic; a joiner consumes the whole batch."""
    increment = mocker.Mock(side_effect=lambda x: x + 1)
    bump_all_by_ten = mocker.Mock(side_effect=lambda x: sorted(y + 10 for y in x))
    # Two independent producers, each writing the incremented input to "inbox".
    producer_a = Channel.subscribe_to("input") | increment | Channel.write_to("inbox")
    producer_b = Channel.subscribe_to("input") | increment | Channel.write_to("inbox")
    # The joiner runs only after all publishers to "inbox" have finished,
    # receiving everything published as a single array.
    joiner = (
        Channel.subscribe_to("inbox") | bump_all_by_ten | Channel.write_to("output")
    )
    app = Pregel(
        nodes={
            "one": producer_a,
            "chain_three": producer_b,
            "chain_four": joiner,
        },
        channels={
            "inbox": Topic(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # Sequential invocations each produce the same joined result.
    for _ in range(100):
        assert await app.ainvoke(2) == [13, 13]
    # Concurrent invocations are isolated from one another.
    expected = [[13, 13]] * 100
    assert await asyncio.gather(*(app.ainvoke(2) for _ in range(100))) == expected
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_invoke_join_then_call_other_pregel(
    mocker: MockerFixture, checkpointer_name: str
) -> None:
    """Invoke one Pregel app from inside another via .map(), then verify that a
    subgraph called twice in one node raises with a checkpointer attached and
    succeeds once inner-graph checkpointing is disabled."""
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    add_10_each = mocker.Mock(side_effect=lambda x: [y + 10 for y in x])
    # Inner app: a single node that increments its input.
    inner_app = Pregel(
        nodes={
            "one": Channel.subscribe_to("input") | add_one | Channel.write_to("output")
        },
        channels={
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # Fan each input element out to "inbox_one" individually via .map().
    one = (
        Channel.subscribe_to("input")
        | add_10_each
        | Channel.write_to("inbox_one").map()
    )
    # Run the inner app once per inbox element, then collect sorted results.
    two = (
        Channel.subscribe_to("inbox_one")
        | inner_app.map()
        | sorted
        | Channel.write_to("outbox_one")
    )
    chain_three = Channel.subscribe_to("outbox_one") | sum | Channel.write_to("output")
    app = Pregel(
        nodes={
            "one": one,
            "two": two,
            "chain_three": chain_three,
        },
        channels={
            "inbox_one": Topic(int),
            "outbox_one": LastValue(int),
            "output": LastValue(int),
            "input": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # Then invoke pubsub
    # [2, 3] -> +10 each -> [12, 13] -> inner +1 each -> [13, 14] -> sum -> 27
    for _ in range(10):
        assert await app.ainvoke([2, 3]) == 27
    # Concurrent invocations all produce the same result.
    assert await asyncio.gather(*(app.ainvoke([2, 3]) for _ in range(10))) == [
        27 for _ in range(10)
    ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # add checkpointer
        app.checkpointer = checkpointer
        # subgraph is called twice in the same node, through .map(), so raises
        with pytest.raises(MultipleSubgraphsError):
            await app.ainvoke([2, 3], {"configurable": {"thread_id": "1"}})
        # set inner graph checkpointer NeverCheckpoint
        inner_app.checkpointer = False
        # subgraph still called twice, but checkpointing for inner graph is disabled
        assert await app.ainvoke([2, 3], {"configurable": {"thread_id": "1"}}) == 27
async def test_invoke_two_processes_one_in_two_out(mocker: MockerFixture) -> None:
    """One node fans out to two channels; streaming surfaces both supersteps."""
    plus_one = mocker.Mock(side_effect=lambda x: x + 1)
    # First node writes its result to both "output" and the intermediate
    # "between" channel in one step.
    first = (
        Channel.subscribe_to("input") | plus_one | Channel.write_to("output", "between")
    )
    # Second node reads "between" and overwrites "output" with value + 1.
    second = Channel.subscribe_to("between") | plus_one | Channel.write_to("output")
    app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "input": LastValue(int),
            "between": LastValue(int),
            "output": LastValue(int),
        },
        stream_channels=["output", "between"],
        input_channels="input",
        output_channels="output",
    )
    # One chunk per superstep: first both channels at 3, then "output"
    # advanced to 4 by the second node.
    chunks = [chunk async for chunk in app.astream(2)]
    assert chunks == [
        {"between": 3, "output": 3},
        {"between": 3, "output": 4},
    ]
async def test_invoke_two_processes_no_out(mocker: MockerFixture) -> None:
    """When nothing is ever published to "output", ainvoke returns None."""
    plus_one = mocker.Mock(side_effect=lambda x: x + 1)
    first = Channel.subscribe_to("input") | plus_one | Channel.write_to("between")
    # Second node consumes "between" but does not write to any channel.
    second = Channel.subscribe_to("between") | plus_one
    app = Pregel(
        nodes={"one": first, "two": second},
        channels={
            "input": LastValue(int),
            "between": LastValue(int),
            "output": LastValue(int),
        },
        input_channels="input",
        output_channels="output",
    )
    # The run finishes once no more messages are being published, but since
    # the "output" channel never received a write, the result is None.
    result = await app.ainvoke(2)
    assert result is None
async def test_channel_enter_exit_timing(mocker: MockerFixture) -> None:
    """Verify that astream uses only the async context manager of a Context
    channel: setup happens once before the first chunk, cleanup once after the
    stream is exhausted, and the sync manager is never touched."""
    setup_sync = mocker.Mock()
    cleanup_sync = mocker.Mock()
    setup_async = mocker.Mock()
    cleanup_async = mocker.Mock()
    @contextmanager
    def an_int() -> Generator[int, None, None]:
        setup_sync()
        try:
            yield 5
        finally:
            cleanup_sync()
    @asynccontextmanager
    async def an_int_async() -> AsyncGenerator[int, None]:
        setup_async()
        try:
            yield 5
        finally:
            cleanup_async()
    add_one = mocker.Mock(side_effect=lambda x: x + 1)
    one = Channel.subscribe_to("input") | add_one | Channel.write_to("inbox")
    two = (
        Channel.subscribe_to("inbox")
        | RunnableLambda(add_one).abatch
        | Channel.write_to("output").abatch
    )
    app = Pregel(
        nodes={"one": one, "two": two},
        channels={
            "input": LastValue(int),
            "output": LastValue(int),
            "inbox": Topic(int),
            # Context channel backed by both a sync and an async manager.
            "ctx": Context(an_int, an_int_async),
        },
        input_channels="input",
        output_channels=["inbox", "output"],
        stream_channels=["inbox", "output"],
    )
    # Local async equivalent of enumerate() for async iterators.
    async def aenumerate(aiter: AsyncIterator[Any]) -> AsyncIterator[tuple[int, Any]]:
        i = 0
        async for chunk in aiter:
            yield i, chunk
            i += 1
    # Nothing is entered before streaming begins.
    assert setup_sync.call_count == 0
    assert cleanup_sync.call_count == 0
    assert setup_async.call_count == 0
    assert cleanup_async.call_count == 0
    async for i, chunk in aenumerate(app.astream(2)):
        assert setup_sync.call_count == 0, "Sync context manager should not be used"
        assert cleanup_sync.call_count == 0, "Sync context manager should not be used"
        assert setup_async.call_count == 1, "Expected setup to be called once"
        if i == 0:
            assert chunk == {"inbox": [3]}
        elif i == 1:
            assert chunk == {"output": 4}
        else:
            assert False, "Expected only two chunks"
    # Cleanup runs exactly once, after the stream is fully consumed.
    assert setup_sync.call_count == 0
    assert cleanup_sync.call_count == 0
    assert setup_async.call_count == 1, "Expected setup to be called once"
    assert cleanup_async.call_count == 1, "Expected cleanup to be called once"
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_conditional_graph(checkpointer_name: str) -> None:
    """End-to-end test of a conditional agent/tools Graph: plain invoke and
    stream output, astream_log patches, then state get/update with
    interrupt_after and interrupt_before, and resuming via re-invoke."""
    from langchain_core.agents import AgentAction, AgentFinish
    from langchain_core.language_models.fake import FakeStreamingListLLM
    from langchain_core.prompts import PromptTemplate
    from langchain_core.runnables import RunnablePassthrough
    from langchain_core.tools import tool
    # Assemble the tools
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"
    tools = [search_api]
    # Construct the agent
    prompt = PromptTemplate.from_template("Hello!")
    # Scripted responses: two tool calls, then a finish.
    llm = FakeStreamingListLLM(
        responses=[
            "tool:search_api:query",
            "tool:search_api:another",
            "finish:answer",
        ]
    )
    # Parse the scripted "tool:..."/"finish:..." strings into agent outcomes.
    async def agent_parser(input: str) -> Union[AgentAction, AgentFinish]:
        if input.startswith("finish"):
            _, answer = input.split(":")
            return AgentFinish(return_values={"answer": answer}, log=input)
        else:
            _, tool_name, tool_input = input.split(":")
            return AgentAction(tool=tool_name, tool_input=tool_input, log=input)
    agent = RunnablePassthrough.assign(agent_outcome=prompt | llm | agent_parser)
    # Define tool execution logic
    async def execute_tools(data: dict) -> dict:
        data = data.copy()
        agent_action: AgentAction = data.pop("agent_outcome")
        observation = await {t.name: t for t in tools}[agent_action.tool].ainvoke(
            agent_action.tool_input
        )
        if data.get("intermediate_steps") is None:
            data["intermediate_steps"] = []
        else:
            data["intermediate_steps"] = data["intermediate_steps"].copy()
        data["intermediate_steps"].append([agent_action, observation])
        return data
    # Define decision-making logic
    async def should_continue(data: dict, config: RunnableConfig) -> str:
        # Logic to decide whether to continue in the loop or exit
        if isinstance(data["agent_outcome"], AgentFinish):
            return "exit"
        else:
            return "continue"
    # Define a new graph
    workflow = Graph()
    workflow.add_node("agent", agent)
    workflow.add_node("tools", execute_tools)
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges(
        "agent", should_continue, {"continue": "tools", "exit": END}
    )
    workflow.add_edge("tools", "agent")
    app = workflow.compile()
    # Full run: both tool calls executed, then the finish outcome returned.
    assert await app.ainvoke({"input": "what is weather in sf"}) == {
        "input": "what is weather in sf",
        "intermediate_steps": [
            [
                AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:query",
                ),
                "result for query",
            ],
            [
                AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
                "result for another",
            ],
        ],
        "agent_outcome": AgentFinish(
            return_values={"answer": "answer"}, log="finish:answer"
        ),
    }
    # Streaming yields one chunk per node execution, in order.
    assert [c async for c in app.astream({"input": "what is weather in sf"})] == [
        {
            "agent": {
                "input": "what is weather in sf",
                "agent_outcome": AgentAction(
                    tool="search_api", tool_input="query", log="tool:search_api:query"
                ),
            }
        },
        {
            "tools": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ]
                ],
            }
        },
        {
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="another",
                    log="tool:search_api:another",
                ),
            }
        },
        {
            "tools": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ],
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="another",
                            log="tool:search_api:another",
                        ),
                        "result for another",
                    ],
                ],
            }
        },
        {
            "agent": {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:query",
                        ),
                        "result for query",
                    ],
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="another",
                            log="tool:search_api:another",
                        ),
                        "result for another",
                    ],
                ],
                "agent_outcome": AgentFinish(
                    return_values={"answer": "answer"}, log="finish:answer"
                ),
            }
        },
    ]
    # astream_log emits JSON patches; each agent run appears under its own key.
    patches = [c async for c in app.astream_log({"input": "what is weather in sf"})]
    patch_paths = {op["path"] for log in patches for op in log.ops}
    # Check that agent (one of the nodes) has its output streamed to the logs
    assert "/logs/agent/streamed_output/-" in patch_paths
    assert "/logs/agent:2/streamed_output/-" in patch_paths
    assert "/logs/agent:3/streamed_output/-" in patch_paths
    # Check that agent (one of the nodes) has its final output set in the logs
    assert "/logs/agent/final_output" in patch_paths
    assert "/logs/agent:2/final_output" in patch_paths
    assert "/logs/agent:3/final_output" in patch_paths
    assert [
        p["value"]
        for log in patches
        for p in log.ops
        if p["path"] == "/logs/agent/final_output"
        or p["path"] == "/logs/agent:2/final_output"
        or p["path"] == "/logs/agent:3/final_output"
    ] == [
        {
            "input": "what is weather in sf",
            "agent_outcome": AgentAction(
                tool="search_api", tool_input="query", log="tool:search_api:query"
            ),
        },
        {
            "input": "what is weather in sf",
            "intermediate_steps": [
                [
                    AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                    "result for query",
                ]
            ],
            "agent_outcome": AgentAction(
                tool="search_api",
                tool_input="another",
                log="tool:search_api:another",
            ),
        },
        {
            "input": "what is weather in sf",
            "intermediate_steps": [
                [
                    AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                    "result for query",
                ],
                [
                    AgentAction(
                        tool="search_api",
                        tool_input="another",
                        log="tool:search_api:another",
                    ),
                    "result for another",
                ],
            ],
            "agent_outcome": AgentFinish(
                return_values={"answer": "answer"}, log="finish:answer"
            ),
        },
    ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # test state get/update methods with interrupt_after
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["agent"],
        )
        config = {"configurable": {"thread_id": "1"}}
        # Execution pauses right after the first agent step.
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"input": "what is weather in sf"}, config
            )
        ] == [
            {
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                }
            }
        ]
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                },
            },
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {
                    "agent": {
                        "agent": {
                            "input": "what is weather in sf",
                            "agent_outcome": AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                        }
                    }
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # Replace the paused agent outcome with an edited action.
        await app_w_interrupt.aupdate_state(
            config,
            {
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:a different query",
                ),
                "input": "what is weather in sf",
            },
        )
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "input": "what is weather in sf",
                },
            },
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {
                    "agent": {
                        "agent_outcome": AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "input": "what is weather in sf",
                    }
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # Resume: the edited action flows through tools and the next agent step.
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "agent": {
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "input": "what is weather in sf",
                },
            },
            {
                "tools": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                }
            },
            {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="another",
                        log="tool:search_api:another",
                    ),
                }
            },
        ]
        # Overwrite the state with a final answer instead of continuing.
        await app_w_interrupt.aupdate_state(
            config,
            {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentFinish(
                    return_values={"answer": "a really nice answer"},
                    log="finish:a really nice answer",
                ),
            },
        )
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentFinish(
                        return_values={"answer": "a really nice answer"},
                        log="finish:a really nice answer",
                    ),
                },
            },
            tasks=(),
            next=(),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 4,
                "writes": {
                    "agent": {
                        "input": "what is weather in sf",
                        "intermediate_steps": [
                            [
                                AgentAction(
                                    tool="search_api",
                                    tool_input="query",
                                    log="tool:search_api:a different query",
                                ),
                                "result for query",
                            ]
                        ],
                        "agent_outcome": AgentFinish(
                            return_values={"answer": "a really nice answer"},
                            log="finish:a really nice answer",
                        ),
                    }
                },
                "thread_id": "2",
            } if False else {
                "parents": {},
                "source": "update",
                "step": 4,
                "writes": {
                    "agent": {
                        "input": "what is weather in sf",
                        "intermediate_steps": [
                            [
                                AgentAction(
                                    tool="search_api",
                                    tool_input="query",
                                    log="tool:search_api:a different query",
                                ),
                                "result for query",
                            ]
                        ],
                        "agent_outcome": AgentFinish(
                            return_values={"answer": "a really nice answer"},
                            log="finish:a really nice answer",
                        ),
                    }
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # test state get/update methods with interrupt_before
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_before=["tools"],
        )
        config = {"configurable": {"thread_id": "2"}}
        llm.i = 0
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"input": "what is weather in sf"}, config
            )
        ] == [
            {
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                }
            }
        ]
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                },
            },
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {
                    "agent": {
                        "agent": {
                            "input": "what is weather in sf",
                            "agent_outcome": AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                        }
                    }
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        await app_w_interrupt.aupdate_state(
            config,
            {
                "agent_outcome": AgentAction(
                    tool="search_api",
                    tool_input="query",
                    log="tool:search_api:a different query",
                ),
                "input": "what is weather in sf",
            },
        )
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "input": "what is weather in sf",
                },
            },
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {
                    "agent": {
                        "agent_outcome": AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "input": "what is weather in sf",
                    }
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "agent": {
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:a different query",
                    ),
                    "input": "what is weather in sf",
                },
            },
            {
                "tools": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                }
            },
            {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="another",
                        log="tool:search_api:another",
                    ),
                }
            },
        ]
        await app_w_interrupt.aupdate_state(
            config,
            {
                "input": "what is weather in sf",
                "intermediate_steps": [
                    [
                        AgentAction(
                            tool="search_api",
                            tool_input="query",
                            log="tool:search_api:a different query",
                        ),
                        "result for query",
                    ]
                ],
                "agent_outcome": AgentFinish(
                    return_values={"answer": "a really nice answer"},
                    log="finish:a really nice answer",
                ),
            },
        )
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:a different query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentFinish(
                        return_values={"answer": "a really nice answer"},
                        log="finish:a really nice answer",
                    ),
                },
            },
            tasks=(),
            next=(),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 4,
                "writes": {
                    "agent": {
                        "input": "what is weather in sf",
                        "intermediate_steps": [
                            [
                                AgentAction(
                                    tool="search_api",
                                    tool_input="query",
                                    log="tool:search_api:a different query",
                                ),
                                "result for query",
                            ]
                        ],
                        "agent_outcome": AgentFinish(
                            return_values={"answer": "a really nice answer"},
                            log="finish:a really nice answer",
                        ),
                    }
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # test re-invoke to continue with interrupt_before
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_before=["tools"],
        )
        config = {"configurable": {"thread_id": "3"}}
        llm.i = 0  # reset the llm
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"input": "what is weather in sf"}, config
            )
        ] == [
            {
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                }
            }
        ]
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                },
            },
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": {
                    "agent": {
                        "agent": {
                            "input": "what is weather in sf",
                            "agent_outcome": AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                        }
                    }
                },
                "thread_id": "3",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # Re-invoke with None input to continue past the interrupt.
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "agent": {
                    "input": "what is weather in sf",
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="query",
                        log="tool:search_api:query",
                    ),
                },
            },
            {
                "tools": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                            "result for query",
                        ]
                    ],
                }
            },
            {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="another",
                        log="tool:search_api:another",
                    ),
                }
            },
        ]
        # Second re-invoke continues through the second tool call to the finish.
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                            "result for query",
                        ]
                    ],
                    "agent_outcome": AgentAction(
                        tool="search_api",
                        tool_input="another",
                        log="tool:search_api:another",
                    ),
                }
            },
            {
                "tools": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                            "result for query",
                        ],
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="another",
                                log="tool:search_api:another",
                            ),
                            "result for another",
                        ],
                    ],
                }
            },
            {
                "agent": {
                    "input": "what is weather in sf",
                    "intermediate_steps": [
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="query",
                                log="tool:search_api:query",
                            ),
                            "result for query",
                        ],
                        [
                            AgentAction(
                                tool="search_api",
                                tool_input="another",
                                log="tool:search_api:another",
                            ),
                            "result for another",
                        ],
                    ],
                    "agent_outcome": AgentFinish(
                        return_values={"answer": "answer"}, log="finish:answer"
                    ),
                }
            },
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_conditional_graph_state(
mocker: MockerFixture, checkpointer_name: str
) -> None:
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.language_models.fake import FakeStreamingListLLM
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool
setup = mocker.Mock()
teardown = mocker.Mock()
@asynccontextmanager
async def assert_ctx_once() -> AsyncIterator[None]:
assert setup.call_count == 0
assert teardown.call_count == 0
try:
yield
finally:
assert setup.call_count == 1
assert teardown.call_count == 1
setup.reset_mock()
teardown.reset_mock()
class MyPydanticContextModel(BaseModel, arbitrary_types_allowed=True):
session: httpx.AsyncClient
something_else: str
@asynccontextmanager
async def make_context(
config: RunnableConfig,
) -> AsyncIterator[MyPydanticContextModel]:
assert isinstance(config, dict)
setup()
session = httpx.AsyncClient()
try:
yield MyPydanticContextModel(session=session, something_else="hello")
finally:
await session.aclose()
teardown()
class AgentState(TypedDict):
input: Annotated[str, UntrackedValue]
agent_outcome: Optional[Union[AgentAction, AgentFinish]]
intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
context: Annotated[MyPydanticContextModel, Context(make_context)]
# Assemble the tools
@tool()
def search_api(query: str) -> str:
"""Searches the API for the query."""
return f"result for {query}"
tools = [search_api]
# Construct the agent
prompt = PromptTemplate.from_template("Hello!")
llm = FakeStreamingListLLM(
responses=[
"tool:search_api:query",
"tool:search_api:another",
"finish:answer",
]
)
def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]:
if input.startswith("finish"):
_, answer = input.split(":")
return {
"agent_outcome": AgentFinish(
return_values={"answer": answer}, log=input
)
}
else:
_, tool_name, tool_input = input.split(":")
return {
"agent_outcome": AgentAction(
tool=tool_name, tool_input=tool_input, log=input
)
}
agent = prompt | llm | agent_parser
# Define tool execution logic
def execute_tools(data: AgentState) -> dict:
# check we have httpx session in AgentState
assert isinstance(data["context"], MyPydanticContextModel)
# execute the tool
agent_action: AgentAction = data.pop("agent_outcome")
observation = {t.name: t for t in tools}[agent_action.tool].invoke(
agent_action.tool_input
)
return {"intermediate_steps": [[agent_action, observation]]}
# Define decision-making logic
def should_continue(data: AgentState) -> str:
# check we have httpx session in AgentState
assert isinstance(data["context"], MyPydanticContextModel)
# Logic to decide whether to continue in the loop or exit
if isinstance(data["agent_outcome"], AgentFinish):
return "exit"
else:
return "continue"
# Define a new graph
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent)
workflow.add_node("tools", execute_tools)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
"agent", should_continue, {"continue": "tools", "exit": END}
)
workflow.add_edge("tools", "agent")
app = workflow.compile()
async with assert_ctx_once():
assert await app.ainvoke({"input": "what is weather in sf"}) == {
"input": "what is weather in sf",
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
],
[
AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
"result for another",
],
],
"agent_outcome": AgentFinish(
return_values={"answer": "answer"}, log="finish:answer"
),
}
async with assert_ctx_once():
assert [c async for c in app.astream({"input": "what is weather in sf"})] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
"result for another",
],
],
}
},
{
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "answer"}, log="finish:answer"
),
}
},
]
async with assert_ctx_once():
patches = [c async for c in app.astream_log({"input": "what is weather in sf"})]
patch_paths = {op["path"] for log in patches for op in log.ops}
# Check that agent (one of the nodes) has its output streamed to the logs
assert "/logs/agent/streamed_output/-" in patch_paths
# Check that agent (one of the nodes) has its final output set in the logs
assert "/logs/agent/final_output" in patch_paths
assert [
p["value"]
for log in patches
for p in log.ops
if p["path"] == "/logs/agent/final_output"
or p["path"] == "/logs/agent:2/final_output"
or p["path"] == "/logs/agent:3/final_output"
] == [
{
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
)
},
{
"agent_outcome": AgentAction(
tool="search_api", tool_input="another", log="tool:search_api:another"
)
},
{
"agent_outcome": AgentFinish(
return_values={"answer": "answer"}, log="finish:answer"
),
},
]
async with awith_checkpointer(checkpointer_name) as checkpointer:
# test state get/update methods with interrupt_after
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_after=["agent"],
)
config = {"configurable": {"thread_id": "1"}}
async with assert_ctx_once():
assert [
c
async for c in app_w_interrupt.astream(
{"input": "what is weather in sf"}, config
)
] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
{"__interrupt__": ()},
]
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "1",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
async with assert_ctx_once():
await app_w_interrupt.aupdate_state(
config,
{
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
},
)
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
}
},
"thread_id": "1",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
async with assert_ctx_once():
assert [c async for c in app_w_interrupt.astream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
async with assert_ctx_once():
await app_w_interrupt.aupdate_state(
config,
{
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
},
)
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
},
tasks=(),
next=(),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
}
},
"thread_id": "1",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
# test state get/update methods with interrupt_before
app_w_interrupt = workflow.compile(
checkpointer=checkpointer,
interrupt_before=["tools"],
)
config = {"configurable": {"thread_id": "2"}}
llm.i = 0 # reset the llm
assert [
c
async for c in app_w_interrupt.astream(
{"input": "what is weather in sf"}, config
)
] == [
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
{"__interrupt__": ()},
]
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api", tool_input="query", log="tool:search_api:query"
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:query",
),
}
},
"thread_id": "2",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
await app_w_interrupt.aupdate_state(
config,
{
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
},
)
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"intermediate_steps": [],
},
tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
next=("tools",),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 2,
"writes": {
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
)
}
},
"thread_id": "2",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
assert [c async for c in app_w_interrupt.astream(None, config)] == [
{
"tools": {
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
}
},
{
"agent": {
"agent_outcome": AgentAction(
tool="search_api",
tool_input="another",
log="tool:search_api:another",
),
}
},
{"__interrupt__": ()},
]
await app_w_interrupt.aupdate_state(
config,
{
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
},
)
assert await app_w_interrupt.aget_state(config) == StateSnapshot(
values={
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
),
"intermediate_steps": [
[
AgentAction(
tool="search_api",
tool_input="query",
log="tool:search_api:a different query",
),
"result for query",
]
],
},
tasks=(),
next=(),
config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
created_at=(
await app_w_interrupt.checkpointer.aget_tuple(config)
).checkpoint["ts"],
metadata={
"parents": {},
"source": "update",
"step": 5,
"writes": {
"agent": {
"agent_outcome": AgentFinish(
return_values={"answer": "a really nice answer"},
log="finish:a really nice answer",
)
}
},
"thread_id": "2",
},
parent_config=[
c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
][-1].config,
)
async def test_conditional_entrypoint_graph() -> None:
    """A Graph whose entry point is chosen at runtime routes long inputs right."""

    async def left(data: str) -> str:
        return data + "->left"

    async def right(data: str) -> str:
        return data + "->right"

    def should_start(data: str) -> str:
        # Inputs longer than 10 characters start on the right branch.
        return "go-right" if len(data) > 10 else "go-left"

    # Build a graph with no fixed entry point; should_start picks the branch.
    graph = Graph()
    graph.add_node("left", left)
    graph.add_node("right", right)
    graph.set_conditional_entry_point(
        should_start, {"go-left": "left", "go-right": "right"}
    )
    graph.add_conditional_edges("left", lambda data: END)
    graph.add_edge("right", END)
    app = graph.compile()

    expected = "what is weather in sf->right"
    assert await app.ainvoke("what is weather in sf") == expected
    assert [c async for c in app.astream("what is weather in sf")] == [
        {"right": expected},
    ]
async def test_conditional_entrypoint_graph_state() -> None:
    """Same conditional entry point as above, but over a typed state dict."""

    class AgentState(TypedDict, total=False):
        input: str
        output: str
        steps: Annotated[list[str], operator.add]

    async def left(data: AgentState) -> AgentState:
        return {"output": data["input"] + "->left"}

    async def right(data: AgentState) -> AgentState:
        return {"output": data["input"] + "->right"}

    def should_start(data: AgentState) -> str:
        assert data["steps"] == [], "Expected input to be read from the state"
        # Inputs longer than 10 characters start on the right branch.
        return "go-right" if len(data["input"]) > 10 else "go-left"

    # Build a state graph with no fixed entry point; should_start picks the branch.
    graph = StateGraph(AgentState)
    graph.add_node("left", left)
    graph.add_node("right", right)
    graph.set_conditional_entry_point(
        should_start, {"go-left": "left", "go-right": "right"}
    )
    graph.add_conditional_edges("left", lambda data: END)
    graph.add_edge("right", END)
    app = graph.compile()

    assert await app.ainvoke({"input": "what is weather in sf"}) == {
        "input": "what is weather in sf",
        "output": "what is weather in sf->right",
        "steps": [],
    }
    assert [c async for c in app.astream({"input": "what is weather in sf"})] == [
        {"right": {"output": "what is weather in sf->right"}},
    ]
async def test_prebuilt_tool_chat() -> None:
    """End-to-end check of the prebuilt tool-calling executor with a scripted chat model.

    The fake model emits one tool call, then two parallel tool calls, then a
    plain final answer; the test pins the exact message history produced by
    ``ainvoke``, ``astream(stream_mode="messages")``, and default ``astream``.
    """
    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_core.tools import tool

    # Scripted responses: single tool call -> two tool calls -> final answer.
    model = FakeChatModel(
        messages=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another"},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one"},
                    },
                ],
            ),
            AIMessage(content="answer"),
        ]
    )

    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"

    tools = [search_api]
    app = create_tool_calling_executor(model, tools)

    # Full run: the final state interleaves AI tool calls with tool results.
    assert await app.ainvoke(
        {"messages": [HumanMessage(content="what is weather in sf")]}
    ) == {
        "messages": [
            _AnyIdHumanMessage(content="what is weather in sf"),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            _AnyIdAIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another"},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one"},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for another",
                name="search_api",
                tool_call_id="tool_call234",
            ),
            _AnyIdToolMessage(
                content="result for a third one",
                name="search_api",
                tool_call_id="tool_call567",
                id=AnyStr(),
            ),
            _AnyIdAIMessage(content="answer"),
        ]
    }

    # stream_mode="messages" yields (message, metadata) pairs with langgraph_*
    # bookkeeping keys for every model chunk and tool result.
    assert [
        c
        async for c in app.astream(
            {"messages": [HumanMessage(content="what is weather in sf")]},
            stream_mode="messages",
        )
    ] == [
        (
            _AnyIdAIMessageChunk(
                content="",
                tool_calls=[
                    {
                        "name": "search_api",
                        "args": {"query": "query"},
                        "id": "tool_call123",
                        "type": "tool_call",
                    }
                ],
                tool_call_chunks=[
                    {
                        "name": "search_api",
                        "args": '{"query": "query"}',
                        "id": "tool_call123",
                        "index": None,
                        "type": "tool_call_chunk",
                    }
                ],
            ),
            {
                "langgraph_step": 1,
                "langgraph_node": "agent",
                "langgraph_triggers": ["start:agent"],
                "langgraph_path": ("__pregel_pull", "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
        (
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            {
                "langgraph_step": 2,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": ("__pregel_pull", "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            _AnyIdAIMessageChunk(
                content="",
                tool_calls=[
                    {
                        "name": "search_api",
                        "args": {"query": "another"},
                        "id": "tool_call234",
                        "type": "tool_call",
                    },
                    {
                        "name": "search_api",
                        "args": {"query": "a third one"},
                        "id": "tool_call567",
                        "type": "tool_call",
                    },
                ],
                tool_call_chunks=[
                    {
                        "name": "search_api",
                        "args": '{"query": "another"}',
                        "id": "tool_call234",
                        "index": None,
                        "type": "tool_call_chunk",
                    },
                    {
                        "name": "search_api",
                        "args": '{"query": "a third one"}',
                        "id": "tool_call567",
                        "index": None,
                        "type": "tool_call_chunk",
                    },
                ],
            ),
            {
                "langgraph_step": 3,
                "langgraph_node": "agent",
                "langgraph_triggers": ["tools"],
                "langgraph_path": ("__pregel_pull", "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
        (
            _AnyIdToolMessage(
                content="result for another",
                name="search_api",
                tool_call_id="tool_call234",
            ),
            {
                "langgraph_step": 4,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": ("__pregel_pull", "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            # Both parallel tool results carry the same step number (4).
            _AnyIdToolMessage(
                content="result for a third one",
                name="search_api",
                tool_call_id="tool_call567",
            ),
            {
                "langgraph_step": 4,
                "langgraph_node": "tools",
                "langgraph_triggers": ["branch:agent:should_continue:tools"],
                "langgraph_path": ("__pregel_pull", "tools"),
                "langgraph_checkpoint_ns": AnyStr("tools:"),
            },
        ),
        (
            _AnyIdAIMessageChunk(
                content="answer",
            ),
            {
                "langgraph_step": 5,
                "langgraph_node": "agent",
                "langgraph_triggers": ["tools"],
                "langgraph_path": ("__pregel_pull", "agent"),
                "langgraph_checkpoint_ns": AnyStr("agent:"),
                "checkpoint_ns": AnyStr("agent:"),
                "ls_provider": "fakechatmodel",
                "ls_model_type": "chat",
            },
        ),
    ]

    # Default stream mode yields one {node_name: update} dict per superstep.
    assert [
        c
        async for c in app.astream(
            {"messages": [HumanMessage(content="what is weather in sf")]}
        )
    ] == [
        {
            "agent": {
                "messages": [
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    )
                ]
            }
        },
        {
            "tools": {
                "messages": [
                    _AnyIdToolMessage(
                        content="result for query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    )
                ]
            }
        },
        {
            "agent": {
                "messages": [
                    _AnyIdAIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another"},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one"},
                            },
                        ],
                    )
                ]
            }
        },
        {
            "tools": {
                "messages": [
                    _AnyIdToolMessage(
                        content="result for another",
                        name="search_api",
                        tool_call_id="tool_call234",
                    ),
                    _AnyIdToolMessage(
                        content="result for a third one",
                        name="search_api",
                        tool_call_id="tool_call567",
                    ),
                ]
            }
        },
        {"agent": {"messages": [_AnyIdAIMessage(content="answer")]}},
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_state_graph_packets(checkpointer_name: str) -> None:
    """Exercise Send-based ("packet") dispatch through a StateGraph.

    The conditional edge returns ``Send("tools", tool_call)`` packets, so each
    tool call becomes its own task. The test pins the full run, streaming
    output, and then get/update-state behavior around interrupts — both
    ``interrupt_after=["agent"]`` and ``interrupt_before=["tools"]`` — against
    every async checkpointer implementation.
    """
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import (
        AIMessage,
        BaseMessage,
        HumanMessage,
        ToolMessage,
    )
    from langchain_core.tools import tool

    class AgentState(TypedDict):
        messages: Annotated[list[BaseMessage], add_messages]
        # Managed value: an httpx client injected per run via the Context channel.
        session: Annotated[httpx.AsyncClient, Context(httpx.AsyncClient)]

    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"

    tools = [search_api]
    tools_by_name = {t.name: t for t in tools}

    # Scripted responses: one tool call -> two tool calls (with an "idx" used
    # below to stagger execution) -> final answer.
    model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                id="ai1",
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            AIMessage(
                id="ai2",
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another", "idx": 0},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one", "idx": 1},
                    },
                ],
            ),
            AIMessage(id="ai3", content="answer"),
        ]
    )

    # Define decision-making logic
    def should_continue(data: AgentState) -> str:
        assert isinstance(data["session"], httpx.AsyncClient)
        # Logic to decide whether to continue in the loop or exit
        if tool_calls := data["messages"][-1].tool_calls:
            # One Send packet per tool call -> one "tools" task per call.
            return [Send("tools", tool_call) for tool_call in tool_calls]
        else:
            return END

    async def tools_node(input: ToolCall, config: RunnableConfig) -> AgentState:
        # Sleep proportional to "idx" so parallel results arrive in idx order.
        await asyncio.sleep(input["args"].get("idx", 0) / 10)
        output = await tools_by_name[input["name"]].ainvoke(input["args"], config)
        return {
            "messages": ToolMessage(
                content=output, name=input["name"], tool_call_id=input["id"]
            )
        }

    # Define a new graph
    workflow = StateGraph(AgentState)

    # Define the two nodes we will cycle between
    workflow.add_node("agent", {"messages": RunnablePick("messages") | model})
    workflow.add_node("tools", tools_node)

    # Set the entrypoint as `agent`
    # This means that this node is the first one called
    workflow.set_entry_point("agent")

    # We now add a conditional edge
    workflow.add_conditional_edges("agent", should_continue)

    # We now add a normal edge from `tools` to `agent`.
    # This means that after `tools` is called, `agent` node is called next.
    workflow.add_edge("tools", "agent")

    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
    # meaning you can use it as you would any other runnable
    app = workflow.compile()

    # Full run without a checkpointer: pins the complete message history.
    assert await app.ainvoke(
        {"messages": HumanMessage(content="what is weather in sf")}
    ) == {
        "messages": [
            _AnyIdHumanMessage(content="what is weather in sf"),
            AIMessage(
                id="ai1",
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for query",
                name="search_api",
                tool_call_id="tool_call123",
            ),
            AIMessage(
                id="ai2",
                content="",
                tool_calls=[
                    {
                        "id": "tool_call234",
                        "name": "search_api",
                        "args": {"query": "another", "idx": 0},
                    },
                    {
                        "id": "tool_call567",
                        "name": "search_api",
                        "args": {"query": "a third one", "idx": 1},
                    },
                ],
            ),
            _AnyIdToolMessage(
                content="result for another",
                name="search_api",
                tool_call_id="tool_call234",
            ),
            _AnyIdToolMessage(
                content="result for a third one",
                name="search_api",
                tool_call_id="tool_call567",
            ),
            AIMessage(content="answer", id="ai3"),
        ]
    }

    # Streaming: each Send task produces its own {"tools": ...} update.
    assert [
        c
        async for c in app.astream(
            {"messages": [HumanMessage(content="what is weather in sf")]}
        )
    ] == [
        {
            "agent": {
                "messages": AIMessage(
                    id="ai1",
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "query"},
                        },
                    ],
                )
            },
        },
        {
            "tools": {
                "messages": _AnyIdToolMessage(
                    content="result for query",
                    name="search_api",
                    tool_call_id="tool_call123",
                )
            }
        },
        {
            "agent": {
                "messages": AIMessage(
                    id="ai2",
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call234",
                            "name": "search_api",
                            "args": {"query": "another", "idx": 0},
                        },
                        {
                            "id": "tool_call567",
                            "name": "search_api",
                            "args": {"query": "a third one", "idx": 1},
                        },
                    ],
                )
            }
        },
        {
            "tools": {
                "messages": _AnyIdToolMessage(
                    content="result for another",
                    name="search_api",
                    tool_call_id="tool_call234",
                )
            },
        },
        {
            "tools": {
                "messages": _AnyIdToolMessage(
                    content="result for a third one",
                    name="search_api",
                    tool_call_id="tool_call567",
                ),
            },
        },
        {"agent": {"messages": AIMessage(content="answer", id="ai3")}},
    ]

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # interrupt after agent
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["agent"],
        )
        config = {"configurable": {"thread_id": "1"}}

        assert [
            c
            async for c in app_w_interrupt.astream(
                {"messages": HumanMessage(content="what is weather in sf")}, config
            )
        ] == [
            {
                "agent": {
                    "messages": AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    )
                }
            },
            {"__interrupt__": ()},
        ]

        # The remaining assertions depend on the Send-v2 task layout.
        if not FF_SEND_V2:
            return

        # After the interrupt: agent's result is recorded, "tools" is pending.
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    ),
                ]
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            "",
                            id="ai1",
                            tool_calls=[
                                {
                                    "name": "search_api",
                                    "args": {"query": "query"},
                                    "id": "tool_call123",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    },
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
                ),
            ),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        # modify ai message
        last_message = (await app_w_interrupt.aget_state(config)).values["messages"][-1]
        last_message.tool_calls[0]["args"]["query"] = "a different query"
        await app_w_interrupt.aupdate_state(config, {"messages": last_message})

        # message was replaced instead of appended
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                ]
            },
            tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),),
            next=("tools",),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {
                    "agent": {
                        "messages": AIMessage(
                            id="ai1",
                            content="",
                            tool_calls=[
                                {
                                    "id": "tool_call123",
                                    "name": "search_api",
                                    "args": {"query": "a different query"},
                                },
                            ],
                        )
                    }
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        # Resume: the tool runs with the edited query, then agent fires again.
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "tools": {
                    "messages": _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    )
                }
            },
            {
                "agent": {
                    "messages": AIMessage(
                        id="ai2",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another", "idx": 0},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one", "idx": 1},
                            },
                        ],
                    )
                },
            },
            {"__interrupt__": ()},
        ]

        # Two parallel tool calls -> two pending "tools" tasks.
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                    _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    ),
                    AIMessage(
                        id="ai2",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another", "idx": 0},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one", "idx": 1},
                            },
                        ],
                    ),
                ]
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            "",
                            id="ai2",
                            tool_calls=[
                                {
                                    "name": "search_api",
                                    "args": {"query": "another", "idx": 0},
                                    "id": "tool_call234",
                                    "type": "tool_call",
                                },
                                {
                                    "name": "search_api",
                                    "args": {"query": "a third one", "idx": 1},
                                    "id": "tool_call567",
                                    "type": "tool_call",
                                },
                            ],
                        )
                    },
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr())
                ),
            ),
            next=("tools", "tools"),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 2,
                "writes": {
                    "tools": {
                        "messages": _AnyIdToolMessage(
                            content="result for a different query",
                            name="search_api",
                            tool_call_id="tool_call123",
                        ),
                    },
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        await app_w_interrupt.aupdate_state(
            config,
            {"messages": AIMessage(content="answer", id="ai2")},
        )

        # replaces message even if object identity is different, as long as id is the same
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                    _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    ),
                    AIMessage(content="answer", id="ai2"),
                ]
            },
            tasks=(),
            next=(),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 3,
                "writes": {
                    "agent": {
                        "messages": AIMessage(content="answer", id="ai2"),
                    }
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        # interrupt before tools
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_before=["tools"],
        )
        config = {"configurable": {"thread_id": "2"}}
        model.i = 0  # reset the scripted model to its first response

        assert [
            c
            async for c in app_w_interrupt.astream(
                {"messages": HumanMessage(content="what is weather in sf")}, config
            )
        ] == [
            {
                "agent": {
                    "messages": AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    )
                }
            },
            {"__interrupt__": ()},
        ]

        # Same snapshot shape as the interrupt_after case (thread "2").
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            },
                        ],
                    ),
                ]
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            content="",
                            additional_kwargs={},
                            response_metadata={},
                            id="ai1",
                            tool_calls=[
                                {
                                    "name": "search_api",
                                    "args": {"query": "query"},
                                    "id": "tool_call123",
                                    "type": "tool_call",
                                }
                            ],
                        )
                    },
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
                ),
            ),
            next=("tools",),
            config=(await app_w_interrupt.checkpointer.aget_tuple(config)).config,
            created_at=(
                await app_w_interrupt.checkpointer.aget_tuple(config)
            ).checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        # modify ai message
        last_message = (await app_w_interrupt.aget_state(config)).values["messages"][-1]
        last_message.tool_calls[0]["args"]["query"] = "a different query"
        await app_w_interrupt.aupdate_state(config, {"messages": last_message})

        # message was replaced instead of appended
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                ]
            },
            tasks=(PregelTask(AnyStr(), "tools", (PUSH, (), 0, AnyStr())),),
            next=("tools",),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {
                    "agent": {
                        "messages": AIMessage(
                            id="ai1",
                            content="",
                            tool_calls=[
                                {
                                    "id": "tool_call123",
                                    "name": "search_api",
                                    "args": {"query": "a different query"},
                                },
                            ],
                        )
                    }
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "tools": {
                    "messages": _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    )
                }
            },
            {
                "agent": {
                    "messages": AIMessage(
                        id="ai2",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another", "idx": 0},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one", "idx": 1},
                            },
                        ],
                    )
                },
            },
            {"__interrupt__": ()},
        ]

        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                    _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    ),
                    AIMessage(
                        id="ai2",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call234",
                                "name": "search_api",
                                "args": {"query": "another", "idx": 0},
                            },
                            {
                                "id": "tool_call567",
                                "name": "search_api",
                                "args": {"query": "a third one", "idx": 1},
                            },
                        ],
                    ),
                ]
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="agent",
                    path=("__pregel_pull", "agent"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={
                        "messages": AIMessage(
                            content="",
                            additional_kwargs={},
                            response_metadata={},
                            id="ai2",
                            tool_calls=[
                                {
                                    "name": "search_api",
                                    "args": {"query": "another", "idx": 0},
                                    "id": "tool_call234",
                                    "type": "tool_call",
                                },
                                {
                                    "name": "search_api",
                                    "args": {"query": "a third one", "idx": 1},
                                    "id": "tool_call567",
                                    "type": "tool_call",
                                },
                            ],
                        )
                    },
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 2, AnyStr())
                ),
                PregelTask(
                    AnyStr(), "tools", (PUSH, ("__pregel_pull", "agent"), 3, AnyStr())
                ),
            ),
            next=("tools", "tools"),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 2,
                "writes": {
                    "tools": {
                        "messages": _AnyIdToolMessage(
                            content="result for a different query",
                            name="search_api",
                            tool_call_id="tool_call123",
                        ),
                    },
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )

        await app_w_interrupt.aupdate_state(
            config,
            {"messages": AIMessage(content="answer", id="ai2")},
        )

        # replaces message even if object identity is different, as long as id is the same
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(content="what is weather in sf"),
                    AIMessage(
                        id="ai1",
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            },
                        ],
                    ),
                    _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    ),
                    AIMessage(content="answer", id="ai2"),
                ]
            },
            tasks=(),
            next=(),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 3,
                "writes": {
                    "agent": {
                        "messages": AIMessage(content="answer", id="ai2"),
                    }
                },
                "thread_id": "2",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_message_graph(checkpointer_name: str) -> None:
    """End-to-end MessageGraph test with a scripted fake tool-calling model.

    Covers: plain ainvoke/astream through an agent->tools loop, then the same
    graph compiled with a checkpointer and interrupt_after=["agent"] to verify
    interrupt/resume, state inspection, and message replacement-by-id via
    aupdate_state. Parametrized over every async checkpointer backend.
    """
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_core.tools import tool
    # Minimal fake chat model; bind_functions is a no-op so the agent node can
    # be used directly as a runnable.
    class FakeFuntionChatModel(FakeMessagesListChatModel):
        def bind_functions(self, functions: list):
            return self
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"
    tools = [search_api]
    # Scripted responses: two tool-call turns ("query", "another"), then a
    # plain final answer — drives exactly two agent->tools cycles.
    model = FakeFuntionChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            ),
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            ),
            AIMessage(content="answer", id="ai3"),
        ]
    )
    # Define the function that determines whether to continue or not
    def should_continue(messages):
        last_message = messages[-1]
        # If there is no function call, then we finish
        if not last_message.tool_calls:
            return "end"
        # Otherwise if there is, we continue
        else:
            return "continue"
    # Define a new graph
    workflow = MessageGraph()
    # Define the two nodes we will cycle between
    workflow.add_node("agent", model)
    workflow.add_node("tools", ToolNode(tools))
    # Set the entrypoint as `agent`
    # This means that this node is the first one called
    workflow.set_entry_point("agent")
    # We now add a conditional edge
    workflow.add_conditional_edges(
        # First, we define the start node. We use `agent`.
        # This means these are the edges taken after the `agent` node is called.
        "agent",
        # Next, we pass in the function that will determine which node is called next.
        should_continue,
        # Finally we pass in a mapping.
        # The keys are strings, and the values are other nodes.
        # END is a special node marking that the graph should finish.
        # What will happen is we will call `should_continue`, and then the output of that
        # will be matched against the keys in this mapping.
        # Based on which one it matches, that node will then be called.
        {
            # If `tools`, then we call the tool node.
            "continue": "tools",
            # Otherwise we finish.
            "end": END,
        },
    )
    # We now add a normal edge from `tools` to `agent`.
    # This means that after `tools` is called, `agent` node is called next.
    workflow.add_edge("tools", "agent")
    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
    # meaning you can use it as you would any other runnable
    app = workflow.compile()
    # Full run: human -> ai1(tool call) -> tool result -> ai2(tool call)
    # -> tool result -> ai3 final answer.
    assert await app.ainvoke(HumanMessage(content="what is weather in sf")) == [
        _AnyIdHumanMessage(
            content="what is weather in sf",
        ),
        AIMessage(
            content="",
            tool_calls=[
                {
                    "id": "tool_call123",
                    "name": "search_api",
                    "args": {"query": "query"},
                }
            ],
            id="ai1",  # respects ids passed in
        ),
        _AnyIdToolMessage(
            content="result for query",
            name="search_api",
            tool_call_id="tool_call123",
        ),
        AIMessage(
            content="",
            tool_calls=[
                {
                    "id": "tool_call456",
                    "name": "search_api",
                    "args": {"query": "another"},
                }
            ],
            id="ai2",
        ),
        _AnyIdToolMessage(
            content="result for another",
            name="search_api",
            tool_call_id="tool_call456",
        ),
        AIMessage(content="answer", id="ai3"),
    ]
    # Same run via astream: one update chunk per node execution, in order.
    assert [
        c async for c in app.astream([HumanMessage(content="what is weather in sf")])
    ] == [
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    }
                ],
                id="ai1",
            )
        },
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for query",
                    name="search_api",
                    tool_call_id="tool_call123",
                )
            ]
        },
        {
            "agent": AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call456",
                        "name": "search_api",
                        "args": {"query": "another"},
                    }
                ],
                id="ai2",
            )
        },
        {
            "tools": [
                _AnyIdToolMessage(
                    content="result for another",
                    name="search_api",
                    tool_call_id="tool_call456",
                )
            ]
        },
        {"agent": AIMessage(content="answer", id="ai3")},
    ]
    # Recompile with a checkpointer and pause after every "agent" step to test
    # interrupt, state inspection, update, and resume behavior.
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["agent"],
        )
        config = {"configurable": {"thread_id": "1"}}
        # First stream stops right after the agent's first tool-call message.
        assert [
            c
            async for c in app_w_interrupt.astream(
                HumanMessage(content="what is weather in sf"), config
            )
        ] == [
            {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "query"},
                        }
                    ],
                    id="ai1",
                )
            },
            {"__interrupt__": ()},
        ]
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        # Snapshot after the interrupt: "tools" is the pending next task.
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values=[
                _AnyIdHumanMessage(content="what is weather in sf"),
                AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "query"},
                        }
                    ],
                    id="ai1",
                ),
            ],
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {
                    "agent": AIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "query"},
                            }
                        ],
                        id="ai1",
                    )
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # modify ai message
        last_message = (await app_w_interrupt.aget_state(config)).values[-1]
        last_message.tool_calls[0]["args"] = {"query": "a different query"}
        await app_w_interrupt.aupdate_state(config, last_message)
        # message was replaced instead of appended
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values=[
                _AnyIdHumanMessage(content="what is weather in sf"),
                AIMessage(
                    content="",
                    id="ai1",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "a different query"},
                        }
                    ],
                ),
            ],
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 2,
                "writes": {
                    "agent": AIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call123",
                                "name": "search_api",
                                "args": {"query": "a different query"},
                            }
                        ],
                        id="ai1",
                    )
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # Resume (input=None): tool runs with the edited query, then the agent
        # emits its second tool call and we interrupt again.
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {
                "tools": [
                    _AnyIdToolMessage(
                        content="result for a different query",
                        name="search_api",
                        tool_call_id="tool_call123",
                    )
                ]
            },
            {
                "agent": AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call456",
                            "name": "search_api",
                            "args": {"query": "another"},
                        }
                    ],
                    id="ai2",
                )
            },
            {"__interrupt__": ()},
        ]
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values=[
                _AnyIdHumanMessage(content="what is weather in sf"),
                AIMessage(
                    content="",
                    id="ai1",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "a different query"},
                        }
                    ],
                ),
                _AnyIdToolMessage(
                    content="result for a different query",
                    name="search_api",
                    tool_call_id="tool_call123",
                ),
                AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "id": "tool_call456",
                            "name": "search_api",
                            "args": {"query": "another"},
                        }
                    ],
                    id="ai2",
                ),
            ],
            tasks=(PregelTask(AnyStr(), "tools", (PULL, "tools")),),
            next=("tools",),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 4,
                "writes": {
                    "agent": AIMessage(
                        content="",
                        tool_calls=[
                            {
                                "id": "tool_call456",
                                "name": "search_api",
                                "args": {"query": "another"},
                            }
                        ],
                        id="ai2",
                    )
                },
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
        # Overwrite ai2 with a final answer (no tool calls) so the graph ends.
        await app_w_interrupt.aupdate_state(
            config,
            AIMessage(content="answer", id="ai2"),
        )
        # replaces message even if object identity is different, as long as id is the same
        tup = await app_w_interrupt.checkpointer.aget_tuple(config)
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values=[
                _AnyIdHumanMessage(content="what is weather in sf"),
                AIMessage(
                    content="",
                    id="ai1",
                    tool_calls=[
                        {
                            "id": "tool_call123",
                            "name": "search_api",
                            "args": {"query": "a different query"},
                        }
                    ],
                ),
                _AnyIdToolMessage(
                    content="result for a different query",
                    name="search_api",
                    tool_call_id="tool_call123",
                ),
                AIMessage(content="answer", id="ai2"),
            ],
            tasks=(),
            next=(),
            config=tup.config,
            created_at=tup.checkpoint["ts"],
            metadata={
                "parents": {},
                "source": "update",
                "step": 5,
                "writes": {"agent": AIMessage(content="answer", id="ai2")},
                "thread_id": "1",
            },
            parent_config=[
                c async for c in app_w_interrupt.checkpointer.alist(config, limit=2)
            ][-1].config,
        )
async def test_in_one_fan_out_out_one_graph_state() -> None:
    """Fan-out/fan-in StateGraph: rewrite_query -> (retriever_one | retriever_two) -> qa.

    Verifies the merged "docs" channel (Annotated with operator.add) across the
    parallel retrievers, and the exact event sequences for stream_mode
    "updates" (default), "values", and the combined ["values","updates","debug"].
    retriever_one sleeps 0.1s so retriever_two finishes first in the stream.
    """
    def sorted_add(x: list[str], y: list[str]) -> list[str]:
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], operator.add]
    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    async def retriever_one(data: State) -> State:
        # Sleep so retriever_two reliably completes first within the same step.
        await asyncio.sleep(0.1)
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge("retriever_one", "qa")
    workflow.add_edge("retriever_two", "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    # Final state: docs from both retrievers merged via operator.add.
    assert await app.ainvoke({"query": "what is weather in sf"}) == {
        "query": "query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    # Default stream (updates): retriever_two's chunk arrives before
    # retriever_one's because of the sleep above.
    assert [c async for c in app.astream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    # stream_mode="values": full state after each superstep (both retrievers
    # land in a single step, so their docs appear together).
    assert [
        c
        async for c in app.astream(
            {"query": "what is weather in sf"}, stream_mode="values"
        )
    ] == [
        {"query": "what is weather in sf", "docs": []},
        {"query": "query: what is weather in sf", "docs": []},
        {
            "query": "query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4"],
        },
        {
            "query": "query: what is weather in sf",
            "docs": ["doc1", "doc2", "doc3", "doc4"],
            "answer": "doc1,doc2,doc3,doc4",
        },
    ]
    # Combined stream modes: each chunk is a (mode, payload) tuple, with debug
    # "task"/"task_result" events interleaved around the values/updates.
    assert [
        c
        async for c in app.astream(
            {"query": "what is weather in sf"},
            stream_mode=["values", "updates", "debug"],
        )
    ] == [
        ("values", {"query": "what is weather in sf", "docs": []}),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 1,
                "payload": {
                    "id": AnyStr(),
                    "name": "rewrite_query",
                    "input": {"query": "what is weather in sf", "docs": []},
                    "triggers": ["start:rewrite_query"],
                },
            },
        ),
        ("updates", {"rewrite_query": {"query": "query: what is weather in sf"}}),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 1,
                "payload": {
                    "id": AnyStr(),
                    "name": "rewrite_query",
                    "result": [("query", "query: what is weather in sf")],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        ("values", {"query": "query: what is weather in sf", "docs": []}),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_one",
                    "input": {"query": "query: what is weather in sf", "docs": []},
                    "triggers": ["rewrite_query"],
                },
            },
        ),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_two",
                    "input": {"query": "query: what is weather in sf", "docs": []},
                    "triggers": ["rewrite_query"],
                },
            },
        ),
        (
            "updates",
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
        ),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_two",
                    "result": [("docs", ["doc3", "doc4"])],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "updates",
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
        ),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 2,
                "payload": {
                    "id": AnyStr(),
                    "name": "retriever_one",
                    "result": [("docs", ["doc1", "doc2"])],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "values",
            {
                "query": "query: what is weather in sf",
                "docs": ["doc1", "doc2", "doc3", "doc4"],
            },
        ),
        (
            "debug",
            {
                "type": "task",
                "timestamp": AnyStr(),
                "step": 3,
                "payload": {
                    "id": AnyStr(),
                    "name": "qa",
                    "input": {
                        "query": "query: what is weather in sf",
                        "docs": ["doc1", "doc2", "doc3", "doc4"],
                    },
                    "triggers": ["retriever_one", "retriever_two"],
                },
            },
        ),
        ("updates", {"qa": {"answer": "doc1,doc2,doc3,doc4"}}),
        (
            "debug",
            {
                "type": "task_result",
                "timestamp": AnyStr(),
                "step": 3,
                "payload": {
                    "id": AnyStr(),
                    "name": "qa",
                    "result": [("answer", "doc1,doc2,doc3,doc4")],
                    "error": None,
                    "interrupts": [],
                },
            },
        ),
        (
            "values",
            {
                "query": "query: what is weather in sf",
                "answer": "doc1,doc2,doc3,doc4",
                "docs": ["doc1", "doc2", "doc3", "doc4"],
            },
        ),
    ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_start_branch_then(checkpointer_name: str) -> None:
    """Conditional entry point with then=END, plus SharedValue cross-thread state.

    A market-based branch picks tool_two_slow (DE) or tool_two_fast (US) as the
    entry node. With a checkpointer + interrupt_before, verifies stop/resume,
    metadata per checkpoint, aupdate_state appending to an operator.add channel,
    and that SharedValue.on("assistant_id") data is shared across threads with
    the same assistant_id ("a") but isolated for a different one ("b").
    """
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
        market: str
        # Cross-thread channels scoped by assistant_id via the store.
        shared: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")]
        other: Annotated[dict[str, dict[str, Any]], SharedValue.on("assistant_id")]
    def assert_shared_value(data: State, config: RunnableConfig) -> State:
        # Asserts what each thread should observe in the shared channel, and
        # only thread "1" writes to it; returns the writes to merge into state.
        assert "shared" in data
        if thread_id := config["configurable"].get("thread_id"):
            if thread_id == "1":
                # this is the first thread, so should not see a value
                assert data["shared"] == {}
                return {"shared": {"1": {"hello": "world"}}, "other": {"2": {1: 2}}}
            elif thread_id == "2":
                # this should get value saved by thread 1
                assert data["shared"] == {"1": {"hello": "world"}}
            elif thread_id == "3":
                # this is a different assistant, so should not see previous value
                assert data["shared"] == {}
        return {}
    def tool_two_slow(data: State, config: RunnableConfig) -> State:
        return {"my_key": " slow", **assert_shared_value(data, config)}
    def tool_two_fast(data: State, config: RunnableConfig) -> State:
        return {"my_key": " fast", **assert_shared_value(data, config)}
    tool_two_graph = StateGraph(State)
    tool_two_graph.add_node("tool_two_slow", tool_two_slow)
    tool_two_graph.add_node("tool_two_fast", tool_two_fast)
    # Entry node chosen by market; then=END finishes after whichever ran.
    tool_two_graph.set_conditional_entry_point(
        lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast", then=END
    )
    tool_two = tool_two_graph.compile()
    # Without a checkpointer: each invocation runs straight through its branch.
    assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}) == {
        "my_key": "value slow",
        "market": "DE",
    }
    assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == {
        "my_key": "value fast",
        "market": "US",
    }
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # Store backs the SharedValue channels; interrupt before either branch node.
        tool_two = tool_two_graph.compile(
            store=InMemoryStore(),
            checkpointer=checkpointer,
            interrupt_before=["tool_two_fast", "tool_two_slow"],
        )
        # missing thread_id
        with pytest.raises(ValueError, match="thread_id"):
            await tool_two.ainvoke({"my_key": "value", "market": "DE"})
        thread1 = {"configurable": {"thread_id": "1", "assistant_id": "a"}}
        # stop when about to enter node
        assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, thread1) == {
            "my_key": "value",
            "market": "DE",
        }
        # Two checkpoints so far: the input (-1) and step 0 (pre-node).
        assert [c.metadata async for c in tool_two.checkpointer.alist(thread1)] == [
            {
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "assistant_id": "a",
                "thread_id": "1",
            },
            {
                "parents": {},
                "source": "input",
                "step": -1,
                "writes": {"__start__": {"my_key": "value", "market": "DE"}},
                "assistant_id": "a",
                "thread_id": "1",
            },
        ]
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value", "market": "DE"},
            tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
            next=("tool_two_slow",),
            config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "assistant_id": "a",
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
        # resume, for same result as above
        assert await tool_two.ainvoke(None, thread1, debug=1) == {
            "my_key": "value slow",
            "market": "DE",
        }
        assert await tool_two.aget_state(thread1) == StateSnapshot(
            values={"my_key": "value slow", "market": "DE"},
            tasks=(),
            next=(),
            config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {"tool_two_slow": {"my_key": " slow"}},
                "assistant_id": "a",
                "thread_id": "1",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread1, limit=2)
            ][-1].config,
        )
        # Thread 2 (same assistant "a"): sees the shared value thread 1 wrote.
        thread2 = {"configurable": {"thread_id": "2", "assistant_id": "a"}}
        # stop when about to enter node
        assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == {
            "my_key": "value",
            "market": "US",
        }
        assert await tool_two.aget_state(thread2) == StateSnapshot(
            values={"my_key": "value", "market": "US"},
            tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
            next=("tool_two_fast",),
            config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "assistant_id": "a",
                "thread_id": "2",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread2, limit=2)
            ][-1].config,
        )
        # resume, for same result as above
        assert await tool_two.ainvoke(None, thread2, debug=1) == {
            "my_key": "value fast",
            "market": "US",
        }
        assert await tool_two.aget_state(thread2) == StateSnapshot(
            values={"my_key": "value fast", "market": "US"},
            tasks=(),
            next=(),
            config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 1,
                "writes": {"tool_two_fast": {"my_key": " fast"}},
                "assistant_id": "a",
                "thread_id": "2",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread2, limit=2)
            ][-1].config,
        )
        # Thread 3 (different assistant "b"): shared channel is isolated/empty.
        thread3 = {"configurable": {"thread_id": "3", "assistant_id": "b"}}
        # stop when about to enter node
        assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread3) == {
            "my_key": "value",
            "market": "US",
        }
        assert await tool_two.aget_state(thread3) == StateSnapshot(
            values={"my_key": "value", "market": "US"},
            tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
            next=("tool_two_fast",),
            config=(await tool_two.checkpointer.aget_tuple(thread3)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 0,
                "writes": None,
                "assistant_id": "b",
                "thread_id": "3",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread3, limit=2)
            ][-1].config,
        )
        # update state
        await tool_two.aupdate_state(thread3, {"my_key": "key"})  # appends to my_key
        assert await tool_two.aget_state(thread3) == StateSnapshot(
            values={"my_key": "valuekey", "market": "US"},
            tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
            next=("tool_two_fast",),
            config=(await tool_two.checkpointer.aget_tuple(thread3)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "update",
                "step": 1,
                "writes": {START: {"my_key": "key"}},
                "assistant_id": "b",
                "thread_id": "3",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread3, limit=2)
            ][-1].config,
        )
        # resume, for same result as above
        assert await tool_two.ainvoke(None, thread3, debug=1) == {
            "my_key": "valuekey fast",
            "market": "US",
        }
        assert await tool_two.aget_state(thread3) == StateSnapshot(
            values={"my_key": "valuekey fast", "market": "US"},
            tasks=(),
            next=(),
            config=(await tool_two.checkpointer.aget_tuple(thread3)).config,
            created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[
                "ts"
            ],
            metadata={
                "parents": {},
                "source": "loop",
                "step": 2,
                "writes": {"tool_two_fast": {"my_key": " fast"}},
                "assistant_id": "b",
                "thread_id": "3",
            },
            parent_config=[
                c async for c in tool_two.checkpointer.alist(thread3, limit=2)
            ][-1].config,
        )
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_branch_then(checkpointer_name: str) -> None:
class State(TypedDict):
my_key: Annotated[str, operator.add]
market: str
tool_two_graph = StateGraph(State)
tool_two_graph.set_entry_point("prepare")
tool_two_graph.set_finish_point("finish")
tool_two_graph.add_conditional_edges(
source="prepare",
path=lambda s: "tool_two_slow" if s["market"] == "DE" else "tool_two_fast",
then="finish",
)
tool_two_graph.add_node("prepare", lambda s: {"my_key": " prepared"})
tool_two_graph.add_node("tool_two_slow", lambda s: {"my_key": " slow"})
tool_two_graph.add_node("tool_two_fast", lambda s: {"my_key": " fast"})
tool_two_graph.add_node("finish", lambda s: {"my_key": " finished"})
tool_two = tool_two_graph.compile()
assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, debug=1) == {
"my_key": "value prepared slow finished",
"market": "DE",
}
assert await tool_two.ainvoke({"my_key": "value", "market": "US"}) == {
"my_key": "value prepared fast finished",
"market": "US",
}
async with awith_checkpointer(checkpointer_name) as checkpointer:
# test stream_mode=debug
tool_two = tool_two_graph.compile(checkpointer=checkpointer)
thread10 = {"configurable": {"thread_id": "10"}}
assert [
c
async for c in tool_two.astream(
{"my_key": "value", "market": "DE"}, thread10, stream_mode="debug"
)
] == [
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": -1,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {"my_key": ""},
"metadata": {
"parents": {},
"source": "input",
"step": -1,
"writes": {"__start__": {"my_key": "value", "market": "DE"}},
"thread_id": "10",
},
"parent_config": None,
"next": ["__start__"],
"tasks": [
{
"id": AnyStr(),
"name": "__start__",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 0,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "10",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": ["prepare"],
"tasks": [
{
"id": AnyStr(),
"name": "prepare",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "task",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"id": AnyStr(),
"name": "prepare",
"input": {"my_key": "value", "market": "DE"},
"triggers": ["start:prepare"],
},
},
{
"type": "task_result",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"id": AnyStr(),
"name": "prepare",
"result": [("my_key", " prepared")],
"error": None,
"interrupts": [],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value prepared",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "10",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": ["tool_two_slow"],
"tasks": [
{
"id": AnyStr(),
"name": "tool_two_slow",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "task",
"timestamp": AnyStr(),
"step": 2,
"payload": {
"id": AnyStr(),
"name": "tool_two_slow",
"input": {"my_key": "value prepared", "market": "DE"},
"triggers": ["branch:prepare:condition:tool_two_slow"],
},
},
{
"type": "task_result",
"timestamp": AnyStr(),
"step": 2,
"payload": {
"id": AnyStr(),
"name": "tool_two_slow",
"result": [("my_key", " slow")],
"error": None,
"interrupts": [],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 2,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value prepared slow",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 2,
"writes": {"tool_two_slow": {"my_key": " slow"}},
"thread_id": "10",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": ["finish"],
"tasks": [
{
"id": AnyStr(),
"name": "finish",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "task",
"timestamp": AnyStr(),
"step": 3,
"payload": {
"id": AnyStr(),
"name": "finish",
"input": {"my_key": "value prepared slow", "market": "DE"},
"triggers": ["branch:prepare:condition::then"],
},
},
{
"type": "task_result",
"timestamp": AnyStr(),
"step": 3,
"payload": {
"id": AnyStr(),
"name": "finish",
"result": [("my_key", " finished")],
"error": None,
"interrupts": [],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 3,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value prepared slow finished",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "10",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "10"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "10",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": [],
"tasks": [],
},
},
]
tool_two = tool_two_graph.compile(
checkpointer=checkpointer,
interrupt_before=["tool_two_fast", "tool_two_slow"],
)
# missing thread_id
with pytest.raises(ValueError, match="thread_id"):
await tool_two.ainvoke({"my_key": "value", "market": "DE"})
thread1 = {"configurable": {"thread_id": "11"}}
# stop when about to enter node
assert [
c
async for c in tool_two.astream(
{"my_key": "value", "market": "DE"}, thread1, stream_mode="debug"
)
] == [
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": -1,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "11"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "11",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {"my_key": ""},
"metadata": {
"parents": {},
"source": "input",
"step": -1,
"writes": {"__start__": {"my_key": "value", "market": "DE"}},
"thread_id": "11",
},
"parent_config": None,
"next": ["__start__"],
"tasks": [
{
"id": AnyStr(),
"name": "__start__",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 0,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "11"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "11",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 0,
"writes": None,
"thread_id": "11",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "11"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "11",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": ["prepare"],
"tasks": [
{
"id": AnyStr(),
"name": "prepare",
"interrupts": (),
"state": None,
}
],
},
},
{
"type": "task",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"id": AnyStr(),
"name": "prepare",
"input": {"my_key": "value", "market": "DE"},
"triggers": ["start:prepare"],
},
},
{
"type": "task_result",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"id": AnyStr(),
"name": "prepare",
"result": [("my_key", " prepared")],
"error": None,
"interrupts": [],
},
},
{
"type": "checkpoint",
"timestamp": AnyStr(),
"step": 1,
"payload": {
"config": {
"tags": [],
"metadata": {"thread_id": "11"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "11",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"values": {
"my_key": "value prepared",
"market": "DE",
},
"metadata": {
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "11",
},
"parent_config": {
"tags": [],
"metadata": {"thread_id": "11"},
"callbacks": None,
"recursion_limit": 25,
"configurable": {
"thread_id": "11",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
},
},
"next": ["tool_two_slow"],
"tasks": [
{
"id": AnyStr(),
"name": "tool_two_slow",
"interrupts": (),
"state": None,
}
],
},
},
]
assert await tool_two.aget_state(thread1) == StateSnapshot(
values={"my_key": "value prepared", "market": "DE"},
tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
next=("tool_two_slow",),
config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "11",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread1, limit=2)
][-1].config,
)
# resume, for same result as above
assert await tool_two.ainvoke(None, thread1, debug=1) == {
"my_key": "value prepared slow finished",
"market": "DE",
}
assert await tool_two.aget_state(thread1) == StateSnapshot(
values={"my_key": "value prepared slow finished", "market": "DE"},
tasks=(),
next=(),
config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "11",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread1, limit=2)
][-1].config,
)
thread2 = {"configurable": {"thread_id": "12"}}
# stop when about to enter node
assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == {
"my_key": "value prepared",
"market": "US",
}
assert await tool_two.aget_state(thread2) == StateSnapshot(
values={"my_key": "value prepared", "market": "US"},
tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
next=("tool_two_fast",),
config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "12",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread2, limit=2)
][-1].config,
)
# resume, for same result as above
assert await tool_two.ainvoke(None, thread2, debug=1) == {
"my_key": "value prepared fast finished",
"market": "US",
}
assert await tool_two.aget_state(thread2) == StateSnapshot(
values={"my_key": "value prepared fast finished", "market": "US"},
tasks=(),
next=(),
config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "12",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread2, limit=2)
][-1].config,
)
tool_two = tool_two_graph.compile(
checkpointer=checkpointer, interrupt_after=["prepare"]
)
# missing thread_id
with pytest.raises(ValueError, match="thread_id"):
await tool_two.ainvoke({"my_key": "value", "market": "DE"})
thread1 = {"configurable": {"thread_id": "21"}}
# stop when about to enter node
assert await tool_two.ainvoke({"my_key": "value", "market": "DE"}, thread1) == {
"my_key": "value prepared",
"market": "DE",
}
assert await tool_two.aget_state(thread1) == StateSnapshot(
values={"my_key": "value prepared", "market": "DE"},
tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
next=("tool_two_slow",),
config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "21",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread1, limit=2)
][-1].config,
)
# resume, for same result as above
assert await tool_two.ainvoke(None, thread1, debug=1) == {
"my_key": "value prepared slow finished",
"market": "DE",
}
assert await tool_two.aget_state(thread1) == StateSnapshot(
values={"my_key": "value prepared slow finished", "market": "DE"},
tasks=(),
next=(),
config=(await tool_two.checkpointer.aget_tuple(thread1)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread1)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "21",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread1, limit=2)
][-1].config,
)
thread2 = {"configurable": {"thread_id": "22"}}
# stop when about to enter node
assert await tool_two.ainvoke({"my_key": "value", "market": "US"}, thread2) == {
"my_key": "value prepared",
"market": "US",
}
assert await tool_two.aget_state(thread2) == StateSnapshot(
values={"my_key": "value prepared", "market": "US"},
tasks=(PregelTask(AnyStr(), "tool_two_fast", (PULL, "tool_two_fast")),),
next=("tool_two_fast",),
config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "22",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread2, limit=2)
][-1].config,
)
# resume, for same result as above
assert await tool_two.ainvoke(None, thread2, debug=1) == {
"my_key": "value prepared fast finished",
"market": "US",
}
assert await tool_two.aget_state(thread2) == StateSnapshot(
values={"my_key": "value prepared fast finished", "market": "US"},
tasks=(),
next=(),
config=(await tool_two.checkpointer.aget_tuple(thread2)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread2)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "22",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread2, limit=2)
][-1].config,
)
thread3 = {"configurable": {"thread_id": "23"}}
# update an empty thread before first run
uconfig = await tool_two.aupdate_state(
thread3, {"my_key": "key", "market": "DE"}
)
# check current state
assert await tool_two.aget_state(thread3) == StateSnapshot(
values={"my_key": "key", "market": "DE"},
tasks=(PregelTask(AnyStr(), "prepare", (PULL, "prepare")),),
next=("prepare",),
config=uconfig,
created_at=AnyStr(),
metadata={
"parents": {},
"source": "update",
"step": 0,
"writes": {START: {"my_key": "key", "market": "DE"}},
"thread_id": "23",
},
parent_config=None,
)
# run from this point
assert await tool_two.ainvoke(None, thread3) == {
"my_key": "key prepared",
"market": "DE",
}
# get state after first node
assert await tool_two.aget_state(thread3) == StateSnapshot(
values={"my_key": "key prepared", "market": "DE"},
tasks=(PregelTask(AnyStr(), "tool_two_slow", (PULL, "tool_two_slow")),),
next=("tool_two_slow",),
config=(await tool_two.checkpointer.aget_tuple(thread3)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 1,
"writes": {"prepare": {"my_key": " prepared"}},
"thread_id": "23",
},
parent_config=uconfig,
)
# resume, for same result as above
assert await tool_two.ainvoke(None, thread3, debug=1) == {
"my_key": "key prepared slow finished",
"market": "DE",
}
assert await tool_two.aget_state(thread3) == StateSnapshot(
values={"my_key": "key prepared slow finished", "market": "DE"},
tasks=(),
next=(),
config=(await tool_two.checkpointer.aget_tuple(thread3)).config,
created_at=(await tool_two.checkpointer.aget_tuple(thread3)).checkpoint[
"ts"
],
metadata={
"parents": {},
"source": "loop",
"step": 3,
"writes": {"finish": {"my_key": " finished"}},
"thread_id": "23",
},
parent_config=[
c async for c in tool_two.checkpointer.alist(thread3, limit=2)
][-1].config,
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_in_one_fan_out_state_graph_waiting_edge(checkpointer_name: str) -> None:
    """Fan-out/fan-in graph: ``qa`` sits behind a multi-source (waiting) edge
    and must run only after both retrievers have completed."""

    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs instead of
        # plain additions, in which case removals are applied first.
        if isinstance(y[0], tuple):
            for removed, _ in y:
                x.remove(removed)
            y = [pair[1] for pair in y]
        return sorted(operator.add(x, y))

    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]

    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}

    async def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}

    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}

    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}

    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}

    builder = StateGraph(State)
    for node_name, node_fn in [
        ("rewrite_query", rewrite_query),
        ("analyzer_one", analyzer_one),
        ("retriever_one", retriever_one),
        ("retriever_two", retriever_two),
        ("qa", qa),
    ]:
        builder.add_node(node_name, node_fn)
    builder.set_entry_point("rewrite_query")
    builder.add_edge("rewrite_query", "analyzer_one")
    builder.add_edge("analyzer_one", "retriever_one")
    builder.add_edge("rewrite_query", "retriever_two")
    # named barrier: qa fires only when both retrievers have written
    builder.add_edge(["retriever_one", "retriever_two"], "qa")
    builder.set_finish_point("qa")

    graph = builder.compile()

    final_state = await graph.ainvoke({"query": "what is weather in sf"})
    assert final_state == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }

    streamed = [c async for c in graph.astream({"query": "what is weather in sf"})]
    assert streamed == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        interruptible = builder.compile(
            checkpointer=checkpointer,
            interrupt_after=["retriever_one"],
        )
        thread = {"configurable": {"thread_id": "1"}}
        # first run stops at the interrupt right after retriever_one
        first_run = [
            c
            async for c in interruptible.astream(
                {"query": "what is weather in sf"}, thread
            )
        ]
        assert first_run == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
        # resuming with None input finishes the remaining qa step
        resumed = [c async for c in interruptible.astream(None, thread)]
        assert resumed == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_in_one_fan_out_state_graph_waiting_edge_via_branch(
    snapshot: SnapshotAssertion, checkpointer_name: str
) -> None:
    """Fan-out graph where the rewrite_query -> retriever_two hop goes through a
    conditional edge instead of a plain edge; the waiting edge into ``qa`` must
    still act as a barrier over both retrievers."""
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs, in which
        # case removals are applied before merging the additions.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    async def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        # sleeps so that retriever_one finishes first in the stream order
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    # conditional edge that always picks retriever_two — exercises the branch
    # code path rather than a static edge
    workflow.add_conditional_edges(
        "rewrite_query", lambda _: "retriever_two", {"retriever_two": "retriever_two"}
    )
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert await app.ainvoke({"query": "what is weather in sf"}, debug=True) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    assert [c async for c in app.astream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        # same graph with an interrupt after retriever_one: run stops there,
        # then resumes to completion when invoked with None
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["retriever_one"],
        )
        config = {"configurable": {"thread_id": "1"}}
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"query": "what is weather in sf"}, config
            )
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class(
    snapshot: SnapshotAssertion, mocker: MockerFixture, checkpointer_name: str
) -> None:
    """Fan-out/waiting-edge graph using a pydantic v1 BaseModel state with a
    Context-managed httpx client; verifies the context is entered and exited
    exactly once per graph run, and that pydantic input validation applies."""
    from pydantic.v1 import BaseModel, ValidationError
    setup = mocker.Mock()
    teardown = mocker.Mock()
    @asynccontextmanager
    async def assert_ctx_once() -> AsyncIterator[None]:
        # Wraps each graph run: asserts the Context manager ran setup/teardown
        # exactly once during the wrapped block, then resets the counters.
        assert setup.call_count == 0
        assert teardown.call_count == 0
        try:
            yield
        finally:
            assert setup.call_count == 1
            assert teardown.call_count == 1
            setup.reset_mock()
            teardown.reset_mock()
    @asynccontextmanager
    async def make_httpx_client() -> AsyncIterator[httpx.AsyncClient]:
        # Factory passed to Context(); records setup/teardown via the mocks.
        setup()
        async with httpx.AsyncClient() as client:
            try:
                yield client
            finally:
                teardown()
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs, in which
        # case removals are applied before merging the additions.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(BaseModel):
        class Config:
            # required so the httpx.AsyncClient field is accepted by pydantic
            arbitrary_types_allowed = True
        query: str
        answer: Optional[str] = None
        docs: Annotated[list[str], sorted_add]
        client: Annotated[httpx.AsyncClient, Context(make_httpx_client)]
    class Input(BaseModel):
        query: str
    class Output(BaseModel):
        answer: str
        docs: list[str]
    class StateUpdate(BaseModel):
        query: Optional[str] = None
        answer: Optional[str] = None
        docs: Optional[list[str]] = None
    async def rewrite_query(data: State) -> State:
        return {"query": f"query: {data.query}"}
    async def analyzer_one(data: State) -> State:
        # returns a BaseModel update (not a dict) to cover that code path
        return StateUpdate(query=f"analyzed: {data.query}")
    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data.docs)}
    async def decider(data: State) -> str:
        # branch callbacks should receive the full State model instance
        assert isinstance(data, State)
        return "retriever_two"
    workflow = StateGraph(State, input=Input, output=Output)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_conditional_edges(
        "rewrite_query", decider, {"retriever_two": "retriever_two"}
    )
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    async with assert_ctx_once():
        # invalid input type for `query` must raise before any node runs
        with pytest.raises(ValidationError):
            await app.ainvoke({"query": {}})
    async with assert_ctx_once():
        assert await app.ainvoke({"query": "what is weather in sf"}) == {
            "docs": ["doc1", "doc2", "doc3", "doc4"],
            "answer": "doc1,doc2,doc3,doc4",
        }
    async with assert_ctx_once():
        assert [c async for c in app.astream({"query": "what is weather in sf"})] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["retriever_one"],
        )
        config = {"configurable": {"thread_id": "1"}}
        async with assert_ctx_once():
            assert [
                c
                async for c in app_w_interrupt.astream(
                    {"query": "what is weather in sf"}, config
                )
            ] == [
                {"rewrite_query": {"query": "query: what is weather in sf"}},
                {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
                {"retriever_two": {"docs": ["doc3", "doc4"]}},
                {"retriever_one": {"docs": ["doc1", "doc2"]}},
                {"__interrupt__": ()},
            ]
        async with assert_ctx_once():
            assert [c async for c in app_w_interrupt.astream(None, config)] == [
                {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
            ]
        assert await app_w_interrupt.aget_state(config) == StateSnapshot(
            values={
                "query": "analyzed: query: what is weather in sf",
                "answer": "doc1,doc2,doc3,doc4",
                "docs": ["doc1", "doc2", "doc3", "doc4"],
            },
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
                "step": 4,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
        # aupdate_state also enters/exits the Context exactly once
        async with assert_ctx_once():
            assert await app_w_interrupt.aupdate_state(
                config, {"docs": ["doc5"]}, as_node="rewrite_query"
            ) == {
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_id": AnyStr(),
                    "checkpoint_ns": "",
                }
            }
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2(
    snapshot: SnapshotAssertion, checkpointer_name: str
) -> None:
    """Fan-out/waiting-edge graph with a pydantic v2 BaseModel state, including
    a nested model field; checks schema snapshots, input validation, and the
    interrupt/resume flow with a checkpointer."""
    from pydantic import BaseModel, ValidationError
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs, in which
        # case removals are applied before merging the additions.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class InnerObject(BaseModel):
        # nested model exercised through input coercion ({"yo": 1})
        yo: int
    class State(BaseModel):
        query: str
        inner: InnerObject
        answer: Optional[str] = None
        docs: Annotated[list[str], sorted_add]
    class StateUpdate(BaseModel):
        query: Optional[str] = None
        answer: Optional[str] = None
        docs: Optional[list[str]] = None
    async def rewrite_query(data: State) -> State:
        return {"query": f"query: {data.query}"}
    async def analyzer_one(data: State) -> State:
        # returns a BaseModel update (not a dict) to cover that code path
        return StateUpdate(query=f"analyzed: {data.query}")
    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data.docs)}
    async def decider(data: State) -> str:
        # branch callbacks should receive the full State model instance
        assert isinstance(data, State)
        return "retriever_two"
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_conditional_edges(
        "rewrite_query", decider, {"retriever_two": "retriever_two"}
    )
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    app = workflow.compile()
    if SHOULD_CHECK_SNAPSHOTS:
        # pydantic-derived graph drawing and JSON schemas are snapshot-tested
        assert app.get_graph().draw_mermaid(with_styles=False) == snapshot
        assert app.get_input_schema().model_json_schema() == snapshot
        assert app.get_output_schema().model_json_schema() == snapshot
    with pytest.raises(ValidationError):
        await app.ainvoke({"query": {}})
    assert await app.ainvoke(
        {"query": "what is weather in sf", "inner": {"yo": 1}}
    ) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
        "inner": {"yo": 1},
    }
    assert [
        c
        async for c in app.astream(
            {"query": "what is weather in sf", "inner": {"yo": 1}}
        )
    ] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["retriever_one"],
        )
        config = {"configurable": {"thread_id": "1"}}
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"query": "what is weather in sf", "inner": {"yo": 1}}, config
            )
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
        assert await app_w_interrupt.aupdate_state(
            config, {"docs": ["doc5"]}, as_node="rewrite_query"
        ) == {
            "configurable": {
                "thread_id": "1",
                "checkpoint_id": AnyStr(),
                "checkpoint_ns": "",
            }
        }
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_in_one_fan_out_state_graph_waiting_edge_plus_regular(
    checkpointer_name: str,
) -> None:
    """Waiting edge combined with a regular edge into the same node: ``qa`` is
    triggered early once via rewrite_query -> qa, and again later when both
    retrievers complete — the barrier semantics must survive the early fire."""
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs, in which
        # case removals are applied before merging the additions.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    async def analyzer_one(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"query": f'analyzed: {data["query"]}'}
    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.2)
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_edge("rewrite_query", "analyzer_one")
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge("rewrite_query", "retriever_two")
    workflow.add_edge(["retriever_one", "retriever_two"], "qa")
    workflow.set_finish_point("qa")
    # silly edge, to make sure having been triggered before doesn't break
    # semantics of named barrier (== waiting edges)
    workflow.add_edge("rewrite_query", "qa")
    app = workflow.compile()
    assert await app.ainvoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: what is weather in sf",
        "docs": ["doc1", "doc2", "doc3", "doc4"],
        "answer": "doc1,doc2,doc3,doc4",
    }
    assert [c async for c in app.astream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        # early qa fire via the direct edge: docs is still empty, so answer ""
        {"qa": {"answer": ""}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
    ]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app_w_interrupt = workflow.compile(
            checkpointer=checkpointer,
            interrupt_after=["retriever_one"],
        )
        config = {"configurable": {"thread_id": "1"}}
        assert [
            c
            async for c in app_w_interrupt.astream(
                {"query": "what is weather in sf"}, config
            )
        ] == [
            {"rewrite_query": {"query": "query: what is weather in sf"}},
            {"qa": {"answer": ""}},
            {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
            {"retriever_two": {"docs": ["doc3", "doc4"]}},
            {"retriever_one": {"docs": ["doc1", "doc2"]}},
            {"__interrupt__": ()},
        ]
        assert [c async for c in app_w_interrupt.astream(None, config)] == [
            {"qa": {"answer": "doc1,doc2,doc3,doc4"}},
        ]
async def test_in_one_fan_out_state_graph_waiting_edge_multiple() -> None:
    """Waiting edge fired twice: the graph loops back through rewrite_query
    once (until the query has been analyzed twice), so each retriever's docs
    are accumulated two times."""

    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs instead of
        # plain additions, in which case removals are applied first.
        if isinstance(y[0], tuple):
            for removed, _ in y:
                x.remove(removed)
            y = [pair[1] for pair in y]
        return sorted(operator.add(x, y))

    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]

    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}

    async def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}

    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}

    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}

    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}

    async def decider(data: State) -> None:
        return None

    def decider_cond(data: State) -> str:
        # loop back until the query has been analyzed twice
        if data["query"].count("analyzed") > 1:
            return "qa"
        return "rewrite_query"

    builder = StateGraph(State)
    for node_name, node_fn in [
        ("rewrite_query", rewrite_query),
        ("analyzer_one", analyzer_one),
        ("retriever_one", retriever_one),
        ("retriever_two", retriever_two),
        ("decider", decider),
        ("qa", qa),
    ]:
        builder.add_node(node_name, node_fn)
    builder.set_entry_point("rewrite_query")
    builder.add_edge("rewrite_query", "analyzer_one")
    builder.add_edge("analyzer_one", "retriever_one")
    builder.add_edge("rewrite_query", "retriever_two")
    # named barrier: decider waits for both retrievers on every cycle
    builder.add_edge(["retriever_one", "retriever_two"], "decider")
    builder.add_conditional_edges("decider", decider_cond)
    builder.set_finish_point("qa")

    graph = builder.compile()

    assert await graph.ainvoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: analyzed: query: what is weather in sf",
        "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4",
        "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"],
    }

    streamed = [c async for c in graph.astream({"query": "what is weather in sf"})]
    assert streamed == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}},
        {
            "analyzer_one": {
                "query": "analyzed: query: analyzed: query: what is weather in sf"
            }
        },
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}},
    ]
async def test_in_one_fan_out_state_graph_waiting_edge_multiple_cond_edge() -> None:
    """Same two-cycle waiting-edge loop as the test above, but the fan-out from
    rewrite_query happens through a conditional edge that returns a list of
    target nodes (retriever_picker) instead of static edges."""
    def sorted_add(
        x: list[str], y: Union[list[str], list[tuple[str, str]]]
    ) -> list[str]:
        # Reducer for ``docs``: ``y`` may carry (remove, add) pairs, in which
        # case removals are applied before merging the additions.
        if isinstance(y[0], tuple):
            for rem, _ in y:
                x.remove(rem)
            y = [t[1] for t in y]
        return sorted(operator.add(x, y))
    class State(TypedDict, total=False):
        query: str
        answer: str
        docs: Annotated[list[str], sorted_add]
    async def rewrite_query(data: State) -> State:
        return {"query": f'query: {data["query"]}'}
    async def retriever_picker(data: State) -> list[str]:
        # conditional edge returning multiple targets at once
        return ["analyzer_one", "retriever_two"]
    async def analyzer_one(data: State) -> State:
        return {"query": f'analyzed: {data["query"]}'}
    async def retriever_one(data: State) -> State:
        return {"docs": ["doc1", "doc2"]}
    async def retriever_two(data: State) -> State:
        await asyncio.sleep(0.1)
        return {"docs": ["doc3", "doc4"]}
    async def qa(data: State) -> State:
        return {"answer": ",".join(data["docs"])}
    async def decider(data: State) -> None:
        return None
    def decider_cond(data: State) -> str:
        # loop back until the query has been analyzed twice
        if data["query"].count("analyzed") > 1:
            return "qa"
        else:
            return "rewrite_query"
    workflow = StateGraph(State)
    workflow.add_node("rewrite_query", rewrite_query)
    workflow.add_node("analyzer_one", analyzer_one)
    workflow.add_node("retriever_one", retriever_one)
    workflow.add_node("retriever_two", retriever_two)
    workflow.add_node("decider", decider)
    workflow.add_node("qa", qa)
    workflow.set_entry_point("rewrite_query")
    workflow.add_conditional_edges("rewrite_query", retriever_picker)
    workflow.add_edge("analyzer_one", "retriever_one")
    workflow.add_edge(["retriever_one", "retriever_two"], "decider")
    workflow.add_conditional_edges("decider", decider_cond)
    workflow.set_finish_point("qa")
    app = workflow.compile()
    assert await app.ainvoke({"query": "what is weather in sf"}) == {
        "query": "analyzed: query: analyzed: query: what is weather in sf",
        "answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4",
        "docs": ["doc1", "doc1", "doc2", "doc2", "doc3", "doc3", "doc4", "doc4"],
    }
    assert [c async for c in app.astream({"query": "what is weather in sf"})] == [
        {"rewrite_query": {"query": "query: what is weather in sf"}},
        {"analyzer_one": {"query": "analyzed: query: what is weather in sf"}},
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"rewrite_query": {"query": "query: analyzed: query: what is weather in sf"}},
        {
            "analyzer_one": {
                "query": "analyzed: query: analyzed: query: what is weather in sf"
            }
        },
        {"retriever_two": {"docs": ["doc3", "doc4"]}},
        {"retriever_one": {"docs": ["doc1", "doc2"]}},
        {"decider": None},
        {"qa": {"answer": "doc1,doc1,doc2,doc2,doc3,doc3,doc4,doc4"}},
    ]
async def test_nested_graph(snapshot: SnapshotAssertion) -> None:
    """A compiled subgraph used as a node of an outer graph: exercises ainvoke,
    astream (updates and values modes), astream_events filtering by run_id,
    and composition with another runnable via the | operator."""
    def never_called_fn(state: Any):
        # guards that the `never_called` state value is carried, not invoked
        assert 0, "This function should never be called"
    never_called = RunnableLambda(never_called_fn)
    class InnerState(TypedDict):
        my_key: str
        my_other_key: str
    def up(state: InnerState):
        return {"my_key": state["my_key"] + " there", "my_other_key": state["my_key"]}
    inner = StateGraph(InnerState)
    inner.add_node("up", up)
    inner.set_entry_point("up")
    inner.set_finish_point("up")
    class State(TypedDict):
        my_key: str
        never_called: Any
    async def side(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    # the compiled inner graph becomes a single node of the outer graph
    graph.add_node("inner", inner.compile())
    graph.add_node("side", side)
    graph.set_entry_point("inner")
    graph.add_edge("inner", "side")
    graph.set_finish_point("side")
    app = graph.compile()
    assert await app.ainvoke({"my_key": "my value", "never_called": never_called}) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    assert [
        chunk
        async for chunk in app.astream(
            {"my_key": "my value", "never_called": never_called}
        )
    ] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
    assert [
        chunk
        async for chunk in app.astream(
            {"my_key": "my value", "never_called": never_called}, stream_mode="values"
        )
    ] == [
        {"my_key": "my value", "never_called": never_called},
        {"my_key": "my value there", "never_called": never_called},
        {"my_key": "my value there and back again", "never_called": never_called},
    ]
    times_called = 0
    async for event in app.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
        stream_mode="values",
    ):
        # pin the run_id so we can pick out the root run's end event
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            assert event["data"] == {
                "output": {
                    "my_key": "my value there and back again",
                    "never_called": never_called,
                }
            }
    assert times_called == 1
    times_called = 0
    async for event in app.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
    ):
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            assert event["data"] == {
                "output": {
                    "my_key": "my value there and back again",
                    "never_called": never_called,
                }
            }
    assert times_called == 1
    # the compiled graph composes with other runnables via the | operator
    chain = app | RunnablePassthrough()
    assert await chain.ainvoke(
        {"my_key": "my value", "never_called": never_called}
    ) == {
        "my_key": "my value there and back again",
        "never_called": never_called,
    }
    assert [
        chunk
        async for chunk in chain.astream(
            {"my_key": "my value", "never_called": never_called}
        )
    ] == [
        {"inner": {"my_key": "my value there"}},
        {"side": {"my_key": "my value there and back again"}},
    ]
    times_called = 0
    async for event in chain.astream_events(
        {"my_key": "my value", "never_called": never_called},
        version="v2",
        config={"run_id": UUID(int=0)},
    ):
        if event["event"] == "on_chain_end" and event["run_id"] == str(UUID(int=0)):
            times_called += 1
            # when streamed through a chain, the output is the list of updates
            assert event["data"] == {
                "output": [
                    {"inner": {"my_key": "my value there"}},
                    {"side": {"my_key": "my value there and back again"}},
                ]
            }
    assert times_called == 1
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_stream_subgraphs_during_execution(checkpointer_name: str) -> None:
    """Subgraph updates must be streamed as they are produced, while the
    subgraph is still running — verified by timestamping each chunk against
    the nodes' sleep durations."""
    class InnerState(TypedDict):
        my_key: Annotated[str, operator.add]
        my_other_key: str
    async def inner_1(state: InnerState):
        return {"my_key": "got here", "my_other_key": state["my_key"]}
    async def inner_2(state: InnerState):
        # keeps the subgraph busy so outer_1's update can arrive mid-flight
        await asyncio.sleep(0.5)
        return {
            "my_key": " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    async def outer_1(state: State):
        await asyncio.sleep(0.2)
        return {"my_key": " and parallel"}
    async def outer_2(state: State):
        return {"my_key": " and back again"}
    graph = StateGraph(State)
    graph.add_node("inner", inner.compile())
    graph.add_node("outer_1", outer_1)
    graph.add_node("outer_2", outer_2)
    graph.add_edge(START, "inner")
    graph.add_edge(START, "outer_1")
    graph.add_edge(["inner", "outer_1"], "outer_2")
    graph.add_edge("outer_2", END)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = graph.compile(checkpointer=checkpointer)
        start = perf_counter()
        chunks: list[tuple[float, Any]] = []
        config = {"configurable": {"thread_id": "2"}}
        async for c in app.astream({"my_key": ""}, config, subgraphs=True):
            chunks.append((round(perf_counter() - start, 1), c))
        # rebase all timestamps on the first chunk's arrival time
        for idx in range(len(chunks)):
            elapsed, c = chunks[idx]
            chunks[idx] = (round(elapsed - chunks[0][0], 1), c)
        assert chunks == [
            # arrives before "inner" finishes
            (
                FloatBetween(0.0, 0.1),
                (
                    (AnyStr("inner:"),),
                    {"inner_1": {"my_key": "got here", "my_other_key": ""}},
                ),
            ),
            (FloatBetween(0.2, 0.4), ((), {"outer_1": {"my_key": " and parallel"}})),
            (
                FloatBetween(0.5, 0.7),
                (
                    (AnyStr("inner:"),),
                    {"inner_2": {"my_key": " and there", "my_other_key": "got here"}},
                ),
            ),
            (FloatBetween(0.5, 0.7), ((), {"inner": {"my_key": "got here and there"}})),
            (FloatBetween(0.5, 0.7), ((), {"outer_2": {"my_key": " and back again"}})),
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_stream_buffering_single_node(checkpointer_name: str) -> None:
    """Custom stream writes from a single node are delivered unbuffered.

    The node emits one custom value before and one after a 0.2s sleep; the
    arrival timestamps prove the first write reaches the consumer immediately
    instead of being flushed only when the node returns.
    """
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    async def node(state: State, writer: StreamWriter):
        # First write must be observable on the stream before the sleep ends.
        writer("Before sleep")
        await asyncio.sleep(0.2)
        writer("After sleep")
        return {"my_key": "got here"}
    builder = StateGraph(State)
    builder.add_node("node", node)
    for source, target in ((START, "node"), ("node", END)):
        builder.add_edge(source, target)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        config = {"configurable": {"thread_id": "2"}}
        # Collect (arrival_time, payload) pairs from the custom stream.
        received: list[tuple[float, Any]] = []
        t0 = perf_counter()
        async for payload in graph.astream({"my_key": ""}, config, stream_mode="custom"):
            received.append((round(perf_counter() - t0, 1), payload))
        expected = [
            (FloatBetween(0.0, 0.1), "Before sleep"),
            (FloatBetween(0.2, 0.3), "After sleep"),
        ]
        assert received == expected
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_nested_graph_interrupts_parallel(checkpointer_name: str) -> None:
    """Interrupt inside a subgraph that runs in parallel with an outer node.

    Subgraph "inner" (interrupted before inner_2) runs concurrently with
    "outer_1". Verifies, across ainvoke and the updates/values stream modes:
    the parallel node completes and its writes are persisted across the
    interrupt (not re-run on resume), and that interrupt_before/interrupt_after
    on the parallel outer node compose correctly with the nested interrupt.
    """
    class InnerState(TypedDict):
        my_key: Annotated[str, operator.add]
        my_other_key: str
    async def inner_1(state: InnerState):
        # Sleep so the parallel outer_1 finishes (and streams) first.
        await asyncio.sleep(0.1)
        return {"my_key": "got here", "my_other_key": state["my_key"]}
    async def inner_2(state: InnerState):
        return {
            "my_key": " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    async def outer_1(state: State):
        return {"my_key": " and parallel"}
    async def outer_2(state: State):
        return {"my_key": " and back again"}
    graph = StateGraph(State)
    graph.add_node(
        "inner",
        inner.compile(interrupt_before=["inner_2"]),
    )
    graph.add_node("outer_1", outer_1)
    graph.add_node("outer_2", outer_2)
    graph.add_edge(START, "inner")
    graph.add_edge(START, "outer_1")
    graph.add_edge(["inner", "outer_1"], "outer_2")
    graph.set_finish_point("outer_2")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = graph.compile(checkpointer=checkpointer)
        # test invoke w/ nested interrupt
        config = {"configurable": {"thread_id": "1"}}
        assert await app.ainvoke({"my_key": ""}, config, debug=True) == {
            "my_key": " and parallel",
        }
        assert await app.ainvoke(None, config, debug=True) == {
            "my_key": "got here and there and parallel and back again",
        }
        # below combo of assertions is asserting two things
        # - outer_1 finishes before inner interrupts (because we see its output in stream, which only happens after node finishes)
        # - the writes of outer are persisted in 1st call and used in 2nd call, ie outer isn't called again (because we dont see outer_1 output again in 2nd stream)
        # test stream updates w/ nested interrupt
        config = {"configurable": {"thread_id": "2"}}
        assert [
            c async for c in app.astream({"my_key": ""}, config, subgraphs=True)
        ] == [
            # we got to parallel node first
            ((), {"outer_1": {"my_key": " and parallel"}}),
            (
                (AnyStr("inner:"),),
                {"inner_1": {"my_key": "got here", "my_other_key": ""}},
            ),
            ((), {"__interrupt__": ()}),
        ]
        # on resume, outer_1's persisted write is replayed as a cached update
        assert [c async for c in app.astream(None, config)] == [
            {"outer_1": {"my_key": " and parallel"}, "__metadata__": {"cached": True}},
            {"inner": {"my_key": "got here and there"}},
            {"outer_2": {"my_key": " and back again"}},
        ]
        # test stream values w/ nested interrupt
        config = {"configurable": {"thread_id": "3"}}
        assert [
            c async for c in app.astream({"my_key": ""}, config, stream_mode="values")
        ] == [
            {"my_key": ""},
            {"my_key": " and parallel"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": ""},
            {"my_key": "got here and there and parallel"},
            {"my_key": "got here and there and parallel and back again"},
        ]
        # # test interrupts BEFORE the parallel node
        app = graph.compile(checkpointer=checkpointer, interrupt_before=["outer_1"])
        config = {"configurable": {"thread_id": "4"}}
        assert [
            c async for c in app.astream({"my_key": ""}, config, stream_mode="values")
        ] == [
            {"my_key": ""},
        ]
        # while we're waiting for the node w/ interrupt inside to finish
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": ""},
            {"my_key": " and parallel"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": ""},
            {"my_key": "got here and there and parallel"},
            {"my_key": "got here and there and parallel and back again"},
        ]
        # test interrupts AFTER the parallel node
        app = graph.compile(checkpointer=checkpointer, interrupt_after=["outer_1"])
        config = {"configurable": {"thread_id": "5"}}
        assert [
            c async for c in app.astream({"my_key": ""}, config, stream_mode="values")
        ] == [
            {"my_key": ""},
            {"my_key": " and parallel"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": ""},
            {"my_key": "got here and there and parallel"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": "got here and there and parallel"},
            {"my_key": "got here and there and parallel and back again"},
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_doubly_nested_graph_interrupts(checkpointer_name: str) -> None:
    """Interrupt inside a grandchild graph nested two levels deep.

    parent -> child -> grandchild, with the grandchild interrupted before
    grandchild_2. Checks ainvoke and stream behavior across the interrupt,
    plus that the CONFIG_KEY_NODE_FINISHED callback fires once per finished
    node, innermost-first as each scope completes.
    """
    class State(TypedDict):
        my_key: str
    class ChildState(TypedDict):
        my_key: str
    class GrandChildState(TypedDict):
        my_key: str
    # Annotated with GrandChildState: these are nodes of the grandchild graph
    # (the schemas are structurally identical, so only the annotation changes).
    async def grandchild_1(state: GrandChildState):
        return {"my_key": state["my_key"] + " here"}
    async def grandchild_2(state: GrandChildState):
        return {
            "my_key": state["my_key"] + " and there",
        }
    grandchild = StateGraph(GrandChildState)
    grandchild.add_node("grandchild_1", grandchild_1)
    grandchild.add_node("grandchild_2", grandchild_2)
    grandchild.add_edge("grandchild_1", "grandchild_2")
    grandchild.set_entry_point("grandchild_1")
    grandchild.set_finish_point("grandchild_2")
    child = StateGraph(ChildState)
    child.add_node(
        "child_1",
        grandchild.compile(interrupt_before=["grandchild_2"]),
    )
    child.set_entry_point("child_1")
    child.set_finish_point("child_1")
    async def parent_1(state: State):
        return {"my_key": "hi " + state["my_key"]}
    async def parent_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    graph.add_node("parent_1", parent_1)
    graph.add_node("child", child.compile())
    graph.add_node("parent_2", parent_2)
    graph.set_entry_point("parent_1")
    graph.add_edge("parent_1", "child")
    graph.add_edge("child", "parent_2")
    graph.set_finish_point("parent_2")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = graph.compile(checkpointer=checkpointer)
        # test invoke w/ nested interrupt
        config = {"configurable": {"thread_id": "1"}}
        assert await app.ainvoke({"my_key": "my value"}, config, debug=True) == {
            "my_key": "hi my value",
        }
        assert await app.ainvoke(None, config, debug=True) == {
            "my_key": "hi my value here and there and back again",
        }
        # test stream updates w/ nested interrupt
        nodes: list[str] = []
        config = {
            "configurable": {"thread_id": "2", CONFIG_KEY_NODE_FINISHED: nodes.append}
        }
        assert [c async for c in app.astream({"my_key": "my value"}, config)] == [
            {"parent_1": {"my_key": "hi my value"}},
            {"__interrupt__": ()},
        ]
        # only nodes that fully finished before the interrupt are reported
        assert nodes == ["parent_1", "grandchild_1"]
        assert [c async for c in app.astream(None, config)] == [
            {"child": {"my_key": "hi my value here and there"}},
            {"parent_2": {"my_key": "hi my value here and there and back again"}},
        ]
        # after resume: innermost scopes report completion before their parents
        assert nodes == [
            "parent_1",
            "grandchild_1",
            "grandchild_2",
            "child_1",
            "child",
            "parent_2",
        ]
        # test stream values w/ nested interrupt
        config = {"configurable": {"thread_id": "3"}}
        assert [
            c
            async for c in app.astream(
                {"my_key": "my value"}, config, stream_mode="values"
            )
        ] == [
            {"my_key": "my value"},
            {"my_key": "hi my value"},
        ]
        assert [c async for c in app.astream(None, config, stream_mode="values")] == [
            {"my_key": "hi my value"},
            {"my_key": "hi my value here and there"},
            {"my_key": "hi my value here and there and back again"},
        ]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_nested_graph_state(checkpointer_name: str) -> None:
    """Inspect checkpointed state of a graph with an interrupted subgraph.

    Exercises aget_state (with and without subgraphs=True) and
    aget_state_history for both the parent graph and the "inner" subgraph:
    first right after the subgraph interrupt, then again after resuming to
    completion, including looking up parent snapshots by checkpoint id.
    """
    class InnerState(TypedDict):
        my_key: str
        my_other_key: str
    def inner_1(state: InnerState):
        return {
            "my_key": state["my_key"] + " here",
            "my_other_key": state["my_key"],
        }
    def inner_2(state: InnerState):
        return {
            "my_key": state["my_key"] + " and there",
            "my_other_key": state["my_key"],
        }
    inner = StateGraph(InnerState)
    inner.add_node("inner_1", inner_1)
    inner.add_node("inner_2", inner_2)
    inner.add_edge("inner_1", "inner_2")
    inner.set_entry_point("inner_1")
    inner.set_finish_point("inner_2")
    class State(TypedDict):
        my_key: str
        other_parent_key: str
    def outer_1(state: State):
        return {"my_key": "hi " + state["my_key"]}
    def outer_2(state: State):
        return {"my_key": state["my_key"] + " and back again"}
    graph = StateGraph(State)
    graph.add_node("outer_1", outer_1)
    graph.add_node(
        "inner",
        inner.compile(interrupt_before=["inner_2"]),
    )
    graph.add_node("outer_2", outer_2)
    graph.set_entry_point("outer_1")
    graph.add_edge("outer_1", "inner")
    graph.add_edge("inner", "outer_2")
    graph.set_finish_point("outer_2")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        app = graph.compile(checkpointer=checkpointer)
        config = {"configurable": {"thread_id": "1"}}
        # runs until the interrupt before inner_2
        await app.ainvoke({"my_key": "my value"}, config, debug=True)
        # test state w/ nested subgraph state (right after interrupt)
        # first get_state without subgraph state
        assert await app.aget_state(config) == StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "inner",
                    (PULL, "inner"),
                    state={
                        "configurable": {"thread_id": "1", "checkpoint_ns": AnyStr()}
                    },
                ),
            ),
            next=("inner",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"outer_1": {"my_key": "hi my value"}},
                "step": 1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
        # now, get_state with subgraphs state
        assert await app.aget_state(config, subgraphs=True) == StateSnapshot(
            values={"my_key": "hi my value"},
            tasks=(
                PregelTask(
                    AnyStr(),
                    "inner",
                    (PULL, "inner"),
                    state=StateSnapshot(
                        values={
                            "my_key": "hi my value here",
                            "my_other_key": "hi my value",
                        },
                        tasks=(
                            PregelTask(
                                AnyStr(),
                                "inner_2",
                                (PULL, "inner_2"),
                            ),
                        ),
                        next=("inner_2",),
                        config={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr("inner:"),
                                "checkpoint_id": AnyStr(),
                                # NOTE(review): key matcher mentions "child:"
                                # though this subgraph is namespaced "inner:" —
                                # presumably AnyDict/AnyStr compare loosely;
                                # confirm matcher semantics.
                                "checkpoint_map": AnyDict(
                                    {"": AnyStr(), AnyStr("child:"): AnyStr()}
                                ),
                            }
                        },
                        metadata={
                            "parents": {
                                "": AnyStr(),
                            },
                            "source": "loop",
                            "writes": {
                                "inner_1": {
                                    "my_key": "hi my value here",
                                    "my_other_key": "hi my value",
                                }
                            },
                            "step": 1,
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("inner:"),
                            "langgraph_node": "inner",
                            "langgraph_path": [PULL, "inner"],
                            "langgraph_step": 2,
                            "langgraph_triggers": ["outer_1"],
                            "langgraph_checkpoint_ns": AnyStr("inner:"),
                        },
                        created_at=AnyStr(),
                        parent_config={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr("inner:"),
                                "checkpoint_id": AnyStr(),
                                "checkpoint_map": AnyDict(
                                    {"": AnyStr(), AnyStr("child:"): AnyStr()}
                                ),
                            }
                        },
                    ),
                ),
            ),
            next=("inner",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {"outer_1": {"my_key": "hi my value"}},
                "step": 1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
        # get_state_history returns outer graph checkpoints
        history = [c async for c in app.aget_state_history(config)]
        assert history == [
            StateSnapshot(
                values={"my_key": "hi my value"},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "inner",
                        (PULL, "inner"),
                        state={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr("inner:"),
                            }
                        },
                    ),
                ),
                next=("inner",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": {"outer_1": {"my_key": "hi my value"}},
                    "step": 1,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={"my_key": "my value"},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "outer_1",
                        (PULL, "outer_1"),
                        result={"my_key": "hi my value"},
                    ),
                ),
                next=("outer_1",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": None,
                    "step": 0,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "__start__",
                        (PULL, "__start__"),
                        result={"my_key": "my value"},
                    ),
                ),
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "writes": {"__start__": {"my_key": "my value"}},
                    "step": -1,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=None,
            ),
        ]
        # get_state_history for a subgraph returns its checkpoints
        child_history = [
            c async for c in app.aget_state_history(history[0].tasks[0].state)
        ]
        assert child_history == [
            StateSnapshot(
                values={"my_key": "hi my value here", "my_other_key": "hi my value"},
                next=("inner_2",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "checkpoint_id": AnyStr(),
                        "checkpoint_map": AnyDict(
                            {"": AnyStr(), AnyStr("child:"): AnyStr()}
                        ),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": {
                        "inner_1": {
                            "my_key": "hi my value here",
                            "my_other_key": "hi my value",
                        }
                    },
                    "step": 1,
                    "parents": {"": AnyStr()},
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "langgraph_node": "inner",
                    "langgraph_path": [PULL, "inner"],
                    "langgraph_step": 2,
                    "langgraph_triggers": ["outer_1"],
                    "langgraph_checkpoint_ns": AnyStr("inner:"),
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "checkpoint_id": AnyStr(),
                        "checkpoint_map": AnyDict(
                            {"": AnyStr(), AnyStr("child:"): AnyStr()}
                        ),
                    }
                },
                tasks=(PregelTask(AnyStr(), "inner_2", (PULL, "inner_2")),),
            ),
            StateSnapshot(
                values={"my_key": "hi my value"},
                next=("inner_1",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "checkpoint_id": AnyStr(),
                        "checkpoint_map": AnyDict(
                            {"": AnyStr(), AnyStr("child:"): AnyStr()}
                        ),
                    }
                },
                metadata={
                    "source": "loop",
                    "writes": None,
                    "step": 0,
                    "parents": {"": AnyStr()},
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "langgraph_node": "inner",
                    "langgraph_path": [PULL, "inner"],
                    "langgraph_step": 2,
                    "langgraph_triggers": ["outer_1"],
                    "langgraph_checkpoint_ns": AnyStr("inner:"),
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "checkpoint_id": AnyStr(),
                        "checkpoint_map": AnyDict(
                            {"": AnyStr(), AnyStr("child:"): AnyStr()}
                        ),
                    }
                },
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "inner_1",
                        (PULL, "inner_1"),
                        result={
                            "my_key": "hi my value here",
                            "my_other_key": "hi my value",
                        },
                    ),
                ),
            ),
            StateSnapshot(
                values={},
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": AnyStr("inner:"),
                        "checkpoint_id": AnyStr(),
                        "checkpoint_map": AnyDict(
                            {"": AnyStr(), AnyStr("child:"): AnyStr()}
                        ),
                    }
                },
                metadata={
                    "source": "input",
                    "writes": {"__start__": {"my_key": "hi my value"}},
                    "step": -1,
                    "parents": {"": AnyStr()},
                    "thread_id": "1",
                    "checkpoint_ns": AnyStr("inner:"),
                    "langgraph_node": "inner",
                    "langgraph_path": [PULL, "inner"],
                    "langgraph_step": 2,
                    "langgraph_triggers": ["outer_1"],
                    "langgraph_checkpoint_ns": AnyStr("inner:"),
                },
                created_at=AnyStr(),
                parent_config=None,
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "__start__",
                        (PULL, "__start__"),
                        result={"my_key": "hi my value"},
                    ),
                ),
            ),
        ]
        # resume
        await app.ainvoke(None, config, debug=True)
        # test state w/ nested subgraph state (after resuming from interrupt)
        assert await app.aget_state(config) == StateSnapshot(
            values={"my_key": "hi my value here and there and back again"},
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "outer_2": {"my_key": "hi my value here and there and back again"}
                },
                "step": 3,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
        # test full history at the end
        actual_history = [c async for c in app.aget_state_history(config)]
        expected_history = [
            StateSnapshot(
                values={"my_key": "hi my value here and there and back again"},
                tasks=(),
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": {
                        "outer_2": {
                            "my_key": "hi my value here and there and back again"
                        }
                    },
                    "step": 3,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={"my_key": "hi my value here and there"},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "outer_2",
                        (PULL, "outer_2"),
                        result={"my_key": "hi my value here and there and back again"},
                    ),
                ),
                next=("outer_2",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": {"inner": {"my_key": "hi my value here and there"}},
                    "step": 2,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={"my_key": "hi my value"},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "inner",
                        (PULL, "inner"),
                        state={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr(),
                            }
                        },
                        result={"my_key": "hi my value here and there"},
                    ),
                ),
                next=("inner",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": {"outer_1": {"my_key": "hi my value"}},
                    "step": 1,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={"my_key": "my value"},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "outer_1",
                        (PULL, "outer_1"),
                        result={"my_key": "hi my value"},
                    ),
                ),
                next=("outer_1",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": None,
                    "step": 0,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={},
                tasks=(
                    PregelTask(
                        AnyStr(),
                        "__start__",
                        (PULL, "__start__"),
                        result={"my_key": "my value"},
                    ),
                ),
                next=("__start__",),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "writes": {"__start__": {"my_key": "my value"}},
                    "step": -1,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=None,
            ),
        ]
        assert actual_history == expected_history
        # test looking up parent state by checkpoint ID
        for actual_snapshot, expected_snapshot in zip(actual_history, expected_history):
            assert await app.aget_state(actual_snapshot.config) == expected_snapshot
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_doubly_nested_graph_state(checkpointer_name: str) -> None:
class State(TypedDict):
my_key: str
class ChildState(TypedDict):
my_key: str
class GrandChildState(TypedDict):
my_key: str
def grandchild_1(state: ChildState):
return {"my_key": state["my_key"] + " here"}
def grandchild_2(state: ChildState):
return {
"my_key": state["my_key"] + " and there",
}
grandchild = StateGraph(GrandChildState)
grandchild.add_node("grandchild_1", grandchild_1)
grandchild.add_node("grandchild_2", grandchild_2)
grandchild.add_edge("grandchild_1", "grandchild_2")
grandchild.set_entry_point("grandchild_1")
grandchild.set_finish_point("grandchild_2")
child = StateGraph(ChildState)
child.add_node(
"child_1",
grandchild.compile(interrupt_before=["grandchild_2"]),
)
child.set_entry_point("child_1")
child.set_finish_point("child_1")
def parent_1(state: State):
return {"my_key": "hi " + state["my_key"]}
def parent_2(state: State):
return {"my_key": state["my_key"] + " and back again"}
graph = StateGraph(State)
graph.add_node("parent_1", parent_1)
graph.add_node("child", child.compile())
graph.add_node("parent_2", parent_2)
graph.set_entry_point("parent_1")
graph.add_edge("parent_1", "child")
graph.add_edge("child", "parent_2")
graph.set_finish_point("parent_2")
async with awith_checkpointer(checkpointer_name) as checkpointer:
app = graph.compile(checkpointer=checkpointer)
# test invoke w/ nested interrupt
config = {"configurable": {"thread_id": "1"}}
assert [
c async for c in app.astream({"my_key": "my value"}, config, subgraphs=True)
] == [
((), {"parent_1": {"my_key": "hi my value"}}),
(
(AnyStr("child:"), AnyStr("child_1:")),
{"grandchild_1": {"my_key": "hi my value here"}},
),
((), {"__interrupt__": ()}),
]
# get state without subgraphs
outer_state = await app.aget_state(config)
assert outer_state == StateSnapshot(
values={"my_key": "hi my value"},
tasks=(
PregelTask(
AnyStr(),
"child",
(PULL, "child"),
state={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child"),
}
},
),
),
next=("child",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {"parent_1": {"my_key": "hi my value"}},
"step": 1,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
)
child_state = await app.aget_state(outer_state.tasks[0].state)
assert (
child_state.tasks[0]
== StateSnapshot(
values={"my_key": "hi my value"},
tasks=(
PregelTask(
AnyStr(),
"child_1",
(PULL, "child_1"),
state={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
}
},
),
),
next=("child_1",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {"": AnyStr()},
"source": "loop",
"writes": None,
"step": 0,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
}
},
).tasks[0]
)
grandchild_state = await app.aget_state(child_state.tasks[0].state)
assert grandchild_state == StateSnapshot(
values={"my_key": "hi my value here"},
tasks=(
PregelTask(
AnyStr(),
"grandchild_2",
(PULL, "grandchild_2"),
),
),
next=("grandchild_2",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
metadata={
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"source": "loop",
"writes": {"grandchild_1": {"my_key": "hi my value here"}},
"step": 1,
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [PULL, AnyStr("child_1")],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
)
# get state with subgraphs
assert await app.aget_state(config, subgraphs=True) == StateSnapshot(
values={"my_key": "hi my value"},
tasks=(
PregelTask(
AnyStr(),
"child",
(PULL, "child"),
state=StateSnapshot(
values={"my_key": "hi my value"},
tasks=(
PregelTask(
AnyStr(),
"child_1",
(PULL, "child_1"),
state=StateSnapshot(
values={"my_key": "hi my value here"},
tasks=(
PregelTask(
AnyStr(),
"grandchild_2",
(PULL, "grandchild_2"),
),
),
next=("grandchild_2",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(
re.compile(r"child:.+|child1:")
): AnyStr(),
}
),
}
},
metadata={
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"source": "loop",
"writes": {
"grandchild_1": {
"my_key": "hi my value here"
}
},
"step": 1,
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [
PULL,
AnyStr("child_1"),
],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(
re.compile(r"child:.+|child1:")
): AnyStr(),
}
),
}
},
),
),
),
next=("child_1",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
metadata={
"parents": {"": AnyStr()},
"source": "loop",
"writes": None,
"step": 0,
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child",
"langgraph_path": [PULL, AnyStr("child")],
"langgraph_step": 2,
"langgraph_triggers": [AnyStr("parent_1")],
"langgraph_checkpoint_ns": AnyStr("child:"),
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
),
),
),
next=("child",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {"parent_1": {"my_key": "hi my value"}},
"step": 1,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
)
# resume
assert [c async for c in app.astream(None, config, subgraphs=True)] == [
(
(AnyStr("child:"), AnyStr("child_1:")),
{"grandchild_2": {"my_key": "hi my value here and there"}},
),
(
(AnyStr("child:"),),
{"child_1": {"my_key": "hi my value here and there"}},
),
((), {"child": {"my_key": "hi my value here and there"}}),
((), {"parent_2": {"my_key": "hi my value here and there and back again"}}),
]
# get state with and without subgraphs
assert (
await app.aget_state(config)
== await app.aget_state(config, subgraphs=True)
== StateSnapshot(
values={"my_key": "hi my value here and there and back again"},
tasks=(),
next=(),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {
"parent_2": {
"my_key": "hi my value here and there and back again"
}
},
"step": 3,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
)
)
# get outer graph history
outer_history = [c async for c in app.aget_state_history(config)]
assert (
outer_history[0]
== [
StateSnapshot(
values={"my_key": "hi my value here and there and back again"},
tasks=(),
next=(),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {
"parent_2": {
"my_key": "hi my value here and there and back again"
}
},
"step": 3,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
),
StateSnapshot(
values={"my_key": "hi my value here and there"},
next=("parent_2",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {"child": {"my_key": "hi my value here and there"}},
"step": 2,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
tasks=(
PregelTask(
id=AnyStr(), name="parent_2", path=(PULL, "parent_2")
),
),
),
StateSnapshot(
values={"my_key": "hi my value"},
tasks=(
PregelTask(
AnyStr(),
"child",
(PULL, "child"),
state={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child"),
}
},
),
),
next=("child",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": {"parent_1": {"my_key": "hi my value"}},
"step": 1,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
),
StateSnapshot(
values={"my_key": "my value"},
next=("parent_1",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "loop",
"writes": None,
"step": 0,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
tasks=(
PregelTask(
id=AnyStr(), name="parent_1", path=(PULL, "parent_1")
),
),
),
StateSnapshot(
values={},
next=("__start__",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": "",
"checkpoint_id": AnyStr(),
}
},
metadata={
"parents": {},
"source": "input",
"writes": {"my_key": "my value"},
"step": -1,
"thread_id": "1",
},
created_at=AnyStr(),
parent_config=None,
tasks=(
PregelTask(
id=AnyStr(), name="__start__", path=(PULL, "__start__")
),
),
),
][0]
)
# get child graph history
child_history = [
c async for c in app.aget_state_history(outer_history[2].tasks[0].state)
]
assert child_history == [
StateSnapshot(
values={"my_key": "hi my value here and there"},
next=(),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
metadata={
"source": "loop",
"writes": {"child_1": {"my_key": "hi my value here and there"}},
"step": 1,
"parents": {"": AnyStr()},
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child",
"langgraph_path": [PULL, AnyStr("child")],
"langgraph_step": 2,
"langgraph_triggers": [AnyStr("parent_1")],
"langgraph_checkpoint_ns": AnyStr("child:"),
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
tasks=(),
),
StateSnapshot(
values={"my_key": "hi my value"},
next=("child_1",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
metadata={
"source": "loop",
"writes": None,
"step": 0,
"parents": {"": AnyStr()},
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child",
"langgraph_path": [PULL, AnyStr("child")],
"langgraph_step": 2,
"langgraph_triggers": [AnyStr("parent_1")],
"langgraph_checkpoint_ns": AnyStr("child:"),
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
tasks=(
PregelTask(
id=AnyStr(),
name="child_1",
path=(PULL, "child_1"),
state={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
}
},
result={"my_key": "hi my value here and there"},
),
),
),
StateSnapshot(
values={},
next=("__start__",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{"": AnyStr(), AnyStr("child:"): AnyStr()}
),
}
},
metadata={
"source": "input",
"writes": {"__start__": {"my_key": "hi my value"}},
"step": -1,
"parents": {"": AnyStr()},
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child",
"langgraph_path": [PULL, AnyStr("child")],
"langgraph_step": 2,
"langgraph_triggers": [AnyStr("parent_1")],
"langgraph_checkpoint_ns": AnyStr("child:"),
},
created_at=AnyStr(),
parent_config=None,
tasks=(
PregelTask(
id=AnyStr(),
name="__start__",
path=(PULL, "__start__"),
result={"my_key": "hi my value"},
),
),
),
]
# get grandchild graph history
grandchild_history = [
c async for c in app.aget_state_history(child_history[1].tasks[0].state)
]
assert grandchild_history == [
StateSnapshot(
values={"my_key": "hi my value here and there"},
next=(),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
metadata={
"source": "loop",
"writes": {
"grandchild_2": {"my_key": "hi my value here and there"}
},
"step": 2,
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [
PULL,
AnyStr("child_1"),
],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
tasks=(),
),
StateSnapshot(
values={"my_key": "hi my value here"},
next=("grandchild_2",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
metadata={
"source": "loop",
"writes": {"grandchild_1": {"my_key": "hi my value here"}},
"step": 1,
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [
PULL,
AnyStr("child_1"),
],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
tasks=(
PregelTask(
id=AnyStr(),
name="grandchild_2",
path=(PULL, "grandchild_2"),
result={"my_key": "hi my value here and there"},
),
),
),
StateSnapshot(
values={"my_key": "hi my value"},
next=("grandchild_1",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
metadata={
"source": "loop",
"writes": None,
"step": 0,
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [
PULL,
AnyStr("child_1"),
],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
tasks=(
PregelTask(
id=AnyStr(),
name="grandchild_1",
path=(PULL, "grandchild_1"),
result={"my_key": "hi my value here"},
),
),
),
StateSnapshot(
values={},
next=("__start__",),
config={
"configurable": {
"thread_id": "1",
"checkpoint_ns": AnyStr(),
"checkpoint_id": AnyStr(),
"checkpoint_map": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
AnyStr(re.compile(r"child:.+|child1:")): AnyStr(),
}
),
}
},
metadata={
"source": "input",
"writes": {"__start__": {"my_key": "hi my value"}},
"step": -1,
"parents": AnyDict(
{
"": AnyStr(),
AnyStr("child:"): AnyStr(),
}
),
"thread_id": "1",
"checkpoint_ns": AnyStr("child:"),
"langgraph_checkpoint_ns": AnyStr("child:"),
"langgraph_node": "child_1",
"langgraph_path": [
PULL,
AnyStr("child_1"),
],
"langgraph_step": 1,
"langgraph_triggers": [AnyStr("start:child_1")],
},
created_at=AnyStr(),
parent_config=None,
tasks=(
PregelTask(
id=AnyStr(),
name="__start__",
path=(PULL, "__start__"),
result={"my_key": "hi my value"},
),
),
),
]
# replay grandchild checkpoint
assert [
c
async for c in app.astream(
None, grandchild_history[2].config, subgraphs=True
)
] == [
(
(AnyStr("child:"), AnyStr("child_1:")),
{"grandchild_1": {"my_key": "hi my value here"}},
),
((), {"__interrupt__": ()}),
]
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_send_to_nested_graphs(checkpointer_name: str) -> None:
    """Fan out via Send() into multiple runs of an interruptible subgraph.

    Invokes the parent graph, pauses at the subgraph interrupts, updates the
    state of one branch, resumes, and verifies both the final snapshot and
    the complete checkpoint history. Parametrized over every async
    checkpointer implementation.
    """
    class OverallState(TypedDict):
        subjects: list[str]
        jokes: Annotated[list[str], operator.add]
    async def continue_to_jokes(state: OverallState):
        # One Send per subject: each spawns its own run of the subgraph node.
        return [Send("generate_joke", {"subject": s}) for s in state["subjects"]]
    class JokeState(TypedDict):
        subject: str
    async def edit(state: JokeState):
        subject = state["subject"]
        return {"subject": f"{subject} - hohoho"}
    # subgraph
    subgraph = StateGraph(JokeState, output=OverallState)
    subgraph.add_node("edit", edit)
    subgraph.add_node(
        "generate", lambda state: {"jokes": [f"Joke about {state['subject']}"]}
    )
    subgraph.set_entry_point("edit")
    subgraph.add_edge("edit", "generate")
    subgraph.set_finish_point("generate")
    # parent graph
    builder = StateGraph(OverallState)
    builder.add_node(
        "generate_joke",
        # each subgraph run pauses before its "generate" node
        subgraph.compile(interrupt_before=["generate"]),
    )
    builder.add_conditional_edges(START, continue_to_jokes)
    builder.add_edge("generate_joke", END)
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        config = {"configurable": {"thread_id": "1"}}
        tracer = FakeTracer()
        # invoke and pause at nested interrupt
        assert await graph.ainvoke(
            {"subjects": ["cats", "dogs"]},
            config={**config, "callbacks": [tracer]},
        ) == {
            "subjects": ["cats", "dogs"],
            "jokes": [],
        }
        assert len(tracer.runs) == 1, "Should produce exactly 1 root run"
        # check state
        outer_state = await graph.aget_state(config)
        if not FF_SEND_V2:
            # Legacy Send path: the dogs branch sits at tasks[1] here (the
            # v2 snapshot below has a __start__ task at index 0 instead).
            # update state of dogs joke graph
            await graph.aupdate_state(
                outer_state.tasks[1].state, {"subject": "turtles - hohoho"}
            )
            # continue past interrupt
            assert await graph.ainvoke(None, config=config) == {
                "subjects": ["cats", "dogs"],
                "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"],
            }
            return
        assert outer_state == StateSnapshot(
            values={"subjects": ["cats", "dogs"], "jokes": []},
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="__start__",
                    path=("__pregel_pull", "__start__"),
                    error=None,
                    interrupts=(),
                    state=None,
                    result={"subjects": ["cats", "dogs"]},
                ),
                PregelTask(
                    AnyStr(),
                    "generate_joke",
                    (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("generate_joke:"),
                        }
                    },
                ),
                PregelTask(
                    AnyStr(),
                    "generate_joke",
                    (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("generate_joke:"),
                        }
                    },
                ),
            ),
            next=("generate_joke", "generate_joke"),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "input",
                "writes": {
                    "__start__": {
                        "subjects": [
                            "cats",
                            "dogs",
                        ],
                    }
                },
                "step": -1,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config=None,
        )
        # update state of dogs joke graph
        await graph.aupdate_state(
            outer_state.tasks[2].state, {"subject": "turtles - hohoho"}
        )
        # continue past interrupt
        assert await graph.ainvoke(None, config=config) == {
            "subjects": ["cats", "dogs"],
            "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"],
        }
        actual_snapshot = await graph.aget_state(config)
        expected_snapshot = StateSnapshot(
            values={
                "subjects": ["cats", "dogs"],
                "jokes": ["Joke about cats - hohoho", "Joke about turtles - hohoho"],
            },
            tasks=(),
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "parents": {},
                "source": "loop",
                "writes": {
                    "generate_joke": [
                        {"jokes": ["Joke about cats - hohoho"]},
                        {"jokes": ["Joke about turtles - hohoho"]},
                    ]
                },
                "step": 0,
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
        )
        assert actual_snapshot == expected_snapshot
        # test full history
        actual_history = [c async for c in graph.aget_state_history(config)]
        expected_history = [
            StateSnapshot(
                values={
                    "subjects": ["cats", "dogs"],
                    "jokes": [
                        "Joke about cats - hohoho",
                        "Joke about turtles - hohoho",
                    ],
                },
                tasks=(),
                next=(),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "loop",
                    "writes": {
                        "generate_joke": [
                            {"jokes": ["Joke about cats - hohoho"]},
                            {"jokes": ["Joke about turtles - hohoho"]},
                        ]
                    },
                    "step": 0,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
            ),
            StateSnapshot(
                values={"jokes": []},
                next=("__start__", "generate_joke", "generate_joke"),
                tasks=(
                    PregelTask(
                        id=AnyStr(),
                        name="__start__",
                        path=("__pregel_pull", "__start__"),
                        error=None,
                        interrupts=(),
                        state=None,
                        result={"subjects": ["cats", "dogs"]},
                    ),
                    PregelTask(
                        AnyStr(),
                        "generate_joke",
                        (PUSH, ("__pregel_pull", "__start__"), 1, AnyStr()),
                        state={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr("generate_joke:"),
                            }
                        },
                        result={"jokes": ["Joke about cats - hohoho"]},
                    ),
                    PregelTask(
                        AnyStr(),
                        "generate_joke",
                        (PUSH, ("__pregel_pull", "__start__"), 2, AnyStr()),
                        state={
                            "configurable": {
                                "thread_id": "1",
                                "checkpoint_ns": AnyStr("generate_joke:"),
                            }
                        },
                        result={"jokes": ["Joke about turtles - hohoho"]},
                    ),
                ),
                config={
                    "configurable": {
                        "thread_id": "1",
                        "checkpoint_ns": "",
                        "checkpoint_id": AnyStr(),
                    }
                },
                metadata={
                    "parents": {},
                    "source": "input",
                    "writes": {"__start__": {"subjects": ["cats", "dogs"]}},
                    "step": -1,
                    "thread_id": "1",
                },
                created_at=AnyStr(),
                parent_config=None,
            ),
        ]
        assert actual_history == expected_history
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_weather_subgraph(
    checkpointer_name: str, snapshot: SnapshotAssertion
) -> None:
    """Router graph delegating to an interruptible weather subgraph.

    Exercises: custom stream writers across graph levels, interrupt/resume,
    subgraph state inspection (``subgraphs=True``), updating subgraph state,
    and updating state while acting as a subgraph node (``as_node``).
    """
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, ToolCall
    from langchain_core.tools import tool
    from langgraph.graph import MessagesState
    # setup subgraph
    @tool
    def get_weather(city: str):
        """Get the weather for a specific city"""
        # NOTE: "I'ts" is misspelled, but the assertions below match it exactly.
        return f"I'ts sunny in {city}!"
    # scripted model: always requests a get_weather tool call for San Francisco
    weather_model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    ToolCall(
                        id="tool_call123",
                        name="get_weather",
                        args={"city": "San Francisco"},
                    )
                ],
            )
        ]
    )
    class SubGraphState(MessagesState):
        city: str
    def model_node(state: SubGraphState, writer: StreamWriter):
        # emits to the custom stream, then extracts the city from the tool call
        writer(" very")
        result = weather_model.invoke(state["messages"])
        return {"city": cast(AIMessage, result).tool_calls[0]["args"]["city"]}
    def weather_node(state: SubGraphState, writer: StreamWriter):
        writer(" good")
        result = get_weather.invoke({"city": state["city"]})
        return {"messages": [{"role": "assistant", "content": result}]}
    subgraph = StateGraph(SubGraphState)
    subgraph.add_node(model_node)
    subgraph.add_node(weather_node)
    subgraph.add_edge(START, "model_node")
    subgraph.add_edge("model_node", "weather_node")
    subgraph.add_edge("weather_node", END)
    # pause each subgraph run before the weather lookup
    subgraph = subgraph.compile(interrupt_before=["weather_node"])
    # setup main graph
    class RouterState(MessagesState):
        route: Literal["weather", "other"]
    class Router(TypedDict):
        route: Literal["weather", "other"]
    # scripted router model: always routes to "weather"
    router_model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    ToolCall(
                        id="tool_call123",
                        name="router",
                        args={"dest": "weather"},
                    )
                ],
            )
        ]
    )
    def router_node(state: RouterState, writer: StreamWriter):
        writer("I'm")
        system_message = "Classify the incoming query as either about weather or not."
        messages = [{"role": "system", "content": system_message}] + state["messages"]
        route = router_model.invoke(messages)
        return {"route": cast(AIMessage, route).tool_calls[0]["args"]["dest"]}
    def normal_llm_node(state: RouterState):
        return {"messages": [AIMessage("Hello!")]}
    def route_after_prediction(state: RouterState):
        if state["route"] == "weather":
            return "weather_graph"
        else:
            return "normal_llm_node"
    def weather_graph(state: RouterState):
        # this tests that all async checkpointers tested also implement sync methods
        # as the subgraph called with sync invoke will use sync checkpointer methods
        return subgraph.invoke(state)
    graph = StateGraph(RouterState)
    graph.add_node(router_node)
    graph.add_node(normal_llm_node)
    graph.add_node("weather_graph", weather_graph)
    graph.add_edge(START, "router_node")
    graph.add_conditional_edges("router_node", route_after_prediction)
    graph.add_edge("normal_llm_node", END)
    graph.add_edge("weather_graph", END)
    def get_first_in_list():
        # Late-binding closure: `graph` and `config` are rebound below before
        # this is invoked (via asyncio.to_thread) to test sync delegation.
        return [*graph.get_state_history(config, limit=1)][0]
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = graph.compile(checkpointer=checkpointer)
        assert graph.get_graph(xray=1).draw_mermaid() == snapshot
        config = {"configurable": {"thread_id": "1"}}
        thread2 = {"configurable": {"thread_id": "2"}}
        inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
        # run with custom output
        assert [
            c async for c in graph.astream(inputs, thread2, stream_mode="custom")
        ] == [
            "I'm",
            " very",
        ]
        assert [
            c async for c in graph.astream(None, thread2, stream_mode="custom")
        ] == [
            " good",
        ]
        # run until interrupt
        assert [
            c
            async for c in graph.astream(
                inputs, config=config, stream_mode="updates", subgraphs=True
            )
        ] == [
            ((), {"router_node": {"route": "weather"}}),
            ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}),
            ((), {"__interrupt__": ()}),
        ]
        # check current state
        state = await graph.aget_state(config)
        assert state == StateSnapshot(
            values={
                "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
                "route": "weather",
            },
            next=("weather_graph",),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"router_node": {"route": "weather"}},
                "step": 1,
                "parents": {},
                "thread_id": "1",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="weather_graph",
                    path=(PULL, "weather_graph"),
                    state={
                        "configurable": {
                            "thread_id": "1",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                        }
                    },
                ),
            ),
        )
        # confirm that list() delegates to alist() correctly
        assert await asyncio.to_thread(get_first_in_list) == state
        # update
        await graph.aupdate_state(state.tasks[0].state, {"city": "la"})
        # run after update
        assert [
            c
            async for c in graph.astream(
                None, config=config, stream_mode="updates", subgraphs=True
            )
        ] == [
            (
                (AnyStr("weather_graph:"),),
                {
                    "weather_node": {
                        "messages": [
                            {"role": "assistant", "content": "I'ts sunny in la!"}
                        ]
                    }
                },
            ),
            (
                (),
                {
                    "weather_graph": {
                        "messages": [
                            _AnyIdHumanMessage(content="what's the weather in sf"),
                            _AnyIdAIMessage(content="I'ts sunny in la!"),
                        ]
                    }
                },
            ),
        ]
        # try updating acting as weather node
        config = {"configurable": {"thread_id": "14"}}
        inputs = {"messages": [{"role": "user", "content": "what's the weather in sf"}]}
        assert [
            c
            async for c in graph.astream(
                inputs, config=config, stream_mode="updates", subgraphs=True
            )
        ] == [
            ((), {"router_node": {"route": "weather"}}),
            ((AnyStr("weather_graph:"),), {"model_node": {"city": "San Francisco"}}),
            ((), {"__interrupt__": ()}),
        ]
        state = await graph.aget_state(config, subgraphs=True)
        assert state == StateSnapshot(
            values={
                "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
                "route": "weather",
            },
            next=("weather_graph",),
            config={
                "configurable": {
                    "thread_id": "14",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"router_node": {"route": "weather"}},
                "step": 1,
                "parents": {},
                "thread_id": "14",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "14",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="weather_graph",
                    path=(PULL, "weather_graph"),
                    # with subgraphs=True, the task carries a full nested snapshot
                    state=StateSnapshot(
                        values={
                            "messages": [
                                _AnyIdHumanMessage(content="what's the weather in sf")
                            ],
                            "city": "San Francisco",
                        },
                        next=("weather_node",),
                        config={
                            "configurable": {
                                "thread_id": "14",
                                "checkpoint_ns": AnyStr("weather_graph:"),
                                "checkpoint_id": AnyStr(),
                                "checkpoint_map": AnyDict(
                                    {
                                        "": AnyStr(),
                                        AnyStr("weather_graph:"): AnyStr(),
                                    }
                                ),
                            }
                        },
                        metadata={
                            "source": "loop",
                            "writes": {"model_node": {"city": "San Francisco"}},
                            "step": 1,
                            "parents": {"": AnyStr()},
                            "thread_id": "14",
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "langgraph_node": "weather_graph",
                            "langgraph_path": [PULL, "weather_graph"],
                            "langgraph_step": 2,
                            "langgraph_triggers": [
                                "branch:router_node:route_after_prediction:weather_graph"
                            ],
                            "langgraph_checkpoint_ns": AnyStr("weather_graph:"),
                        },
                        created_at=AnyStr(),
                        parent_config={
                            "configurable": {
                                "thread_id": "14",
                                "checkpoint_ns": AnyStr("weather_graph:"),
                                "checkpoint_id": AnyStr(),
                                "checkpoint_map": AnyDict(
                                    {
                                        "": AnyStr(),
                                        AnyStr("weather_graph:"): AnyStr(),
                                    }
                                ),
                            }
                        },
                        tasks=(
                            PregelTask(
                                id=AnyStr(),
                                name="weather_node",
                                path=(PULL, "weather_node"),
                            ),
                        ),
                    ),
                ),
            ),
        )
        # impersonate the interrupted weather_node: write its output directly
        await graph.aupdate_state(
            state.tasks[0].state.config,
            {"messages": [{"role": "assistant", "content": "rainy"}]},
            as_node="weather_node",
        )
        state = await graph.aget_state(config, subgraphs=True)
        assert state == StateSnapshot(
            values={
                "messages": [_AnyIdHumanMessage(content="what's the weather in sf")],
                "route": "weather",
            },
            next=("weather_graph",),
            config={
                "configurable": {
                    "thread_id": "14",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": {"router_node": {"route": "weather"}},
                "step": 1,
                "parents": {},
                "thread_id": "14",
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "14",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(
                PregelTask(
                    id=AnyStr(),
                    name="weather_graph",
                    path=(PULL, "weather_graph"),
                    state=StateSnapshot(
                        values={
                            "messages": [
                                _AnyIdHumanMessage(content="what's the weather in sf"),
                                _AnyIdAIMessage(content="rainy"),
                            ],
                            "city": "San Francisco",
                        },
                        next=(),
                        config={
                            "configurable": {
                                "thread_id": "14",
                                "checkpoint_ns": AnyStr("weather_graph:"),
                                "checkpoint_id": AnyStr(),
                                "checkpoint_map": AnyDict(
                                    {
                                        "": AnyStr(),
                                        AnyStr("weather_graph:"): AnyStr(),
                                    }
                                ),
                            }
                        },
                        metadata={
                            "step": 2,
                            "source": "update",
                            "writes": {
                                "weather_node": {
                                    "messages": [
                                        {"role": "assistant", "content": "rainy"}
                                    ]
                                }
                            },
                            "parents": {"": AnyStr()},
                            "thread_id": "14",
                            "checkpoint_id": AnyStr(),
                            "checkpoint_ns": AnyStr("weather_graph:"),
                            "langgraph_node": "weather_graph",
                            "langgraph_path": [PULL, "weather_graph"],
                            "langgraph_step": 2,
                            "langgraph_triggers": [
                                "branch:router_node:route_after_prediction:weather_graph"
                            ],
                            "langgraph_checkpoint_ns": AnyStr("weather_graph:"),
                        },
                        created_at=AnyStr(),
                        parent_config={
                            "configurable": {
                                "thread_id": "14",
                                "checkpoint_ns": AnyStr("weather_graph:"),
                                "checkpoint_id": AnyStr(),
                                "checkpoint_map": AnyDict(
                                    {
                                        "": AnyStr(),
                                        AnyStr("weather_graph:"): AnyStr(),
                                    }
                                ),
                            }
                        },
                        tasks=(),
                    ),
                ),
            ),
        )
        # resuming skips weather_node (already satisfied by the update above)
        assert [
            c
            async for c in graph.astream(
                None, config=config, stream_mode="updates", subgraphs=True
            )
        ] == [
            (
                (),
                {
                    "weather_graph": {
                        "messages": [
                            _AnyIdHumanMessage(content="what's the weather in sf"),
                            _AnyIdAIMessage(content="rainy"),
                        ]
                    }
                },
            ),
        ]
async def test_checkpoint_metadata() -> None:
    """This test verifies that a run's configurable fields are merged with the
    previous checkpoint config for each step in the run.
    """
    # set up test
    from langchain_core.language_models.fake_chat_models import (
        FakeMessagesListChatModel,
    )
    from langchain_core.messages import AIMessage, AnyMessage
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.tools import tool
    # graph state
    class BaseState(TypedDict):
        messages: Annotated[list[AnyMessage], add_messages]
    # initialize graph nodes
    @tool()
    def search_api(query: str) -> str:
        """Searches the API for the query."""
        return f"result for {query}"
    tools = [search_api]
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a nice assistant."),
            ("placeholder", "{messages}"),
        ]
    )
    # scripted model: first turn requests a tool call, second turn answers
    model = FakeMessagesListChatModel(
        responses=[
            AIMessage(
                content="",
                tool_calls=[
                    {
                        "id": "tool_call123",
                        "name": "search_api",
                        "args": {"query": "query"},
                    },
                ],
            ),
            AIMessage(content="answer"),
        ]
    )
    def agent(state: BaseState, config: RunnableConfig) -> BaseState:
        formatted = prompt.invoke(state)
        response = model.invoke(formatted)
        return {"messages": response}
    def should_continue(data: BaseState) -> str:
        # Logic to decide whether to continue in the loop or exit
        if not data["messages"][-1].tool_calls:
            return "exit"
        else:
            return "continue"
    # define graphs w/ and w/o interrupt
    workflow = StateGraph(BaseState)
    workflow.add_node("agent", agent)
    workflow.add_node("tools", ToolNode(tools))
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges(
        "agent", should_continue, {"continue": "tools", "exit": END}
    )
    workflow.add_edge("tools", "agent")
    # graph w/o interrupt
    checkpointer_1 = MemorySaverAssertCheckpointMetadata()
    app = workflow.compile(checkpointer=checkpointer_1)
    # graph w/ interrupt
    checkpointer_2 = MemorySaverAssertCheckpointMetadata()
    app_w_interrupt = workflow.compile(
        checkpointer=checkpointer_2, interrupt_before=["tools"]
    )
    # assertions
    # invoke graph w/o interrupt
    await app.ainvoke(
        {"messages": ["what is weather in sf"]},
        {
            "configurable": {
                "thread_id": "1",
                "test_config_1": "foo",
                "test_config_2": "bar",
            },
        },
    )
    config = {"configurable": {"thread_id": "1"}}
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_1 = (await checkpointer_1.aget_tuple(config)).metadata
    assert chkpnt_metadata_1["thread_id"] == "1"
    assert chkpnt_metadata_1["test_config_1"] == "foo"
    assert chkpnt_metadata_1["test_config_2"] == "bar"
    # Verify that all checkpoint metadata have the expected keys. This check
    # is needed because a run may have an arbitrary number of steps depending
    # on how the graph is constructed.
    chkpnt_tuples_1 = checkpointer_1.alist(config)
    async for chkpnt_tuple in chkpnt_tuples_1:
        assert chkpnt_tuple.metadata["thread_id"] == "1"
        assert chkpnt_tuple.metadata["test_config_1"] == "foo"
        assert chkpnt_tuple.metadata["test_config_2"] == "bar"
    # invoke graph, but interrupt before tool call
    await app_w_interrupt.ainvoke(
        {"messages": ["what is weather in sf"]},
        {
            "configurable": {
                "thread_id": "2",
                "test_config_3": "foo",
                "test_config_4": "bar",
            },
        },
    )
    config = {"configurable": {"thread_id": "2"}}
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_2 = (await checkpointer_2.aget_tuple(config)).metadata
    assert chkpnt_metadata_2["thread_id"] == "2"
    assert chkpnt_metadata_2["test_config_3"] == "foo"
    assert chkpnt_metadata_2["test_config_4"] == "bar"
    # resume graph execution: checkpoints written after the interrupt must
    # still carry the run's configurable fields
    await app_w_interrupt.ainvoke(
        input=None,
        config={
            "configurable": {
                "thread_id": "2",
                "test_config_3": "foo",
                "test_config_4": "bar",
            }
        },
    )
    # assert that checkpoint metadata contains the run's configurable fields
    chkpnt_metadata_3 = (await checkpointer_2.aget_tuple(config)).metadata
    assert chkpnt_metadata_3["thread_id"] == "2"
    assert chkpnt_metadata_3["test_config_3"] == "foo"
    assert chkpnt_metadata_3["test_config_4"] == "bar"
    # Verify that all checkpoint metadata have the expected keys. This check
    # is needed because a run may have an arbitrary number of steps depending
    # on how the graph is constructed.
    chkpnt_tuples_2 = checkpointer_2.alist(config)
    async for chkpnt_tuple in chkpnt_tuples_2:
        assert chkpnt_tuple.metadata["thread_id"] == "2"
        assert chkpnt_tuple.metadata["test_config_3"] == "foo"
        assert chkpnt_tuple.metadata["test_config_4"] == "bar"
async def test_checkpointer_null_pending_writes() -> None:
    """State accumulates across runs with a checkpointer that returns null
    pending writes: each invocation appends one more "1" to the list."""

    class NamedNode:
        """Callable node that reports its own name as its sole state write."""

        def __init__(self, name: str):
            self.name = name
            setattr(self, "__name__", name)

        def __call__(self, state):
            return [self.name]

    def cfg() -> dict:
        # fresh config dict per call, all pointing at the same thread
        return {"configurable": {"thread_id": "foo"}}

    workflow = StateGraph(Annotated[list, operator.add])
    workflow.add_node(NamedNode("1"))
    workflow.add_edge(START, "1")
    graph = workflow.compile(checkpointer=MemorySaverNoPending())
    # two sync runs followed by two async runs, each appending one "1"
    assert graph.invoke([], cfg()) == ["1"]
    assert graph.invoke([], cfg()) == ["1"] * 2
    assert (await graph.ainvoke([], cfg())) == ["1"] * 3
    assert (await graph.ainvoke([], cfg())) == ["1"] * 4
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
@pytest.mark.parametrize("store_name", ALL_STORES_ASYNC)
async def test_store_injected_async(checkpointer_name: str, store_name: str) -> None:
    """Verify a BaseStore instance is injected into async nodes and that
    highly concurrent node writes land in the store, across every async
    checkpointer/store combination."""
    class State(TypedDict):
        count: Annotated[int, operator.add]
    doc_id = str(uuid.uuid4())
    doc = {"some-key": "this-is-a-val"}
    uid = uuid.uuid4().hex
    namespace = (f"foo-{uid}", "bar")
    thread_1 = str(uuid.uuid4())
    thread_2 = str(uuid.uuid4())
    class Node:
        def __init__(self, i: Optional[int] = None):
            self.i = i
        async def __call__(
            self, inputs: State, config: RunnableConfig, store: BaseStore
        ):
            assert isinstance(store, BaseStore)
            await store.aput(
                # Indexed nodes running on the two known threads all write the
                # same doc_id into the shared namespace (last write wins);
                # everything else writes into a per-node namespace instead.
                namespace
                if self.i is not None
                and config["configurable"]["thread_id"] in (thread_1, thread_2)
                else (f"foo_{self.i}", "bar"),
                doc_id,
                {
                    **doc,
                    "from_thread": config["configurable"]["thread_id"],
                    "some_val": inputs["count"],
                },
            )
            return {"count": 1}
    builder = StateGraph(State)
    builder.add_node("node", Node())
    builder.add_edge("__start__", "node")
    N = 500
    M = 1
    if "duckdb" in store_name:
        logger.warning(
            "DuckDB store implementation has a known issue that does not"
            " support concurrent writes, so we're reducing the test scope"
        )
        N = M = 1
    # N extra nodes all fan out concurrently from __start__
    for i in range(N):
        builder.add_node(f"node_{i}", Node(i))
        builder.add_edge("__start__", f"node_{i}")
    async with awith_checkpointer(checkpointer_name) as checkpointer, awith_store(
        store_name
    ) as the_store:
        graph = builder.compile(store=the_store, checkpointer=checkpointer)
        # Test batch operations with multiple threads
        results = await graph.abatch(
            [{"count": 0}] * M,
            ([{"configurable": {"thread_id": str(uuid.uuid4())}}] * (M - 1))
            + [{"configurable": {"thread_id": thread_1}}],
        )
        result = results[-1]
        # all N + 1 nodes contributed count=1 via the operator.add reducer
        assert result == {"count": N + 1}
        returned_doc = (await the_store.aget(namespace, doc_id)).value
        assert returned_doc == {**doc, "from_thread": thread_1, "some_val": 0}
        assert len((await the_store.asearch(namespace))) == 1
        # Check results after another turn of the same thread
        result = await graph.ainvoke(
            {"count": 0}, {"configurable": {"thread_id": thread_1}}
        )
        assert result == {"count": (N + 1) * 2}
        returned_doc = (await the_store.aget(namespace, doc_id)).value
        assert returned_doc == {**doc, "from_thread": thread_1, "some_val": N + 1}
        assert len((await the_store.asearch(namespace))) == 1
        # Test with a different thread
        result = await graph.ainvoke(
            {"count": 0}, {"configurable": {"thread_id": thread_2}}
        )
        assert result == {"count": N + 1}
        returned_doc = (await the_store.aget(namespace, doc_id)).value
        assert returned_doc == {
            **doc,
            "from_thread": thread_2,
            "some_val": 0,
        } # Overwrites the whole doc
        assert (
            len((await the_store.asearch(namespace))) == 1
        ) # still overwriting the same one
async def test_debug_retry():
    """Replaying from a prior checkpoint with ``stream_mode="debug"`` must
    emit checkpoint events that match what the checkpointer recorded.

    Runs a two-node linear graph once, rewinds to the parent of the step-1
    checkpoint, re-runs from there, and compares every streamed checkpoint
    event against ``aget_state_history``.
    """

    class State(TypedDict):
        messages: Annotated[list[str], operator.add]

    def node(name):
        # Each node appends a single marker message carrying its own name.
        async def _node(state: State):
            return {"messages": [f"entered {name} node"]}

        return _node

    builder = StateGraph(State)
    builder.add_node("one", node("one"))
    builder.add_node("two", node("two"))
    builder.add_edge(START, "one")
    builder.add_edge("one", "two")
    builder.add_edge("two", END)
    saver = MemorySaver()
    graph = builder.compile(checkpointer=saver)
    config = {"configurable": {"thread_id": "1"}}
    await graph.ainvoke({"messages": []}, config=config)
    # re-run step: 1
    # Initialize so a missing step-1 checkpoint fails the assert below with a
    # clear AssertionError rather than an UnboundLocalError.
    target_config = None
    async for c in saver.alist(config):
        if c.metadata["step"] == 1:
            target_config = c.parent_config
            break
    assert target_config is not None
    update_config = await graph.aupdate_state(target_config, values=None)
    events = [
        c async for c in graph.astream(None, config=update_config, stream_mode="debug")
    ]
    # collect only the checkpoint events from the debug stream (reversed so
    # their order lines up with aget_state_history's ordering)
    checkpoint_events = list(
        reversed([e["payload"] for e in events if e["type"] == "checkpoint"])
    )
    checkpoint_history = {
        c.config["configurable"]["checkpoint_id"]: c
        async for c in graph.aget_state_history(config)
    }

    def lax_normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Compare only the "configurable" payload; other config keys may differ.
        if config is None:
            return None
        return config["configurable"]

    for stream in checkpoint_events:
        stream_conf = lax_normalize_config(stream["config"])
        stream_parent_conf = lax_normalize_config(stream["parent_config"])
        assert stream_conf != stream_parent_conf
        # ensure the streamed checkpoint == checkpoint from checkpointer.list()
        history = checkpoint_history[stream["config"]["configurable"]["checkpoint_id"]]
        history_conf = lax_normalize_config(history.config)
        assert stream_conf == history_conf
        history_parent_conf = lax_normalize_config(history.parent_config)
        assert stream_parent_conf == history_parent_conf
async def test_debug_subgraphs():
    """Debug-stream checkpoints of a parent graph containing one compiled
    subgraph node must line up one-to-one with ``aget_state_history``."""

    class State(TypedDict):
        messages: Annotated[list[str], operator.add]

    def make_node(name):
        # Each node appends a single marker message carrying its own name.
        async def _node(state: State):
            return {"messages": [f"entered {name} node"]}

        return _node

    child = StateGraph(State)
    for child_name in ("c_one", "c_two"):
        child.add_node(child_name, make_node(child_name))
    child.add_edge(START, "c_one")
    child.add_edge("c_one", "c_two")
    child.add_edge("c_two", END)

    parent = StateGraph(State)
    parent.add_node("p_one", make_node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge(START, "p_one")
    parent.add_edge("p_one", "p_two")
    parent.add_edge("p_two", END)

    graph = parent.compile(checkpointer=MemorySaver())
    config = {"configurable": {"thread_id": "1"}}
    events = [
        c
        async for c in graph.astream(
            {"messages": []},
            config=config,
            stream_mode="debug",
        )
    ]
    # Reverse the streamed checkpoint payloads so their order matches the
    # ordering of aget_state_history for the pairwise comparison below.
    checkpoint_events = [e["payload"] for e in events if e["type"] == "checkpoint"]
    checkpoint_events.reverse()
    checkpoint_history = [c async for c in graph.aget_state_history(config)]
    assert len(checkpoint_events) == len(checkpoint_history)

    def normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Compare only the "configurable" payload of each config.
        return None if config is None else config["configurable"]

    for stream, history in zip(checkpoint_events, checkpoint_history):
        assert stream["values"] == history.values
        assert stream["next"] == list(history.next)
        assert normalize_config(stream["config"]) == normalize_config(history.config)
        assert normalize_config(stream["parent_config"]) == normalize_config(
            history.parent_config
        )
        assert len(stream["tasks"]) == len(history.tasks)
        for stream_task, history_task in zip(stream["tasks"], history.tasks):
            assert stream_task["id"] == history_task.id
            assert stream_task["name"] == history_task.name
            assert stream_task["interrupts"] == history_task.interrupts
            assert stream_task.get("error") == history_task.error
            assert stream_task.get("state") == history_task.state
async def test_debug_nested_subgraphs():
    """Stream a doubly-nested subgraph run with ``stream_mode="debug"`` and
    ``subgraphs=True``, then verify the streamed checkpoints of every
    namespace against the checkpointer's saved state history."""
    from collections import defaultdict
    class State(TypedDict):
        messages: Annotated[list[str], operator.add]
    def node(name):
        # Factory: returns an async node that appends a marker message.
        async def _node(state: State):
            return {"messages": [f"entered {name} node"]}
        return _node
    # Three-level graph: grand_parent -> parent (gp_two) -> child (p_two).
    grand_parent = StateGraph(State)
    parent = StateGraph(State)
    child = StateGraph(State)
    child.add_node("c_one", node("c_one"))
    child.add_node("c_two", node("c_two"))
    child.add_edge(START, "c_one")
    child.add_edge("c_one", "c_two")
    child.add_edge("c_two", END)
    parent.add_node("p_one", node("p_one"))
    parent.add_node("p_two", child.compile())
    parent.add_edge(START, "p_one")
    parent.add_edge("p_one", "p_two")
    parent.add_edge("p_two", END)
    grand_parent.add_node("gp_one", node("gp_one"))
    grand_parent.add_node("gp_two", parent.compile())
    grand_parent.add_edge(START, "gp_one")
    grand_parent.add_edge("gp_one", "gp_two")
    grand_parent.add_edge("gp_two", END)
    graph = grand_parent.compile(checkpointer=MemorySaver())
    config = {"configurable": {"thread_id": "1"}}
    events = [
        c
        async for c in graph.astream(
            {"messages": []},
            config=config,
            stream_mode="debug",
            subgraphs=True,
        )
    ]
    # Group streamed checkpoint payloads by their namespace tuple.
    stream_ns: dict[tuple, dict] = defaultdict(list)
    for ns, e in events:
        if e["type"] == "checkpoint":
            stream_ns[ns].append(e["payload"])
    # One namespace per nesting level: root, parent subgraph, child subgraph.
    assert list(stream_ns.keys()) == [
        (),
        (AnyStr("gp_two:"),),
        (AnyStr("gp_two:"), AnyStr("p_two:")),
    ]
    history_ns = {}
    for ns in stream_ns.keys():
        # `ns` is captured by closure but the coroutine is awaited before the
        # next iteration, so each call sees the intended namespace.
        async def get_history():
            history = [
                c
                async for c in graph.aget_state_history(
                    {"configurable": {"thread_id": "1", "checkpoint_ns": "|".join(ns)}}
                )
            ]
            return history[::-1]
        history_ns[ns] = await get_history()
    def normalize_config(config: Optional[dict]) -> Optional[dict]:
        # Keep only the identifying configurable keys so streamed and stored
        # configs can be compared regardless of other configurable entries.
        if config is None:
            return None
        clean_config = {}
        clean_config["thread_id"] = config["configurable"]["thread_id"]
        clean_config["checkpoint_id"] = config["configurable"]["checkpoint_id"]
        clean_config["checkpoint_ns"] = config["configurable"]["checkpoint_ns"]
        if "checkpoint_map" in config["configurable"]:
            clean_config["checkpoint_map"] = config["configurable"]["checkpoint_map"]
        return clean_config
    # NOTE(review): relies on stream_ns and history_ns sharing insertion
    # order (both were populated over the same namespaces, in order).
    for checkpoint_events, checkpoint_history in zip(
        stream_ns.values(), history_ns.values()
    ):
        for stream, history in zip(checkpoint_events, checkpoint_history):
            assert stream["values"] == history.values
            assert stream["next"] == list(history.next)
            assert normalize_config(stream["config"]) == normalize_config(
                history.config
            )
            assert normalize_config(stream["parent_config"]) == normalize_config(
                history.parent_config
            )
            assert len(stream["tasks"]) == len(history.tasks)
            for stream_task, history_task in zip(stream["tasks"], history.tasks):
                assert stream_task["id"] == history_task.id
                assert stream_task["name"] == history_task.name
                assert stream_task["interrupts"] == history_task.interrupts
                assert stream_task.get("error") == history_task.error
                assert stream_task.get("state") == history_task.state
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_parent_command(checkpointer_name: str) -> None:
    """A tool returning ``Command(graph=Command.PARENT)`` writes to parent
    graph state (``user_name``) that is absent from the child's schema."""
    from langchain_core.messages import BaseMessage
    from langchain_core.tools import tool
    @tool(return_direct=True)
    def get_user_name() -> Command:
        """Retrieve user name"""
        return Command(update={"user_name": "Meow"}, graph=Command.PARENT)
    subgraph_builder = StateGraph(MessagesState)
    subgraph_builder.add_node("tool", get_user_name)
    subgraph_builder.add_edge(START, "tool")
    subgraph = subgraph_builder.compile()
    class CustomParentState(TypedDict):
        messages: Annotated[list[BaseMessage], add_messages]
        # this key is not available to the child graph
        user_name: str
    builder = StateGraph(CustomParentState)
    builder.add_node("alice", subgraph)
    builder.add_edge(START, "alice")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        config = {"configurable": {"thread_id": "1"}}
        # The parent-targeted update shows up in the final output.
        assert await graph.ainvoke(
            {"messages": [("user", "get user name")]}, config
        ) == {
            "messages": [
                _AnyIdHumanMessage(
                    content="get user name", additional_kwargs={}, response_metadata={}
                ),
            ],
            "user_name": "Meow",
        }
        # The checkpointed state attributes the write to node "alice".
        assert await graph.aget_state(config) == StateSnapshot(
            values={
                "messages": [
                    _AnyIdHumanMessage(
                        content="get user name",
                        additional_kwargs={},
                        response_metadata={},
                    ),
                ],
                "user_name": "Meow",
            },
            next=(),
            config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            metadata={
                "source": "loop",
                "writes": {
                    "alice": {
                        "user_name": "Meow",
                    }
                },
                "thread_id": "1",
                "step": 1,
                "parents": {},
            },
            created_at=AnyStr(),
            parent_config={
                "configurable": {
                    "thread_id": "1",
                    "checkpoint_ns": "",
                    "checkpoint_id": AnyStr(),
                }
            },
            tasks=(),
        )
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_interrupt_subgraph(checkpointer_name: str):
    """A subgraph node that calls ``interrupt`` can be resumed with a Command."""

    class State(TypedDict):
        baz: str

    def foo(state):
        return {"baz": "foo"}

    def bar(state):
        value = interrupt("Please provide baz value:")
        return {"baz": value}

    subgraph_builder = StateGraph(State)
    subgraph_builder.add_node(bar)
    subgraph_builder.add_edge(START, "bar")

    graph_builder = StateGraph(State)
    graph_builder.add_node(foo)
    graph_builder.add_node("bar", subgraph_builder.compile())
    graph_builder.add_edge(START, "foo")
    graph_builder.add_edge("foo", "bar")

    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = graph_builder.compile(checkpointer=checkpointer)
        thread = {"configurable": {"thread_id": "1"}}
        # First invocation stops at the interrupt inside the subgraph.
        assert await graph.ainvoke({"baz": ""}, thread)
        # Resuming with an answer completes the run.
        assert await graph.ainvoke(Command(resume="bar"), thread)
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_interrupt_multiple(checkpointer_name: str):
    """Two sequential ``interrupt`` calls in one node require two resumes;
    the final update joins both resume values."""
    class State(TypedDict):
        my_key: Annotated[str, operator.add]
    async def node(s: State) -> State:
        answer = interrupt({"value": 1})
        answer2 = interrupt({"value": 2})
        return {"my_key": answer + " " + answer2}
    builder = StateGraph(State)
    builder.add_node("node", node)
    builder.add_edge(START, "node")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread1 = {"configurable": {"thread_id": "1"}}
        # First run stops at the first interrupt.
        # NOTE(review): "market" is not declared on State — presumably
        # dropped by the schema; confirm intended.
        assert [
            e async for e in graph.astream({"my_key": "DE", "market": "DE"}, thread1)
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value={"value": 1},
                        resumable=True,
                        ns=[AnyStr("node:")],
                        when="during",
                    ),
                )
            }
        ]
        # First resume replays the node up to the second interrupt.
        assert [
            event
            async for event in graph.astream(
                Command(resume="answer 1", update={"my_key": "foofoo"}),
                thread1,
                stream_mode="updates",
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value={"value": 2},
                        resumable=True,
                        ns=[AnyStr("node:")],
                        when="during",
                    ),
                )
            }
        ]
        # Second resume completes the node with both answers joined.
        assert [
            event
            async for event in graph.astream(
                Command(resume="answer 2"), thread1, stream_mode="updates"
            )
        ] == [
            {"node": {"my_key": "answer 1 answer 2"}},
        ]
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
@pytest.mark.parametrize("checkpointer_name", ALL_CHECKPOINTERS_ASYNC)
async def test_interrupt_loop(checkpointer_name: str):
    """An ``interrupt`` inside a loop re-prompts until a valid answer
    (a digit string with value >= 18) is resumed."""
    class State(TypedDict):
        age: int
        other: str
    async def ask_age(s: State):
        """Ask an expert for help."""
        question = "How old are you?"
        value = None
        for _ in range(10):
            value: str = interrupt(question)
            if not value.isdigit() or int(value) < 18:
                # Invalid answer: re-interrupt with an error prompt.
                question = "invalid response"
                value = None
            else:
                break
        # NOTE(review): if all 10 attempts were invalid, value is None here
        # and int(value) raises TypeError — path not reached in this test.
        return {"age": int(value)}
    builder = StateGraph(State)
    builder.add_node("node", ask_age)
    builder.add_edge(START, "node")
    async with awith_checkpointer(checkpointer_name) as checkpointer:
        graph = builder.compile(checkpointer=checkpointer)
        thread1 = {"configurable": {"thread_id": "1"}}
        # Initial run interrupts with the first question.
        assert [e async for e in graph.astream({"other": ""}, thread1)] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="How old are you?",
                        resumable=True,
                        ns=[AnyStr("node:")],
                        when="during",
                    ),
                )
            }
        ]
        # Under 18 -> re-interrupt with "invalid response".
        assert [
            event
            async for event in graph.astream(
                Command(resume="13"),
                thread1,
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="invalid response",
                        resumable=True,
                        ns=[AnyStr("node:")],
                        when="during",
                    ),
                )
            }
        ]
        # Still under 18 -> re-interrupt again.
        assert [
            event
            async for event in graph.astream(
                Command(resume="15"),
                thread1,
            )
        ] == [
            {
                "__interrupt__": (
                    Interrupt(
                        value="invalid response",
                        resumable=True,
                        ns=[AnyStr("node:")],
                        when="during",
                    ),
                )
            }
        ]
        # Valid age -> node completes and returns the parsed value.
        assert [
            event async for event in graph.astream(Command(resume="19"), thread1)
        ] == [
            {"node": {"age": 19}},
        ]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_io.py | from typing import Iterator
from langgraph.pregel.io import single
def test_single() -> None:
    """``single`` returns the first item and closes the source iterator."""
    closed = False

    def source() -> Iterator[int]:
        nonlocal closed
        try:
            yield 1
            yield 2
        finally:
            closed = True

    assert single(source()) == 1
    assert closed
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/fake_chat.py | import re
from typing import Any, AsyncIterator, Iterator, List, Optional, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
class FakeChatModel(GenericFakeChatModel):
    """Fake chat model that replays a pre-scripted list of messages.

    ``_generate`` returns the next entry of ``messages`` (cycling back to
    the start when exhausted); the streaming methods split that message's
    content into whitespace-preserving tokens.
    """

    # Scripted responses, replayed in order across calls.
    messages: list[BaseMessage]
    # Index of the next scripted message to return.
    i: int = 0

    def bind_tools(self, functions: list):
        """Tool binding is a no-op for this fake; returns self unchanged."""
        return self

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Return the next scripted message wrapped in a ChatResult."""
        if self.i >= len(self.messages):
            # Cycle so the model can serve more calls than it has scripts.
            self.i = 0
        message = self.messages[self.i]
        self.i += 1
        if isinstance(message, str):
            message_ = AIMessage(content=message)
        else:
            # Copy so downstream mutation cannot corrupt the script
            # (pydantic v2 exposes model_copy; v1 only has copy).
            if hasattr(message, "model_copy"):
                message_ = message.model_copy()
            else:
                message_ = message.copy()
        generation = ChatGeneration(message=message_)
        return ChatResult(generations=[generation])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the output of the model."""
        chat_result = self._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
        if not isinstance(chat_result, ChatResult):
            raise ValueError(
                f"Expected generate to return a ChatResult, "
                f"but got {type(chat_result)} instead."
            )
        message = chat_result.generations[0].message
        if not isinstance(message, AIMessage):
            raise ValueError(
                f"Expected invoke to return an AIMessage, "
                f"but got {type(message)} instead."
            )
        content = message.content
        if content:
            # Use a regular expression to split on whitespace with a capture
            # group so that the whitespace is preserved in the output.
            assert isinstance(content, str)
            content_chunks = cast(list[str], re.split(r"(\s)", content))
            for token in content_chunks:
                chunk = ChatGenerationChunk(
                    message=AIMessageChunk(content=token, id=message.id)
                )
                if run_manager:
                    run_manager.on_llm_new_token(token, chunk=chunk)
                yield chunk
        else:
            # No text content (e.g. tool-calls only): emit a single chunk
            # carrying every message field. Copy __dict__ before popping
            # "type" so the message object itself is not mutated.
            args = dict(message.__dict__)
            args.pop("type", None)
            chunk = ChatGenerationChunk(message=AIMessageChunk(**args))
            if run_manager:
                run_manager.on_llm_new_token("", chunk=chunk)
            yield chunk

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Stream the output of the model (async)."""
        chat_result = self._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
        if not isinstance(chat_result, ChatResult):
            raise ValueError(
                f"Expected generate to return a ChatResult, "
                f"but got {type(chat_result)} instead."
            )
        message = chat_result.generations[0].message
        if not isinstance(message, AIMessage):
            raise ValueError(
                f"Expected invoke to return an AIMessage, "
                f"but got {type(message)} instead."
            )
        content = message.content
        if content:
            # Use a regular expression to split on whitespace with a capture
            # group so that the whitespace is preserved in the output.
            assert isinstance(content, str)
            content_chunks = cast(list[str], re.split(r"(\s)", content))
            for token in content_chunks:
                chunk = ChatGenerationChunk(
                    message=AIMessageChunk(content=token, id=message.id)
                )
                if run_manager:
                    # BUG FIX: the async callback is a coroutine and must be
                    # awaited (it was previously invoked without await, so
                    # the callback never actually ran).
                    await run_manager.on_llm_new_token(token, chunk=chunk)
                yield chunk
        else:
            # See the sync branch above for why __dict__ is copied.
            args = dict(message.__dict__)
            args.pop("type", None)
            chunk = ChatGenerationChunk(message=AIMessageChunk(**args))
            if run_manager:
                await run_manager.on_llm_new_token("", chunk=chunk)
            yield chunk
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/any_str.py | import re
from typing import Any, Sequence, Union
from typing_extensions import Self
class FloatBetween(float):
def __new__(cls, min_value: float, max_value: float) -> Self:
return super().__new__(cls, min_value)
def __init__(self, min_value: float, max_value: float) -> None:
super().__init__()
self.min_value = min_value
self.max_value = max_value
def __eq__(self, other: object) -> bool:
return (
isinstance(other, float)
and other >= self.min_value
and other <= self.max_value
)
def __hash__(self) -> int:
return hash((float(self), self.min_value, self.max_value))
class AnyStr(str):
    """A str that compares equal to any string with the given prefix, or —
    when ``prefix`` is a compiled pattern — any string the pattern matches."""

    def __init__(self, prefix: Union[str, re.Pattern] = "") -> None:
        super().__init__()
        self.prefix = prefix

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, str):
            return False
        if isinstance(self.prefix, str):
            return other.startswith(self.prefix)
        # Pattern case: truthy match object means "equal".
        return self.prefix.match(other)

    def __hash__(self) -> int:
        return hash((str(self), self.prefix))
class AnyDict(dict):
    """A dict for test assertions: equal to any dict of the same length
    whose keys match this dict's keys (the keys here may themselves be
    matchers such as ``AnyStr``) with equal corresponding values."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, dict) or len(self) != len(other):
            return False
        # Sentinel distinguishes "no matching key" from falsy keys such as
        # "" or 0 — the previous truthiness (walrus) check skipped both
        # falsy keys and entirely missing keys, letting unequal dicts
        # compare equal.
        missing = object()
        for k, v in self.items():
            kk = next((kk for kk in other if kk == k), missing)
            if kk is missing:
                # No key in `other` matches `k`: not equal.
                return False
            if v != other[kk]:
                return False
        return True
class AnyVersion:
    """Compares equal to any str, int, or float value."""

    def __init__(self) -> None:
        super().__init__()

    def __eq__(self, other: object) -> bool:
        return isinstance(other, (str, int, float))

    def __hash__(self) -> int:
        # NOTE(review): str(self) is the default repr (includes id), so the
        # hash differs per instance — confirm this is intended.
        return hash(str(self))
class UnsortedSequence:
    """Equal to any sequence of the same length containing all its items,
    regardless of order."""

    def __init__(self, *values: Any) -> None:
        self.seq = values

    def __eq__(self, value: object) -> bool:
        if not isinstance(value, Sequence):
            return False
        if len(self.seq) != len(value):
            return False
        return all(item in value for item in self.seq)

    def __hash__(self) -> int:
        return hash(frozenset(self.seq))

    def __repr__(self) -> str:
        return repr(self.seq)
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/messages.py | """Redefined messages as a work-around for pydantic issue with AnyStr.
The code below creates version of pydantic models
that will work in unit tests with AnyStr as id field
Please note that the `id` field is assigned AFTER the model is created
to workaround an issue with pydantic ignoring the __eq__ method on
subclassed strings.
"""
from typing import Any
from langchain_core.documents import Document
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage, ToolMessage
from tests.any_str import AnyStr
def _AnyIdDocument(**kwargs: Any) -> Document:
    """Build a Document whose ``id`` compares equal to any string."""
    doc = Document(**kwargs)
    doc.id = AnyStr()
    return doc
def _AnyIdAIMessage(**kwargs: Any) -> AIMessage:
    """Build an AIMessage whose ``id`` compares equal to any string."""
    msg = AIMessage(**kwargs)
    msg.id = AnyStr()
    return msg
def _AnyIdAIMessageChunk(**kwargs: Any) -> AIMessageChunk:
    """Build an AIMessageChunk whose ``id`` compares equal to any string."""
    msg = AIMessageChunk(**kwargs)
    msg.id = AnyStr()
    return msg
def _AnyIdHumanMessage(**kwargs: Any) -> HumanMessage:
    """Build a HumanMessage whose ``id`` compares equal to any string."""
    msg = HumanMessage(**kwargs)
    msg.id = AnyStr()
    return msg
def _AnyIdToolMessage(**kwargs: Any) -> ToolMessage:
    """Build a ToolMessage whose ``id`` compares equal to any string."""
    msg = ToolMessage(**kwargs)
    msg.id = AnyStr()
    return msg
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/conftest.py | import sys
from contextlib import asynccontextmanager
from typing import AsyncIterator, Optional
from uuid import UUID, uuid4
import pytest
from langchain_core import __version__ as core_version
from packaging import version
from psycopg import AsyncConnection, Connection
from psycopg_pool import AsyncConnectionPool, ConnectionPool
from pytest_mock import MockerFixture
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.duckdb import DuckDBSaver
from langgraph.checkpoint.duckdb.aio import AsyncDuckDBSaver
from langgraph.checkpoint.postgres import PostgresSaver
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
from langgraph.store.base import BaseStore
from langgraph.store.duckdb import AsyncDuckDBStore, DuckDBStore
from langgraph.store.memory import InMemoryStore
from langgraph.store.postgres import AsyncPostgresStore, PostgresStore
pytest.register_assert_rewrite("tests.memory_assert")
DEFAULT_POSTGRES_URI = "postgres://postgres:postgres@localhost:5442/"
# TODO: fix this once core is released
IS_LANGCHAIN_CORE_030_OR_GREATER = version.parse(core_version) >= version.parse(
"0.3.0.dev0"
)
SHOULD_CHECK_SNAPSHOTS = IS_LANGCHAIN_CORE_030_OR_GREATER
@pytest.fixture
def anyio_backend():
    """Run anyio-marked tests on the asyncio backend only."""
    return "asyncio"
@pytest.fixture()
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
    """Patch uuid.uuid4 to yield a deterministic, increasing UUID sequence."""
    generator = (
        UUID(f"00000000-0000-4000-8000-{n:012}", version=4) for n in range(10000)
    )
    return mocker.patch("uuid.uuid4", side_effect=generator)
# checkpointer fixtures
@pytest.fixture(scope="function")
def checkpointer_memory():
    # In-memory checkpointer that asserts checkpoints are not mutated.
    from tests.memory_assert import MemorySaverAssertImmutable
    yield MemorySaverAssertImmutable()
@pytest.fixture(scope="function")
def checkpointer_sqlite():
    # SQLite-backed checkpointer on an in-memory database.
    with SqliteSaver.from_conn_string(":memory:") as checkpointer:
        yield checkpointer
@asynccontextmanager
async def _checkpointer_sqlite_aio():
    # Async SQLite-backed checkpointer on an in-memory database.
    async with AsyncSqliteSaver.from_conn_string(":memory:") as checkpointer:
        yield checkpointer
@pytest.fixture(scope="function")
def checkpointer_duckdb():
    # DuckDB-backed checkpointer on an in-memory database; tables are
    # created via setup() before the test runs.
    with DuckDBSaver.from_conn_string(":memory:") as checkpointer:
        checkpointer.setup()
        yield checkpointer
@asynccontextmanager
async def _checkpointer_duckdb_aio():
    # Async DuckDB-backed checkpointer on an in-memory database.
    async with AsyncDuckDBSaver.from_conn_string(":memory:") as checkpointer:
        await checkpointer.setup()
        yield checkpointer
@pytest.fixture(scope="function")
def checkpointer_postgres():
    # Postgres-backed checkpointer in a per-test, uniquely named database
    # (identifier interpolation is safe: the name is generated uuid hex).
    database = f"test_{uuid4().hex[:16]}"
    # create unique db
    with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
        conn.execute(f"CREATE DATABASE {database}")
    try:
        # yield checkpointer
        with PostgresSaver.from_conn_string(
            DEFAULT_POSTGRES_URI + database
        ) as checkpointer:
            checkpointer.setup()
            yield checkpointer
    finally:
        # drop unique db
        with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
            conn.execute(f"DROP DATABASE {database}")
@pytest.fixture(scope="function")
def checkpointer_postgres_pipe():
database = f"test_{uuid4().hex[:16]}"
# create unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"CREATE DATABASE {database}")
try:
# yield checkpointer
with PostgresSaver.from_conn_string(
DEFAULT_POSTGRES_URI + database
) as checkpointer:
checkpointer.setup()
# setup can't run inside pipeline because of implicit transaction
with checkpointer.conn.pipeline() as pipe:
checkpointer.pipe = pipe
yield checkpointer
finally:
# drop unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"DROP DATABASE {database}")
@pytest.fixture(scope="function")
def checkpointer_postgres_pool():
database = f"test_{uuid4().hex[:16]}"
# create unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"CREATE DATABASE {database}")
try:
# yield checkpointer
with ConnectionPool(
DEFAULT_POSTGRES_URI + database, max_size=10, kwargs={"autocommit": True}
) as pool:
checkpointer = PostgresSaver(pool)
checkpointer.setup()
yield checkpointer
finally:
# drop unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _checkpointer_postgres_aio():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
# create unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
# yield checkpointer
async with AsyncPostgresSaver.from_conn_string(
DEFAULT_POSTGRES_URI + database
) as checkpointer:
await checkpointer.setup()
yield checkpointer
finally:
# drop unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _checkpointer_postgres_aio_pipe():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
# create unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
# yield checkpointer
async with AsyncPostgresSaver.from_conn_string(
DEFAULT_POSTGRES_URI + database
) as checkpointer:
await checkpointer.setup()
# setup can't run inside pipeline because of implicit transaction
async with checkpointer.conn.pipeline() as pipe:
checkpointer.pipe = pipe
yield checkpointer
finally:
# drop unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _checkpointer_postgres_aio_pool():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
# create unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
# yield checkpointer
async with AsyncConnectionPool(
DEFAULT_POSTGRES_URI + database, max_size=10, kwargs={"autocommit": True}
) as pool:
checkpointer = AsyncPostgresSaver(pool)
await checkpointer.setup()
yield checkpointer
finally:
# drop unique db
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def awith_checkpointer(
    checkpointer_name: Optional[str],
) -> AsyncIterator[BaseCheckpointSaver]:
    """Yield the checkpointer selected by ``checkpointer_name``.

    ``None`` yields ``None`` (no checkpointing); unrecognized names raise
    ``NotImplementedError``. The DB-backed variants create and tear down
    their own database/connection via the module-level helpers.
    """
    if checkpointer_name is None:
        yield None
    elif checkpointer_name == "memory":
        from tests.memory_assert import MemorySaverAssertImmutable
        yield MemorySaverAssertImmutable()
    elif checkpointer_name == "sqlite_aio":
        async with _checkpointer_sqlite_aio() as checkpointer:
            yield checkpointer
    elif checkpointer_name == "duckdb_aio":
        async with _checkpointer_duckdb_aio() as checkpointer:
            yield checkpointer
    elif checkpointer_name == "postgres_aio":
        async with _checkpointer_postgres_aio() as checkpointer:
            yield checkpointer
    elif checkpointer_name == "postgres_aio_pipe":
        async with _checkpointer_postgres_aio_pipe() as checkpointer:
            yield checkpointer
    elif checkpointer_name == "postgres_aio_pool":
        async with _checkpointer_postgres_aio_pool() as checkpointer:
            yield checkpointer
    else:
        raise NotImplementedError(f"Unknown checkpointer: {checkpointer_name}")
@asynccontextmanager
async def _store_postgres_aio():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
async with AsyncPostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database
) as store:
await store.setup()
yield store
finally:
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _store_postgres_aio_pipe():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
async with AsyncPostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database
) as store:
await store.setup() # Run in its own transaction
async with AsyncPostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database, pipeline=True
) as store:
yield store
finally:
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _store_postgres_aio_pool():
if sys.version_info < (3, 10):
pytest.skip("Async Postgres tests require Python 3.10+")
database = f"test_{uuid4().hex[:16]}"
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"CREATE DATABASE {database}")
try:
async with AsyncPostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database,
pool_config={"max_size": 10},
) as store:
await store.setup()
yield store
finally:
async with await AsyncConnection.connect(
DEFAULT_POSTGRES_URI, autocommit=True
) as conn:
await conn.execute(f"DROP DATABASE {database}")
@asynccontextmanager
async def _store_duckdb_aio():
async with AsyncDuckDBStore.from_conn_string(":memory:") as store:
await store.setup()
yield store
@pytest.fixture(scope="function")
def store_postgres():
database = f"test_{uuid4().hex[:16]}"
# create unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"CREATE DATABASE {database}")
try:
# yield store
with PostgresStore.from_conn_string(DEFAULT_POSTGRES_URI + database) as store:
store.setup()
yield store
finally:
# drop unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"DROP DATABASE {database}")
@pytest.fixture(scope="function")
def store_postgres_pipe():
database = f"test_{uuid4().hex[:16]}"
# create unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"CREATE DATABASE {database}")
try:
# yield store
with PostgresStore.from_conn_string(DEFAULT_POSTGRES_URI + database) as store:
store.setup() # Run in its own transaction
with PostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database, pipeline=True
) as store:
yield store
finally:
# drop unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"DROP DATABASE {database}")
@pytest.fixture(scope="function")
def store_postgres_pool():
database = f"test_{uuid4().hex[:16]}"
# create unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"CREATE DATABASE {database}")
try:
# yield store
with PostgresStore.from_conn_string(
DEFAULT_POSTGRES_URI + database, pool_config={"max_size": 10}
) as store:
store.setup()
yield store
finally:
# drop unique db
with Connection.connect(DEFAULT_POSTGRES_URI, autocommit=True) as conn:
conn.execute(f"DROP DATABASE {database}")
@pytest.fixture(scope="function")
def store_duckdb():
with DuckDBStore.from_conn_string(":memory:") as store:
store.setup()
yield store
@pytest.fixture(scope="function")
def store_in_memory():
yield InMemoryStore()
@asynccontextmanager
async def awith_store(store_name: Optional[str]) -> AsyncIterator[BaseStore]:
    """Yield the store selected by ``store_name``.

    ``None`` yields ``None``; unrecognized names raise
    ``NotImplementedError``. Postgres variants create and drop their own
    database via the module-level helpers.
    """
    if store_name is None:
        yield None
    elif store_name == "in_memory":
        yield InMemoryStore()
    elif store_name == "postgres_aio":
        async with _store_postgres_aio() as store:
            yield store
    elif store_name == "postgres_aio_pipe":
        async with _store_postgres_aio_pipe() as store:
            yield store
    elif store_name == "postgres_aio_pool":
        async with _store_postgres_aio_pool() as store:
            yield store
    elif store_name == "duckdb_aio":
        async with _store_duckdb_aio() as store:
            yield store
    else:
        raise NotImplementedError(f"Unknown store {store_name}")
# Checkpointer fixture names for sync test parametrization.
ALL_CHECKPOINTERS_SYNC = [
    "memory",
    "sqlite",
    "postgres",
    "postgres_pipe",
    "postgres_pool",
]
# Checkpointer fixture names for async test parametrization.
ALL_CHECKPOINTERS_ASYNC = [
    "memory",
    "sqlite_aio",
    "postgres_aio",
    "postgres_aio_pipe",
    "postgres_aio_pool",
]
# Async checkpointers plus None (awith_checkpointer(None) yields None,
# i.e. no checkpointing).
ALL_CHECKPOINTERS_ASYNC_PLUS_NONE = [
    *ALL_CHECKPOINTERS_ASYNC,
    None,
]
# Store fixture names for sync test parametrization.
ALL_STORES_SYNC = [
    "in_memory",
    "postgres",
    "postgres_pipe",
    "postgres_pool",
    "duckdb",
]
# Store fixture names for async test parametrization.
ALL_STORES_ASYNC = [
    "in_memory",
    "postgres_aio",
    "postgres_aio_pipe",
    "postgres_aio_pool",
    "duckdb_aio",
]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/any_int.py | class AnyInt(int):
def __init__(self) -> None:
super().__init__()
def __eq__(self, other: object) -> bool:
return isinstance(other, int)
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_remote_graph.py | from unittest.mock import AsyncMock, MagicMock
import pytest
from langchain_core.runnables.graph import (
Edge as DrawableEdge,
)
from langchain_core.runnables.graph import (
Node as DrawableNode,
)
from langgraph_sdk.schema import StreamPart
from langgraph.errors import GraphInterrupt
from langgraph.pregel.remote import RemoteGraph
from langgraph.pregel.types import StateSnapshot
def test_with_config():
    """``with_config`` returns a new RemoteGraph with merged configs."""
    remote_pregel = RemoteGraph(
        "test_graph_id",
        config={
            "configurable": {
                "foo": "bar",
                "thread_id": "thread_id_1",
            }
        },
    )

    updated = remote_pregel.with_config({"configurable": {"hello": "world"}})

    # A new instance is returned rather than mutating in place.
    assert updated != remote_pregel
    # The new config is merged over the original one.
    assert updated.config == {
        "configurable": {
            "foo": "bar",
            "thread_id": "thread_id_1",
            "hello": "world",
        }
    }
def test_get_graph():
    """``get_graph`` converts the assistants API payload into drawable
    nodes and edges."""
    sync_client = MagicMock()
    sync_client.assistants.get_graph.return_value = {
        "nodes": [
            {"id": "__start__", "type": "schema", "data": "__start__"},
            {"id": "__end__", "type": "schema", "data": "__end__"},
            {
                "id": "agent",
                "type": "runnable",
                "data": {
                    "id": ["langgraph", "utils", "RunnableCallable"],
                    "name": "agent_1",
                },
            },
        ],
        "edges": [
            {"source": "__start__", "target": "agent"},
            {"source": "agent", "target": "__end__"},
        ],
    }
    remote = RemoteGraph("test_graph_id", sync_client=sync_client)

    drawable = remote.get_graph()

    assert drawable.nodes == {
        "__start__": DrawableNode(
            id="__start__", name="__start__", data="__start__", metadata=None
        ),
        "__end__": DrawableNode(
            id="__end__", name="__end__", data="__end__", metadata=None
        ),
        "agent": DrawableNode(
            id="agent",
            name="agent_1",
            data={"id": ["langgraph", "utils", "RunnableCallable"], "name": "agent_1"},
            metadata=None,
        ),
    }
    assert drawable.edges == [
        DrawableEdge(source="__start__", target="agent"),
        DrawableEdge(source="agent", target="__end__"),
    ]
@pytest.mark.anyio
async def test_aget_graph():
    """``aget_graph`` converts the assistants API payload into drawable
    nodes and edges (async client)."""
    async_client = AsyncMock()
    async_client.assistants.get_graph.return_value = {
        "nodes": [
            {"id": "__start__", "type": "schema", "data": "__start__"},
            {"id": "__end__", "type": "schema", "data": "__end__"},
            {
                "id": "agent",
                "type": "runnable",
                "data": {
                    "id": ["langgraph", "utils", "RunnableCallable"],
                    "name": "agent_1",
                },
            },
        ],
        "edges": [
            {"source": "__start__", "target": "agent"},
            {"source": "agent", "target": "__end__"},
        ],
    }
    remote = RemoteGraph("test_graph_id", client=async_client)

    drawable = await remote.aget_graph()

    assert drawable.nodes == {
        "__start__": DrawableNode(
            id="__start__", name="__start__", data="__start__", metadata=None
        ),
        "__end__": DrawableNode(
            id="__end__", name="__end__", data="__end__", metadata=None
        ),
        "agent": DrawableNode(
            id="agent",
            name="agent_1",
            data={"id": ["langgraph", "utils", "RunnableCallable"], "name": "agent_1"},
            metadata=None,
        ),
    }
    assert drawable.edges == [
        DrawableEdge(source="__start__", target="agent"),
        DrawableEdge(source="agent", target="__end__"),
    ]
def test_get_state():
    """``get_state`` maps the SDK thread-state payload onto a ``StateSnapshot``."""
    client = MagicMock()
    client.threads.get_state.return_value = {
        "values": {"messages": [{"type": "human", "content": "hello"}]},
        "next": None,
        "checkpoint": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        },
        "metadata": {},
        "created_at": "timestamp",
        "parent_checkpoint": None,
        "tasks": [],
    }

    graph = RemoteGraph("test_graph_id", sync_client=client)
    snapshot = graph.get_state({"configurable": {"thread_id": "thread1"}})

    expected = StateSnapshot(
        values={"messages": [{"type": "human", "content": "hello"}]},
        next=(),
        config={
            "configurable": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            }
        },
        metadata={},
        created_at="timestamp",
        parent_config=None,
        tasks=(),
    )
    assert snapshot == expected
@pytest.mark.anyio
async def test_aget_state():
    """``aget_state`` maps the payload, including a parent checkpoint, onto a snapshot."""
    client = AsyncMock()
    client.threads.get_state.return_value = {
        "values": {"messages": [{"type": "human", "content": "hello"}]},
        "next": None,
        "checkpoint": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_2",
            "checkpoint_map": {},
        },
        "metadata": {},
        "created_at": "timestamp",
        "parent_checkpoint": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        },
        "tasks": [],
    }

    graph = RemoteGraph("test_graph_id", client=client)
    snapshot = await graph.aget_state({"configurable": {"thread_id": "thread1"}})

    expected = StateSnapshot(
        values={"messages": [{"type": "human", "content": "hello"}]},
        next=(),
        config={
            "configurable": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_2",
                "checkpoint_map": {},
            }
        },
        metadata={},
        created_at="timestamp",
        # the parent checkpoint is surfaced as a parent config
        parent_config={
            "configurable": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            }
        },
        tasks=(),
    )
    assert snapshot == expected
def test_get_state_history():
    """``get_state_history`` yields one ``StateSnapshot`` per history entry."""
    client = MagicMock()
    client.threads.get_history.return_value = [
        {
            "values": {"messages": [{"type": "human", "content": "hello"}]},
            "next": None,
            "checkpoint": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            },
            "metadata": {},
            "created_at": "timestamp",
            "parent_checkpoint": None,
            "tasks": [],
        }
    ]

    graph = RemoteGraph("test_graph_id", sync_client=client)
    history = list(
        graph.get_state_history(
            {"configurable": {"thread_id": "thread1"}},
            filter=None,
            before=None,
            limit=None,
        )
    )

    expected = StateSnapshot(
        values={"messages": [{"type": "human", "content": "hello"}]},
        next=(),
        config={
            "configurable": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            }
        },
        metadata={},
        created_at="timestamp",
        parent_config=None,
        tasks=(),
    )
    assert history == [expected]
@pytest.mark.anyio
async def test_aget_state_history():
    """``aget_state_history`` asynchronously yields one snapshot per entry."""
    client = AsyncMock()
    client.threads.get_history.return_value = [
        {
            "values": {"messages": [{"type": "human", "content": "hello"}]},
            "next": None,
            "checkpoint": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            },
            "metadata": {},
            "created_at": "timestamp",
            "parent_checkpoint": None,
            "tasks": [],
        }
    ]

    graph = RemoteGraph("test_graph_id", client=client)
    history = [
        snapshot
        async for snapshot in graph.aget_state_history(
            {"configurable": {"thread_id": "thread1"}},
            filter=None,
            before=None,
            limit=None,
        )
    ]

    expected = StateSnapshot(
        values={"messages": [{"type": "human", "content": "hello"}]},
        next=(),
        config={
            "configurable": {
                "thread_id": "thread_1",
                "checkpoint_ns": "ns",
                "checkpoint_id": "checkpoint_1",
                "checkpoint_map": {},
            }
        },
        metadata={},
        created_at="timestamp",
        parent_config=None,
        tasks=(),
    )
    assert history == [expected]
def test_update_state():
    """``update_state`` returns the new checkpoint as a runnable config."""
    client = MagicMock()
    client.threads.update_state.return_value = {
        "checkpoint": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        }
    }

    graph = RemoteGraph("test_graph_id", sync_client=client)
    response = graph.update_state(
        {"configurable": {"thread_id": "thread1"}}, {"key": "value"}
    )

    # the checkpoint payload is wrapped in a "configurable" config
    assert response == {
        "configurable": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        }
    }
@pytest.mark.anyio
async def test_aupdate_state():
    """``aupdate_state`` returns the new checkpoint as a runnable config."""
    client = AsyncMock()
    client.threads.update_state.return_value = {
        "checkpoint": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        }
    }

    graph = RemoteGraph("test_graph_id", client=client)
    response = await graph.aupdate_state(
        {"configurable": {"thread_id": "thread1"}}, {"key": "value"}
    )

    # the checkpoint payload is wrapped in a "configurable" config
    assert response == {
        "configurable": {
            "thread_id": "thread_1",
            "checkpoint_ns": "ns",
            "checkpoint_id": "checkpoint_1",
            "checkpoint_map": {},
        }
    }
def test_stream():
    """``stream()`` forwards SDK stream parts and raises on ``__interrupt__``.

    Covers four output shapes: a bare chunk (single mode), ``(mode, chunk)``
    tuples (list of modes), and the same two with a namespace tuple prepended
    when ``subgraphs=True``. In every scenario the final ``__interrupt__``
    part must surface as a ``GraphInterrupt`` instead of being yielded.
    """
    # set up test
    mock_sync_client = MagicMock()
    mock_sync_client.runs.stream.return_value = [
        StreamPart(event="values", data={"chunk": "data1"}),
        StreamPart(event="values", data={"chunk": "data2"}),
        StreamPart(event="values", data={"chunk": "data3"}),
        StreamPart(event="updates", data={"chunk": "data4"}),
        StreamPart(event="updates", data={"__interrupt__": ()}),
    ]
    # call method / assertions
    remote_pregel = RemoteGraph(
        "test_graph_id",
        sync_client=mock_sync_client,
    )
    # requested stream_mode is "values", so "updates" parts are filtered out
    # (except the interrupt, which always raises)
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        for stream_part in remote_pregel.stream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode="values",
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        {"chunk": "data1"},
        {"chunk": "data2"},
        {"chunk": "data3"},
    ]
    # fresh return value: "updates"-only parts for the remaining scenarios
    mock_sync_client.runs.stream.return_value = [
        StreamPart(event="updates", data={"chunk": "data3"}),
        StreamPart(event="updates", data={"chunk": "data4"}),
        StreamPart(event="updates", data={"__interrupt__": ()}),
    ]
    # default stream_mode is updates
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        for stream_part in remote_pregel.stream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        {"chunk": "data3"},
        {"chunk": "data4"},
    ]
    # list stream_mode includes mode names in the yielded tuples
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        for stream_part in remote_pregel.stream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode=["updates"],
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ("updates", {"chunk": "data3"}),
        ("updates", {"chunk": "data4"}),
    ]
    # subgraphs + list modes: namespace tuple is prepended (empty for root)
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        for stream_part in remote_pregel.stream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode=["updates"],
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ((), "updates", {"chunk": "data3"}),
        ((), "updates", {"chunk": "data4"}),
    ]
    # subgraphs + single mode: namespace tuple but no mode name
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        for stream_part in remote_pregel.stream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ((), {"chunk": "data3"}),
        ((), {"chunk": "data4"}),
    ]
@pytest.mark.anyio
async def test_astream():
    """``astream()`` mirrors ``stream()``: filtering, mode tuples, namespaces.

    Additionally covers namespaced events (``"updates|my|subgraph"``), which
    must be split into a namespace tuple ``("my", "subgraph")``.

    NOTE: the client is a plain ``MagicMock`` (not ``AsyncMock``) because
    ``runs.stream`` must return an async-iterable object synchronously; the
    async iteration itself is provided via ``__aiter__`` on a second mock.
    """
    # set up test
    mock_async_client = MagicMock()
    async_iter = MagicMock()
    async_iter.__aiter__.return_value = [
        StreamPart(event="values", data={"chunk": "data1"}),
        StreamPart(event="values", data={"chunk": "data2"}),
        StreamPart(event="values", data={"chunk": "data3"}),
        StreamPart(event="updates", data={"chunk": "data4"}),
        StreamPart(event="updates", data={"__interrupt__": ()}),
    ]
    mock_async_client.runs.stream.return_value = async_iter
    # call method / assertions
    remote_pregel = RemoteGraph(
        "test_graph_id",
        client=mock_async_client,
    )
    # requested stream_mode is "values", so "updates" parts are filtered out
    # (except the interrupt, which always raises)
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode="values",
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        {"chunk": "data1"},
        {"chunk": "data2"},
        {"chunk": "data3"},
    ]
    # fresh async iterator: "updates"-only parts for the next four scenarios
    async_iter = MagicMock()
    async_iter.__aiter__.return_value = [
        StreamPart(event="updates", data={"chunk": "data3"}),
        StreamPart(event="updates", data={"chunk": "data4"}),
        StreamPart(event="updates", data={"__interrupt__": ()}),
    ]
    mock_async_client.runs.stream.return_value = async_iter
    # default stream_mode is updates
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        {"chunk": "data3"},
        {"chunk": "data4"},
    ]
    # list stream_mode includes mode names in the yielded tuples
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode=["updates"],
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ("updates", {"chunk": "data3"}),
        ("updates", {"chunk": "data4"}),
    ]
    # subgraphs + list modes: namespace tuple is prepended (empty for root)
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode=["updates"],
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ((), "updates", {"chunk": "data3"}),
        ((), "updates", {"chunk": "data4"}),
    ]
    # subgraphs + single mode: namespace tuple but no mode name
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        ((), {"chunk": "data3"}),
        ((), {"chunk": "data4"}),
    ]
    # fresh iterator with namespaced events: "mode|ns1|ns2" encodes a subgraph path
    async_iter = MagicMock()
    async_iter.__aiter__.return_value = [
        StreamPart(event="updates|my|subgraph", data={"chunk": "data3"}),
        StreamPart(event="updates|hello|subgraph", data={"chunk": "data4"}),
        StreamPart(event="updates|bye|subgraph", data={"__interrupt__": ()}),
    ]
    mock_async_client.runs.stream.return_value = async_iter
    # subgraphs + list modes: namespace decoded from the event name
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            stream_mode=["updates"],
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        (("my", "subgraph"), "updates", {"chunk": "data3"}),
        (("hello", "subgraph"), "updates", {"chunk": "data4"}),
    ]
    # subgraphs + single mode: namespace decoded, mode name omitted
    stream_parts = []
    with pytest.raises(GraphInterrupt):
        async for stream_part in remote_pregel.astream(
            {"input": "data"},
            config={"configurable": {"thread_id": "thread_1"}},
            subgraphs=True,
        ):
            stream_parts.append(stream_part)
    assert stream_parts == [
        (("my", "subgraph"), {"chunk": "data3"}),
        (("hello", "subgraph"), {"chunk": "data4"}),
    ]
def test_invoke():
    """``invoke`` returns the data of the final ``values`` stream part."""
    client = MagicMock()
    client.runs.stream.return_value = [
        StreamPart(event="values", data={"chunk": "data1"}),
        StreamPart(event="values", data={"chunk": "data2"}),
        StreamPart(
            event="values", data={"messages": [{"type": "human", "content": "world"}]}
        ),
    ]

    graph = RemoteGraph("test_graph_id", sync_client=client)
    result = graph.invoke(
        {"input": {"messages": [{"type": "human", "content": "hello"}]}},
        {"configurable": {"thread_id": "thread_1"}},
    )

    # only the last values payload is returned
    assert result == {"messages": [{"type": "human", "content": "world"}]}
@pytest.mark.anyio
async def test_ainvoke():
    """``ainvoke`` returns the data of the final ``values`` stream part."""
    # plain MagicMock: runs.stream must synchronously return an async iterable
    client = MagicMock()
    async_iter = MagicMock()
    async_iter.__aiter__.return_value = [
        StreamPart(event="values", data={"chunk": "data1"}),
        StreamPart(event="values", data={"chunk": "data2"}),
        StreamPart(
            event="values", data={"messages": [{"type": "human", "content": "world"}]}
        ),
    ]
    client.runs.stream.return_value = async_iter

    graph = RemoteGraph("test_graph_id", client=client)
    result = await graph.ainvoke(
        {"input": {"messages": [{"type": "human", "content": "hello"}]}},
        {"configurable": {"thread_id": "thread_1"}},
    )

    # only the last values payload is returned
    assert result == {"messages": [{"type": "human", "content": "world"}]}
@pytest.mark.skip("Unskip this test to manually test the LangGraph Cloud integration")
@pytest.mark.anyio
async def test_langgraph_cloud_integration():
    """Manual end-to-end smoke test against a live LangGraph Cloud deployment.

    Intentionally skipped in CI: it exercises real SDK clients (``get_client``
    / ``get_sync_client``), so it performs network I/O against a running
    deployment with an "agent" graph and the hard-coded thread ids below.
    NOTE(review): presumably connection details come from the SDK's default
    environment configuration — confirm before unskipping.
    """
    # local imports keep the network-capable SDK out of the module import path
    from langgraph_sdk.client import get_client, get_sync_client

    from langgraph.checkpoint.memory import MemorySaver
    from langgraph.graph import END, START, MessagesState, StateGraph

    # create RemotePregel instance
    client = get_client()
    sync_client = get_sync_client()
    remote_pregel = RemoteGraph(
        "agent",
        client=client,
        sync_client=sync_client,
    )
    # define a one-node graph that delegates to the remote graph
    workflow = StateGraph(MessagesState)
    workflow.add_node("agent", remote_pregel)
    workflow.add_edge(START, "agent")
    workflow.add_edge("agent", END)
    app = workflow.compile(checkpointer=MemorySaver())
    # test invocation
    input = {
        "messages": [
            {
                "role": "human",
                "content": "What's the weather in SF?",
            }
        ]
    }
    # test invoke
    response = app.invoke(
        input,
        config={"configurable": {"thread_id": "39a6104a-34e7-4f83-929c-d9eb163003c9"}},
        interrupt_before=["agent"],
    )
    print("response:", response["messages"][-1].content)
    # test stream
    async for chunk in app.astream(
        input,
        config={"configurable": {"thread_id": "2dc3e3e7-39ac-4597-aa57-4404b944e82a"}},
        subgraphs=True,
        stream_mode=["debug", "messages"],
    ):
        print("chunk:", chunk)
    # test stream events
    async for chunk in remote_pregel.astream_events(
        input,
        config={"configurable": {"thread_id": "2dc3e3e7-39ac-4597-aa57-4404b944e82a"}},
        version="v2",
        subgraphs=True,
        stream_mode=[],
    ):
        print("chunk:", chunk)
    # test get state
    state_snapshot = await remote_pregel.aget_state(
        config={"configurable": {"thread_id": "2dc3e3e7-39ac-4597-aa57-4404b944e82a"}},
        subgraphs=True,
    )
    print("state snapshot:", state_snapshot)
    # test update state
    response = await remote_pregel.aupdate_state(
        config={"configurable": {"thread_id": "6645e002-ed50-4022-92a3-d0d186fdf812"}},
        values={
            "messages": [
                {
                    "role": "ai",
                    "content": "Hello world again!",
                }
            ]
        },
    )
    print("response:", response)
    # test get history
    async for state in remote_pregel.aget_state_history(
        config={"configurable": {"thread_id": "2dc3e3e7-39ac-4597-aa57-4404b944e82a"}},
    ):
        print("state snapshot:", state)
    # test get graph
    remote_pregel.graph_id = "fe096781-5601-53d2-b2f6-0d3403f7e9ca"  # must be UUID
    graph = await remote_pregel.aget_graph(xray=True)
    print("graph:", graph)