index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/fake_server.py | from fastapi import FastAPI, Request
from langsmith import traceable
from langsmith.middleware import TracingMiddleware
from langsmith.run_helpers import get_current_run_tree, trace, tracing_context
fake_app = FastAPI()
fake_app.add_middleware(TracingMiddleware)
@traceable
def fake_function():
    """Assert that the distributed-tracing context propagated into this span.

    Expects to run under a parent trace whose headers carried the
    "did-propagate" tag, the `some-cool-value` metadata, and the
    "distributed-tracing" project.
    """
    span = get_current_run_tree()
    assert span is not None
    parent_run = span.parent_run
    assert parent_run is not None
    # BUGFIX: previously written as `"did-propagate" in span.tags or []`,
    # which parses as `("did-propagate" in span.tags) or []` and raises
    # TypeError when tags is None. Parenthesize like the sibling functions.
    assert "did-propagate" in (span.tags or [])
    assert span.metadata["some-cool-value"] == 42
    assert span.session_name == "distributed-tracing"
    return "Fake function response"
@traceable
def fake_function_two(foo: str):
    """Check that this traced call inherited the distributed-tracing context:
    a parent run, the propagated tag, the metadata value, and the project.
    """
    current_span = get_current_run_tree()
    assert current_span is not None
    assert current_span.parent_run is not None
    propagated_tags = current_span.tags or []
    assert "did-propagate" in propagated_tags
    assert current_span.metadata["some-cool-value"] == 42
    assert current_span.session_name == "distributed-tracing"
    return "Fake function response"
@traceable
def fake_function_three(foo: str):
    """Same propagation assertions as `fake_function_two`, used to exercise an
    explicit `tracing_context(parent=...)` in the route below.
    """
    run_tree = get_current_run_tree()
    assert run_tree is not None
    parent = run_tree.parent_run
    assert parent is not None
    assert "did-propagate" in (run_tree.tags or [])
    metadata_value = run_tree.metadata["some-cool-value"]
    assert metadata_value == 42
    assert run_tree.session_name == "distributed-tracing"
    return "Fake function response"
@fake_app.post("/fake-route")
async def fake_route(request: Request):
    # Exercises three ways a child span can pick up the caller's distributed
    # trace (TracingMiddleware, added above, reads the incoming headers):
    # 1) an explicit `trace(...)` context manager with a project override,
    with trace(
        "Trace",
        project_name="Definitely-not-your-grandpas-project",
    ):
        fake_function()
    # 2) per-call `langsmith_extra` overrides on the traced function,
    fake_function_two(
        "foo",
        langsmith_extra={
            "project_name": "Definitely-not-your-grandpas-project",
        },
    )
    # 3) `tracing_context` seeded directly from the request headers.
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump —
    # confirm block nesting against the upstream file.
    with tracing_context(
        parent=request.headers, project_name="Definitely-not-your-grandpas-project"
    ):
        fake_function_three("foo")
    return {"message": "Fake route response"}
|
0 | lc_public_repos/langsmith-sdk/python/tests/integration_tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/wrappers/test_openai.py | # mypy: disable-error-code="attr-defined, union-attr, arg-type, call-overload"
import json
import os
import time
from pathlib import Path
from typing import Any
from unittest import mock
import pytest
import langsmith
from langsmith.wrappers import wrap_openai
@pytest.mark.parametrize("stream", [False, True])
def test_chat_sync_api(stream: bool):
    """Sync chat-completions parity test.

    The wrapped client must return the same choices as the unwrapped client,
    and the tracing wrapper must POST run data through the mocked session.
    """
    import openai  # noqa

    mock_session = mock.MagicMock()
    client = langsmith.Client(session=mock_session)
    original_client = openai.Client()
    patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client})
    messages = [{"role": "user", "content": "Say 'foo'"}]
    original = original_client.chat.completions.create(
        messages=messages,  # noqa: [arg-type]
        stream=stream,
        temperature=0,
        seed=42,
        model="gpt-3.5-turbo",
    )
    patched = patched_client.chat.completions.create(
        messages=messages,  # noqa: [arg-type]
        stream=stream,
        temperature=0,
        seed=42,
        model="gpt-3.5-turbo",
    )
    if stream:
        # We currently return a generator, so
        # the types aren't the same.
        original_chunks = list(original)
        patched_chunks = list(patched)
        assert len(original_chunks) == len(patched_chunks)
        assert [o.choices == p.choices for o, p in zip(original_chunks, patched_chunks)]
    else:
        assert type(original) is type(patched)
        assert original.choices == patched.choices
    # Poll for the background tracing thread instead of a single 10 ms sleep,
    # which races the flush thread (the async completions test below already
    # waits up to 1 s this way). Keeps the test fast on the happy path.
    for _ in range(10):
        time.sleep(0.1)
        if mock_session.request.call_count >= 1:
            break
    assert mock_session.request.call_count >= 1
    for call in mock_session.request.call_args_list[1:]:
        assert call[0][0].upper() == "POST"
@pytest.mark.parametrize("stream", [False, True])
async def test_chat_async_api(stream: bool):
    """Async chat-completions parity test: the wrapped AsyncClient must return
    the same choices as the plain client and POST trace runs through the
    mocked LangSmith session."""
    import openai  # noqa

    mock_session = mock.MagicMock()
    client = langsmith.Client(session=mock_session)
    original_client = openai.AsyncClient()
    patched_client = wrap_openai(openai.AsyncClient(), tracing_extra={"client": client})
    messages = [{"role": "user", "content": "Say 'foo'"}]
    original = await original_client.chat.completions.create(
        messages=messages, stream=stream, temperature=0, seed=42, model="gpt-3.5-turbo"
    )
    patched = await patched_client.chat.completions.create(
        messages=messages, stream=stream, temperature=0, seed=42, model="gpt-3.5-turbo"
    )
    if stream:
        # We currently return a generator, so
        # the types aren't the same.
        original_chunks = []
        async for chunk in original:
            original_chunks.append(chunk)
        patched_chunks = []
        async for chunk in patched:
            patched_chunks.append(chunk)
        assert len(original_chunks) == len(patched_chunks)
        assert [o.choices == p.choices for o, p in zip(original_chunks, patched_chunks)]
    else:
        assert type(original) is type(patched)
        assert original.choices == patched.choices
    # Give the thread a chance.
    # NOTE(review): blocking sleep inside an async test — acceptable here but
    # would stall an event loop in production code.
    time.sleep(0.1)
    for call in mock_session.request.call_args_list[1:]:
        assert call[0][0].upper() == "POST"
@pytest.mark.parametrize("stream", [False, True])
def test_completions_sync_api(stream: bool):
    """Sync (legacy) completions parity test: the wrapped client must mirror
    the plain client's output while reporting runs via the mocked session."""
    import openai

    mock_session = mock.MagicMock()
    ls_client = langsmith.Client(session=mock_session)
    plain_client = openai.Client()
    wrapped_client = wrap_openai(openai.Client(), tracing_extra={"client": ls_client})
    prompt = ("Say 'Foo' then stop.",)
    request_kwargs = dict(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        max_tokens=3,
        temperature=0,
        seed=42,
        stream=stream,
    )
    original = plain_client.completions.create(**request_kwargs)
    patched = wrapped_client.completions.create(**request_kwargs)
    if stream:
        # Streaming responses come back as generators, so compare the
        # materialized chunks rather than the stream objects themselves.
        chunks_a = list(original)
        chunks_b = list(patched)
        assert len(chunks_a) == len(chunks_b)
        assert [a.choices == b.choices for a, b in zip(chunks_a, chunks_b)]
        assert original.response
        assert patched.response
    else:
        assert type(original) is type(patched)
        assert original.choices == patched.choices
    # Give the background tracing thread a chance to flush.
    time.sleep(0.1)
    for call in mock_session.request.call_args_list[1:]:
        assert call[0][0].upper() == "POST"
@pytest.mark.parametrize("stream", [False, True])
async def test_completions_async_api(stream: bool):
    """Async completions parity test; also exercises the custom run-name
    overrides (`chat_name` / `completions_name`) of `wrap_openai`."""
    import openai

    mock_session = mock.MagicMock()
    client = langsmith.Client(session=mock_session)
    original_client = openai.AsyncClient()
    patched_client = wrap_openai(
        openai.AsyncClient(),
        tracing_extra={"client": client},
        chat_name="chattychat",
        completions_name="incompletions",
    )
    prompt = ("Say 'Hi i'm ChatGPT' then stop.",)
    original = await original_client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        max_tokens=5,
        temperature=0,
        seed=42,
        stream=stream,
    )
    patched = await patched_client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        max_tokens=5,
        temperature=0,
        seed=42,
        stream=stream,
    )
    if stream:
        # We currently return a generator, so
        # the types aren't the same.
        original_chunks = []
        async for chunk in original:
            original_chunks.append(chunk)
        patched_chunks = []
        async for chunk in patched:
            patched_chunks.append(chunk)
        assert len(original_chunks) == len(patched_chunks)
        assert [o.choices == p.choices for o, p in zip(original_chunks, patched_chunks)]
        assert original.response
        assert patched.response
    else:
        assert type(original) is type(patched)
        assert original.choices == patched.choices
    # Give the thread a chance: poll up to ~1s for the background tracing
    # thread to flush at least one request before asserting.
    for _ in range(10):
        time.sleep(0.1)
        if mock_session.request.call_count >= 1:
            break
    assert mock_session.request.call_count >= 1
    for call in mock_session.request.call_args_list[1:]:
        assert call[0][0].upper() == "POST"
class Collect:
    """Callable sink that remembers the most recent run handed to it.

    Passed as an `on_end` callback so tests can inspect the finished run.
    """

    def __init__(self):
        # Last run received via __call__; None until the callback fires.
        self.run = None

    def __call__(self, run):
        self.run = run
def _collect_requests(mock_session: mock.MagicMock, filename: str):
    """Gather run payloads recorded on *mock_session*, then reset the mock.

    Polls (up to ~1s) for the background tracing thread to send the terminal
    patch carrying `end_time`, merging every request body seen so far. When
    the WRITE_TOKEN_COUNTING_TEST_DATA env var is "1", dumps the collected
    payload to ``test_data/<filename>.json`` for use as a fixture.
    """
    mock_requests = mock_session.request.call_args_list
    collected_requests = {}
    for _ in range(10):
        time.sleep(0.1)
        for call in mock_requests:
            if json_bytes := call.kwargs.get("data"):
                json_str = json_bytes.decode("utf-8")
                collected_requests.update(json.loads(json_str))
        all_events = [
            *collected_requests.get("post", []),
            *collected_requests.get("patch", []),
        ]
        # if end_time has been set, we can stop collecting as the background
        # thread has finished processing the run
        if any(event.get("end_time") for event in all_events):
            break
    mock_session.request.call_args_list.clear()
    if os.environ.get("WRITE_TOKEN_COUNTING_TEST_DATA") == "1":
        dir_path = Path(__file__).resolve().parent.parent / "test_data"
        # BUGFIX: the output path used a hard-coded literal, ignoring the
        # `filename` argument, so every test case overwrote the same fixture.
        file_path = dir_path / f"{filename}.json"
        with open(file_path, "w") as f:
            json.dump(collected_requests, f, indent=2)
# Parametrized scenarios for the token-counting tests below: streaming with
# usage reporting enabled, streaming without it, a plain non-streaming call,
# and a reasoning model whose usage includes reasoning-token details.
test_cases = [
    {
        "description": "stream",
        "params": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "howdy"}],
            "stream": True,
            "stream_options": {"include_usage": True},
        },
        "expect_usage_metadata": True,
    },
    {
        "description": "stream no usage",
        "params": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "howdy"}],
            "stream": True,
        },
        "expect_usage_metadata": False,
    },
    {
        "description": "",
        "params": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "howdy"}],
        },
        "expect_usage_metadata": True,
    },
    {
        "description": "reasoning",
        "params": {
            "model": "o1-mini",
            "messages": [
                {
                    "role": "user",
                    "content": (
                        "Write a bash script that takes a matrix represented "
                        "as a string with format '[1,2],[3,4],[5,6]' and prints the "
                        "transpose in the same format."
                    ),
                }
            ],
        },
        "expect_usage_metadata": True,
        "check_reasoning_tokens": True,
    },
]
@pytest.mark.parametrize("test_case", test_cases)
def test_wrap_openai_chat_tokens(test_case):
    """Verify the wrapped sync client records `usage_metadata` matching the
    token counts OpenAI reports (or records none when usage isn't requested).
    """
    import openai
    from openai.types.chat import ChatCompletion, ChatCompletionChunk

    oai_client = openai.Client()
    mock_session = mock.MagicMock()
    ls_client = langsmith.Client(session=mock_session)
    wrapped_oai_client = wrap_openai(oai_client, tracing_extra={"client": ls_client})
    collect = Collect()
    run_id_to_usage_metadata = {}
    with langsmith.tracing_context(enabled=True):
        params: dict[str, Any] = test_case["params"].copy()
        params["langsmith_extra"] = {"on_end": collect}
        res = wrapped_oai_client.chat.completions.create(**params)
        if params.get("stream"):
            # BUGFIX: initialize so a stream that never yields a usage chunk
            # cannot leave `oai_usage` unbound (matches the async variant).
            oai_usage = None
            for chunk in res:
                assert isinstance(chunk, ChatCompletionChunk)
                if test_case.get("expect_usage_metadata") and hasattr(chunk, "usage"):
                    oai_usage = chunk.usage
        else:
            assert isinstance(res, ChatCompletion)
            oai_usage = res.usage
        if test_case["expect_usage_metadata"]:
            usage_metadata = collect.run.outputs["usage_metadata"]
            assert usage_metadata["input_tokens"] == oai_usage.prompt_tokens
            assert usage_metadata["output_tokens"] == oai_usage.completion_tokens
            assert usage_metadata["total_tokens"] == oai_usage.total_tokens
            if test_case.get("check_reasoning_tokens"):
                assert (
                    usage_metadata["output_token_details"]["reasoning"]
                    == oai_usage.completion_tokens_details.reasoning_tokens
                )
        else:
            assert collect.run.outputs.get("usage_metadata") is None
            assert collect.run.outputs.get("usage") is None
        run_id_to_usage_metadata[collect.run.id] = collect.run.outputs.get(
            "usage_metadata"
        )
    filename = f"langsmith_py_wrap_openai_{test_case['description'].replace(' ', '_')}"
    _collect_requests(mock_session, filename)
@pytest.mark.asyncio
@pytest.mark.parametrize("test_case", test_cases)
async def test_wrap_openai_chat_async_tokens(test_case):
import openai
from openai.types.chat import ChatCompletion, ChatCompletionChunk
oai_client = openai.AsyncClient()
mock_session = mock.MagicMock()
ls_client = langsmith.Client(session=mock_session)
wrapped_oai_client = wrap_openai(oai_client, tracing_extra={"client": ls_client})
collect = Collect()
run_id_to_usage_metadata = {}
with langsmith.tracing_context(enabled=True):
params: dict[str, Any] = test_case["params"].copy()
params["langsmith_extra"] = {"on_end": collect}
res = await wrapped_oai_client.chat.completions.create(**params)
if params.get("stream"):
oai_usage = None
async for chunk in res:
assert isinstance(chunk, ChatCompletionChunk)
if test_case.get("expect_usage_metadata") and hasattr(chunk, "usage"):
oai_usage = chunk.usage
else:
assert isinstance(res, ChatCompletion)
oai_usage = res.usage
if test_case["expect_usage_metadata"]:
usage_metadata = collect.run.outputs["usage_metadata"]
assert usage_metadata["input_tokens"] == oai_usage.prompt_tokens
assert usage_metadata["output_tokens"] == oai_usage.completion_tokens
assert usage_metadata["total_tokens"] == oai_usage.total_tokens
if test_case.get("check_reasoning_tokens"):
assert (
usage_metadata["output_token_details"]["reasoning"]
== oai_usage.completion_tokens_details.reasoning_tokens
)
else:
assert collect.run.outputs.get("usage_metadata") is None
assert collect.run.outputs.get("usage") is None
run_id_to_usage_metadata[collect.run.id] = collect.run.outputs.get(
"usage_metadata"
)
filename = f"langsmith_py_wrap_openai_{test_case['description'].replace(' ', '_')}"
_collect_requests(mock_session, filename)
|
0 | lc_public_repos/langsmith-sdk/python/tests/integration_tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_data/langsmith_py_wrap_openai_stream_no_usage.json | {
"post": [
{
"id": "de56b9f0-eed2-4195-8786-c6dc0fa897e3",
"start_time": "2024-10-11T20:58:22.254895+00:00",
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"serialized": {
"name": "ChatOpenAI",
"signature": "(*, messages: 'Iterable[ChatCompletionMessageParam]', model: 'Union[str, ChatModel]', frequency_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, function_call: 'completion_create_params.FunctionCall | NotGiven' = NOT_GIVEN, functions: 'Iterable[completion_create_params.Function] | NotGiven' = NOT_GIVEN, logit_bias: 'Optional[Dict[str, int]] | NotGiven' = NOT_GIVEN, logprobs: 'Optional[bool] | NotGiven' = NOT_GIVEN, max_completion_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, max_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, n: 'Optional[int] | NotGiven' = NOT_GIVEN, parallel_tool_calls: 'bool | NotGiven' = NOT_GIVEN, presence_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, response_format: 'completion_create_params.ResponseFormat | NotGiven' = NOT_GIVEN, seed: 'Optional[int] | NotGiven' = NOT_GIVEN, service_tier: \"Optional[Literal['auto', 'default']] | NotGiven\" = NOT_GIVEN, stop: 'Union[Optional[str], List[str]] | NotGiven' = NOT_GIVEN, stream: 'Optional[Literal[False]] | Literal[True] | NotGiven' = NOT_GIVEN, stream_options: 'Optional[ChatCompletionStreamOptionsParam] | NotGiven' = NOT_GIVEN, temperature: 'Optional[float] | NotGiven' = NOT_GIVEN, tool_choice: 'ChatCompletionToolChoiceOptionParam | NotGiven' = NOT_GIVEN, tools: 'Iterable[ChatCompletionToolParam] | NotGiven' = NOT_GIVEN, top_logprobs: 'Optional[int] | NotGiven' = NOT_GIVEN, top_p: 'Optional[float] | NotGiven' = NOT_GIVEN, user: 'str | NotGiven' = NOT_GIVEN, extra_headers: 'Headers | None' = None, extra_query: 'Query | None' = None, extra_body: 'Body | None' = None, timeout: 'float | httpx.Timeout | None | NotGiven' = NOT_GIVEN) -> 'ChatCompletion | AsyncStream[ChatCompletionChunk]'",
"doc": null
},
"events": [],
"tags": [],
"attachments": {},
"dotted_order": "20241011T205822254895Zde56b9f0-eed2-4195-8786-c6dc0fa897e3",
"trace_id": "de56b9f0-eed2-4195-8786-c6dc0fa897e3",
"outputs": {},
"session_name": "default",
"name": "ChatOpenAI",
"inputs": {
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"model": "gpt-4o-mini",
"stream": true,
"extra_headers": null,
"extra_query": null,
"extra_body": null
},
"run_type": "llm"
}
],
"patch": [
{
"id": "de56b9f0-eed2-4195-8786-c6dc0fa897e3",
"name": "ChatOpenAI",
"trace_id": "de56b9f0-eed2-4195-8786-c6dc0fa897e3",
"parent_run_id": null,
"dotted_order": "20241011T205822254895Zde56b9f0-eed2-4195-8786-c6dc0fa897e3",
"tags": [],
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"end_time": "2024-10-11T20:58:23.181899+00:00",
"outputs": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"index": 0,
"finish_reason": "stop",
"message": {
"role": "assistant",
"content": "Howdy! How can I assist you today?"
}
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_e2bde53e6e",
"usage_metadata": null
},
"events": [
{
"name": "new_token",
"time": "2024-10-11T20:58:23.044675+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": "",
"role": "assistant"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.045159+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": "Howdy"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.076141+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": "!"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.076801+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " How"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.103700+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " can"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.104351+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " I"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.129299+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " assist"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.129883+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " you"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.179545+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": " today"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.180217+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {
"content": "?"
},
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:23.180931+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0Ik2ZQY05uutXjxSaS6C3nvYfy",
"choices": [
{
"delta": {},
"finish_reason": "stop",
"index": 0
}
],
"created": 1728680302,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_e2bde53e6e"
}
}
}
]
}
]
} |
0 | lc_public_repos/langsmith-sdk/python/tests/integration_tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_data/langsmith_py_wrap_openai_stream.json | {
"post": [
{
"id": "fe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"start_time": "2024-10-11T20:58:20.695375+00:00",
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"serialized": {
"name": "ChatOpenAI",
"signature": "(*, messages: 'Iterable[ChatCompletionMessageParam]', model: 'Union[str, ChatModel]', frequency_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, function_call: 'completion_create_params.FunctionCall | NotGiven' = NOT_GIVEN, functions: 'Iterable[completion_create_params.Function] | NotGiven' = NOT_GIVEN, logit_bias: 'Optional[Dict[str, int]] | NotGiven' = NOT_GIVEN, logprobs: 'Optional[bool] | NotGiven' = NOT_GIVEN, max_completion_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, max_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, n: 'Optional[int] | NotGiven' = NOT_GIVEN, parallel_tool_calls: 'bool | NotGiven' = NOT_GIVEN, presence_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, response_format: 'completion_create_params.ResponseFormat | NotGiven' = NOT_GIVEN, seed: 'Optional[int] | NotGiven' = NOT_GIVEN, service_tier: \"Optional[Literal['auto', 'default']] | NotGiven\" = NOT_GIVEN, stop: 'Union[Optional[str], List[str]] | NotGiven' = NOT_GIVEN, stream: 'Optional[Literal[False]] | Literal[True] | NotGiven' = NOT_GIVEN, stream_options: 'Optional[ChatCompletionStreamOptionsParam] | NotGiven' = NOT_GIVEN, temperature: 'Optional[float] | NotGiven' = NOT_GIVEN, tool_choice: 'ChatCompletionToolChoiceOptionParam | NotGiven' = NOT_GIVEN, tools: 'Iterable[ChatCompletionToolParam] | NotGiven' = NOT_GIVEN, top_logprobs: 'Optional[int] | NotGiven' = NOT_GIVEN, top_p: 'Optional[float] | NotGiven' = NOT_GIVEN, user: 'str | NotGiven' = NOT_GIVEN, extra_headers: 'Headers | None' = None, extra_query: 'Query | None' = None, extra_body: 'Body | None' = None, timeout: 'float | httpx.Timeout | None | NotGiven' = NOT_GIVEN) -> 'ChatCompletion | AsyncStream[ChatCompletionChunk]'",
"doc": null
},
"events": [],
"tags": [],
"attachments": {},
"dotted_order": "20241011T205820695375Zfe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"trace_id": "fe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"outputs": {},
"session_name": "default",
"name": "ChatOpenAI",
"inputs": {
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"model": "gpt-4o-mini",
"stream": true,
"stream_options": {
"include_usage": true
},
"extra_headers": null,
"extra_query": null,
"extra_body": null
},
"run_type": "llm"
}
],
"patch": [
{
"id": "fe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"name": "ChatOpenAI",
"trace_id": "fe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"parent_run_id": null,
"dotted_order": "20241011T205820695375Zfe8ffecb-72ce-4cd2-bdb7-01f34654c391",
"tags": [],
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"end_time": "2024-10-11T20:58:22.023816+00:00",
"outputs": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"index": 0,
"finish_reason": "stop",
"message": {
"role": "assistant",
"content": "Howdy! How can I assist you today?"
}
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_8552ec53e1",
"usage_metadata": {
"input_tokens": 9,
"output_tokens": 9,
"total_tokens": 18,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
},
"events": [
{
"name": "new_token",
"time": "2024-10-11T20:58:21.933794+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": "",
"role": "assistant"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:21.934186+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": "Howdy"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:21.955034+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": "!"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:21.955547+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " How"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.005714+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " can"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.007009+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " I"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.008457+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " assist"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.008855+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " you"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.010922+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": " today"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.011337+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {
"content": "?"
},
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.012554+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [
{
"delta": {},
"finish_reason": "stop",
"index": 0
}
],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1"
}
}
},
{
"name": "new_token",
"time": "2024-10-11T20:58:22.015478+00:00",
"kwargs": {
"token": {
"id": "chatcmpl-AHH0HKxF2K5Rnu1DJ51k9CPTcerd1",
"choices": [],
"created": 1728680301,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_8552ec53e1",
"usage": {
"completion_tokens": 9,
"prompt_tokens": 9,
"total_tokens": 18,
"completion_tokens_details": {
"reasoning_tokens": 0
},
"prompt_tokens_details": {
"cached_tokens": 0
}
}
}
}
}
]
}
]
} |
0 | lc_public_repos/langsmith-sdk/python/tests/integration_tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_data/langsmith_py_wrap_openai_.json | {
"post": [
{
"id": "d0d84d31-923d-4cb5-94a8-40a0a0087578",
"start_time": "2024-10-11T20:58:23.298773+00:00",
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"serialized": {
"name": "ChatOpenAI",
"signature": "(*, messages: 'Iterable[ChatCompletionMessageParam]', model: 'Union[str, ChatModel]', frequency_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, function_call: 'completion_create_params.FunctionCall | NotGiven' = NOT_GIVEN, functions: 'Iterable[completion_create_params.Function] | NotGiven' = NOT_GIVEN, logit_bias: 'Optional[Dict[str, int]] | NotGiven' = NOT_GIVEN, logprobs: 'Optional[bool] | NotGiven' = NOT_GIVEN, max_completion_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, max_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, n: 'Optional[int] | NotGiven' = NOT_GIVEN, parallel_tool_calls: 'bool | NotGiven' = NOT_GIVEN, presence_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, response_format: 'completion_create_params.ResponseFormat | NotGiven' = NOT_GIVEN, seed: 'Optional[int] | NotGiven' = NOT_GIVEN, service_tier: \"Optional[Literal['auto', 'default']] | NotGiven\" = NOT_GIVEN, stop: 'Union[Optional[str], List[str]] | NotGiven' = NOT_GIVEN, stream: 'Optional[Literal[False]] | Literal[True] | NotGiven' = NOT_GIVEN, stream_options: 'Optional[ChatCompletionStreamOptionsParam] | NotGiven' = NOT_GIVEN, temperature: 'Optional[float] | NotGiven' = NOT_GIVEN, tool_choice: 'ChatCompletionToolChoiceOptionParam | NotGiven' = NOT_GIVEN, tools: 'Iterable[ChatCompletionToolParam] | NotGiven' = NOT_GIVEN, top_logprobs: 'Optional[int] | NotGiven' = NOT_GIVEN, top_p: 'Optional[float] | NotGiven' = NOT_GIVEN, user: 'str | NotGiven' = NOT_GIVEN, extra_headers: 'Headers | None' = None, extra_query: 'Query | None' = None, extra_body: 'Body | None' = None, timeout: 'float | httpx.Timeout | None | NotGiven' = NOT_GIVEN) -> 'ChatCompletion | AsyncStream[ChatCompletionChunk]'",
"doc": null
},
"events": [],
"tags": [],
"attachments": {},
"dotted_order": "20241011T205823298773Zd0d84d31-923d-4cb5-94a8-40a0a0087578",
"trace_id": "d0d84d31-923d-4cb5-94a8-40a0a0087578",
"outputs": {},
"session_name": "default",
"name": "ChatOpenAI",
"inputs": {
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"model": "gpt-4o-mini",
"stream": false,
"extra_headers": null,
"extra_query": null,
"extra_body": null
},
"run_type": "llm"
}
],
"patch": [
{
"id": "d0d84d31-923d-4cb5-94a8-40a0a0087578",
"name": "ChatOpenAI",
"trace_id": "d0d84d31-923d-4cb5-94a8-40a0a0087578",
"parent_run_id": null,
"dotted_order": "20241011T205823298773Zd0d84d31-923d-4cb5-94a8-40a0a0087578",
"tags": [],
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"end_time": "2024-10-11T20:58:24.417106+00:00",
"outputs": {
"id": "chatcmpl-AHH0KBvLG7Wq3wfSEGQuxh0xE07Fl",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "Howdy! How can I assist you today?",
"refusal": null,
"role": "assistant",
"function_call": null,
"tool_calls": null
}
}
],
"created": 1728680304,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_e2bde53e6e",
"usage_metadata": {
"input_tokens": 9,
"output_tokens": 9,
"total_tokens": 18,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
},
"events": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/python/tests/integration_tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_data/langsmith_py_wrap_openai_reasoning.json | {
"post": [
{
"id": "a8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"start_time": "2024-10-11T20:58:24.544431+00:00",
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "o1-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"serialized": {
"name": "ChatOpenAI",
"signature": "(*, messages: 'Iterable[ChatCompletionMessageParam]', model: 'Union[str, ChatModel]', frequency_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, function_call: 'completion_create_params.FunctionCall | NotGiven' = NOT_GIVEN, functions: 'Iterable[completion_create_params.Function] | NotGiven' = NOT_GIVEN, logit_bias: 'Optional[Dict[str, int]] | NotGiven' = NOT_GIVEN, logprobs: 'Optional[bool] | NotGiven' = NOT_GIVEN, max_completion_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, max_tokens: 'Optional[int] | NotGiven' = NOT_GIVEN, n: 'Optional[int] | NotGiven' = NOT_GIVEN, parallel_tool_calls: 'bool | NotGiven' = NOT_GIVEN, presence_penalty: 'Optional[float] | NotGiven' = NOT_GIVEN, response_format: 'completion_create_params.ResponseFormat | NotGiven' = NOT_GIVEN, seed: 'Optional[int] | NotGiven' = NOT_GIVEN, service_tier: \"Optional[Literal['auto', 'default']] | NotGiven\" = NOT_GIVEN, stop: 'Union[Optional[str], List[str]] | NotGiven' = NOT_GIVEN, stream: 'Optional[Literal[False]] | Literal[True] | NotGiven' = NOT_GIVEN, stream_options: 'Optional[ChatCompletionStreamOptionsParam] | NotGiven' = NOT_GIVEN, temperature: 'Optional[float] | NotGiven' = NOT_GIVEN, tool_choice: 'ChatCompletionToolChoiceOptionParam | NotGiven' = NOT_GIVEN, tools: 'Iterable[ChatCompletionToolParam] | NotGiven' = NOT_GIVEN, top_logprobs: 'Optional[int] | NotGiven' = NOT_GIVEN, top_p: 'Optional[float] | NotGiven' = NOT_GIVEN, user: 'str | NotGiven' = NOT_GIVEN, extra_headers: 'Headers | None' = None, extra_query: 'Query | None' = None, extra_body: 'Body | None' = None, timeout: 'float | httpx.Timeout | None | NotGiven' = NOT_GIVEN) -> 'ChatCompletion | AsyncStream[ChatCompletionChunk]'",
"doc": null
},
"events": [],
"tags": [],
"attachments": {},
"dotted_order": "20241011T205824544431Za8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"trace_id": "a8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"outputs": {},
"session_name": "default",
"name": "ChatOpenAI",
"inputs": {
"messages": [
{
"role": "user",
"content": "Write a bash script that takes a matrix represented as a string with format '[1,2],[3,4],[5,6]' and prints the transpose in the same format."
}
],
"model": "o1-mini",
"stream": false,
"extra_headers": null,
"extra_query": null,
"extra_body": null
},
"run_type": "llm"
}
],
"patch": [
{
"id": "a8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"name": "ChatOpenAI",
"trace_id": "a8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"parent_run_id": null,
"dotted_order": "20241011T205824544431Za8b34ded-ccd2-4fb7-bccb-9cd625066a14",
"tags": [],
"extra": {
"metadata": {
"ls_method": "traceable",
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "o1-mini",
"revision_id": "v0.1.82-381-g03d9e1a-dirty"
},
"runtime": {
"sdk": "langsmith-py",
"sdk_version": "0.1.131",
"library": "langsmith",
"platform": "macOS-13.2-arm64-arm-64bit",
"runtime": "python",
"py_implementation": "CPython",
"runtime_version": "3.11.7",
"langchain_version": "0.2.9",
"langchain_core_version": "0.2.21"
}
},
"end_time": "2024-10-11T20:58:39.682524+00:00",
"outputs": {
"id": "chatcmpl-AHH0LWUyAupsCrDZu564ZHwRbNQeZ",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "Certainly! Below is a **Bash script** that takes a matrix represented as a string in the format `\"[1,2],[3,4],[5,6]\"` and prints its transpose in the same format. The script uses `awk` to handle the parsing and transposition logic efficiently.\n\n### **Script: `transpose_matrix.sh`**\n\n```bash\n#!/bin/bash\n\n# Check if exactly one argument is provided\nif [ \"$#\" -ne 1 ]; then\n echo \"Usage: $0 '[1,2],[3,4],[5,6]'\"\n exit 1\nfi\n\ninput=\"$1\"\n\n# Use awk to parse the input and perform the transpose\necho \"$input\" | awk '\nBEGIN {\n # Define the field separator to split the input into rows\n FS=\"\\\\],\\\\[|\\\\[|\\\\]\"\n}\n\n{\n row = 0\n # Iterate over each field (row)\n for (i = 1; i <= NF; i++) {\n if ($i != \"\") {\n row++\n # Split the row into individual elements based on comma\n split($i, elements, \",\")\n for (j = 1; j <= length(elements); j++) {\n # Store elements in a 2D array\n matrix[j, row] = elements[j]\n # Keep track of the maximum number of columns and rows\n if (j > max_col) max_col = j\n if (row > max_row) max_row = row\n }\n }\n }\n}\n\nEND {\n # Initialize an empty string to build the output\n output = \"\"\n # Iterate over each column to create transposed rows\n for (i = 1; i <= max_col; i++) {\n output = output \"[\"\n for (j = 1; j <= max_row; j++) {\n output = output matrix[i, j]\n if (j < max_row) {\n output = output \",\"\n }\n }\n output = output \"]\"\n if (i < max_col) {\n output = output \",\"\n }\n # Append the transposed row to the final output\n transposed = transposed output\n }\n # Print the final transposed matrix\n print transposed\n}\n'\n```\n\n### **How It Works**\n\n1. **Input Validation:**\n - The script first checks if exactly one argument is provided. If not, it displays usage instructions and exits.\n\n2. **Parsing with `awk`:**\n - **Field Separator (`FS`):**\n - The `FS` is set to handle the input format by splitting the string into individual rows. 
It looks for `\"],[\"`, `\"[\"`, or `\"]\"` as separators.\n \n - **Reading Rows and Columns:**\n - For each row, the script splits the elements by commas and stores them in a 2D array `matrix[j, row]`, where `j` is the column index and `row` is the row index.\n - It also keeps track of the maximum number of columns (`max_col`) and rows (`max_row`) to handle matrices of varying sizes.\n \n - **Transposing the Matrix:**\n - In the `END` block, the script iterates over each column and constructs transposed rows by collecting elements from each original row.\n - It formats the output to match the input style, enclosing each transposed row in square brackets and separating them with commas.\n\n3. **Execution:**\n - Make the script executable:\n ```bash\n chmod +x transpose_matrix.sh\n ```\n - Run the script with a matrix string as an argument:\n ```bash\n ./transpose_matrix.sh \"[1,2],[3,4],[5,6]\"\n ```\n - **Output:**\n ```\n [1,3,5],[2,4,6]\n ```\n\n### **Examples**\n\n1. **Square Matrix:**\n ```bash\n ./transpose_matrix.sh \"[1,2],[3,4]\"\n ```\n **Output:**\n ```\n [1,3],[2,4]\n ```\n\n2. **Non-Square Matrix:**\n ```bash\n ./transpose_matrix.sh \"[1,2,3],[4,5,6]\"\n ```\n **Output:**\n ```\n [1,4],[2,5],[3,6]\n ```\n\n3. **Matrix with Negative Numbers and Multiple Digits:**\n ```bash\n ./transpose_matrix.sh \"[10,-2,33],[4,5,-6]\"\n ```\n **Output:**\n ```\n [10,4],[-2,5],[33,-6]\n ```\n\n### **Notes**\n\n- **Robustness:**\n - The script assumes that the input is well-formed, with each row enclosed in square brackets and elements separated by commas.\n - It can handle matrices that are not square (i.e., different numbers of rows and columns).\n\n- **Dependencies:**\n - The script relies on `awk`, which is commonly available in Unix-like environments.\n\nFeel free to modify and enhance the script based on your specific needs!",
"refusal": null,
"role": "assistant",
"function_call": null,
"tool_calls": null
}
}
],
"created": 1728680305,
"model": "o1-mini-2024-09-12",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_692002f015",
"usage_metadata": {
"input_tokens": 43,
"output_tokens": 2497,
"total_tokens": 2540,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 1408
}
}
},
"events": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_env.py | from langsmith.env import __all__ as env_all
from langsmith.env import get_git_info
_EXPECTED = [
"get_docker_compose_command",
"get_docker_compose_version",
"get_docker_environment",
"get_docker_version",
"get_langchain_env_var_metadata",
"get_langchain_env_vars",
"get_langchain_environment",
"get_release_shas",
"get_runtime_and_metrics",
"get_runtime_environment",
"get_system_metrics",
"get_git_info",
]
def test_public_api() -> None:
    """The env module must export exactly the expected public names, in order."""
    assert _EXPECTED == env_all
def test_git_info() -> None:
    """Smoke-test git metadata collection for this checkout."""
    info = get_git_info()
    assert info is not None
    # Both fields must be populated when run inside the SDK repo.
    for field in ("commit", "remote_url"):
        assert info[field] is not None
    assert "langsmith-sdk" in info["remote_url"]
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_anonymizer.py | # mypy: disable-error-code="annotation-unchecked"
import json
import re
import uuid
from typing import List, Union, cast
from unittest.mock import MagicMock
from uuid import uuid4
from pydantic import BaseModel
from langsmith import Client, traceable, tracing_context
from langsmith.anonymizer import RuleNodeProcessor, StringNodeRule, create_anonymizer
EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
UUID_REGEX = re.compile(
r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
)
def test_replacer_function():
    """create_anonymizer with a (text, path) callable scrubs emails and UUIDs."""

    def scrub(text: str, _: List[Union[str, int]]):
        # The node-path argument is ignored; only the text is rewritten.
        without_emails = EMAIL_REGEX.sub("[email address]", text)
        return UUID_REGEX.sub("[uuid]", without_emails)

    anonymizer = create_anonymizer(scrub)
    # Dicts, lists, and bare strings are all traversed.
    assert anonymizer(
        {
            "message": "Hello, this is my email: hello@example.com",
            "metadata": str(uuid4()),
        }
    ) == {
        "message": "Hello, this is my email: [email address]",
        "metadata": "[uuid]",
    }
    assert anonymizer(["human", "hello@example.com"]) == [
        "human",
        "[email address]",
    ]
    assert anonymizer("hello@example.com") == "[email address]"
def test_replacer_lambda():
    """create_anonymizer also accepts a single-argument text replacer."""
    anonymizer = create_anonymizer(
        lambda text: EMAIL_REGEX.sub("[email address]", text)
    )
    result = anonymizer({"message": "Hello, this is my email: hello@example.com"})
    assert result == {"message": "Hello, this is my email: [email address]"}
def test_replacer_declared():
    """create_anonymizer accepts declarative StringNodeRule replacers."""
    rules = [
        StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"),
        StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"),
    ]
    anonymizer = create_anonymizer(rules)
    # Dicts, lists, and bare strings are all traversed.
    assert anonymizer(
        {
            "message": "Hello, this is my email: hello@example.com",
            "metadata": str(uuid4()),
        }
    ) == {
        "message": "Hello, this is my email: [email address]",
        "metadata": "[uuid]",
    }
    assert anonymizer(["human", "hello@example.com"]) == [
        "human",
        "[email address]",
    ]
    assert anonymizer("hello@example.com") == "[email address]"
def test_replacer_declared_in_traceable():
    """End-to-end: an anonymizer attached to the Client scrubs traced payloads.

    The traced function's return value is untouched (anonymization happens at
    ingestion time only); the POST/PATCH bodies sent through the mocked
    session must contain the masked inputs/outputs.
    """
    replacers = [
        StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"),
        StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"),
    ]
    anonymizer = create_anonymizer(replacers)
    # MagicMock session: no network; requests are recorded in mock_calls.
    mock_client = Client(
        session=MagicMock(),
        auto_batch_tracing=False,
        anonymizer=anonymizer,
        api_url="http://localhost:1984",
        api_key="123",
    )
    user_email = "my-test@langchain.ai"
    user_id = "4ae21a90-d43b-4017-bb21-4fd9add235ff"
    class MyOutput(BaseModel):
        user_email: str
        user_id: uuid.UUID
        body: str
    class MyInput(BaseModel):
        from_email: str
    @traceable(client=mock_client)
    def my_func(body: str, from_: MyInput) -> MyOutput:
        return MyOutput(user_email=user_email, user_id=user_id, body=body)
    body_ = "Hello from Pluto"
    with tracing_context(enabled=True):
        res = my_func(body_, from_=MyInput(from_email="my-from-test@langchain.ai"))
    # The caller-visible result is NOT anonymized.
    expected = MyOutput(user_email=user_email, user_id=uuid.UUID(user_id), body=body_)
    assert res == expected
    # get posts
    posts = [
        json.loads(call[2]["data"])
        for call in mock_client.session.request.mock_calls
        if call.args and call.args[1].endswith("runs")
    ]
    # PATCH calls carry the run's outputs (and possibly re-sent inputs).
    patches = [
        json.loads(call[2]["data"])
        for call in mock_client.session.request.mock_calls
        if call.args
        and cast(str, call.args[0]).lower() == "patch"
        and "/runs" in call.args[1]
    ]
    expected_inputs = {"from_": {"from_email": "[email address]"}, "body": body_}
    expected_outputs = {
        "output": {
            "user_email": "[email address]",
            "user_id": "[uuid]",
            "body": body_,
        }
    }
    assert len(posts) == 1
    posted_data = posts[0]
    assert posted_data["inputs"] == expected_inputs
    assert len(patches) == 1
    patched_data = patches[0]
    # Inputs may legitimately be omitted from the PATCH; check only if present.
    if "inputs" in patched_data:
        assert patched_data["inputs"] == expected_inputs
    assert patched_data["outputs"] == expected_outputs
def test_rule_node_processor_scrub_sensitive_info():
    """RuleNodeProcessor applies each rule's replacement to matching nodes."""
    processor = RuleNodeProcessor(
        [
            StringNodeRule(
                pattern=re.compile(r"\b\d{3}-\d{2}-\d{4}\b"), replace="[ssn]"
            ),
            StringNodeRule(
                pattern=re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"),
                replace="[email]",
            ),
            StringNodeRule(
                pattern=re.compile(r"\b\d{3}[-.\s]?\d{3}[-.\s]?\d{4}\b"),
                replace="[phone]",
            ),
        ]
    )
    masked = processor.mask_nodes(
        [
            {"value": "My SSN is 123-45-6789.", "path": ["field1"]},
            {"value": "Contact me at john.doe@example.com.", "path": ["field2"]},
            {"value": "Call me on 123-456-7890.", "path": ["field3"]},
        ]
    )
    assert masked == [
        {"value": "My SSN is [ssn].", "path": ["field1"]},
        {"value": "Contact me at [email].", "path": ["field2"]},
        {"value": "Call me on [phone].", "path": ["field3"]},
    ]
def test_rule_node_processor_default_replace():
    """A rule without an explicit ``replace`` falls back to "[redacted]"."""
    processor = RuleNodeProcessor([StringNodeRule(pattern=re.compile(r"sensitive"))])
    nodes = [{"value": "This contains sensitive data", "path": ["field1"]}]
    assert processor.mask_nodes(nodes) == [
        {"value": "This contains [redacted] data", "path": ["field1"]}
    ]
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_run_trees.py | import json
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from langsmith import run_trees
from langsmith.client import Client
from langsmith.run_trees import RunTree
def test_run_tree_accepts_tpe() -> None:
    """RunTree construction must tolerate the legacy ``executor`` kwarg."""
    run_trees.RunTree(
        name="My Chat Bot",
        inputs={"text": "Summarize this morning's meetings."},
        client=MagicMock(spec=Client),
        executor=ThreadPoolExecutor(),  # type: ignore
    )
def test_lazy_rt() -> None:
    """The backing Client is created lazily and can be injected two ways."""
    tree = RunTree(name="foo")
    # No client is materialized until the property is first accessed.
    assert tree.ls_client is None
    assert tree._client is None
    assert isinstance(tree.client, Client)
    injected = Client(api_key="foo")
    tree._client = injected
    assert tree._client == injected
    # Both constructor aliases wire up the same client instance.
    assert RunTree(name="foo", client=injected).client == injected
    assert RunTree(name="foo", ls_client=injected).client == injected
def test_json_serializable():
    """Serialization must never leak the (non-serializable) client fields.

    The same four-line check was repeated verbatim for every construction
    path; it is factored into a local helper covering both ``dict()`` and
    ``json()`` output.
    """

    def _assert_client_excluded(tree: RunTree) -> None:
        # Neither dict() nor json() output may expose client handles.
        d = tree.dict()
        assert not d.get("client") and not d.get("ls_client")
        d = json.loads(tree.json())
        assert not d.get("client") and not d.get("ls_client")

    run_tree = RunTree(name="foo")
    _assert_client_excluded(run_tree)
    # Accessing .client materializes a real Client; it must still be excluded.
    assert isinstance(run_tree.client, Client)
    _assert_client_excluded(run_tree)
    # Explicitly injected clients (either alias) are excluded as well.
    _assert_client_excluded(RunTree(name="foo", ls_client=Client()))
    _assert_client_excluded(RunTree(name="foo", client=Client()))
# A dotted order encodes each ancestor run as "<UTC timestamp><uuid>" segments
# joined by "."; the parser must recover every (datetime, UUID) pair in order.
@pytest.mark.parametrize(
    "inputs, expected",
    [
        (
            "20240412T202937370454Z152ce25c-064e-4742-bf36-8bb0389f8805.20240412T202937627763Zfe8b541f-e75a-4ee6-b92d-732710897194.20240412T202937708023Z625b30ed-2fbb-4387-81b1-cb5d6221e5b4.20240412T202937775748Z448dc09f-ad54-4475-b3a4-fa43018ca621.20240412T202937981350Z4cd59ea4-491e-4ed9-923f-48cd93e03755.20240412T202938078862Zcd168cf7-ee72-48c2-8ec0-50ab09821973.20240412T202938152278Z32481c1a-b83c-4b53-a52e-1ea893ffba51",
            [
                (
                    datetime(2024, 4, 12, 20, 29, 37, 370454),
                    UUID("152ce25c-064e-4742-bf36-8bb0389f8805"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 37, 627763),
                    UUID("fe8b541f-e75a-4ee6-b92d-732710897194"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 37, 708023),
                    UUID("625b30ed-2fbb-4387-81b1-cb5d6221e5b4"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 37, 775748),
                    UUID("448dc09f-ad54-4475-b3a4-fa43018ca621"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 37, 981350),
                    UUID("4cd59ea4-491e-4ed9-923f-48cd93e03755"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 38, 78862),
                    UUID("cd168cf7-ee72-48c2-8ec0-50ab09821973"),
                ),
                (
                    datetime(2024, 4, 12, 20, 29, 38, 152278),
                    UUID("32481c1a-b83c-4b53-a52e-1ea893ffba51"),
                ),
            ],
        ),
    ],
)
def test_parse_dotted_order(inputs, expected):
    """_parse_dotted_order splits a dotted order into (timestamp, uuid) pairs."""
    assert run_trees._parse_dotted_order(inputs) == expected
def test_run_tree_events_not_null():
    """Passing ``events=None`` must be normalized to an empty list."""
    tree = run_trees.RunTree(
        name="My Chat Bot",
        inputs={"text": "Summarize this morning's meetings."},
        client=MagicMock(spec=Client),
        events=None,
    )
    assert tree.events == []
def test_nested_run_trees_from_dotted_order():
    """from_dotted_order reconstructs id/parent info at every nesting depth."""
    grandparent = run_trees.RunTree(
        name="Grandparent",
        inputs={"text": "Summarize this morning's meetings."},
        client=MagicMock(spec=Client),
    )
    parent = grandparent.create_child(
        name="Parent",
    )
    child = parent.create_child(
        name="Child",
    )
    # Rebuild each node purely from its dotted order and compare identity info.
    for node, clone_name in (
        (child, "Clone"),
        (parent, "Parent Clone"),
        (grandparent, "Grandparent Clone"),
    ):
        clone = run_trees.RunTree.from_dotted_order(
            dotted_order=node.dotted_order,
            name=clone_name,
            client=MagicMock(spec=Client),
        )
        assert clone.id == node.id
        assert clone.parent_run_id == node.parent_run_id
        assert clone.dotted_order == node.dotted_order
    # The root of the tree has no parent at all.
    assert grandparent.parent_run_id is None
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_operations.py | from langsmith._internal import _orjson
from langsmith._internal._operations import (
SerializedFeedbackOperation,
SerializedRunOperation,
combine_serialized_queue_operations,
)
def test_combine_serialized_queue_operations():
    """Queue compaction merges a run's "post" with its later "patch".

    NOTE(review): expected semantics inferred from the assertions below —
    patch fields overwrite the post's where set, the ``_none`` JSON blobs
    are deep-merged, unmatched ops pass through, and feedback ops are
    emitted after run ops.  Confirm against combine_serialized_queue_operations.
    """
    # Arrange
    serialized_run_operations = [
        # 1: post for id1 — will be merged with the patch right after it.
        SerializedRunOperation(
            operation="post",
            id="id1",
            trace_id="trace_id1",
            _none=_orjson.dumps({"a": 1}),
            inputs="inputs1",
            outputs="outputs1",
            events="events1",
            attachments=None,
        ),
        # 2: patch for id1 — fields here should win over the post's.
        SerializedRunOperation(
            operation="patch",
            id="id1",
            trace_id="trace_id1",
            _none=_orjson.dumps({"b": "2"}),
            inputs="inputs1-patched",
            outputs="outputs1-patched",
            events="events1",
            attachments=None,
        ),
        # 3: feedback op — passthrough, ordered after run ops.
        SerializedFeedbackOperation(
            id="id2",
            trace_id="trace_id2",
            feedback="feedback2",
        ),
        # 4: post with no matching patch — passthrough.
        SerializedRunOperation(
            operation="post",
            id="id3",
            trace_id="trace_id3",
            _none="none3",
            inputs="inputs3",
            outputs="outputs3",
            events="events3",
            attachments=None,
        ),
        # 5: patch with no matching post — passthrough.
        SerializedRunOperation(
            operation="patch",
            id="id4",
            trace_id="trace_id4",
            _none="none4",
            inputs="inputs4-patched",
            outputs="outputs4-patched",
            events="events4",
            attachments=None,
        ),
        # 6: post for id5 with outputs missing — patched below.
        SerializedRunOperation(
            operation="post",
            id="id5",
            trace_id="trace_id5",
            _none="none5",
            inputs="inputs5",
            outputs=None,
            events="events5",
            attachments=None,
        ),
        # 7: patch for id5 supplying only outputs; None fields keep the post's.
        SerializedRunOperation(
            operation="patch",
            id="id5",
            trace_id="trace_id5",
            _none=None,
            inputs=None,
            outputs="outputs5-patched",
            events=None,
            attachments=None,
        ),
    ]
    # Act
    result = combine_serialized_queue_operations(serialized_run_operations)
    # Assert
    assert result == [
        # merged 1+2
        SerializedRunOperation(
            operation="post",
            id="id1",
            trace_id="trace_id1",
            _none=_orjson.dumps({"a": 1, "b": "2"}),
            inputs="inputs1-patched",
            outputs="outputs1-patched",
            events="events1",
            attachments=None,
        ),
        # 4 passthrough
        serialized_run_operations[3],
        # merged 6+7
        SerializedRunOperation(
            operation="post",
            id="id5",
            trace_id="trace_id5",
            _none="none5",
            inputs="inputs5",
            outputs="outputs5-patched",
            events="events5",
            attachments=None,
        ),
        # 3,5 are passthrough in that order
        serialized_run_operations[2],
        serialized_run_operations[4],
    ]
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_client.py | """Test the LangSmith client."""
import asyncio
import dataclasses
import gc
import inspect
import io
import itertools
import json
import logging
import math
import pathlib
import sys
import time
import uuid
import warnings
import weakref
from datetime import datetime, timezone
from enum import Enum
from io import BytesIO
from typing import Dict, List, Literal, NamedTuple, Optional, Type, Union
from unittest import mock
from unittest.mock import MagicMock, patch
import dataclasses_json
import pytest
import requests
from multipart import MultipartParser, MultipartPart, parse_options_header
from pydantic import BaseModel
from requests import HTTPError
import langsmith.env as ls_env
import langsmith.utils as ls_utils
from langsmith import AsyncClient, EvaluationResult, aevaluate, evaluate, run_trees
from langsmith import schemas as ls_schemas
from langsmith._internal import _orjson
from langsmith._internal._serde import _serialize_json
from langsmith.client import (
Client,
_dumps_json,
_is_langchain_hosted,
_parse_token_or_url,
)
from langsmith.utils import LangSmithUserError
_CREATED_AT = datetime(2015, 1, 1, 0, 0, 0)
def test_is_localhost() -> None:
    """_is_localhost accepts loopback/any-interface hosts and rejects others."""
    assert ls_utils._is_localhost("http://localhost:1984")
    # Fix: this assertion previously duplicated the localhost line verbatim;
    # cover the loopback-IP spelling instead.
    assert ls_utils._is_localhost("http://127.0.0.1:1984")
    assert ls_utils._is_localhost("http://0.0.0.0:1984")
    assert not ls_utils._is_localhost("http://example.com:1984")
def test__is_langchain_hosted() -> None:
    """All known smith.langchain.com endpoints count as LangChain-hosted."""
    for hosted_url in (
        "https://api.smith.langchain.com",
        "https://beta.api.smith.langchain.com",
        "https://dev.api.smith.langchain.com",
    ):
        assert _is_langchain_hosted(hosted_url)
def _clear_env_cache():
    # get_env_var is lru-cached; reset it so monkeypatched env vars are re-read.
    ls_utils.get_env_var.cache_clear()
def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None:
    """api_url precedence: explicit arg > LANGSMITH_ENDPOINT > LANGCHAIN_ENDPOINT."""
    # Scenario 1: Both LANGCHAIN_ENDPOINT and LANGSMITH_ENDPOINT
    # are set, but api_url is not
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com")
    monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com")
    client = Client(auto_batch_tracing=False)
    assert client.api_url == "https://api.smith.langsmith-endpoint.com"
    # Scenario 2: Both LANGCHAIN_ENDPOINT and LANGSMITH_ENDPOINT
    # are set, and api_url is set
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com")
    monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com")
    client = Client(
        api_url="https://api.smith.langchain.com",
        api_key="123",
        auto_batch_tracing=False,
    )
    assert client.api_url == "https://api.smith.langchain.com"
    # Scenario 3: LANGCHAIN_ENDPOINT is set, but LANGSMITH_ENDPOINT is not
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com")
    monkeypatch.delenv("LANGSMITH_ENDPOINT", raising=False)
    client = Client(auto_batch_tracing=False)
    assert client.api_url == "https://api.smith.langchain-endpoint.com"
    # Scenario 4: LANGCHAIN_ENDPOINT is not set, but LANGSMITH_ENDPOINT is set
    _clear_env_cache()
    monkeypatch.delenv("LANGCHAIN_ENDPOINT", raising=False)
    monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com")
    client = Client(auto_batch_tracing=False)
    assert client.api_url == "https://api.smith.langsmith-endpoint.com"
def test_validate_api_key(monkeypatch: pytest.MonkeyPatch) -> None:
    """api_key precedence: explicit arg > LANGSMITH_API_KEY > LANGCHAIN_API_KEY."""
    # Scenario 1: Both LANGCHAIN_API_KEY and LANGSMITH_API_KEY are set,
    # but api_key is not
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_API_KEY", "env_langchain_api_key")
    monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key")
    client = Client()
    assert client.api_key == "env_langsmith_api_key"
    # Scenario 2: Both LANGCHAIN_API_KEY and LANGSMITH_API_KEY are set,
    # and api_key is set
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_API_KEY", "env_langchain_api_key")
    monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key")
    client = Client(api_url="https://api.smith.langchain.com", api_key="123")
    assert client.api_key == "123"
    # Scenario 3: LANGCHAIN_API_KEY is set, but LANGSMITH_API_KEY is not
    # Fix: clear the cached env lookup first, matching every other scenario;
    # previously a stale cached value from scenario 2 could leak in here.
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_API_KEY", "env_langchain_api_key")
    monkeypatch.delenv("LANGSMITH_API_KEY", raising=False)
    client = Client()
    assert client.api_key == "env_langchain_api_key"
    # Scenario 4: LANGCHAIN_API_KEY is not set, but LANGSMITH_API_KEY is set
    _clear_env_cache()
    monkeypatch.delenv("LANGCHAIN_API_KEY", raising=False)
    monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key")
    client = Client()
    assert client.api_key == "env_langsmith_api_key"
def test_validate_multiple_urls(monkeypatch: pytest.MonkeyPatch) -> None:
    """LANGSMITH_RUNS_ENDPOINTS conflicts with single-endpoint configuration."""
    _clear_env_cache()
    monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com")
    monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com")
    monkeypatch.setenv("LANGSMITH_RUNS_ENDPOINTS", "{}")
    # A single endpoint env var together with the runs-endpoints map is an error.
    with pytest.raises(ls_utils.LangSmithUserError):
        Client()
    monkeypatch.undo()
    # Likewise, api_url/api_key cannot be combined with an api_urls map.
    with pytest.raises(ls_utils.LangSmithUserError):
        Client(
            api_url="https://api.smith.langchain.com",
            api_key="123",
            api_urls={"https://api.smith.langchain.com": "123"},
        )
    data = {
        "https://api.smith.langsmith-endpoint_1.com": "123",
        "https://api.smith.langsmith-endpoint_2.com": "456",
        "https://api.smith.langsmith-endpoint_3.com": "789",
    }
    monkeypatch.delenv("LANGCHAIN_ENDPOINT", raising=False)
    monkeypatch.delenv("LANGSMITH_ENDPOINT", raising=False)
    monkeypatch.setenv("LANGSMITH_RUNS_ENDPOINTS", json.dumps(data))
    client = Client(auto_batch_tracing=False)
    # The first entry of the map becomes the primary endpoint and key.
    assert client._write_api_urls == data
    assert client.api_url == "https://api.smith.langsmith-endpoint_1.com"
    assert client.api_key == "123"
@mock.patch("langsmith.client.requests.Session")
def test_headers(mock_session_cls: mock.Mock) -> None:
    """The x-api-key header is attached iff an API key is configured.

    Fix: the argument injected by ``mock.patch`` is the mocked Session class,
    but it was named ``monkeypatch`` and used as if it were the pytest
    fixture — ``monkeypatch.delenv(...)`` was a silent no-op on a MagicMock.
    The parameter is renamed (matching ``test_upload_csv``) and the no-op
    dropped; ``patch.dict(..., clear=True)`` already isolates the environment.
    """
    _clear_env_cache()
    with patch.dict("os.environ", {}, clear=True):
        client = Client(api_url="http://localhost:1984", api_key="123")
        assert "x-api-key" in client._headers
        assert client._headers["x-api-key"] == "123"
        client_no_key = Client(api_url="http://localhost:1984")
        assert "x-api-key" not in client_no_key._headers
@mock.patch("langsmith.client.requests.Session")
def test_upload_csv(mock_session_cls: mock.Mock) -> None:
    """upload_csv posts the file and parses the dataset from the response."""
    _clear_env_cache()
    dataset_id = str(uuid.uuid4())
    example_1 = ls_schemas.Example(
        id=str(uuid.uuid4()),
        created_at=_CREATED_AT,
        inputs={"input": "1"},
        outputs={"output": "2"},
        dataset_id=dataset_id,
    )
    example_2 = ls_schemas.Example(
        id=str(uuid.uuid4()),
        created_at=_CREATED_AT,
        inputs={"input": "3"},
        outputs={"output": "4"},
        dataset_id=dataset_id,
    )
    mock_response = mock.Mock()
    mock_response.json.return_value = {
        "id": dataset_id,
        "name": "test.csv",
        "description": "Test dataset",
        "owner_id": "the owner",
        "created_at": _CREATED_AT,
        "examples": [example_1, example_2],
    }
    # Every request through the mocked session returns the dataset payload.
    # (Fix: removed a dead `mock_request` dispatcher that was defined here
    # but never wired to the session, so it had no effect.)
    mock_session = mock.Mock()
    mock_session.request.return_value = mock_response
    mock_session_cls.return_value = mock_session
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
    )
    client._tenant_id = uuid.uuid4()
    csv_file = ("test.csv", BytesIO(b"input,output\n1,2\n3,4\n"))
    dataset = client.upload_csv(
        csv_file,
        description="Test dataset",
        input_keys=["input"],
        output_keys=["output"],
    )
    assert dataset.id == uuid.UUID(dataset_id)
    assert dataset.name == "test.csv"
    assert dataset.description == "Test dataset"
def test_async_methods() -> None:
    """For every method defined on the Client, if there is a
    corresponding async method, then the async method args should be a
    superset of the sync method args.
    """
    skipped_async = {"arun_on_dataset"}
    sync_methods = []
    async_methods = []
    for attr_name in dir(Client):
        if attr_name.startswith("_"):
            continue
        attr = getattr(Client, attr_name)
        if not callable(attr):
            continue
        if asyncio.iscoroutinefunction(attr):
            if attr_name not in skipped_async:
                async_methods.append(attr_name)
        else:
            sync_methods.append(attr_name)
    for async_method in async_methods:
        # Async names are the sync name prefixed with "a".
        sync_method = async_method[1:]
        assert sync_method in sync_methods
        sync_args = set(inspect.signature(Client.__dict__[sync_method]).parameters)
        async_args = set(inspect.signature(Client.__dict__[async_method]).parameters)
        extra_args = sync_args - async_args
        assert not extra_args, (
            f"Extra args for {async_method} "
            f"(compared to {sync_method}): {extra_args}"
        )
def test_create_run_unicode() -> None:
    """Runs whose inputs contain non-ASCII text serialize without error."""
    inputs = {
        "foo": "これは私の友達です",
        "bar": "این یک کتاب است",
        "baz": "😊🌺🎉💻🚀🌈🍕🏄♂️🎁🐶🌟🏖️👍🚲🎈",
        "qux": "나는\u3000밥을\u3000먹었습니다.",
        "는\u3000밥": "나는\u3000밥을\u3000먹었습니다.",
    }
    # Mock session: no network traffic; we only care that serialization works.
    session = mock.Mock(request=mock.Mock())
    client = Client(api_url="http://localhost:1984", api_key="123", session=session)
    run_id = uuid.uuid4()
    client.create_run("my_run", inputs=inputs, run_type="llm", id=run_id)
    client.update_run(run_id, status="completed")
@pytest.mark.parametrize("use_multipart_endpoint", (True, False))
def test_create_run_mutate(
    use_multipart_endpoint: bool, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Mutating run inputs after create_run must not affect the posted payload.

    The client is expected to snapshot/serialize inputs at submission time,
    so a caller appending to the inputs dict afterwards has no effect on
    what reaches the server. Verified against both the multipart endpoint
    and the JSON batch endpoint.
    """
    # "mygen" is a generator: not JSON-serializable, so the client should
    # fall back to its repr() in the serialized payload (asserted below).
    inputs = {"messages": ["hi"], "mygen": (i for i in range(10))}
    session = mock.Mock()
    session.request = mock.Mock()
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        session=session,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                use_multipart_endpoint=use_multipart_endpoint,
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )
    id_ = uuid.uuid4()
    run_dict = dict(
        id=id_,
        name="my_run",
        inputs=inputs,
        run_type="llm",
        trace_id=id_,
        dotted_order=run_trees._create_current_dotted_order(
            datetime.now(timezone.utc), id_
        ),
    )
    client.create_run(**run_dict)  # type: ignore
    # Mutate the original inputs AFTER submitting; the payload must still
    # contain the pre-mutation snapshot (["hi"] only).
    inputs["messages"].append("there")  # type: ignore
    outputs = {"messages": ["hi", "there"]}
    client.update_run(
        id_,
        outputs=outputs,
        end_time=datetime.now(timezone.utc),
        trace_id=id_,
        dotted_order=run_dict["dotted_order"],
    )
    if use_multipart_endpoint:
        # Poll briefly: the background batch thread flushes asynchronously.
        for _ in range(10):
            time.sleep(0.1)  # Give the background thread time to stop
            payloads = [
                (call[2]["headers"], call[2]["data"])
                for call in session.request.mock_calls
                if call.args and call.args[1].endswith("runs/multipart")
            ]
            if payloads:
                break
        else:
            assert False, "No payloads found"
        parts: List[MultipartPart] = []
        for payload in payloads:
            headers, data = payload
            assert headers["Content-Type"].startswith("multipart/form-data")
            # this is a current implementation detail, if we change implementation
            # we update this assertion
            assert isinstance(data, bytes)
            boundary = parse_options_header(headers["Content-Type"])[1]["boundary"]
            parser = MultipartParser(io.BytesIO(data), boundary)
            parts.extend(parser.parts())
        # One part for the run body plus one each for inputs and outputs.
        assert [p.name for p in parts] == [
            f"post.{id_}",
            f"post.{id_}.inputs",
            f"post.{id_}.outputs",
        ]
        assert [p.headers.get("content-type") for p in parts] == [
            "application/json",
            "application/json",
            "application/json",
        ]
        outputs_parsed = json.loads(parts[2].value)
        assert outputs_parsed == outputs
        inputs_parsed = json.loads(parts[1].value)
        # The post-submission append must not be visible...
        assert inputs_parsed["messages"] == ["hi"]
        # ...and the generator serializes via its repr() fallback.
        assert inputs_parsed["mygen"].startswith(  # type: ignore
            "<generator object test_create_run_mutate.<locals>."
        )
        run_parsed = json.loads(parts[0].value)
        # inputs/outputs travel as separate multipart parts, not inline.
        assert "inputs" not in run_parsed
        assert "outputs" not in run_parsed
        assert run_parsed["trace_id"] == str(id_)
        assert run_parsed["dotted_order"] == run_dict["dotted_order"]
    else:
        for _ in range(10):
            time.sleep(0.1)  # Give the background thread time to stop
            payloads = [
                json.loads(call[2]["data"])
                for call in session.request.mock_calls
                if call.args and call.args[1].endswith("runs/batch")
            ]
            if payloads:
                break
        else:
            assert False, "No payloads found"
        posts = [pr for payload in payloads for pr in payload.get("post", [])]
        patches = [pr for payload in payloads for pr in payload.get("patch", [])]
        inputs = next(
            (
                pr["inputs"]
                for pr in itertools.chain(posts, patches)
                if pr.get("inputs")
            ),
            {},
        )
        outputs = next(
            (
                pr["outputs"]
                for pr in itertools.chain(posts, patches)
                if pr.get("outputs")
            ),
            {},
        )
        # Check that the mutated value wasn't posted
        assert "messages" in inputs
        assert inputs["messages"] == ["hi"]
        assert "mygen" in inputs
        assert inputs["mygen"].startswith(  # type: ignore
            "<generator object test_create_run_mutate.<locals>."
        )
        assert outputs == {"messages": ["hi", "there"]}
class CallTracker:
    """Callable that simply counts how many times it has been invoked.

    Used with ``weakref.finalize`` to detect object finalization.
    """

    def __init__(self) -> None:
        # Number of times this object has been called.
        self.counter = 0

    def __call__(self, *args: object, **kwargs: object) -> None:
        # Arguments are accepted and ignored; only the call count matters.
        self.counter = self.counter + 1
@pytest.mark.flaky(reruns=5)
@pytest.mark.parametrize("supports_batch_endpoint", [True, False])
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
def test_client_gc(auto_batch_tracing: bool, supports_batch_endpoint: bool) -> None:
    """The Client (and its background tracing machinery) must be GC-able.

    Creates runs with and without batch tracing, drops the only strong
    reference, and verifies via ``weakref.finalize`` that the client gets
    collected rather than being kept alive by background threads.
    """
    session = mock.MagicMock(spec=requests.Session)
    api_url = "http://localhost:1984"
    def mock_get(*args, **kwargs):
        # Fake the /info endpoint: 200 with an empty config when the server
        # supports batch ingest, 404 otherwise; all other GETs are no-ops.
        if args[0] == f"{api_url}/info":
            response = mock.Mock()
            if supports_batch_endpoint:
                response.json.return_value = {}
            else:
                response.raise_for_status.side_effect = HTTPError()
                response.status_code = 404
            return response
        else:
            return MagicMock()
    session.get.side_effect = mock_get
    client = Client(
        api_url=api_url,
        api_key="123",
        auto_batch_tracing=auto_batch_tracing,
        session=session,
    )
    tracker = CallTracker()
    # tracker fires exactly once, when the client object is finalized.
    weakref.finalize(client, tracker)
    assert tracker.counter == 0
    for _ in range(10):
        id = uuid.uuid4()
        # Supplying trace_id/dotted_order makes the runs batch-eligible.
        client.create_run(
            "my_run",
            inputs={},
            run_type="llm",
            id=id,
            trace_id=id,
            dotted_order=id,
        )
    if auto_batch_tracing:
        assert client.tracing_queue
        client.tracing_queue.join()
        request_calls = [
            call
            for call in session.request.mock_calls
            if call.args and call.args[0] == "POST"
        ]
        # Batching may coalesce the 10 runs into fewer POSTs.
        assert len(request_calls) >= 1
        for call in request_calls:
            assert call.args[0] == "POST"
            assert call.args[1] == "http://localhost:1984/runs/batch"
        get_calls = [
            call
            for call in session.request.mock_calls
            if call.args and call.args[0] == "GET"
        ]
        # assert len(get_calls) == 1
        for call in get_calls:
            assert call.args[1] == f"{api_url}/info"
    else:
        request_calls = [
            call
            for call in session.request.mock_calls
            if call.args and call.args[0] == "POST"
        ]
        # Without batching, each of the 10 runs is posted individually.
        assert len(request_calls) == 10
        for call in request_calls:
            assert call.args[0] == "POST"
            assert call.args[1] == "http://localhost:1984/runs"
        # NOTE(review): this nested branch is unreachable — auto_batch_tracing
        # is necessarily False inside this else-arm. Likely leftover code.
        if auto_batch_tracing:
            get_calls = [
                call
                for call in session.get.mock_calls
                if call.args and call.args[0] == "GET"
            ]
            for call in get_calls:
                assert call.args[1] == f"{api_url}/info"
    del client
    time.sleep(3)  # Give the background thread time to stop
    gc.collect()  # Force garbage collection
    assert tracker.counter == 1, "Client was not garbage collected"
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
def test_client_gc_no_batched_runs(auto_batch_tracing: bool) -> None:
    """Client should be garbage collected when runs are not batch-eligible."""
    http_session = mock.MagicMock(spec=requests.Session)
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        auto_batch_tracing=auto_batch_tracing,
        session=http_session,
    )
    finalize_tracker = CallTracker()
    weakref.finalize(client, finalize_tracker)
    assert finalize_tracker.counter == 0
    # because no trace_id/dotted_order provided, auto batch is disabled
    for _ in range(10):
        client.create_run("my_run", inputs={}, run_type="llm", id=uuid.uuid4())
    posts = [
        c
        for c in http_session.request.mock_calls
        if c.args and c.args[0] == "POST"
    ]
    # Every run is posted individually to the non-batch endpoint.
    assert len(posts) == 10
    assert all(c.args[1] == "http://localhost:1984/runs" for c in posts)
    del client
    time.sleep(2)  # Give the background thread time to stop
    gc.collect()  # Force garbage collection
    assert finalize_tracker.counter == 1, "Client was not garbage collected"
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
def test_create_run_with_filters(auto_batch_tracing: bool) -> None:
    """hide_inputs/hide_outputs callables should transform posted payloads."""
    http_session = mock.MagicMock(spec=requests.Session)

    def filter_inputs(inputs: dict) -> dict:
        # Replace inputs wholesale with a fixed marker payload.
        return {"hi there": "woah"}

    def filter_outputs(outputs: dict):
        # Tag every output value so the transformation is observable.
        return {k: v + "goodbye" for k, v in outputs.items()}

    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        auto_batch_tracing=auto_batch_tracing,
        session=http_session,
        hide_inputs=filter_inputs,
        hide_outputs=filter_outputs,
    )
    finalize_tracker = CallTracker()
    weakref.finalize(client, finalize_tracker)
    assert finalize_tracker.counter == 0
    expected_fragments = ['"hi there":"woah"']
    for _ in range(3):
        run_id = uuid.uuid4()
        client.create_run("my_run", inputs={"foo": "bar"}, run_type="llm", id=run_id)
        output_val = uuid.uuid4().hex[:5]
        client.update_run(
            run_id, end_time=datetime.now(), outputs={"theoutput": output_val}
        )
        expected_fragments.append(output_val + "goodbye")
    write_calls = [
        c
        for c in http_session.request.mock_calls
        if c.args and c.args[0] in {"POST", "PATCH"}
    ]
    posted_text = "\n".join(c.kwargs["data"].decode("utf-8") for c in write_calls)
    # Every filtered fragment must appear somewhere in the posted bodies.
    assert all(fragment in posted_text for fragment in expected_fragments)
def test_client_gc_after_autoscale() -> None:
    """Client must be collectable even after the batch sender auto-scales.

    Queues 50k runs (enough to trigger additional sender threads per the
    scale-up config), then drops the client and verifies finalization.
    """
    session = mock.MagicMock(spec=requests.Session)
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        session=session,
        auto_batch_tracing=True,
    )
    tracker = CallTracker()
    weakref.finalize(client, tracker)
    assert tracker.counter == 0
    # Keep a direct reference to the queue so it can be joined after
    # `client` itself has been deleted.
    tracing_queue = client.tracing_queue
    assert tracing_queue is not None
    for _ in range(50_000):
        id = uuid.uuid4()
        client.create_run(
            "my_run",
            inputs={},
            run_type="llm",
            id=id,
            trace_id=id,
            dotted_order=id,
        )
    del client
    tracing_queue.join()
    time.sleep(2)  # Give the background threads time to stop
    gc.collect()  # Force garbage collection
    assert tracker.counter == 1, "Client was not garbage collected"
    request_calls = [
        call
        for call in session.request.mock_calls
        if call.args and call.args[0] == "POST"
    ]
    # 50k runs coalesced into roughly 500 batch POSTs (presumably ~100 runs
    # per batch, matching the asserted bounds — confirm against batch config).
    assert len(request_calls) >= 500 and len(request_calls) <= 550
    for call in request_calls:
        assert call.args[0] == "POST"
        assert call.args[1] == "http://localhost:1984/runs/batch"
@pytest.mark.parametrize("supports_batch_endpoint", [True, False])
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
def test_create_run_includes_langchain_env_var_metadata(
    supports_batch_endpoint: bool,
    auto_batch_tracing: bool,
) -> None:
    """LANGCHAIN_* env vars should land in run metadata, except secrets.

    Verifies both batched and unbatched payload shapes, and that the API
    key is never leaked into metadata.
    """
    session = mock.Mock()
    session.request = mock.Mock()
    api_url = "http://localhost:1984"
    def mock_get(*args, **kwargs):
        # Fake the /info endpoint: empty config when batch ingest is
        # supported, a 404 otherwise; other GETs are no-ops.
        if args[0] == f"{api_url}/info":
            response = mock.Mock()
            if supports_batch_endpoint:
                response.json.return_value = {}
            else:
                response.raise_for_status.side_effect = HTTPError()
                response.status_code = 404
            return response
        else:
            return MagicMock()
    session.get.side_effect = mock_get
    client = Client(
        api_url=api_url,
        api_key="123",
        auto_batch_tracing=auto_batch_tracing,
        session=session,
    )
    inputs = {
        "foo": "これは私の友達です",
        "bar": "این یک کتاب است",
        "baz": "😊🌺🎉💻🚀🌈🍕🏄♂️🎁🐶🌟🏖️👍🚲🎈",
        "qux": "나는\u3000밥을\u3000먹었습니다.",
        "는\u3000밥": "나는\u3000밥을\u3000먹었습니다.",
    }
    # Set the environment variables just for this test
    with patch.dict("os.environ", {"LANGCHAIN_REVISION": "abcd2234"}):
        # Clear the cache to ensure the environment variables are re-read
        ls_env.get_langchain_env_var_metadata.cache_clear()
        id_ = uuid.uuid4()
        start_time = datetime.now()
        client.create_run(
            "my_run",
            inputs=inputs,
            run_type="llm",
            id=id_,
            trace_id=id_,
            dotted_order=f"{start_time.strftime('%Y%m%dT%H%M%S%fZ')}{id_}",
            start_time=start_time,
        )
        if tracing_queue := client.tracing_queue:
            tracing_queue.join()
        # Check the posted value in the request
        posted_value = json.loads(session.request.call_args[1]["data"])
        if auto_batch_tracing:
            # Batched payloads nest runs under a "post" list.
            assert (
                posted_value["post"][0]["extra"]["metadata"]["LANGCHAIN_REVISION"]
                == "abcd2234"
            )
        else:
            assert posted_value["extra"]["metadata"]["LANGCHAIN_REVISION"] == "abcd2234"
            # Secrets such as the API key must never be propagated.
            assert "LANGCHAIN_API_KEY" not in posted_value["extra"]["metadata"]
@pytest.mark.parametrize("source_type", ["api", "model"])
def test_create_feedback_string_source_type(source_type: str) -> None:
    """create_feedback should accept feedback_source_type given as a string."""
    session = mock.Mock()
    client = Client(api_url="http://localhost:1984", api_key="123", session=session)
    fake_response = mock.Mock()
    fake_response.json.return_value = {
        "id": uuid.uuid4(),
        "key": "Foo",
        "created_at": _CREATED_AT,
        "modified_at": _CREATED_AT,
        "run_id": uuid.uuid4(),
    }
    session.post.return_value = fake_response
    run_id = uuid.uuid4()
    client.create_feedback(
        run_id,
        key="Foo",
        feedback_source_type=source_type,
    )
def test_pydantic_serialize() -> None:
    """Test that pydantic objects can be serialized."""
    fixed_uuid = uuid.uuid4()
    fixed_time = datetime.now()

    class ChildPydantic(BaseModel):
        uid: uuid.UUID
        child_path_keys: Dict[pathlib.Path, pathlib.Path]

    class MyPydantic(BaseModel):
        foo: str
        uid: uuid.UUID
        tim: datetime
        ex: Optional[str] = None
        child: Optional[ChildPydantic] = None
        path_keys: Dict[pathlib.Path, pathlib.Path]

    model = MyPydantic(
        foo="bar",
        uid=fixed_uuid,
        tim=fixed_time,
        child=ChildPydantic(
            uid=fixed_uuid,
            child_path_keys={pathlib.Path("foo"): pathlib.Path("bar")},
        ),
        path_keys={pathlib.Path("foo"): pathlib.Path("bar")},
    )
    expected = {
        "foo": "bar",
        "uid": str(fixed_uuid),
        "tim": fixed_time.isoformat(),
        "child": {
            "uid": str(fixed_uuid),
            "child_path_keys": {"foo": "bar"},
        },
        "path_keys": {"foo": "bar"},
    }
    # A bare model serializes to the expected dict...
    assert json.loads(json.dumps(model, default=_serialize_json)) == expected
    # ...and so does a model nested inside a plain container.
    wrapped = {"output": model}
    assert json.loads(json.dumps(wrapped, default=_serialize_json)) == {
        "output": expected
    }
def test_serialize_json(caplog) -> None:
    """_dumps_json should serialize arbitrary objects with sensible fallbacks.

    Exercises repr() fallback, dataclasses, enums, pydantic models,
    namedtuples, to_dict()-style objects, sets, mocks, and cyclic
    structures — and checks no serialization errors are logged.
    """
    caplog.set_level(logging.ERROR)
    # Plain object with mixed attribute types; serialization should use its
    # __repr__ fallback ("I fell back").
    class MyClass:
        def __init__(self, x: int) -> None:
            self.x = x
            self.y = "y"
            self.a_list = [1, 2, 3]
            self.a_tuple = (1, 2, 3)
            self.a_set = {1, 2, 3}
            self.a_dict = {"foo": "bar"}
            self.my_bytes = b"foo"
        def __repr__(self) -> str:
            return "I fell back"
        def __hash__(self) -> int:
            return 1
    # Holds itertools.tee iterators, which are not serializable either.
    class ClassWithTee:
        def __init__(self) -> None:
            tee_a, tee_b = itertools.tee(range(10))
            self.tee_a = tee_a
            self.tee_b = tee_b
        def __repr__(self):
            return "tee_a, tee_b"
    class MyPydantic(BaseModel):
        foo: str
        bar: int
        path_keys: Dict[pathlib.Path, "MyPydantic"]
    @dataclasses.dataclass
    class MyDataclass:
        foo: str
        bar: int
        def something(self) -> None:
            pass
    class MyEnum(str, Enum):
        FOO = "foo"
        BAR = "bar"
    # dict() raising forces the serializer to use to_dict() instead.
    class ClassWithFakeDict:
        def dict(self) -> Dict:
            raise ValueError("This should not be called")
        def to_dict(self) -> Dict:
            return {"foo": "bar"}
    @dataclasses_json.dataclass_json
    @dataclasses.dataclass
    class Person:
        name: str
    uid = uuid.uuid4()
    current_time = datetime.now()
    class MyNamedTuple(NamedTuple):
        foo: str
        bar: int
    to_serialize = {
        "uid": uid,
        "time": current_time,
        "my_class": MyClass(1),
        "class_with_tee": ClassWithTee(),
        "my_dataclass": MyDataclass("foo", 1),
        "my_enum": MyEnum.FOO,
        "my_pydantic": MyPydantic(
            foo="foo",
            bar=1,
            path_keys={pathlib.Path("foo"): MyPydantic(foo="foo", bar=1, path_keys={})},
        ),
        "my_pydantic_class": MyPydantic,
        "person": Person(name="foo_person"),
        "a_bool": True,
        "a_none": None,
        "a_str": "foo",
        "an_int": 1,
        "a_float": 1.1,
        "named_tuple": MyNamedTuple(foo="foo", bar=1),
        "fake_json": ClassWithFakeDict(),
        "some_set": set("a"),
        "set_with_class": set([MyClass(1)]),
        "my_mock": MagicMock(text="Hello, world"),
    }
    res = _orjson.loads(_dumps_json(to_serialize))
    # No serialization error ("model_dump" failures) may have been logged.
    assert (
        "model_dump" not in caplog.text
    ), f"Unexpected error logs were emitted: {caplog.text}"
    # Values that are callables below are predicates over the serialized
    # string (for cases whose exact form is environment-dependent).
    expected = {
        "uid": str(uid),
        "time": current_time.isoformat(),
        "my_class": "I fell back",
        "class_with_tee": "tee_a, tee_b",
        "my_dataclass": {"foo": "foo", "bar": 1},
        "my_enum": "foo",
        "my_pydantic": {
            "foo": "foo",
            "bar": 1,
            "path_keys": {"foo": {"foo": "foo", "bar": 1, "path_keys": {}}},
        },
        "my_pydantic_class": lambda x: "MyPydantic" in x,
        "person": {"name": "foo_person"},
        "a_bool": True,
        "a_none": None,
        "a_str": "foo",
        "an_int": 1,
        "a_float": 1.1,
        "named_tuple": {"bar": 1, "foo": "foo"},
        "fake_json": {"foo": "bar"},
        "some_set": ["a"],
        "set_with_class": ["I fell back"],
        "my_mock": lambda x: "Mock" in x,
    }
    assert set(expected) == set(res)
    for k, v in expected.items():
        try:
            if callable(v):
                assert v(res[k]), f"Failed for {k}"
            else:
                assert res[k] == v, f"Failed for {k}"
        except AssertionError:
            raise
    # Cyclic references must terminate via the repr() fallback, not recurse.
    @dataclasses.dataclass
    class CyclicClass:
        other: Optional["CyclicClass"]
        def __repr__(self) -> str:
            return "my_cycles..."
    my_cyclic = CyclicClass(other=CyclicClass(other=None))
    my_cyclic.other.other = my_cyclic  # type: ignore
    res = _orjson.loads(_dumps_json({"cyclic": my_cyclic}))
    assert res == {"cyclic": "my_cycles..."}
    # NOTE(review): this trailing assignment appears unused — likely leftover.
    expected = {"foo": "foo", "bar": 1}
def test__dumps_json():
    """_dumps_json must emit valid UTF-8 for every Unicode code point.

    In particular, lone surrogates (U+D800-U+DFFF) are not encodable as
    UTF-8 and must not leak into the serialized output as raw escapes.
    """
    # Fix: the original built a translation table with str.maketrans("", "", "")
    # — an empty mapping with nothing to delete — making the subsequent
    # str.translate() a provable no-op; the dead round-trip is removed.
    # Every code point, including unpaired surrogates and astral planes.
    all_chars = "".join(chr(cp) for cp in range(0, sys.maxunicode + 1))
    serialized_json = _dumps_json({"chars": all_chars})
    assert isinstance(serialized_json, bytes)
    serialized_str = serialized_json.decode("utf-8")
    assert '"chars"' in serialized_str
    # Surrogates must have been stripped or replaced, not emitted as escapes.
    assert "\\uD800" not in serialized_str
    assert "\\uDC00" not in serialized_str
@patch("langsmith.client.requests.Session", autospec=True)
def test_host_url(_: MagicMock) -> None:
    """_host_url should map API URLs (and an optional web_url) to the web host."""
    cases = [
        ({"api_url": "https://api.foobar.com/api"}, "https://api.foobar.com"),
        (
            {
                "api_url": "https://api.langsmith.com",
                "web_url": "https://web.langsmith.com",
            },
            "https://web.langsmith.com",
        ),
        ({"api_url": "http://localhost:8000"}, "http://localhost"),
        (
            {"api_url": "https://eu.api.smith.langchain.com"},
            "https://eu.smith.langchain.com",
        ),
        (
            {"api_url": "https://dev.api.smith.langchain.com"},
            "https://dev.smith.langchain.com",
        ),
        ({"api_url": "https://api.smith.langchain.com"}, "https://smith.langchain.com"),
    ]
    for kwargs, expected_host in cases:
        client = Client(api_key="API_KEY", **kwargs)
        assert client._host_url == expected_host
@patch("langsmith.client.time.sleep")
def test_retry_on_connection_error(mock_sleep: MagicMock):
    """Connection errors are retried, then surfaced as LangSmithConnectionError."""
    session = MagicMock()
    session.request.side_effect = requests.ConnectionError()
    client = Client(api_key="test", session=session, auto_batch_tracing=False)
    with pytest.raises(ls_utils.LangSmithConnectionError):
        client.request_with_retries("GET", "https://test.url", stop_after_attempt=2)
    # Exactly stop_after_attempt requests should have been made.
    assert session.request.call_count == 2
@patch("langsmith.client.time.sleep")
def test_http_status_500_handling(mock_sleep):
    """Repeated 500 responses are retried, then raised as LangSmithAPIError."""
    session = MagicMock()
    client = Client(api_key="test", session=session, auto_batch_tracing=False)
    failing_response = MagicMock()
    failing_response.status_code = 500
    failing_response.raise_for_status.side_effect = HTTPError()
    session.request.return_value = failing_response
    with pytest.raises(ls_utils.LangSmithAPIError):
        client.request_with_retries("GET", "https://test.url", stop_after_attempt=2)
    # Exactly stop_after_attempt requests should have been made.
    assert session.request.call_count == 2
@patch("langsmith.client.time.sleep")
def test_pass_on_409_handling(mock_sleep):
    """A 409 listed in to_ignore is returned as-is with no retries."""
    session = MagicMock()
    client = Client(api_key="test", session=session, auto_batch_tracing=False)
    conflict_response = MagicMock()
    conflict_response.status_code = 409
    conflict_response.raise_for_status.side_effect = HTTPError()
    session.request.return_value = conflict_response
    response = client.request_with_retries(
        "GET",
        "https://test.url",
        stop_after_attempt=5,
        to_ignore=[ls_utils.LangSmithConflictError],
    )
    # Ignored errors short-circuit: one request, original response returned.
    assert session.request.call_count == 1
    assert response == conflict_response
@patch("langsmith.client.ls_utils.raise_for_status_with_text")
def test_http_status_429_handling(mock_raise_for_status):
    """A 429 response maps to LangSmithRateLimitError."""
    session = MagicMock()
    client = Client(api_key="test", session=session)
    rate_limited = MagicMock()
    rate_limited.status_code = 429
    session.request.return_value = rate_limited
    mock_raise_for_status.side_effect = HTTPError()
    with pytest.raises(ls_utils.LangSmithRateLimitError):
        client.request_with_retries("GET", "https://test.url")
@patch("langsmith.client.ls_utils.raise_for_status_with_text")
def test_http_status_401_handling(mock_raise_for_status):
    """A 401 response maps to LangSmithAuthError."""
    session = MagicMock()
    client = Client(api_key="test", session=session)
    unauthorized = MagicMock()
    unauthorized.status_code = 401
    session.request.return_value = unauthorized
    mock_raise_for_status.side_effect = HTTPError()
    with pytest.raises(ls_utils.LangSmithAuthError):
        client.request_with_retries("GET", "https://test.url")
@patch("langsmith.client.ls_utils.raise_for_status_with_text")
def test_http_status_404_handling(mock_raise_for_status):
    """A 404 response maps to LangSmithNotFoundError."""
    session = MagicMock()
    client = Client(api_key="test", session=session)
    not_found = MagicMock()
    not_found.status_code = 404
    session.request.return_value = not_found
    mock_raise_for_status.side_effect = HTTPError()
    with pytest.raises(ls_utils.LangSmithNotFoundError):
        client.request_with_retries("GET", "https://test.url")
@patch("langsmith.client.ls_utils.raise_for_status_with_text")
def test_batch_ingest_run_retry_on_429(mock_raise_for_status):
    """429 responses with a retry-after header should trigger ingest retries."""
    session = MagicMock()
    client = Client(api_key="test", session=session)
    rate_limited = MagicMock()
    rate_limited.headers = {"retry-after": "0.5"}
    rate_limited.status_code = 429
    session.request.return_value = rate_limited
    mock_raise_for_status.side_effect = HTTPError()
    client.batch_ingest_runs(
        create=[
            {
                "name": "test",
                "id": str(uuid.uuid4()),
                "trace_id": str(uuid.uuid4()),
                "dotted_order": str(uuid.uuid4()),
            }
        ],
    )
    # Check that there were 3 post calls (may be other get calls though)
    assert session.request.call_count >= 3
    # count the number of POST requests
    post_count = sum(
        1 for call in session.request.call_args_list if call[0][0] == "POST"
    )
    assert post_count == 3
# One mebibyte in bytes; used to size synthetic payloads in the
# batch-splitting tests below.
MB = 1024 * 1024
@pytest.mark.parametrize("payload_size", [MB, 5 * MB, 9 * MB, 21 * MB])
@pytest.mark.parametrize("use_multipart_endpoint", (True, False))
def test_batch_ingest_run_splits_large_batches(
    payload_size: int, use_multipart_endpoint: bool
):
    """Large run batches must be split so each request stays under the limit.

    The JSON batch endpoint splits by total payload size; the multipart
    endpoint streams everything in a single request. Either way, every run
    must be sent exactly once.
    """
    mock_session = MagicMock()
    client = Client(api_key="test", session=mock_session)
    mock_response = MagicMock()
    mock_response.status_code = 200
    mock_session.request.return_value = mock_response
    # Create 6 run ops total, each with an inputs dictionary that's payload_size bytess
    run_ids = [str(uuid.uuid4()) for _ in range(3)]
    patch_ids = [str(uuid.uuid4()) for _ in range(3)]
    posts = [
        {
            "name": "test",
            "id": run_id,
            "trace_id": run_id,
            "dotted_order": run_id,
            "inputs": {"x": "a" * payload_size},
            "start_time": "2021-01-01T00:00:00Z",
        }
        for run_id in run_ids
    ]
    patches = [
        {
            "id": run_id,
            "trace_id": run_id,
            "dotted_order": run_id,
            "end_time": "2021-01-01T00:00:00Z",
            "outputs": {"y": "b" * payload_size},
        }
        for run_id in patch_ids
    ]
    if use_multipart_endpoint:
        client.multipart_ingest(create=posts, update=patches)
        # multipart endpoint should only send one request
        expected_num_requests = 1
        # count the number of POST requests
        assert sum(
            [1 for call in mock_session.request.call_args_list if call[0][0] == "POST"]
        ) in (expected_num_requests, expected_num_requests + 1)
        # Re-parse every POSTed multipart body into its individual parts.
        request_bodies = [
            op
            for call in mock_session.request.call_args_list
            for op in (
                MultipartParser(
                    (
                        io.BytesIO(call[1]["data"])
                        if isinstance(call[1]["data"], bytes)
                        else call[1]["data"]
                    ),
                    parse_options_header(call[1]["headers"]["Content-Type"])[1][
                        "boundary"
                    ],
                )
                if call[0][0] == "POST"
                else []
            )
        ]
        all_run_ids = run_ids + patch_ids
        # Check that all the run_ids are present in the request bodies
        # (part names look like "post.<run_id>" / "patch.<run_id>").
        for run_id in all_run_ids:
            assert any(
                [body.name.split(".")[1] == run_id for body in request_bodies]
            ), run_id
    else:
        client.batch_ingest_runs(create=posts, update=patches)
        # we can support up to 20MB per batch, so we need to find the number of batches
        # we should be sending
        max_in_batch = max(1, (20 * MB) // (payload_size + 20))
        expected_num_requests = min(6, math.ceil((len(run_ids) * 2) / max_in_batch))
        # count the number of POST requests
        assert (
            sum(
                [
                    1
                    for call in mock_session.request.call_args_list
                    if call[0][0] == "POST"
                ]
            )
            == expected_num_requests
        )
        # Flatten every POSTed JSON body back into individual run operations.
        request_bodies = [
            op
            for call in mock_session.request.call_args_list
            for reqs in (
                _orjson.loads(call[1]["data"]).values() if call[0][0] == "POST" else []
            )
            for op in reqs
        ]
        all_run_ids = run_ids + patch_ids
        # Check that all the run_ids are present in the request bodies
        for run_id in all_run_ids:
            assert any([body["id"] == str(run_id) for body in request_bodies])
        # Check that no duplicate run_ids are present in the request bodies
        assert len(request_bodies) == len(set([body["id"] for body in request_bodies]))
@mock.patch("langsmith.client.requests.Session")
def test_select_eval_results(mock_session_cls: mock.Mock):
    """_select_eval_results should normalize single results, dicts, and batches."""
    expected = EvaluationResult(
        key="foo",
        value="bar",
        score=7899082,
        metadata={"a": "b"},
        comment="hi",
        feedback_config={"c": "d"},
    )
    client = Client(api_key="test")
    cases = [
        (1, expected),
        (1, expected.dict()),
        (1, {"results": [expected]}),
        (1, {"results": [expected.dict()]}),
        (2, {"results": [expected.dict(), expected.dict()]}),
        (2, {"results": [expected, expected]}),
    ]
    for count, raw in cases:
        selected = client._select_eval_results(raw)
        assert len(selected) == count
        assert selected == [expected] * count
    expected2 = EvaluationResult(
        key="foo",
        metadata={"a": "b"},
        comment="this is a comment",
        feedback_config={"c": "d"},
    )
    # A "reasoning" key should be folded back into the comment field.
    as_reasoning = {
        "reasoning": expected2.comment,
        **expected2.dict(exclude={"comment"}),
    }
    for raw in (as_reasoning, {"results": [as_reasoning]}, {"results": [expected2]}):
        assert client._select_eval_results(raw) == [expected2]
@pytest.mark.parametrize("client_cls", [Client, AsyncClient])
@mock.patch("langsmith.client.requests.Session")
def test_validate_api_key_if_hosted(
    monkeypatch: pytest.MonkeyPatch, client_cls: Union[Type[Client], Type[AsyncClient]]
) -> None:
    """A missing API key warns for hosted LangSmith but not for localhost."""
    for env_var in ("LANGCHAIN_API_KEY", "LANGSMITH_API_KEY"):
        monkeypatch.delenv(env_var, raising=False)
    with pytest.warns(ls_utils.LangSmithMissingAPIKeyWarning):
        client_cls(api_url="https://api.smith.langchain.com")
    with warnings.catch_warnings():
        # Check no warning is raised here.
        warnings.simplefilter("error")
        client_cls(api_url="http://localhost:1984")
def test_parse_token_or_url():
    """_parse_token_or_url should accept share URLs, raw tokens, and UUIDs."""
    api_url = "https://api.smith.langchain.com"
    token = "419dcab2-1d66-4b94-8901-0357ead390df"
    # Test with URL
    share_url = "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
    assert _parse_token_or_url(share_url, api_url) == (api_url, token)
    # Should still point to the correct public one, even when given a
    # beta API host.
    beta_api_url = "https://beta.api.smith.langchain.com"
    assert _parse_token_or_url(share_url, beta_api_url) == (api_url, token)
    # Bare token string passes through unchanged.
    assert _parse_token_or_url(token, api_url) == (api_url, token)
    # Test with UUID object
    token_uuid = uuid.UUID("419dcab2-1d66-4b94-8901-0357ead390df")
    assert _parse_token_or_url(token_uuid, api_url) == (api_url, str(token_uuid))
    # Test with custom num_parts
    url_custom = (
        "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/p/q"
    )
    assert _parse_token_or_url(url_custom, api_url, num_parts=3) == (api_url, token)
    # Test with invalid URL
    invalid_url = "https://invalid.com/419dcab2-1d66-4b94-8901-0357ead390df"
    with pytest.raises(LangSmithUserError):
        _parse_token_or_url(invalid_url, api_url)
_PROMPT_COMMITS = [
(
True,
"tools",
{
"owner": "-",
"repo": "tweet-generator-example-with-tools",
"commit_hash": "b862ce708ffeb932331a9345ea2a2fe6a76d62cf83e9aab834c24bb12bd516c9",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableSequence"],
"kwargs": {
"first": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "prompts", "chat", "ChatPromptTemplate"],
"kwargs": {
"input_variables": ["topic"],
"metadata": {
"lc_hub_owner": "-",
"lc_hub_repo": "tweet-generator-example",
"lc_hub_commit_hash": "c39837bd8d010da739d6d4adc7f2dca2f2461521661a393d37606f5c696109a5",
},
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet based on the provided topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
},
"name": "StructuredPrompt",
},
"last": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableBinding"],
"kwargs": {
"bound": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"anthropic",
"ChatAnthropic",
],
"kwargs": {
"temperature": 1,
"max_tokens": 1024,
"top_p": 1,
"top_k": -1,
"anthropic_api_key": {
"id": ["ANTHROPIC_API_KEY"],
"lc": 1,
"type": "secret",
},
"model": "claude-3-5-sonnet-20240620",
},
},
"kwargs": {
"tools": [
{
"type": "function",
"function": {
"name": "GenerateTweet",
"description": "Submit your tweet.",
"parameters": {
"properties": {
"tweet": {
"type": "string",
"description": "The generated tweet.",
}
},
"required": ["tweet"],
"type": "object",
},
},
},
{
"type": "function",
"function": {
"name": "SomethingElse",
"description": "",
"parameters": {
"properties": {
"aval": {
"type": "array",
"items": {"type": "string"},
}
},
"required": [],
"type": "object",
},
},
},
]
},
},
},
},
},
"examples": [],
},
),
(
True,
"structured",
{
"owner": "-",
"repo": "tweet-generator-example",
"commit_hash": "e8da7f9e80471ace9b96c4f8fd55a215020126521f1da8f66130604c101fc522",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableSequence"],
"kwargs": {
"first": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"prompts",
"structured",
"StructuredPrompt",
],
"kwargs": {
"input_variables": ["topic"],
"metadata": {
"lc_hub_owner": "langchain-ai",
"lc_hub_repo": "tweet-generator-example",
"lc_hub_commit_hash": "7c32ca78a2831b6b3a3904eb5704b48a0730e93f29afb0853cfaefc42dc09f9c",
},
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet about the given topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
"schema_": {
"title": "GenerateTweet",
"description": "Submit your tweet.",
"type": "object",
"properties": {
"tweet": {
"type": "string",
"description": "The generated tweet.",
}
},
"required": ["tweet"],
},
},
"name": "StructuredPrompt",
},
"last": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableBinding"],
"kwargs": {
"bound": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"anthropic",
"ChatAnthropic",
],
"kwargs": {
"temperature": 1,
"max_tokens": 1024,
"top_p": 1,
"top_k": -1,
"anthropic_api_key": {
"id": ["ANTHROPIC_API_KEY"],
"lc": 1,
"type": "secret",
},
"model": "claude-3-5-sonnet-20240620",
},
},
"kwargs": {},
},
},
},
},
"examples": [],
},
),
(
True,
"none",
{
"owner": "-",
"repo": "tweet-generator-example-with-nothing",
"commit_hash": "06c657373bdfcadec0d4d0933416b2c11f1b283ef3d1ca5dfb35dd6ed28b9f78",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableSequence"],
"kwargs": {
"first": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "prompts", "chat", "ChatPromptTemplate"],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet about the given topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
"input_variables": ["topic"],
},
},
"last": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "runnable", "RunnableBinding"],
"kwargs": {
"bound": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"openai",
"ChatOpenAI",
],
"kwargs": {
"openai_api_key": {
"id": ["OPENAI_API_KEY"],
"lc": 1,
"type": "secret",
},
"model": "gpt-4o-mini",
},
},
"kwargs": {},
},
},
},
},
"examples": [],
},
),
(
False,
"tools",
{
"owner": "-",
"repo": "tweet-generator-example-with-tools",
"commit_hash": "b862ce708ffeb932331a9345ea2a2fe6a76d62cf83e9aab834c24bb12bd516c9",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "prompts", "chat", "ChatPromptTemplate"],
"kwargs": {
"input_variables": ["topic"],
"metadata": {
"lc_hub_owner": "-",
"lc_hub_repo": "tweet-generator-example",
"lc_hub_commit_hash": "c39837bd8d010da739d6d4adc7f2dca2f2461521661a393d37606f5c696109a5",
},
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet based on the provided topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
},
"name": "StructuredPrompt",
},
"examples": [],
},
),
(
False,
"structured",
{
"owner": "-",
"repo": "tweet-generator-example",
"commit_hash": "e8da7f9e80471ace9b96c4f8fd55a215020126521f1da8f66130604c101fc522",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain_core", "prompts", "structured", "StructuredPrompt"],
"kwargs": {
"input_variables": ["topic"],
"metadata": {
"lc_hub_owner": "langchain-ai",
"lc_hub_repo": "tweet-generator-example",
"lc_hub_commit_hash": "7c32ca78a2831b6b3a3904eb5704b48a0730e93f29afb0853cfaefc42dc09f9c",
},
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet about the given topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
"schema_": {
"title": "GenerateTweet",
"description": "Submit your tweet.",
"type": "object",
"properties": {
"tweet": {
"type": "string",
"description": "The generated tweet.",
}
},
"required": ["tweet"],
},
},
"name": "StructuredPrompt",
},
"examples": [],
},
),
(
False,
"none",
{
"owner": "-",
"repo": "tweet-generator-example-with-nothing",
"commit_hash": "06c657373bdfcadec0d4d0933416b2c11f1b283ef3d1ca5dfb35dd6ed28b9f78",
"manifest": {
"lc": 1,
"type": "constructor",
"id": ["langchain", "prompts", "chat", "ChatPromptTemplate"],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"SystemMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": [],
"template_format": "f-string",
"template": "Generate a tweet about the given topic.",
},
}
},
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate",
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate",
],
"kwargs": {
"input_variables": ["topic"],
"template_format": "f-string",
"template": "{topic}",
},
}
},
},
],
"input_variables": ["topic"],
},
},
"examples": [],
},
),
]
@pytest.mark.parametrize("include_model, manifest_type, manifest_data", _PROMPT_COMMITS)
def test_pull_prompt(
    include_model: bool,
    manifest_type: Literal["structured", "tool", "none"],
    manifest_data: dict,
):
    """Client.pull_prompt deserializes a prompt-commit manifest into langchain objects.

    With ``include_model=True`` the result is a ``RunnableSequence`` (prompt piped
    into a model, plus an output parser when the prompt is structured); otherwise
    just the prompt template itself.

    NOTE(review): the parametrized data above uses manifest_type "tools" while
    this annotation and the branch below compare against "tool" — with the
    current cases the "tool" branch never runs; confirm which spelling is
    intended.
    """
    try:
        from langchain_core.language_models.base import BaseLanguageModel
        from langchain_core.output_parsers import JsonOutputKeyToolsParser
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.prompts.structured import StructuredPrompt
        from langchain_core.runnables import RunnableBinding, RunnableSequence
    except ImportError:
        pytest.skip("Skipping test that requires langchain")
    # Create a mock session
    mock_session = mock.Mock()
    # prompt_commit = ls_schemas.PromptCommit(**manifest_data)
    # Serve the manifest only for the /commits/ endpoint; all other requests
    # get a response whose .json() is None.
    mock_session.request.side_effect = lambda method, url, **kwargs: mock.Mock(
        json=lambda: manifest_data if "/commits/" in url else None
    )
    # Create a client with Info pre-created and version >= 0.6
    info = ls_schemas.LangSmithInfo(version="0.6.0")
    client = Client(
        api_url="http://localhost:1984",
        api_key="fake_api_key",
        session=mock_session,
        info=info,
    )
    # Fake API keys so include_model=True can instantiate the chat models
    # referenced by the manifest without touching the real environment.
    with mock.patch.dict(
        "os.environ",
        {
            "ANTHROPIC_API_KEY": "test_anthropic_key",
            "OPENAI_API_KEY": "test_openai_key",
        },
    ):
        result = client.pull_prompt(
            prompt_identifier=manifest_data["repo"], include_model=include_model
        )
        expected_prompt_type = (
            StructuredPrompt if manifest_type == "structured" else ChatPromptTemplate
        )
        if include_model:
            assert isinstance(result, RunnableSequence)
            assert isinstance(result.first, expected_prompt_type)
            if manifest_type != "structured":
                assert not isinstance(result.first, StructuredPrompt)
                assert len(result.steps) == 2
                if manifest_type == "tool":
                    assert result.steps[1].kwargs.get("tools")
            else:
                # Structured prompts bind tools to the model and append a
                # JSON tool-output parser: prompt | bound model | parser.
                assert len(result.steps) == 3
                assert isinstance(result.steps[1], RunnableBinding)
                assert result.steps[1].kwargs.get("tools")
                assert isinstance(result.steps[1].bound, BaseLanguageModel)
                assert isinstance(result.steps[2], JsonOutputKeyToolsParser)
        else:
            assert isinstance(result, expected_prompt_type)
            if manifest_type != "structured":
                assert not isinstance(result, StructuredPrompt)
def test_evaluate_methods() -> None:
    """Client.evaluate/aevaluate mirror the module-level evaluate/aevaluate.

    The client methods take ``self`` where the free functions take ``client``;
    everything else should line up.
    """

    def _params(fn, *, drop: str) -> set:
        # Signature parameter names minus the binding-specific one.
        return set(inspect.signature(fn).parameters) - {drop}

    assert _params(Client.evaluate, drop="self") == _params(evaluate, drop="client")
    # For aevaluate we only require the client method adds no extra params.
    assert not (
        _params(Client.aevaluate, drop="self") - _params(aevaluate, drop="client")
    )
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_run_helpers.py | import asyncio
import functools
import inspect
import json
import os
import sys
import time
import uuid
import warnings
from typing import (
Any,
AsyncGenerator,
Generator,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from unittest.mock import MagicMock, patch
import pytest
from requests_toolbelt import MultipartEncoder
from typing_extensions import Annotated, Literal
import langsmith
from langsmith import Client
from langsmith import client as ls_client
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils
from langsmith._internal import _aiter as aitertools
from langsmith.run_helpers import (
_get_inputs,
as_runnable,
get_current_run_tree,
is_traceable_function,
trace,
traceable,
tracing_context,
)
from langsmith.run_trees import RunTree
def _get_calls(
    mock_client: Any,
    minimum: Optional[int] = 0,
    verbs: Optional[Set[str]] = None,
    attempts: int = 10,
) -> list:
    """Poll a mocked client's HTTP session for recorded requests.

    The background batch tracer flushes asynchronously, so we re-read the mock
    a few times before giving up.

    Args:
        mock_client: Client whose ``session.request`` is a mock recording calls.
        minimum: Keep polling until strictly more than this many matching calls
            are seen; ``None`` returns the first snapshot without waiting.
        verbs: HTTP methods to keep. Defaults to ``{"POST"}``.
        attempts: Maximum polling iterations (0.1s sleep between them).

    Returns:
        The recorded mock calls whose first positional arg is in ``verbs``.
    """
    # Default resolved inside the body to avoid a mutable default argument.
    if verbs is None:
        verbs = {"POST"}
    calls: list = []
    for _ in range(attempts):
        calls = [
            c
            for c in mock_client.session.request.mock_calls  # type: ignore
            if c.args and c.args[0] in verbs
        ]
        # No threshold requested, or threshold exceeded: stop polling.
        if minimum is None or len(calls) > minimum:
            break
        time.sleep(0.1)
    return calls
def _get_data(mock_calls: List[Any]) -> List[Tuple[str, dict]]:
    """Flatten the JSON batch bodies of recorded calls into (verb, run) pairs.

    Each call's ``data`` kwarg is a JSON document with optional "post" and
    "patch" lists of run payloads; the result preserves call order and, within
    a call, lists posts before patches.
    """
    flattened: List[Tuple[str, dict]] = []
    for recorded in mock_calls:
        body = json.loads(recorded.kwargs["data"])
        for verb in ("post", "patch"):
            flattened.extend((verb, run) for run in body.get(verb) or [])
    return flattened
def _get_multipart_data(mock_calls: List[Any]) -> List[Tuple[str, Tuple[Any, bytes]]]:
    """Extract (field_name, payload) pairs from MultipartEncoder request bodies.

    File-style fields arrive as requests_toolbelt 4-tuples
    (filename, content, content_type, headers) and are normalized to
    (content_type, content); plain string fields pass through unchanged.
    """
    extracted: list = []
    for recorded in mock_calls:
        body = recorded.kwargs.get("data")
        if not isinstance(body, MultipartEncoder):
            continue
        for field_name, field_value in body.fields:
            if isinstance(field_value, tuple):
                _, content, content_type, _ = field_value
                extracted.append((field_name, (content_type, content)))
            else:
                extracted.append((field_name, field_value))
    return extracted
def test__get_inputs_with_no_args() -> None:
    """A zero-arg signature yields an empty inputs mapping."""

    def foo() -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig) == {}
def test__get_inputs_with_args() -> None:
    """Positional args are mapped onto their parameter names."""

    def foo(a: int, b: int, c: int) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, 2, 3) == {"a": 1, "b": 2, "c": 3}
def test__get_inputs_with_defaults() -> None:
    """Unsupplied defaulted parameters are filled in from the signature."""

    def foo(a: int, b: int, c: int = 3) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, 2) == {"a": 1, "b": 2, "c": 3}
def test__get_inputs_with_var_args() -> None:
    """A *args parameter captures overflow positionals under its own name."""

    # Mis-named args as kwargs to check that it's mapped correctly
    def foo(a: int, b: int, *kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, 2, 3, 4) == {"a": 1, "b": 2, "kwargs": (3, 4)}
def test__get_inputs_with_var_kwargs() -> None:
    """**kwargs entries are flattened into the top-level inputs mapping."""

    def foo(a: int, b: int, **kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, 2, c=3, d=4) == {"a": 1, "b": 2, "c": 3, "d": 4}
def test__get_inputs_with_var_kwargs_and_varargs() -> None:
    """*args and **kwargs coexist: overflow positionals under "args", keywords flattened."""

    def foo(a: int, b: int, *args: Any, **kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    extracted = _get_inputs(sig, 1, 2, 3, 4, c=5, d=6)
    assert extracted == {"a": 1, "b": 2, "args": (3, 4), "c": 5, "d": 6}
def test__get_inputs_with_class_method() -> None:
    """The implicit ``cls`` of a classmethod does not appear in the inputs."""

    class Foo:
        @classmethod
        def bar(cls, a: int, b: int) -> None:
            pass

    sig = inspect.signature(Foo.bar)
    assert _get_inputs(sig, 1, 2) == {"a": 1, "b": 2}
def test__get_inputs_with_static_method() -> None:
    """Static methods behave like plain functions for input extraction."""

    class Foo:
        @staticmethod
        def bar(a: int, b: int) -> None:
            pass

    sig = inspect.signature(Foo.bar)
    assert _get_inputs(sig, 1, 2) == {"a": 1, "b": 2}
def test__get_inputs_with_self() -> None:
    """An explicit ``self`` argument is dropped from the inputs."""

    class Foo:
        def bar(self, a: int, b: int) -> None:
            pass

    sig = inspect.signature(Foo.bar)
    assert _get_inputs(sig, Foo(), 1, 2) == {"a": 1, "b": 2}
def test__get_inputs_with_kwargs_and_var_kwargs() -> None:
    """Keywords passed directly and via ** expansion are treated alike."""

    def foo(a: int, b: int, **kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, 2, c=3, **{"d": 4}) == {"a": 1, "b": 2, "c": 3, "d": 4}
def test__get_inputs_with_var_kwargs_and_other_kwargs() -> None:
    """A dict-valued keyword stays nested rather than being flattened."""

    def foo(a: int, b: int, **kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    extracted = _get_inputs(sig, 1, 2, c=3, other_kwargs={"d": 4})
    assert extracted == {"a": 1, "b": 2, "c": 3, "other_kwargs": {"d": 4}}
def test__get_inputs_with_keyword_only_args() -> None:
    """Keyword-only parameters are captured like any other named parameter."""

    def foo(a: int, *, b: int, c: int) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1, b=2, c=3) == {"a": 1, "b": 2, "c": 3}
def test__get_inputs_with_keyword_only_args_and_defaults() -> None:
    """Defaulted keyword-only parameters are filled from the signature."""

    def foo(a: int, *, b: int = 2, c: int = 3) -> None:
        pass

    sig = inspect.signature(foo)
    assert _get_inputs(sig, 1) == {"a": 1, "b": 2, "c": 3}
def test__get_inputs_misnamed_and_required_keyword_only_args() -> None:
    """A positional param literally named ``kwargs`` plus keyword-only params
    and a real **catch-all are all mapped unambiguously."""

    def foo(kwargs: int, *, b: int, c: int, **some_other_kwargs: Any) -> None:
        pass

    sig = inspect.signature(foo)
    extracted = _get_inputs(sig, 1, b=2, c=3, d=4, e=5, other_kwargs={"f": 6})
    assert extracted == {
        "kwargs": 1,
        "b": 2,
        "c": 3,
        "d": 4,
        "e": 5,
        "other_kwargs": {"f": 6},
    }
def _get_mock_client(**kwargs: Any) -> Client:
    """Return a Client wired to a MagicMock session so no real HTTP is sent."""
    return Client(session=MagicMock(), api_key="test", **kwargs)
@pytest.fixture
def mock_client() -> Client:
    """Fixture: a fresh Client whose HTTP session is a MagicMock (no network)."""
    return _get_mock_client()
@pytest.mark.parametrize("use_next", [True, False])
@pytest.mark.parametrize("return_val", [None, "foo"])
def test_traceable_iterator(
    use_next: bool, return_val: Optional[str], mock_client: Client
) -> None:
    """A traced sync generator posts its yielded values — plus any
    StopIteration return value — as the run's "output", whether consumed via
    next() or list()."""
    with tracing_context(enabled=True):

        @traceable(client=mock_client)
        def my_iterator_fn(a, b, d, **kwargs) -> Any:
            assert kwargs == {"e": 5}
            for i in range(a + b + d):
                yield i
            return return_val

        expected = [0, 1, 2, 3, 4, 5]
        if return_val is not None:
            expected.append(return_val)
        genout = my_iterator_fn(1, 2, 3, e=5)
        if use_next:
            results = []
            while True:
                try:
                    results.append(next(genout))
                except StopIteration as e:
                    # The generator's return value rides on StopIteration.
                    assert e.value == return_val
                    if e.value is not None:
                        results.append(e.value)
                    break
        else:
            results = list(genout)
            if return_val is not None:
                results.append(return_val)
        assert results == expected
    # check the mock_calls
    mock_calls = _get_calls(mock_client, minimum=1)
    assert 1 <= len(mock_calls) <= 2
    call = mock_calls[0]
    assert call.args[0] == "POST"
    assert call.args[1].startswith("https://api.smith.langchain.com")
    body = json.loads(mock_calls[0].kwargs["data"])
    assert body["post"]
    assert body["post"][0]["outputs"]["output"] == expected
class MyStreamObject:
    """Stand-in for a sync streaming response.

    Supports direct ``next()`` through an internal cursor and fresh iteration
    over ``vals`` via ``__iter__``.
    """

    def __init__(self, some_values: list):
        self.vals = some_values
        self._cursor = iter(self.vals)

    def __next__(self):
        return next(self._cursor)

    def __iter__(self):
        for value in self.vals:
            yield value
class MyAsyncStreamObject:
    """Async analogue of MyStreamObject: usable with ``async for`` and via
    awaited ``__anext__``.

    Fix: the internal async-generator factory was named ``iter``, shadowing
    the builtin inside ``__init__``; renamed to ``_make_agen``.
    """

    def __init__(self, some_values: list):
        self.vals = some_values

        async def _make_agen():
            for val in some_values:
                yield val

        # Single-use cursor shared by __anext__ and __aiter__.
        self._iter = _make_agen()

    async def __anext__(self):
        # py_anext backports anext() behavior for older Python versions.
        return await aitertools.py_anext(self._iter)

    async def __aiter__(self):
        async for val in self._iter:
            yield val
@pytest.mark.parametrize("use_next", [True, False])
@pytest.mark.parametrize("response_type", ["sync", "async"])
async def test_traceable_stream(
    use_next: bool, response_type: str, mock_client: Client
) -> None:
    """A traced function returning a (a)sync stream object is aggregated by
    reduce_fn, so the run's output is {"my_output": [yielded values]}.

    Fixes:
      * the parametrize list read ["async", "async"], so the sync path was
        never exercised;
      * the fallback patch-payload assertions indexed the batch body directly
        (``first_patch[0]``) behind a ``"name" in first_patch`` guard that
        never matched, so they could never run — they now index into
        ``first_patch["patch"][0]``.
    """

    def reduce_fn(results: list):
        return {"my_output": results}

    @traceable(client=mock_client, reduce_fn=reduce_fn)
    def my_stream_fn(a, b, d, **kwargs):
        assert kwargs == {"e": 5}
        vals = [0, 1, 2, 3, 4, 5]
        if response_type == "sync":
            return MyStreamObject(vals)
        else:
            return MyAsyncStreamObject(vals)

    with tracing_context(enabled=True):
        expected = [0, 1, 2, 3, 4, 5]
        genout = my_stream_fn(1, 2, 3, e=5)
        if use_next:
            results = []
            if response_type == "sync":
                while True:
                    try:
                        results.append(next(genout))
                    except StopIteration:
                        break
            else:
                while True:
                    try:
                        results.append(await aitertools.py_anext(genout))
                    except StopAsyncIteration:
                        break
        else:
            if response_type == "sync":
                results = list(genout)
            else:
                results = [r async for r in genout]
        assert results == expected
    # check the mock_calls
    mock_calls = _get_calls(mock_client, minimum=1)
    assert 1 <= len(mock_calls) <= 2
    call = mock_calls[0]
    assert call.args[0] == "POST"
    assert call.args[1].startswith("https://api.smith.langchain.com")
    call_data = [json.loads(mock_call.kwargs["data"]) for mock_call in mock_calls]
    body = call_data[0]
    assert body["post"]
    assert body["post"][0]["name"] == "my_stream_fn"
    if body["post"][0]["outputs"]:
        assert body["post"][0]["outputs"] == {"my_output": expected}
    else:
        # The reduced output arrives in a later "patch" batch; poll for it.
        first_patch = next((d for d in call_data if d.get("patch")), None)
        attempt = 0
        while first_patch is None:
            time.sleep(0.2)
            if attempt > 2:
                assert False, "Could not get patch"
            mock_calls = _get_calls(mock_client, minimum=1)
            call_data = [
                json.loads(mock_call.kwargs["data"]) for mock_call in mock_calls
            ]
            first_patch = next((d for d in call_data if d.get("patch")), None)
            attempt += 1
        patched_run = first_patch["patch"][0]
        if "name" in patched_run:
            assert patched_run["name"] == "my_stream_fn"
        assert patched_run["outputs"] == {"my_output": expected}
@pytest.mark.parametrize("use_next", [True, False])
async def test_traceable_async_iterator(use_next: bool, mock_client: Client) -> None:
    """A traced async generator posts inputs transformed by process_inputs and
    the collected yields as the run output."""
    with tracing_context(enabled=True):

        def filter_inputs(kwargs: dict):
            # Replaces "a" before serialization; "b"/"d" pass through.
            return {"a": "FOOOOOO", "b": kwargs["b"], "d": kwargs["d"]}

        @traceable(client=mock_client, process_inputs=filter_inputs)
        async def my_iterator_fn(a, b, d, **kwargs):
            assert kwargs == {"e": 5}
            for i in range(a + b + d):
                yield i

        expected = [0, 1, 2, 3, 4, 5]
        genout = my_iterator_fn(1, 2, 3, e=5)
        if use_next:
            results = []
            async for item in genout:
                results.append(item)
        else:
            results = [item async for item in genout]
        assert results == expected
    # check the mock_calls
    mock_calls = _get_calls(mock_client, minimum=1)
    assert 1 <= len(mock_calls) <= 2
    call = mock_calls[0]
    assert call.args[0] == "POST"
    assert call.args[1].startswith("https://api.smith.langchain.com")
    body = json.loads(call.kwargs["data"])
    assert body["post"]
    # Assert the inputs are filtered as expected
    assert body["post"][0]["inputs"] == {"a": "FOOOOOO", "b": 2, "d": 3}
    outputs_ = body["post"][0]["outputs"]
    if "output" in outputs_:
        assert outputs_["output"] == expected
    else:
        # It was put in the second batch
        assert len(mock_calls) == 2
        body_2 = json.loads(mock_calls[1].kwargs["data"])
        assert body_2["patch"]
        assert body_2["patch"][0]["outputs"]["output"] == expected
@patch("langsmith.run_trees.Client", autospec=True)
def test_traceable_iterator_noargs(_: MagicMock) -> None:
    """@traceable used bare (no call parentheses) still wraps a generator."""

    @traceable
    def my_iterator_fn(a, b, d):
        yield from range(a + b + d)

    assert list(my_iterator_fn(1, 2, 3)) == [0, 1, 2, 3, 4, 5]
@patch("langsmith.run_trees.Client", autospec=True)
async def test_traceable_async_iterator_noargs(_: MagicMock) -> None:
    """@traceable used bare also wraps async generator functions."""

    @traceable
    async def my_iterator_fn(a, b, d):
        for value in range(a + b + d):
            yield value

    collected = [value async for value in my_iterator_fn(1, 2, 3)]
    assert collected == [0, 1, 2, 3, 4, 5]
@patch("langsmith.client.requests.Session", autospec=True)
def test_as_runnable(_: MagicMock, mock_client: Client) -> None:
    """as_runnable turns a traceable into a Runnable with a working invoke()."""

    @traceable(client=mock_client)
    def my_function(a, b, d):
        return a + b + d

    with tracing_context(enabled=False):
        assert as_runnable(my_function).invoke({"a": 1, "b": 2, "d": 3}) == 6
@patch("langsmith.client.requests.Session", autospec=True)
def test_as_runnable_batch(_: MagicMock, mock_client: Client) -> None:
    """as_runnable supports batch() over multiple inputs.

    Fix: ``@patch`` injects the patched Session mock as the first positional
    argument; the previous signature had no ``_`` slot for it, so the mock
    landed in ``mock_client`` and shadowed the fixture (matching the sibling
    tests' signatures).
    """

    @traceable(client=mock_client)
    def my_function(a, b, d):
        return a + b + d

    with tracing_context(enabled=False):
        runnable = as_runnable(my_function)
        assert runnable.batch(
            [
                {"a": 1, "b": 2, "d": 3},
                {"a": 1, "b": 2, "d": 4},
            ]
        ) == [6, 7]
@patch("langsmith.client.requests.Session", autospec=True)
async def test_as_runnable_async(_: MagicMock) -> None:
    """as_runnable exposes ainvoke() for async traceables."""

    @traceable()
    async def my_function(a, b, d):
        return a + b + d

    with tracing_context(enabled=False):
        assert await as_runnable(my_function).ainvoke({"a": 1, "b": 2, "d": 3}) == 6
@patch("langsmith.client.requests.Session", autospec=True)
async def test_as_runnable_async_batch(_: MagicMock) -> None:
    """as_runnable exposes abatch() for async traceables."""

    @traceable()
    async def my_function(a, b, d):
        return a + b + d

    with tracing_context(enabled=False):
        outputs = await as_runnable(my_function).abatch(
            [
                {"a": 1, "b": 2, "d": 3},
                {"a": 1, "b": 2, "d": 4},
            ]
        )
    assert outputs == [6, 7]
def test_traceable_parent_from_runnable_config() -> None:
    """A traceable invoked inside a langchain RunnableLambda picks up the
    LangChainTracer's run as its parent, producing a two-run tree."""
    try:
        from langchain.callbacks.tracers import LangChainTracer
        from langchain.schema.runnable import RunnableLambda
    except ImportError:
        pytest.skip("Skipping test that requires langchain")
    with tracing_context(enabled=True):
        mock_client_ = _get_mock_client()

        @traceable()
        def my_function(a: int) -> int:
            return a * 2

        my_function_runnable = RunnableLambda(my_function)
        assert (
            my_function_runnable.invoke(
                1, {"callbacks": [LangChainTracer(client=mock_client_)]}
            )
            == 2
        )
        # Inspect the mock_calls and assert that 2 runs were created,
        # one for the parent and one for the child
        mock_calls = _get_calls(mock_client_, minimum=2)
        posts = []
        for call in mock_calls:
            if call.args and call.args[0] != "GET":
                assert call.args[0] == "POST"
                assert call.args[1].startswith("https://api.smith.langchain.com")
                body = json.loads(call.kwargs["data"])
                assert body["post"]
                posts.extend(body["post"])
        assert len(posts) == 2
        parent = next(p for p in posts if p["parent_run_id"] is None)
        child = next(p for p in posts if p["parent_run_id"] is not None)
        assert child["parent_run_id"] == parent["id"]
def test_traceable_parent_from_runnable_config_accepts_config() -> None:
    """Same as test_traceable_parent_from_runnable_config, but the traceable
    declares a ``config`` parameter, which must receive the runnable config
    dict rather than break parent resolution."""
    try:
        from langchain.callbacks.tracers import LangChainTracer
        from langchain.schema.runnable import RunnableLambda
    except ImportError:
        pytest.skip("Skipping test that requires langchain")
    with tracing_context(enabled=True):
        mock_client_ = _get_mock_client()

        @traceable()
        def my_function(a: int, config: dict) -> int:
            assert isinstance(config, dict)
            return a * 2

        my_function_runnable = RunnableLambda(my_function)
        assert (
            my_function_runnable.invoke(
                1, {"callbacks": [LangChainTracer(client=mock_client_)]}
            )
            == 2
        )
        # Inspect the mock_calls and assert that 2 runs were created,
        # one for the parent and one for the child
        mock_calls = _get_calls(mock_client_, minimum=2)
        posts = []
        for call in mock_calls:
            if call.args and call.args[0] != "GET":
                assert call.args[0] == "POST"
                assert call.args[1].startswith("https://api.smith.langchain.com")
                body = json.loads(call.kwargs["data"])
                assert body["post"]
                posts.extend(body["post"])
        assert len(posts) == 2
        parent = next(p for p in posts if p["parent_run_id"] is None)
        child = next(p for p in posts if p["parent_run_id"] is not None)
        assert child["parent_run_id"] == parent["id"]
def test_traceable_project_name() -> None:
    """project_name passed to @traceable sets session_name on posted runs, and
    a parent's project is inherited by children in the same trace."""
    with tracing_context(enabled=True):
        mock_client_ = _get_mock_client()

        @traceable(client=mock_client_, project_name="my foo project")
        def my_function(a: int, b: int, d: int) -> int:
            return a + b + d

        my_function(1, 2, 3)
        # Inspect the mock_calls and assert that "my foo project" is in
        # the session_name arg of the body
        mock_calls = _get_calls(mock_client_, minimum=1)
        assert 1 <= len(mock_calls) <= 2
        call = mock_calls[0]
        assert call.args[0] == "POST"
        assert call.args[1].startswith("https://api.smith.langchain.com")
        body = json.loads(call.kwargs["data"])
        assert body["post"]
        assert body["post"][0]["session_name"] == "my foo project"
        # reset
        mock_client_ = _get_mock_client()

        @traceable(client=mock_client_, project_name="my bar project")
        def my_other_function(run_tree) -> int:
            return my_function(1, 2, 3)

        my_other_function()  # type: ignore
        # Inspect the mock_calls and assert that "my bar project" is in
        # all POST runs in the single request. We want to ensure
        # all runs in a trace are associated with the same project.
        mock_calls = _get_calls(mock_client_, minimum=1)
        assert 1 <= len(mock_calls) <= 2
        call = mock_calls[0]
        assert call.args[0] == "POST"
        assert call.args[1].startswith("https://api.smith.langchain.com")
        body = json.loads(call.kwargs["data"])
        assert body["post"]
        assert body["post"][0]["session_name"] == "my bar project"
        assert body["post"][1]["session_name"] == "my bar project"
def test_is_traceable_function(mock_client: Client) -> None:
    """A decorated plain function is recognized as traceable."""

    @traceable(client=mock_client)
    def traced_fn(a: int, b: int, d: int) -> int:
        return a + b + d

    assert is_traceable_function(traced_fn)
def test_is_traceable_partial_function(mock_client: Client) -> None:
    """functools.partial of a traceable is still detected as traceable."""

    @traceable(client=mock_client)
    def traced_fn(a: int, b: int, d: int) -> int:
        return a + b + d

    assert is_traceable_function(functools.partial(traced_fn, 1, 2))
def test_is_not_traceable_function() -> None:
    """An undecorated function is not considered traceable."""

    def plain_fn(a: int, b: int, d: int) -> int:
        return a + b + d

    assert not is_traceable_function(plain_fn)
def test_is_traceable_class_call(mock_client: Client) -> None:
    """An instance whose __call__ is decorated counts as traceable."""

    class Callable_:
        @traceable(client=mock_client)
        def __call__(self, a: int, b: int) -> None:
            pass

    assert is_traceable_function(Callable_())
def test_is_not_traceable_class_call() -> None:
    """An instance with an undecorated __call__ is not traceable."""

    class Callable_:
        def __call__(self, a: int, b: int) -> None:
            pass

    assert not is_traceable_function(Callable_())
def test_traceable_warning() -> None:
    """An unknown run_type keyword warns and suggests using name= instead."""
    with warnings.catch_warnings(record=True) as warning_records:
        warnings.simplefilter("always")

        @traceable(run_type="invalid_run_type")  # type: ignore
        def my_function() -> None:
            pass

        assert len(warning_records) == 1
        warning = warning_records[0]
        assert issubclass(warning.category, UserWarning)
        message = str(warning.message)
        assert "Unrecognized run_type: invalid_run_type" in message
        assert "Did you mean @traceable(name='invalid_run_type')?" in message
def test_traceable_wrong_run_type_pos_arg() -> None:
    """A bogus run_type passed positionally also warns with a name= hint."""
    with warnings.catch_warnings(record=True) as warning_records:
        warnings.simplefilter("always")

        @traceable("my_run_type")  # type: ignore
        def my_function() -> None:
            pass

        assert len(warning_records) == 1
        warning = warning_records[0]
        assert issubclass(warning.category, UserWarning)
        message = str(warning.message)
        assert "Unrecognized run_type: my_run_type" in message
        assert "Did you mean @traceable(name='my_run_type')?" in message
def test_traceable_too_many_pos_args() -> None:
    """More than one positional argument to @traceable triggers a warning."""
    with warnings.catch_warnings(record=True) as warning_records:
        warnings.simplefilter("always")

        @traceable("chain", "my_function")  # type: ignore
        def my_function() -> None:
            pass

        assert len(warning_records) == 1
        warning = warning_records[0]
        assert issubclass(warning.category, UserWarning)
        assert "only accepts one positional argument" in str(warning.message)
# Really hard to get contextvar propagation right for async generators
# prior to Python 3.10
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Skipping for Python 3.10 or earlier",
)
async def test_async_generator():
    """End-to-end run-tree check: an async-generator "chain" that fans out to
    sync, async, and async-generator children yields one parent run with five
    children in call order, with the manual `trace` span nested under
    another_async_func."""

    @traceable
    def some_sync_func(query: str) -> list:
        return [query, query]

    @traceable
    async def some_async_func(queries: list) -> AsyncGenerator[list, None]:
        await asyncio.sleep(0.01)
        for query in queries:
            yield query

    @traceable
    async def another_async_func(query: str) -> str:
        rid = uuid.uuid4()
        # Manually-opened span: becomes the only child run of this function.
        with langsmith.trace(
            name="zee-cm", inputs={"query": query}, run_id=rid
        ) as run_tree:
            run_tree.end(outputs={"query": query})
            assert run_tree.id == rid
        return query

    @traceable
    async def create_document_context(documents: list) -> str:
        await asyncio.sleep(0.01)
        return "\n".join(documents)

    @traceable
    async def summarize_answers(
        query: str, document_context: str
    ) -> AsyncGenerator[str, None]:
        await asyncio.sleep(0.01)
        for i in range(3):
            yield f"Answer {i}"

    @traceable(run_type="chain", name="expand_and_answer_questions")
    async def my_answer(
        query: str,
    ) -> AsyncGenerator[Any, None]:
        expanded_terms = some_sync_func(query=query)
        docs_gen = some_async_func(
            queries=expanded_terms,
        )
        documents = []
        # Deliberately interleave partial consumption of docs_gen with other
        # traced calls to exercise context propagation across suspensions.
        async for document in docs_gen:
            documents.append(document)
            break
        await another_async_func(query=query)
        for document in documents:
            yield document
        async for document in docs_gen:
            documents.append(document)
            yield document
        document_context = await create_document_context(
            documents=documents,
        )
        final_answer = summarize_answers(query=query, document_context=document_context)
        async for chunk in final_answer:
            yield chunk

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=True):
        chunks = my_answer(
            "some_query", langsmith_extra={"on_end": _get_run, "client": mock_client_}
        )
        all_chunks = []
        async for chunk in chunks:
            all_chunks.append(chunk)

    assert all_chunks == [
        "some_query",
        "some_query",
        "Answer 0",
        "Answer 1",
        "Answer 2",
    ]
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "expand_and_answer_questions"
    child_runs = run.child_runs
    assert child_runs and len(child_runs) == 5
    names = [run.name for run in child_runs]
    assert names == [
        "some_sync_func",
        "some_async_func",
        "another_async_func",
        "create_document_context",
        "summarize_answers",
    ]
    assert len(child_runs[2].child_runs) == 1  # type: ignore
def test_generator():
    """Sync analogue of test_async_generator, additionally overriding the root
    run's name via langsmith_extra={"name": ...}."""

    @traceable
    def some_sync_func(query: str) -> list:
        return [query, query]

    @traceable
    def some_func(queries: list) -> Generator[list, None, None]:
        for query in queries:
            yield query

    @traceable
    def another_func(query: str) -> str:
        # Manually-opened span: becomes the only child run of this function.
        with langsmith.trace(name="zee-cm", inputs={"query": query}) as run_tree:
            run_tree.end(outputs={"query": query})
        return query

    @traceable
    def create_document_context(documents: list) -> str:
        return "\n".join(documents)

    @traceable
    def summarize_answers(
        query: str, document_context: str
    ) -> Generator[str, None, None]:
        for i in range(3):
            yield f"Answer {i}"

    @traceable(run_type="chain", name="expand_and_answer_questions")
    def my_answer(
        query: str,
    ) -> Generator[Any, None, None]:
        expanded_terms = some_sync_func(query=query)
        docs_gen = some_func(
            queries=expanded_terms,
        )
        documents = []
        # Interleave partial consumption of docs_gen with other traced calls.
        for document in docs_gen:
            documents.append(document)
            break
        another_func(query=query)
        for document in documents:
            yield document
        for document in docs_gen:
            documents.append(document)
            yield document
        document_context = create_document_context(
            documents=documents,
        )
        final_answer = summarize_answers(query=query, document_context=document_context)
        for chunk in final_answer:
            yield chunk

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=True):
        chunks = my_answer(
            "some_query",
            langsmith_extra={
                "name": "test_overridding_name",
                "on_end": _get_run,
                "client": mock_client_,
            },
        )
        all_chunks = []
        for chunk in chunks:
            all_chunks.append(chunk)

    assert all_chunks == [
        "some_query",
        "some_query",
        "Answer 0",
        "Answer 1",
        "Answer 2",
    ]
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "test_overridding_name"
    child_runs = run.child_runs
    assert child_runs and len(child_runs) == 5
    names = [run.name for run in child_runs]
    assert names == [
        "some_sync_func",
        "some_func",
        "another_func",
        "create_document_context",
        "summarize_answers",
    ]
    assert len(child_runs[2].child_runs) == 1  # type: ignore
@pytest.mark.parametrize("enabled", [True, "local"])
def test_traceable_regular(enabled: Union[bool, Literal["local"]]):
    """Non-generator chain: five traced children under one parent run; with
    enabled="local" the run tree is still built but nothing is sent."""

    @traceable
    def some_sync_func(query: str, **kwargs: Any) -> list:
        assert kwargs == {"a": 1, "b": 2}
        return [query, query]

    @traceable
    def some_func(queries: list) -> list:
        return queries

    @traceable
    def another_func(query: str) -> str:
        # Manually-opened span: becomes the only child run of this function.
        with langsmith.trace(name="zee-cm", inputs={"query": query}) as run_tree:
            run_tree.end(outputs={"query": query})
        return query

    @traceable
    def create_document_context(documents: list) -> str:
        return "\n".join(documents)

    @traceable
    def summarize_answers(query: str, document_context: str) -> list:
        return [f"Answer {i}" for i in range(3)]

    @traceable(run_type="chain", name="expand_and_answer_questions")
    def my_answer(
        query: str,
    ) -> list:
        expanded_terms = some_sync_func(query=query, a=1, b=2)
        documents = some_func(
            queries=expanded_terms,
        )
        another_func(query=query)
        document_context = create_document_context(
            documents=documents,
        )
        final_answer = summarize_answers(query=query, document_context=document_context)
        return documents + final_answer

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=enabled):
        all_chunks = my_answer(
            "some_query", langsmith_extra={"on_end": _get_run, "client": mock_client_}
        )

    assert all_chunks == [
        "some_query",
        "some_query",
        "Answer 0",
        "Answer 1",
        "Answer 2",
    ]
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "expand_and_answer_questions"
    child_runs = run.child_runs
    assert child_runs and len(child_runs) == 5
    names = [run.name for run in child_runs]
    assert names == [
        "some_sync_func",
        "some_func",
        "another_func",
        "create_document_context",
        "summarize_answers",
    ]
    assert len(child_runs[2].child_runs) == 1  # type: ignore
    mock_calls = _get_calls(mock_client_)
    # "local" mode builds the tree but must not flush anything over HTTP.
    assert len(mock_calls) == (0 if enabled == "local" else 1)
@pytest.mark.parametrize("enabled", [True, "local"])
async def test_traceable_async(enabled: Union[bool, Literal["local"]]):
    """Async analogue of test_traceable_regular: five traced children under a
    single parent run; enabled="local" builds the tree without sending."""

    @traceable
    def some_sync_func(query: str) -> list:
        return [query, query]

    @traceable
    async def some_async_func(queries: list, *, required: str, **kwargs: Any) -> list:
        assert required == "foo"
        assert kwargs == {"a": 1, "b": 2}
        await asyncio.sleep(0.01)
        return queries

    @traceable
    async def another_async_func(query: str) -> str:
        # Manually-opened span: becomes the only child run of this function.
        with langsmith.trace(name="zee-cm", inputs={"query": query}) as run_tree:
            run_tree.end(outputs={"query": query})
        return query

    @traceable
    async def create_document_context(documents: list) -> str:
        await asyncio.sleep(0.01)
        return "\n".join(documents)

    @traceable
    async def summarize_answers(query: str, document_context: str) -> list:
        await asyncio.sleep(0.01)
        return [f"Answer {i}" for i in range(3)]

    @traceable(run_type="chain", name="expand_and_answer_questions")
    async def my_answer(
        query: str,
    ) -> list:
        expanded_terms = some_sync_func(query=query)
        documents = await some_async_func(
            queries=expanded_terms, required="foo", a=1, b=2
        )
        await another_async_func(query=query)
        document_context = await create_document_context(
            documents=documents,
        )
        final_answer = await summarize_answers(
            query=query, document_context=document_context
        )
        return documents + final_answer

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=enabled):
        all_chunks = await my_answer(
            "some_query", langsmith_extra={"on_end": _get_run, "client": mock_client_}
        )

    assert all_chunks == [
        "some_query",
        "some_query",
        "Answer 0",
        "Answer 1",
        "Answer 2",
    ]
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "expand_and_answer_questions"
    child_runs = run.child_runs
    assert child_runs and len(child_runs) == 5
    names = [run.name for run in child_runs]
    assert names == [
        "some_sync_func",
        "some_async_func",
        "another_async_func",
        "create_document_context",
        "summarize_answers",
    ]
    assert len(child_runs[2].child_runs) == 1  # type: ignore
    mock_calls = _get_calls(mock_client_)
    # "local" mode must not flush anything over HTTP.
    assert len(mock_calls) == (0 if enabled == "local" else 1)
@pytest.mark.parametrize("enabled", [True, "local"])
def test_traceable_to_trace(enabled: Union[bool, Literal["local"]]):
    """A langsmith.trace() context manager inside a @traceable function should
    attach as a child run of the traceable's run tree.
    """

    @traceable
    def parent_fn(a: int, b: int) -> int:
        with langsmith.trace(name="child_fn", inputs={"a": a, "b": b}) as run_tree:
            result = a + b
            run_tree.end(outputs={"result": result})
        return result

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        # Capture the completed root run via the on_end callback.
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=enabled):
        result = parent_fn(
            1, 2, langsmith_extra={"on_end": _get_run, "client": mock_client_}
        )
    assert result == 3
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "parent_fn"
    assert run.outputs == {"output": 3}
    assert run.inputs == {"a": 1, "b": 2}
    child_runs = run.child_runs
    assert child_runs
    assert len(child_runs) == 1
    assert child_runs[0].name == "child_fn"
    assert child_runs[0].inputs == {"a": 1, "b": 2}
    mock_calls = _get_calls(mock_client_)
    # "local" mode must never hit the wire.
    assert len(mock_calls) == (0 if enabled == "local" else 1)
@pytest.mark.parametrize("enabled", [True, "local"])
async def test_traceable_to_atrace(enabled: Union[bool, Literal["local"]]):
    """Async trace() contexts nested three deep inside a @traceable function
    should build the expected run tree, including an errored child captured by
    a try/except around one of the traces.
    """

    @traceable
    async def great_grandchild_fn(a: int, b: int) -> int:
        return a + b

    @traceable
    async def parent_fn(a: int, b: int) -> int:
        async with langsmith.trace(
            name="child_fn", inputs={"a": a, "b": b}
        ) as run_tree:
            async with langsmith.trace(
                "grandchild_fn", inputs={"a": a, "b": b, "c": "oh my"}
            ) as run_tree_gc:
                try:
                    # The raised error is recorded on the "expect_error" run.
                    async with langsmith.trace("expect_error", inputs={}):
                        raise ValueError("oh no")
                except ValueError:
                    pass
                result = await great_grandchild_fn(a, b)
                run_tree_gc.end(outputs={"result": result})
            run_tree.end(outputs={"result": result})
        return result

    run: Optional[RunTree] = None  # type: ignore

    def _get_run(r: RunTree) -> None:
        # Capture the completed root run via the on_end callback.
        nonlocal run
        run = r

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=enabled):
        result = await parent_fn(
            1, 2, langsmith_extra={"on_end": _get_run, "client": mock_client_}
        )
    assert result == 3
    assert run is not None
    run = cast(RunTree, run)
    assert run.name == "parent_fn"
    assert run.outputs == {"output": 3}
    assert run.inputs == {"a": 1, "b": 2}
    child_runs = run.child_runs
    assert child_runs
    assert len(child_runs) == 1
    child = child_runs[0]
    assert child.name == "child_fn"
    assert child.inputs == {"a": 1, "b": 2}
    assert len(child.child_runs) == 1
    grandchild = child.child_runs[0]
    assert grandchild.name == "grandchild_fn"
    assert grandchild.inputs == {"a": 1, "b": 2, "c": "oh my"}
    assert len(grandchild.child_runs) == 2
    ggcerror = grandchild.child_runs[0]
    assert ggcerror.name == "expect_error"
    assert "oh no" in str(ggcerror.error)
    ggc = grandchild.child_runs[1]
    assert ggc.name == "great_grandchild_fn"
    assert ggc.inputs == {"a": 1, "b": 2}
    mock_calls = _get_calls(mock_client_)
    # "local" mode must never hit the wire.
    assert len(mock_calls) == (0 if enabled == "local" else 1)
@pytest.mark.parametrize("enabled", [True, "local"])
def test_trace_to_traceable(enabled: Union[bool, Literal["local"]]):
    """A @traceable function called inside a langsmith.trace() context should
    attach as a child run; an explicit run_id must be honored on the parent.
    """

    @traceable
    def child_fn(a: int, b: int) -> int:
        return a + b

    mock_client_ = _get_mock_client()
    with tracing_context(enabled=enabled):
        rid = uuid.uuid4()
        with langsmith.trace(
            name="parent_fn", inputs={"a": 1, "b": 2}, client=mock_client_, run_id=rid
        ) as run:
            result = child_fn(1, 2)
            run.end(outputs={"result": result})
    assert run.id == rid
    assert result == 3
    assert run.name == "parent_fn"
    assert run.outputs == {"result": 3}
    assert run.inputs == {"a": 1, "b": 2}
    child_runs = run.child_runs
    assert child_runs
    assert len(child_runs) == 1
    assert child_runs[0].name == "child_fn"
    assert child_runs[0].inputs == {"a": 1, "b": 2}
def test_client_not_passed_when_traceable_parent():
    """When a traceable call is given explicit parent headers, it must not
    post any runs through the supplied client itself (zero client calls)."""
    client_mock = _get_mock_client()
    parent_headers = RunTree(name="foo", client=client_mock).to_headers()

    @traceable
    def my_run(foo: str):
        return {"baz": "buzz"}

    my_run(
        foo="bar", langsmith_extra={"parent": parent_headers, "client": client_mock}
    )
    assert len(_get_calls(client_mock)) == 0
def test_client_passed_when_trace_parent():
    """A trace() context given parent headers SHOULD post through the supplied
    client, with the run's inputs/outputs serialized into the batch body.
    """
    mock_client = _get_mock_client()
    rt = RunTree(name="foo", client=mock_client)
    headers = rt.to_headers()
    with tracing_context(enabled=True):
        with trace(
            name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client
        ) as rt:
            rt.outputs["bar"] = "baz"
    calls = _get_calls(mock_client)
    assert len(calls) == 1
    call = calls[0]
    # Exactly one batch POST to the default endpoint.
    assert call.args[0] == "POST"
    assert call.args[1].startswith("https://api.smith.langchain.com")
    body = json.loads(call.kwargs["data"])
    assert body["post"]
    assert body["post"][0]["inputs"] == {"foo": "bar"}
    assert body["post"][0]["outputs"] == {"bar": "baz"}
def test_client_not_called_when_enabled_local():
    """With tracing_context(enabled="local"), runs are built in-process but
    nothing is ever sent through the client."""
    client_mock = _get_mock_client()
    parent_headers = RunTree(name="foo", client=client_mock).to_headers()
    with tracing_context(enabled="local"), trace(
        name="foo", inputs={"foo": "bar"}, parent=parent_headers, client=client_mock
    ) as local_run:
        local_run.outputs["bar"] = "baz"
    recorded = _get_calls(client_mock)
    assert len(recorded) == 0
def test_from_runnable_config():
    """Interop check: a langchain @tool -> @traceable -> @tool chain should
    produce a connected run map in the LangChainTracer, with the traceable run
    inheriting the tracer's project name. Skipped when langchain is absent.
    """
    try:
        from langchain_core.tools import tool  # type: ignore
        from langchain_core.tracers.langchain import LangChainTracer  # type: ignore
    except ImportError:
        pytest.skip("Skipping test that requires langchain")

    gc_run_id = uuid.uuid4()

    @tool
    def my_grandchild_tool(text: str, callbacks: Any = None) -> str:
        """Foo."""
        # Inspect the tracer's run map from inside the innermost tool call.
        lct: LangChainTracer = callbacks.handlers[0]
        assert str(gc_run_id) in lct.run_map
        run = lct.run_map[str(gc_run_id)]
        assert run.name == "my_grandchild_tool"
        assert run.run_type == "tool"
        assert lct.project_name == "foo"
        parent_run = lct.run_map[str(run.parent_run_id)]
        assert parent_run
        assert parent_run.name == "my_traceable"
        assert parent_run.run_type == "retriever"
        grandparent_run = lct.run_map[str(parent_run.parent_run_id)]
        assert grandparent_run
        assert grandparent_run.name == "my_tool"
        assert grandparent_run.run_type == "tool"
        return text

    @traceable(run_type="retriever")
    def my_traceable(text: str) -> str:
        # The traceable layer must see its langchain parent and project.
        rt = get_current_run_tree()
        assert rt
        assert rt.run_type == "retriever"
        assert rt.parent_run_id
        assert rt.parent_run
        assert rt.parent_run.run_type == "tool"
        assert rt.session_name == "foo"
        return my_grandchild_tool.invoke({"text": text}, {"run_id": gc_run_id})

    @tool
    def my_tool(text: str) -> str:
        """Foo."""
        return my_traceable(text)

    mock_client = _get_mock_client()
    tracer = LangChainTracer(client=mock_client, project_name="foo")
    my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]})
def test_io_interops():
    """Interop check of run inputs/outputs across nested langchain runnables:
    each stage adds keys, and the POST/PATCH bodies sent by the tracer must
    contain the cumulative inputs/outputs expected at that stage. Skipped
    when langchain is absent.
    """
    try:
        from langchain_core.language_models import FakeListChatModel
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.runnables import RunnableLambda
        from langchain_core.tracers import LangChainTracer
    except ImportError:
        pytest.skip("Skipping test that requires langchain")

    tracer = LangChainTracer(client=_get_mock_client(auto_batch_tracing=False))
    # Keys each pipeline stage contributes; later stages accumulate them.
    stage_added = {
        "parent_input": {"original_input": "original_input_value"},
        "child_input": {"parent_input": "parent_input_value"},
        "child_output": {"child_output": "child_output_value"},
        "parent_output": {"parent_output": "parent_output_value"},
    }
    llm = FakeListChatModel(responses=["bar"])
    prompt = ChatPromptTemplate.from_messages([("system", "Hi {name}")])
    some_chain = prompt | llm

    @RunnableLambda
    def child(inputs: dict) -> dict:
        res = some_chain.invoke({"name": "foo"})
        assert res.content == "bar"
        return {**stage_added["child_output"], **inputs}

    @RunnableLambda
    def the_parent(inputs: dict) -> dict:
        return {
            **stage_added["parent_output"],
            **child.invoke({**stage_added["child_input"], **inputs}),
        }

    # Cumulative expected dict at each stage.
    expected_at_stage = {}
    current = {}
    for stage in stage_added:
        current = {**current, **stage_added[stage]}
        expected_at_stage[stage] = current
    parent_result = the_parent.invoke(
        stage_added["parent_input"], {"callbacks": [tracer]}
    )
    assert parent_result == expected_at_stage["parent_output"]
    # One POST per run: parent, child, chain, prompt, llm.
    mock_posts = _get_calls(tracer.client, minimum=5)
    assert len(mock_posts) == 5
    datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_posts]
    names = [
        "the_parent",
        "child",
        "RunnableSequence",
        "ChatPromptTemplate",
        "FakeListChatModel",
    ]
    # Only prompt/llm runs carry a "serialized" payload (without "graph").
    contains_serialized = {"ChatPromptTemplate", "FakeListChatModel"}
    ids_contains_serialized = set()
    for n, d in zip(names, datas):
        assert n == d["name"]
        if n in contains_serialized:
            assert d["serialized"]
            assert "graph" not in d["serialized"]
            ids_contains_serialized.add(d["id"])
        else:
            assert d.get("serialized") is None
    assert datas[0]["name"] == "the_parent"
    assert datas[0]["inputs"] == expected_at_stage["parent_input"]
    assert not datas[0]["outputs"]
    assert datas[1]["name"] == "child"
    assert datas[1]["inputs"] == expected_at_stage["child_input"]
    assert not datas[1]["outputs"]
    ids = {d["name"]: d["id"] for d in datas}
    # Check the patch requests
    mock_patches = _get_calls(tracer.client, verbs={"PATCH"}, minimum=5)
    assert len(mock_patches) == 5
    patches_datas = [
        json.loads(mock_patch.kwargs["data"]) for mock_patch in mock_patches
    ]
    patches_dict = {d["id"]: d for d in patches_datas}
    child_patch = patches_dict[ids["child"]]
    assert child_patch["outputs"] == expected_at_stage["child_output"]
    assert child_patch["inputs"] == expected_at_stage["child_input"]
    assert child_patch["name"] == "child"
    parent_patch = patches_dict[ids["the_parent"]]
    assert parent_patch["outputs"] == expected_at_stage["parent_output"]
    assert parent_patch["inputs"] == expected_at_stage["parent_input"]
    assert parent_patch["name"] == "the_parent"
    for d in patches_datas:
        if d["id"] in ids_contains_serialized:
            assert "serialized" not in d or d.get("serialized")
        else:
            assert d.get("serialized") is None
def test_trace_respects_tracing_context():
    """No client calls may be made while tracing is disabled via context."""
    client_mock = _get_mock_client()
    with tracing_context(enabled=False), trace(
        name="foo", inputs={"a": 1}, client=client_mock
    ):
        pass
    assert not _get_calls(client_mock)
def test_trace_nested_enable_disable():
    # Test that you can disable then re-enable tracing
    # and the trace connects as expected
    mock_client = _get_mock_client()
    with tracing_context(enabled=True):
        with trace(name="foo", inputs={"a": 1}, client=mock_client) as run:
            with tracing_context(enabled=False):
                # "bar" is created while tracing is off, so it is never posted.
                with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2:
                    with tracing_context(enabled=True):
                        with trace(
                            name="baz", inputs={"c": 3}, client=mock_client
                        ) as run3:
                            run3.end(outputs={"c": 3})
                    run2.end(outputs={"b": 2})
            run.end(outputs={"a": 1})
    # Now we need to ensure that there are 2 runs created (2 posts and 2 patches),
    # run -> run3
    # with run2 being invisible
    mock_calls = _get_calls(mock_client, verbs={"POST", "PATCH"})
    datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_calls]
    assert "post" in datas[0]
    posted = datas[0]["post"]
    assert len(posted) == 2
    assert posted[0]["name"] == "foo"
    assert posted[1]["name"] == "baz"
    # "baz" must be a direct child of "foo" in the dotted order (run2 skipped).
    dotted_parts = posted[1]["dotted_order"].split(".")
    assert len(dotted_parts) == 2
    parent_dotted = posted[0]["dotted_order"]
    assert parent_dotted == dotted_parts[0]
def test_tracing_disabled_project_name_set():
    """Supplying an explicit project_name must not bypass disabled tracing,
    for either the trace() context manager or a traceable call."""
    client_mock = _get_mock_client()

    @traceable
    def foo(a: int) -> int:
        return a

    with tracing_context(enabled=False):
        with trace(
            name="foo", inputs={"a": 1}, client=client_mock, project_name="my_project"
        ):
            pass
        foo(1, langsmith_extra={"client": client_mock, "project_name": "my_project"})
    recorded = _get_calls(client_mock)
    assert not recorded
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
async def test_traceable_async_exception(auto_batch_tracing: bool):
    """An exception raised inside an async @traceable fn must propagate to the
    caller while the run is still reported (one batched call, or a POST plus a
    PATCH when batching is off).
    """
    mock_client = _get_mock_client(
        auto_batch_tracing=auto_batch_tracing,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )

    @traceable
    async def my_function(a: int) -> int:
        raise ValueError("foo")

    with tracing_context(enabled=True):
        with pytest.raises(ValueError, match="foo"):
            await my_function(1, langsmith_extra={"client": mock_client})
    # Get ALL the call args for the mock_client
    num_calls = 1 if auto_batch_tracing else 2
    mock_calls = _get_calls(
        mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls
    )
    assert len(mock_calls) >= num_calls
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
async def test_traceable_async_gen_exception(auto_batch_tracing: bool):
    """An async @traceable generator that raises after yielding must propagate
    the error, and the values yielded before the failure must still be
    recorded as the run's output.
    """
    mock_client = _get_mock_client(
        auto_batch_tracing=auto_batch_tracing,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )

    @traceable
    async def my_function(a: int) -> AsyncGenerator[int, None]:
        for i in range(5):
            yield i
        raise ValueError("foo")

    with tracing_context(enabled=True):
        with pytest.raises(ValueError, match="foo"):
            async for _ in my_function(1, langsmith_extra={"client": mock_client}):
                pass
    # Get ALL the call args for the mock_client
    num_calls = 1 if auto_batch_tracing else 2
    mock_calls = _get_calls(
        mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls
    )
    assert len(mock_calls) == num_calls
    if auto_batch_tracing:
        datas = _get_data(mock_calls)
        outputs = [p["outputs"] for _, p in datas if p.get("outputs")]
        assert len(outputs) == 1
        # The five yielded values survive the trailing exception.
        assert outputs[0]["output"] == list(range(5))
@pytest.mark.parametrize("auto_batch_tracing", [True, False])
async def test_traceable_gen_exception(auto_batch_tracing: bool):
    """Sync-generator counterpart of test_traceable_async_gen_exception: the
    error propagates and the values yielded beforehand are still recorded.
    """
    mock_client = _get_mock_client(
        auto_batch_tracing=auto_batch_tracing,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )

    @traceable
    def my_function(a: int) -> Generator[int, None, None]:
        for i in range(5):
            yield i
        raise ValueError("foo")

    with tracing_context(enabled=True):
        with pytest.raises(ValueError, match="foo"):
            for _ in my_function(1, langsmith_extra={"client": mock_client}):
                pass
    # Get ALL the call args for the mock_client
    num_calls = 1 if auto_batch_tracing else 2
    mock_calls = _get_calls(
        mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls
    )
    assert len(mock_calls) == num_calls
    if auto_batch_tracing:
        datas = _get_data(mock_calls)
        outputs = [p["outputs"] for _, p in datas if p.get("outputs")]
        assert len(outputs) == 1
        # The five yielded values survive the trailing exception.
        assert outputs[0]["output"] == list(range(5))
@pytest.mark.parametrize("env_var", [True, False])
@pytest.mark.parametrize("context", [True, False, None])
async def test_trace_respects_env_var(env_var: bool, context: Optional[bool]):
    """tracing_context(enabled=...) must override LANGSMITH_TRACING; when the
    context is None, the env var alone decides whether runs are posted.
    Exercised for both the sync and async trace() context managers.
    """
    ls_utils.get_env_var.cache_clear()
    mock_client = _get_mock_client()
    # Fix: the disabled value was "false " (stray trailing space). It only
    # passed because any value other than "true" disables tracing.
    env_value = "true" if env_var else "false"
    with patch.dict(os.environ, {"LANGSMITH_TRACING": env_value}):
        with tracing_context(enabled=context):
            with trace(name="foo", inputs={"a": 1}, client=mock_client) as run:
                assert run.name == "foo"
            async with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2:
                assert run2.name == "bar"
    mock_calls = _get_calls(mock_client)
    # Explicit context wins; otherwise fall back to the env var.
    expect = env_var if context is None else context
    if expect:
        assert len(mock_calls) >= 1
    else:
        assert not mock_calls
async def test_process_inputs_outputs():
    """process_inputs/process_outputs hooks must control what is serialized to
    the API (the serialized dicts, not the raw values), without leaking the
    mutation made inside process_inputs back into the wrapped function's
    kwargs. Covered for sync, async, sync-generator, and async-generator
    traceables; the Untruthy return value guards against accidental
    truthiness checks on outputs.
    """
    mock_client = _get_mock_client()
    in_s = "what's life's meaning"

    def process_inputs(inputs: dict) -> dict:
        assert inputs == {"val": in_s, "ooblek": "nada"}
        # This mutation must NOT be visible to the wrapped function.
        inputs["val2"] = "this is mutated"
        return {"serialized_in": "what's the meaning of life?"}

    def process_outputs(outputs: int) -> dict:
        assert outputs == 42
        return {"serialized_out": 24}

    @traceable(process_inputs=process_inputs, process_outputs=process_outputs)
    def my_function(val: str, **kwargs: Any) -> int:
        assert not kwargs.get("val2")
        return 42

    with tracing_context(enabled=True):
        my_function(
            in_s,
            ooblek="nada",
            langsmith_extra={"client": mock_client},
        )

    def _check_client(client: Client) -> None:
        # The single POST body must carry the serialized inputs/outputs.
        mock_calls = _get_calls(client)
        assert len(mock_calls) == 1
        call = mock_calls[0]
        assert call.args[0] == "POST"
        assert call.args[1].startswith("https://api.smith.langchain.com")
        body = json.loads(call.kwargs["data"])
        assert body["post"]
        assert body["post"][0]["inputs"] == {
            "serialized_in": "what's the meaning of life?"
        }
        assert body["post"][0]["outputs"] == {"serialized_out": 24}

    _check_client(mock_client)

    class Untruthy:
        # Raises if anything does `if outputs:` instead of checking for None.
        def __init__(self, val: Any) -> None:
            self.val = val

        def __bool__(self) -> bool:
            raise ValueError("I'm not truthy")

        def __eq__(self, other: Any) -> bool:
            if isinstance(other, Untruthy):
                return self.val == other.val
            return self.val == other

    @traceable(process_inputs=process_inputs, process_outputs=process_outputs)
    async def amy_function(val: str, **kwargs: Any) -> int:
        assert not kwargs.get("val2")
        return Untruthy(42)  # type: ignore

    mock_client = _get_mock_client()
    with tracing_context(enabled=True):
        await amy_function(
            in_s,
            ooblek="nada",
            langsmith_extra={"client": mock_client},
        )
    _check_client(mock_client)

    # Do generator
    def reducer(outputs: list) -> dict:
        return {"reduced": outputs[0]}

    def process_reduced_outputs(outputs: dict) -> dict:
        # process_outputs sees the reduce_fn result, not the raw yields.
        assert outputs == {"reduced": 42}
        return {"serialized_out": 24}

    @traceable(
        process_inputs=process_inputs,
        process_outputs=process_reduced_outputs,
        reduce_fn=reducer,
    )
    def my_gen(val: str, **kwargs: Any) -> Generator[int, None, None]:
        assert not kwargs.get("val2")
        yield 42

    mock_client = _get_mock_client()
    with tracing_context(enabled=True):
        result = list(
            my_gen(
                in_s,
                ooblek="nada",
                langsmith_extra={"client": mock_client},
            )
        )
        assert result == [42]
    _check_client(mock_client)

    @traceable(
        process_inputs=process_inputs,
        process_outputs=process_reduced_outputs,
        reduce_fn=reducer,
    )
    async def amy_gen(val: str, **kwargs: Any) -> AsyncGenerator[int, None]:
        assert not kwargs.get("val2")
        yield Untruthy(42)  # type: ignore

    mock_client = _get_mock_client()
    with tracing_context(enabled=True):
        result = [
            i
            async for i in amy_gen(
                in_s, ooblek="nada", langsmith_extra={"client": mock_client}
            )
        ]
        assert result == [42]
    _check_client(mock_client)
def test_traceable_stop_iteration():
    """Wrapping a generator with traceable() must preserve the generator's
    return value (delivered via StopIteration to a `yield from` consumer).
    """

    def my_generator():
        yield from range(5)
        return ("last", "vals")

    def consume(gen):
        # `yield from` surfaces the generator's return value.
        last_vals = yield from gen()
        assert last_vals == ("last", "vals")

    assert list(consume(my_generator)) == list(range(5))
    wrapped = traceable(my_generator)
    assert list(consume(wrapped)) == list(range(5))
def test_traceable_input_attachments():
    """Attachment-typed parameters, and attachments added to the run tree from
    inside the function, must all be uploaded as separate multipart parts with
    their declared mime types (including a ~20MB payload).
    """
    with patch.object(ls_client.ls_env, "get_runtime_environment") as mock_get_env:
        mock_get_env.return_value = {
            "LANGSMITH_test_traceable_input_attachments": "aval"
        }

        @traceable
        def my_func(
            val: int,
            att1: ls_schemas.Attachment,
            att2: Annotated[tuple, ls_schemas.Attachment],
            run_tree: RunTree,
        ):
            # An output attachment added imperatively at runtime.
            run_tree.attachments["anoutput"] = ls_schemas.Attachment(
                mime_type="text/plain", data=b"noidea"
            )
            return "foo"

        mock_client = _get_mock_client(
            info=ls_schemas.LangSmithInfo(
                batch_ingest_config=ls_schemas.BatchIngestConfig(
                    use_multipart_endpoint=True,
                    size_limit_bytes=None,  # Note this field is not used here
                    size_limit=100,
                    scale_up_nthreads_limit=16,
                    scale_up_qsize_trigger=1000,
                    scale_down_nempty_trigger=4,
                )
            ),
        )
        long_content = b"c" * 20_000_000
        with tracing_context(enabled=True):
            result = my_func(
                42,
                ls_schemas.Attachment(mime_type="text/plain", data=long_content),
                ("application/octet-stream", "content2"),
                langsmith_extra={"client": mock_client},
            )
            assert result == "foo"
        # Poll for up to ~10s for the background batch to flush all 7 parts.
        for _ in range(10):
            calls = _get_calls(mock_client)
            datas = _get_multipart_data(calls)
            if len(datas) >= 7:
                break
            time.sleep(1)
        # main run, inputs, outputs, events, att1, att2, anoutput
        assert len(datas) == 7
        # First 4 are type application/json (run, inputs, outputs, events)
        trace_id = datas[0][0].split(".")[1]
        _, (_, run_stuff) = next(
            data for data in datas if data[0] == f"post.{trace_id}"
        )
        # The mocked runtime environment must be serialized onto the run.
        assert (
            json.loads(run_stuff)["extra"]["runtime"].get(
                "LANGSMITH_test_traceable_input_attachments"
            )
            == "aval"
        )
        _, (_, inputs) = next(
            data for data in datas if data[0] == f"post.{trace_id}.inputs"
        )
        # Attachment params are stripped from the serialized inputs.
        assert json.loads(inputs) == {"val": 42}
        # last three are the mime types provided
        _, (mime_type1, content1) = next(
            data for data in datas if data[0] == f"attachment.{trace_id}.att1"
        )
        assert mime_type1 == "text/plain"
        assert content1 == long_content
        _, (mime_type2, content2) = next(
            data for data in datas if data[0] == f"attachment.{trace_id}.att2"
        )
        assert mime_type2 == "application/octet-stream"
        assert content2 == b"content2"
        # Assert that anoutput is uploaded
        _, (mime_type_output, content_output) = next(
            data for data in datas if data[0] == f"attachment.{trace_id}.anoutput"
        )
        assert mime_type_output == "text/plain"
        assert content_output == b"noidea"
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_testing.py | import uuid
from langsmith._testing import _get_id, _serde_example_values
def test__serde_example_values():
    """Values that are not natively JSON-serializable get stringified."""

    class Foo:
        def __init__(self, a, b):
            self.a = a
            self.b = b

    serialized = _serde_example_values({"foo": Foo(1, 2)})
    assert "foo" in serialized
    assert isinstance(serialized["foo"], str)
def test__get_id():
    """Smoke test: _get_id must accept a function, non-serializable inputs,
    and a suite UUID without raising (no assertion on the returned value).
    """

    class Foo:
        bar: str = "baz"  # type: ignore

        def __init__(self, a: int, b: int):
            self.a = a
            self.b = b

    def foo(x: Foo):
        return x

    suite_id = uuid.UUID("4e32bff6-5762-4906-8d74-ee2bd0f1d234")
    _get_id(foo, {"x": Foo(1, 2)}, suite_id)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_expect.py | from unittest import mock
from langsmith import expect
from langsmith._expect import ls_client
def _is_none(x: object) -> bool:
return x is None
@mock.patch.object(ls_client, "Client", autospec=True)
def test_expect_explicit_none(mock_client: mock.Mock) -> None:
    """Exercise the full fluent `expect` API with a mocked client so no real
    network client is constructed; each call must succeed without raising.
    """
    expect(None).against(_is_none)
    expect(None).to_be_none()
    expect.score(1).to_equal(1)
    expect.score(1).to_be_less_than(2)
    expect.score(1).to_be_greater_than(0)
    expect.score(1).to_be_between(0, 2)
    expect.score(1).to_be_approximately(1, 2)
    expect({1, 2}).to_contain(1)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/test_utils.py | # mypy: disable-error-code="annotation-unchecked"
import copy
import dataclasses
import functools
import itertools
import threading
import unittest
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, NamedTuple, Optional
from unittest.mock import MagicMock, patch
import attr
import dataclasses_json
import pytest
from pydantic import BaseModel
import langsmith.utils as ls_utils
from langsmith import Client, traceable
from langsmith.run_helpers import tracing_context
class LangSmithProjectNameTest(unittest.TestCase):
    """Table-driven checks for ls_utils.get_tracer_project name resolution."""

    class GetTracerProjectTestCase:
        # Plain value holder for one table row of the parameterized test.
        def __init__(
            self, test_name, envvars, expected_project_name, return_default_value=None
        ):
            self.test_name = test_name
            self.envvars = envvars
            self.expected_project_name = expected_project_name
            self.return_default_value = return_default_value

    def test_correct_get_tracer_project(self):
        ls_utils.get_env_var.cache_clear()
        cases = [
            self.GetTracerProjectTestCase(
                test_name="default to 'default' when no project provided",
                envvars={},
                expected_project_name="default",
            ),
            self.GetTracerProjectTestCase(
                test_name="default to 'default' when "
                + "return_default_value=True and no project provided",
                envvars={},
                expected_project_name="default",
            ),
            self.GetTracerProjectTestCase(
                test_name="do not default if return_default_value=False "
                + "when no project provided",
                envvars={},
                expected_project_name=None,
                return_default_value=False,
            ),
            self.GetTracerProjectTestCase(
                test_name="use session_name for legacy tracers",
                envvars={"LANGCHAIN_SESSION": "old_timey_session"},
                expected_project_name="old_timey_session",
            ),
            self.GetTracerProjectTestCase(
                test_name="use LANGCHAIN_PROJECT over SESSION_NAME",
                envvars={
                    "LANGCHAIN_SESSION": "old_timey_session",
                    "LANGCHAIN_PROJECT": "modern_session",
                },
                expected_project_name="modern_session",
            ),
            self.GetTracerProjectTestCase(
                test_name="hosted projects get precedence over all other defaults",
                envvars={
                    "HOSTED_LANGSERVE_PROJECT_NAME": "hosted_project",
                    "LANGCHAIN_SESSION": "old_timey_session",
                    "LANGCHAIN_PROJECT": "modern_session",
                },
                expected_project_name="hosted_project",
            ),
        ]
        for case in cases:
            # Both lookups are lru-cached, so clear between table rows.
            ls_utils.get_env_var.cache_clear()
            ls_utils.get_tracer_project.cache_clear()
            with self.subTest(msg=case.test_name):
                with pytest.MonkeyPatch.context() as mp:
                    for k, v in case.envvars.items():
                        mp.setenv(k, v)
                    project = (
                        ls_utils.get_tracer_project()
                        if case.return_default_value is None
                        else ls_utils.get_tracer_project(case.return_default_value)
                    )
                    self.assertEqual(project, case.expected_project_name)
def test_tracing_enabled():
    """tracing_context(enabled=True) must enable tracing even when the env
    vars say "false", and nested contexts override outer ones — including
    from inside a running @traceable function.
    """
    ls_utils.get_env_var.cache_clear()
    with patch.dict(
        "os.environ", {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"}
    ):
        assert not ls_utils.tracing_is_enabled()
        with tracing_context(enabled=True):
            assert ls_utils.tracing_is_enabled()
            with tracing_context(enabled=False):
                assert not ls_utils.tracing_is_enabled()
            with tracing_context(enabled=False):
                assert not ls_utils.tracing_is_enabled()
        assert not ls_utils.tracing_is_enabled()

    @traceable
    def child_function():
        # Inherits the enabled state of its traced parent.
        assert ls_utils.tracing_is_enabled()
        return 1

    @traceable
    def untraced_child_function():
        assert not ls_utils.tracing_is_enabled()
        return 1

    @traceable
    def parent_function():
        # Env vars flipped to "false" mid-run do not disable an active trace.
        with patch.dict(
            "os.environ",
            {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"},
        ):
            assert ls_utils.tracing_is_enabled()
            child_function()
            with tracing_context(enabled=False):
                assert not ls_utils.tracing_is_enabled()
                return untraced_child_function()

    ls_utils.get_env_var.cache_clear()
    with patch.dict(
        "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"}
    ):
        mock_client = MagicMock(spec=Client)
        parent_function(langsmith_extra={"client": mock_client})
def test_tracing_disabled():
    """tracing_context(enabled=False) must disable tracing even when the env
    vars say "true"; re-enabling inside a disabled context works, and exiting
    restores the env-var default.
    """
    ls_utils.get_env_var.cache_clear()
    with patch.dict(
        "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"}
    ):
        assert ls_utils.tracing_is_enabled()
        with tracing_context(enabled=False):
            assert not ls_utils.tracing_is_enabled()
            with tracing_context(enabled=True):
                assert ls_utils.tracing_is_enabled()
                with tracing_context(enabled=False):
                    assert not ls_utils.tracing_is_enabled()
        assert ls_utils.tracing_is_enabled()
def test_deepish_copy():
    """ls_utils.deepish_copy must round-trip (copy == original) a dict holding
    a deliberately hostile zoo of values: slotted classes, itertools.tee
    iterators, pydantic/attrs/dataclass/enum/NamedTuple instances, locks,
    cyclic object graphs, and objects whose json() method raises.
    """

    class MyClass:
        def __init__(self, x: int) -> None:
            self.x = x
            self.y = "y"
            self.a_list = [1, 2, 3]
            self.a_tuple = (1, 2, 3)
            self.a_set = {1, 2, 3}
            self.a_dict = {"foo": "bar"}
            self.my_bytes = b"foo"

    class ClassWithTee:
        def __init__(self) -> None:
            # tee iterators share state and cannot be naively deep-copied.
            tee_a, tee_b = itertools.tee(range(10))
            self.tee_a = tee_a
            self.tee_b = tee_b

    class MyClassWithSlots:
        __slots__ = ["x", "y"]

        def __init__(self, x: int) -> None:
            self.x = x
            self.y = "y"

    class MyPydantic(BaseModel):
        foo: str
        bar: int
        baz: dict

    @dataclasses.dataclass
    class MyDataclass:
        foo: str
        bar: int

        def something(self) -> None:
            pass

    class MyEnum(str, Enum):
        FOO = "foo"
        BAR = "bar"

    class ClassWithFakeJson:
        def json(self):
            # deepish_copy must not fall back to calling .json().
            raise ValueError("This should not be called")

        def to_json(self) -> dict:
            return {"foo": "bar"}

    @dataclasses_json.dataclass_json
    @dataclasses.dataclass
    class Person:
        name: str

    @attr.dataclass
    class AttrDict:
        foo: str = attr.ib()
        bar: int

    uid = uuid.uuid4()
    current_time = datetime.now()

    class NestedClass:
        __slots__ = ["person", "lock"]

        def __init__(self) -> None:
            self.person = Person(name="foo")
            # Locks are not deep-copyable; deepish_copy must cope anyway.
            self.lock = [threading.Lock()]

        def __deepcopy__(self, memo: Optional[dict] = None) -> Any:
            cls = type(self)
            m = cls.__new__(cls)
            setattr(m, "__dict__", copy.deepcopy(self.__dict__, memo=memo))
            # NOTE(review): nothing is returned here (so copy.deepcopy would
            # yield None), and a slotted class has no __dict__ — presumably
            # deepish_copy never actually invokes __deepcopy__; confirm.

    class CyclicClass:
        def __init__(self) -> None:
            self.cyclic = self

        def __repr__(self) -> str:
            return "SoCyclic"

    class CyclicClass2:
        def __init__(self) -> None:
            self.cyclic: Any = None
            self.other: Any = None

        def __repr__(self) -> str:
            return "SoCyclic2"

    # Mutual (indirect) cycle across two instances.
    cycle_2 = CyclicClass2()
    cycle_2.cyclic = CyclicClass2()
    cycle_2.cyclic.other = cycle_2

    class MyNamedTuple(NamedTuple):
        foo: str
        bar: int

    my_dict = {
        "uid": uid,
        "time": current_time,
        "adict": {"foo": "bar"},
        "my_class": MyClass(1),
        "class_with_tee": ClassWithTee(),
        "my_slotted_class": MyClassWithSlots(1),
        "my_dataclass": MyDataclass("foo", 1),
        "my_enum": MyEnum.FOO,
        "my_pydantic": MyPydantic(foo="foo", bar=1, baz={"foo": "bar"}),
        "person": Person(name="foo"),
        "a_bool": True,
        "a_none": None,
        "a_str": "foo",
        "an_int": 1,
        "a_float": 1.1,
        "nested_class": NestedClass(),
        "attr_dict": AttrDict(foo="foo", bar=1),
        "named_tuple": MyNamedTuple(foo="foo", bar=1),
        "cyclic": CyclicClass(),
        "cyclic2": cycle_2,
        "fake_json": ClassWithFakeJson(),
    }
    assert ls_utils.deepish_copy(my_dict) == my_dict
def test_is_version_greater_or_equal():
    """Version comparison against the fixed baseline 0.5.23."""
    baseline = "0.5.23"
    # Equal to or greater than the baseline.
    for candidate in ("0.5.23", "0.5.24", "0.6.0", "1.0.0"):
        assert ls_utils.is_version_greater_or_equal(candidate, baseline)
    # Strictly less than the baseline.
    for candidate in ("0.5.22", "0.5.0", "0.4.99"):
        assert not ls_utils.is_version_greater_or_equal(candidate, baseline)
def test_parse_prompt_identifier():
    """parse_prompt_identifier splits "owner/name:commit" strings, defaulting
    owner to "-" and commit to "latest"; malformed strings raise ValueError.
    """
    # Valid cases
    assert ls_utils.parse_prompt_identifier("name") == ("-", "name", "latest")
    assert ls_utils.parse_prompt_identifier("owner/name") == ("owner", "name", "latest")
    assert ls_utils.parse_prompt_identifier("owner/name:commit") == (
        "owner",
        "name",
        "commit",
    )
    assert ls_utils.parse_prompt_identifier("name:commit") == ("-", "name", "commit")
    # Invalid cases: use pytest.raises instead of try/except/assert-False.
    invalid_identifiers = [
        "",
        "/",
        ":",
        "owner/",
        "/name",
        "owner//name",
        "owner/name/",
        "owner/name/extra",
        ":commit",
    ]
    for invalid_id in invalid_identifiers:
        with pytest.raises(ValueError):
            ls_utils.parse_prompt_identifier(invalid_id)
def test_get_api_key() -> None:
    """get_api_key: explicit values win (with surrounding quotes stripped),
    then the LANGCHAIN_API_KEY env var, and blank/missing values yield None.
    """
    ls_utils.get_env_var.cache_clear()
    assert ls_utils.get_api_key("provided_api_key") == "provided_api_key"
    # Single or double quotes around the key are stripped.
    assert ls_utils.get_api_key("'provided_api_key'") == "provided_api_key"
    assert ls_utils.get_api_key('"_provided_api_key"') == "_provided_api_key"
    with patch.dict("os.environ", {"LANGCHAIN_API_KEY": "env_api_key"}, clear=True):
        api_key_ = ls_utils.get_api_key(None)
        assert api_key_ == "env_api_key"
    ls_utils.get_env_var.cache_clear()
    with patch.dict("os.environ", {}, clear=True):
        assert ls_utils.get_api_key(None) is None
    ls_utils.get_env_var.cache_clear()
    # Blank strings are treated as missing.
    assert ls_utils.get_api_key("") is None
    assert ls_utils.get_api_key(" ") is None
def test_get_api_url() -> None:
    """get_api_url prefers an explicit URL, then the env var, then the default."""
    default_url = "https://api.smith.langchain.com"
    ls_utils.get_env_var.cache_clear()
    # An explicitly supplied URL is returned untouched.
    assert ls_utils.get_api_url("http://provided.url") == "http://provided.url"
    with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}):
        assert ls_utils.get_api_url(None) == "http://env.url"
    ls_utils.get_env_var.cache_clear()
    # No env var -> the public default endpoint (checked twice around a
    # cache clear, mirroring the original test).
    with patch.dict("os.environ", {}, clear=True):
        assert ls_utils.get_api_url(None) == default_url
    ls_utils.get_env_var.cache_clear()
    with patch.dict("os.environ", {}, clear=True):
        assert ls_utils.get_api_url(None) == default_url
    ls_utils.get_env_var.cache_clear()
    with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}):
        assert ls_utils.get_api_url(None) == "http://env.url"
    ls_utils.get_env_var.cache_clear()
    # Whitespace-only URLs are rejected outright.
    with pytest.raises(ls_utils.LangSmithUserError):
        ls_utils.get_api_url(" ")
def test_get_func_name():
    """_get_function_name resolves a name for every common callable shape."""

    class Foo:
        def __call__(self, foo: int):
            return "bar"

    class AFoo:
        async def __call__(self, foo: int):
            return "bar"

    # Callable instances (sync and async) resolve to their class name,
    # even when wrapped in functools.partial.
    for cls, expected in ((Foo, "Foo"), (AFoo, "AFoo")):
        instance = cls()
        assert ls_utils._get_function_name(instance) == expected
        assert (
            ls_utils._get_function_name(functools.partial(instance, foo=3))
            == expected
        )

    def foo(bar: int) -> None:
        return bar

    async def afoo(bar: int) -> None:
        return bar

    # Plain and coroutine functions resolve to __name__; partials unwrap.
    assert ls_utils._get_function_name(foo) == "foo"
    assert ls_utils._get_function_name(functools.partial(foo, bar=3)) == "foo"
    assert ls_utils._get_function_name(afoo) == "afoo"
    assert ls_utils._get_function_name(functools.partial(afoo, bar=3)) == "afoo"

    # Lambdas, classes, builtins, and non-callables.
    lambda_func = lambda x: x + 1  # noqa
    assert ls_utils._get_function_name(lambda_func) == "<lambda>"

    class BarClass:
        pass

    assert ls_utils._get_function_name(BarClass) == "BarClass"
    assert ls_utils._get_function_name(print) == "print"
    assert ls_utils._get_function_name("not_a_function") == "not_a_function"
|
0 | lc_public_repos/langsmith-sdk/python/tests/unit_tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/caching/.test_tracing_fake_server.yaml | interactions:
- request:
body: '{"val": 8, "should_err": 0}'
headers: {}
method: POST
uri: http://localhost:8257/fake-route
response:
body:
string: '{"STATUS":"SUCCESS"}'
headers:
content-length:
- '20'
content-type:
- application/json
status:
code: 200
message: OK
- request:
body: '{"val": 8, "should_err": 0}'
headers: {}
method: POST
uri: http://localhost:8257/fake-route
response:
body:
string: '{"STATUS":"SUCCESS"}'
headers:
Content-Length:
- '20'
Content-Type:
- application/json
Date:
- Thu, 23 May 2024 05:39:12 GMT
Server:
- uvicorn
status:
code: 200
message: OK
version: 1
|
0 | lc_public_repos/langsmith-sdk/python/tests/unit_tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/cli/test_main.py | """Test utilities in the LangSmith server."""
|
0 | lc_public_repos/langsmith-sdk/python/tests/unit_tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/evaluation/test_evaluator.py | import asyncio
import logging
import uuid
from typing import Any, Optional
from unittest import mock
from unittest.mock import MagicMock
import pytest
from langsmith import schemas
from langsmith.evaluation.evaluator import (
ComparisonEvaluationResult,
DynamicComparisonRunEvaluator,
DynamicRunEvaluator,
EvaluationResult,
EvaluationResults,
Example,
Run,
run_evaluator,
)
from langsmith.evaluation.integrations._langchain import LangChainStringEvaluator
from langsmith.run_helpers import tracing_context
@pytest.fixture
def run_1() -> Run:
    """A mocked Run carrying trivial string inputs/outputs."""
    mock_run = MagicMock()
    mock_run.inputs = {"input": "1"}
    mock_run.outputs = {"output": "2"}
    return mock_run
@pytest.fixture
def example_1():
    """A mocked Example matching the run_1 fixture's inputs/outputs."""
    mock_example = MagicMock()
    mock_example.inputs = {"input": "1"}
    mock_example.outputs = {"output": "2"}
    return mock_example
def test_run_evaluator_decorator(run_1: Run, example_1: Example):
    """@run_evaluator wraps a plain function into a DynamicRunEvaluator."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> EvaluationResult:
        return EvaluationResult(key="test", score=1.0)

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("test", 1.0)
async def test_dynamic_comparison_run_evaluator():
    """DynamicComparisonRunEvaluator accepts sync, async, or both functions."""

    def foo(runs: list, example):
        return ComparisonEvaluationResult(key="bar", scores={uuid.uuid4(): 3.1})

    async def afoo(runs: list, example):
        return ComparisonEvaluationResult(key="bar", scores={uuid.uuid4(): 3.1})

    for evaluator in (
        DynamicComparisonRunEvaluator(foo),
        DynamicComparisonRunEvaluator(afoo),
        DynamicComparisonRunEvaluator(foo, afoo),
    ):
        res = await evaluator.acompare_runs([], None)
        assert res.key == "bar"
        # repr must not raise for any construction variant.
        repr(evaluator)
def test_run_evaluator_decorator_dict(run_1: Run, example_1: Example):
    """A plain dict with key/score is coerced into an EvaluationResult."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        return {"key": "test", "score": 1.0}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("test", 1.0)
def test_run_evaluator_decorator_dict_no_key(run_1: Run, example_1: Example):
    """When the dict omits 'key', the evaluator function's name is used."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        return {"score": 1.0}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("sample_evaluator", 1.0)
def test_run_evaluator_decorator_dict_with_comment(run_1: Run, example_1: Example):
    """A 'comment' entry in the returned dict is preserved on the result."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        return {"score": 1.0, "comment": "test"}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("sample_evaluator", 1.0)
    assert outcome.comment == "test"
def test_run_evaluator_decorator_multi_return(run_1: Run, example_1: Example):
    """A dict holding a 'results' list comes back as EvaluationResults."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        return {
            "results": [
                {"key": "test", "score": 1.0},
                {"key": "test2", "score": 2.0},
            ]
        }

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert not isinstance(outcome, EvaluationResult)
    assert "results" in outcome
    assert [(r.key, r.score) for r in outcome["results"]] == [
        ("test", 1.0),
        ("test2", 2.0),
    ]
def test_run_evaluator_decorator_multi_return_no_key(run_1: Run, example_1: Example):
    """Every entry in a multi-result payload must carry its own key."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        return {
            "results": [
                {"score": 1.0},  # missing "key" -> invalid
                {"key": "test2", "score": 2.0},
            ]
        }

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with pytest.raises(ValueError), tracing_context(enabled=False):
        sample_evaluator.evaluate_run(run_1, example_1)
def test_run_evaluator_decorator_return_multi_evaluation_result(
    run_1: Run, example_1: Example
):
    """An explicit EvaluationResults payload is passed through unchanged."""

    @run_evaluator
    def sample_evaluator(run: Run, example: Optional[Example]) -> EvaluationResults:
        return EvaluationResults(
            results=[
                EvaluationResult(key="test", score=1.0),
                EvaluationResult(key="test2", score=2.0),
            ]
        )

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = sample_evaluator.evaluate_run(run_1, example_1)
    assert not isinstance(outcome, EvaluationResult)
    assert "results" in outcome
    assert [(r.key, r.score) for r in outcome["results"]] == [
        ("test", 1.0),
        ("test2", 2.0),
    ]
async def test_run_evaluator_decorator_async(run_1: Run, example_1: Example):
    """@run_evaluator also wraps coroutine functions; call via aevaluate_run."""

    @run_evaluator
    async def sample_evaluator(
        run: Run, example: Optional[Example]
    ) -> EvaluationResult:
        await asyncio.sleep(0.01)
        return EvaluationResult(key="test", score=1.0)

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("test", 1.0)
async def test_run_evaluator_decorator_dict_async(run_1: Run, example_1: Example):
    """Async evaluator returning a key/score dict yields an EvaluationResult."""

    @run_evaluator
    async def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        await asyncio.sleep(0.01)
        return {"key": "test", "score": 1.0}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("test", 1.0)
async def test_run_evaluator_decorator_dict_no_key_async(
    run_1: Run, example_1: Example
):
    """Async path also defaults the key to the evaluator function's name."""

    @run_evaluator
    async def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        await asyncio.sleep(0.01)
        return {"score": 1.0}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("sample_evaluator", 1.0)
async def test_run_evaluator_decorator_dict_with_comment_async(
    run_1: Run, example_1: Example
):
    """Async path preserves a 'comment' entry on the coerced result."""

    @run_evaluator
    async def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        await asyncio.sleep(0.01)
        return {"score": 1.0, "comment": "test"}

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert isinstance(outcome, EvaluationResult)
    assert (outcome.key, outcome.score) == ("sample_evaluator", 1.0)
    assert outcome.comment == "test"
async def test_run_evaluator_decorator_multi_return_async(
    run_1: Run, example_1: Example
):
    """Sync and async evaluators agree on the same multi-result payload."""
    shared_payload = {
        "results": [
            {"key": "test", "score": 1.0},
            {"key": "test2", "score": 2.0},
        ]
    }

    @run_evaluator
    async def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        await asyncio.sleep(0.01)
        return shared_payload

    @run_evaluator
    def sample_sync_evaluator(run: Run, example: Optional[Example]) -> dict:
        return shared_payload

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    result = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert not isinstance(result, EvaluationResult)
    assert "results" in result
    assert [(r.key, r.score) for r in result["results"]] == [
        ("test", 1.0),
        ("test2", 2.0),
    ]
    # A sync evaluator must yield identical scores whether invoked via the
    # async or the sync entry point.
    with tracing_context(enabled=False):
        via_async = await sample_sync_evaluator.aevaluate_run(run_1, example_1)
        via_sync = sample_sync_evaluator.evaluate_run(run_1, example_1)
    scores = [r.score for r in result["results"]]
    assert scores == [r.score for r in via_sync["results"]]
    assert scores == [r.score for r in via_async["results"]]
async def test_run_evaluator_decorator_multi_return_no_key_async(
    run_1: Run, example_1: Example
):
    """Async path also rejects multi-result entries that lack a key."""

    @run_evaluator
    async def sample_evaluator(run: Run, example: Optional[Example]) -> dict:
        await asyncio.sleep(0.01)
        return {
            "results": [
                {"score": 1.0},  # no "key" -> should be rejected
                {"key": "test2", "score": 2.0},
            ]
        }

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with pytest.raises(ValueError), tracing_context(enabled=False):
        await sample_evaluator.aevaluate_run(run_1, example_1)
async def test_run_evaluator_decorator_return_multi_evaluation_result_async(
    run_1: Run, example_1: Example
):
    """An async evaluator may return an explicit EvaluationResults payload."""

    @run_evaluator
    async def sample_evaluator(
        run: Run, example: Optional[Example]
    ) -> EvaluationResults:
        await asyncio.sleep(0.01)
        return EvaluationResults(
            results=[
                EvaluationResult(key="test", score=1.0),
                EvaluationResult(key="test2", score=2.0),
            ]
        )

    assert isinstance(sample_evaluator, DynamicRunEvaluator)
    with tracing_context(enabled=False):
        outcome = await sample_evaluator.aevaluate_run(run_1, example_1)
    assert not isinstance(outcome, EvaluationResult)
    assert "results" in outcome
    assert [(r.key, r.score) for r in outcome["results"]] == [
        ("test", 1.0),
        ("test2", 2.0),
    ]
@pytest.mark.parametrize("response", [None, {}, []])
async def test_evaluator_raises_for_null_output(response: Any):
    """Empty evaluator outputs (None/{}/[]) raise on sync and async paths."""

    @run_evaluator  # type: ignore
    def bad_evaluator(run: schemas.Run, example: schemas.Example):
        return response

    @run_evaluator  # type: ignore
    async def abad_evaluator(run: schemas.Run, example: schemas.Example):
        return response

    fake_run, fake_example = MagicMock(), MagicMock()
    with pytest.raises(ValueError, match="Expected a non-empty "):
        bad_evaluator.evaluate_run(fake_run, fake_example)
    with pytest.raises(ValueError, match="Expected a non-empty "):
        await bad_evaluator.aevaluate_run(fake_run, fake_example)
    with pytest.raises(ValueError, match="Expected a non-empty "):
        await abad_evaluator.aevaluate_run(fake_run, fake_example)
@pytest.mark.parametrize("response", [[5], {"accuracy": 5}])
async def test_evaluator_raises_for_bad_output(response: Any):
    """Malformed evaluator outputs raise on both sync and async paths."""

    @run_evaluator  # type: ignore
    def bad_evaluator(run: schemas.Run, example: schemas.Example):
        return response

    @run_evaluator  # type: ignore
    async def abad_evaluator(run: schemas.Run, example: schemas.Example):
        return response

    fake_run, fake_example = MagicMock(), MagicMock()
    with pytest.raises(ValueError, match="Expected"):
        bad_evaluator.evaluate_run(fake_run, fake_example)
    with pytest.raises(ValueError, match="Expected"):
        await bad_evaluator.aevaluate_run(fake_run, fake_example)
    with pytest.raises(ValueError, match="Expected"):
        await abad_evaluator.aevaluate_run(fake_run, fake_example)
def test_check_value_non_numeric(caplog):
    """EvaluationResult warns only when a numeric 'value' arrives with no score."""
    warning = "Numeric values should be provided in the 'score' field, not 'value'."

    # Numeric 'value' with no 'score' -> warning is logged.
    with caplog.at_level(logging.WARNING):
        EvaluationResult(key="test", value=5)
    assert warning + " Got: 5" in caplog.text

    # Every other combination stays silent.
    silent_cases = [
        dict(key="test", score=5, value="non-numeric"),
        dict(key="test"),
        dict(key="test", value="non-numeric"),
    ]
    for kwargs in silent_cases:
        caplog.clear()
        with caplog.at_level(logging.WARNING):
            EvaluationResult(**kwargs)
        assert warning not in caplog.text
def test_langchain_run_evaluator_native_async():
    """LangChainStringEvaluator exposes both sync and async evaluator fns."""
    try:
        from langchain.evaluation import load_evaluator  # noqa
    except ImportError:
        pytest.skip("Skipping test that requires langchain")
    # A fake key is enough to construct the evaluator without network calls.
    with mock.patch.dict("os.environ", {"OPENAI_API_KEY": "fake_api_key"}):
        res = LangChainStringEvaluator(evaluator="qa")
        run_evaluator = res.as_run_evaluator()
    assert hasattr(run_evaluator, "afunc")
    assert hasattr(run_evaluator, "func")
|
0 | lc_public_repos/langsmith-sdk/python/tests/unit_tests | lc_public_repos/langsmith-sdk/python/tests/unit_tests/evaluation/test_runner.py | """Test the eval runner."""
import asyncio
import functools
import itertools
import json
import random
import sys
import time
import uuid
from datetime import datetime, timezone
from threading import Lock
from typing import Callable, List
from unittest import mock
from unittest.mock import MagicMock
import pytest
from langsmith import Client, aevaluate, evaluate
from langsmith import schemas as ls_schemas
from langsmith.evaluation.evaluator import (
_normalize_comparison_evaluator_func,
_normalize_evaluator_func,
_normalize_summary_evaluator,
)
class FakeRequest:
    """In-memory stand-in for a requests session against the LangSmith API.

    Its `request` method is assigned as `session.request` on a mocked
    session, so the Client's HTTP traffic is answered from local state.
    Created sessions and uploaded runs are recorded so tests can assert on
    exactly what the client sent. Any unexpected request flips
    `should_fail` and raises, letting tests detect stray traffic.
    """

    def __init__(self, ds_id, ds_name, ds_examples, tenant_id):
        self.created_session = None  # session payload POSTed by the client
        self.runs = {}  # run id -> latest merged run payload
        self.should_fail = False  # set on any unknown verb/endpoint
        self.ds_id = ds_id
        self.ds_name = ds_name
        self.ds_examples = ds_examples
        self.tenant_id = tenant_id

    def request(self, verb: str, endpoint: str, *args, **kwargs):
        """Dispatch on (HTTP verb, endpoint), mimicking the real API."""
        if verb == "GET":
            if endpoint == "http://localhost:1984/datasets":
                res = MagicMock()
                res.json.return_value = {
                    "id": self.ds_id,
                    "created_at": "2021-09-01T00:00:00Z",
                    "name": self.ds_name,
                }
                return res
            elif endpoint == "http://localhost:1984/examples":
                res = MagicMock()
                res.json.return_value = [e.dict() for e in self.ds_examples]
                return res
            elif endpoint == "http://localhost:1984/sessions":
                res = {}  # type: ignore
                # Only answer for the session this fake previously created.
                if kwargs["params"]["name"] == self.created_session["name"]:  # type: ignore
                    res = self.created_session  # type: ignore
                response = MagicMock()
                response.json.return_value = res
                return response
            elif (
                endpoint
                == f"http://localhost:1984/sessions/{self.created_session['id']}"
            ):  # type: ignore
                res = self.created_session  # type: ignore
                response = MagicMock()
                response.json.return_value = res
                return response
            else:
                self.should_fail = True
                raise ValueError(f"Unknown endpoint: {endpoint}")
        elif verb == "POST":
            if endpoint == "http://localhost:1984/sessions":
                # Record the session the client creates, tagged with our tenant.
                self.created_session = json.loads(kwargs["data"]) | {
                    "tenant_id": self.tenant_id
                }
                response = MagicMock()
                response.json.return_value = self.created_session
                return response
            elif endpoint == "http://localhost:1984/runs/batch":
                loaded_runs = json.loads(kwargs["data"])
                posted = loaded_runs.get("post", [])
                patched = loaded_runs.get("patch", [])
                for p in posted:
                    self.runs[p["id"]] = p
                for p in patched:
                    # Patches merge into the originally-posted run payload.
                    self.runs[p["id"]].update(p)
                response = MagicMock()
                return response
            elif endpoint == "http://localhost:1984/runs/query":
                res = MagicMock()
                # Return only root runs (trace id == run id) that reference
                # a dataset example.
                res.json.return_value = {
                    "runs": [
                        r
                        for r in self.runs.values()
                        if r["trace_id"] == r["id"] and r.get("reference_example_id")
                    ]
                }
                return res
            elif endpoint == "http://localhost:1984/feedback":
                response = MagicMock()
                response.json.return_value = {}
                return response
            elif endpoint == "http://localhost:1984/datasets/comparative":
                response = MagicMock()
                self.created_comparative_experiment = json.loads(kwargs["data"]) | {
                    "tenant_id": self.tenant_id,
                    "modified_at": datetime.now(),
                }
                response.json.return_value = self.created_comparative_experiment
                return response
            else:
                raise ValueError(f"Unknown endpoint: {endpoint}")
        elif verb == "PATCH":
            if (
                endpoint
                == f"http://localhost:1984/sessions/{self.created_session['id']}"
            ):  # type: ignore
                updates = json.loads(kwargs["data"])
                # Only truthy fields overwrite the stored session.
                self.created_session.update({k: v for k, v in updates.items() if v})  # type: ignore
                response = MagicMock()
                response.json.return_value = self.created_session
                return response
            else:
                self.should_fail = True
                raise ValueError(f"Unknown endpoint: {endpoint}")
        else:
            self.should_fail = True
            raise ValueError(f"Unknown verb: {verb}, {endpoint}")
def _wait_until(condition: Callable, timeout: int = 8):
start = time.time()
while time.time() - start < timeout:
if condition():
return
time.sleep(0.1)
raise TimeoutError("Condition not met")
def _create_example(idx: int) -> ls_schemas.Example:
    """Synthesize a dataset example: inputs {"in": idx} -> outputs {"answer": idx + 1}."""
    fields = {
        "id": uuid.uuid4(),
        "inputs": {"in": idx},
        "outputs": {"answer": idx + 1},
        "dataset_id": "00886375-eb2a-4038-9032-efff60309896",
        "created_at": datetime.now(timezone.utc),
    }
    return ls_schemas.Example(**fields)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
@pytest.mark.parametrize("blocking", [False, True])
@pytest.mark.parametrize("as_runnable", [False, True])
@pytest.mark.parametrize("upload_results", [False, True])
def test_evaluate_results(
    blocking: bool, as_runnable: bool, upload_results: bool
) -> None:
    """End-to-end evaluate() against the in-memory FakeRequest API.

    Exercises evaluator signature variants (run/example args, unpacked
    inputs/outputs[/reference], float/str/list returns), summary
    evaluators, repetitions, blocking vs. streaming iteration, optional
    result upload, re-evaluating an existing experiment, list-of-non-dict
    rejection, and invalid evaluator signatures. One deliberately slow
    prediction verifies that streamed results are interleaved with
    evaluation rather than strictly ordered after all predictions.
    """
    session = mock.Mock()
    ds_name = "my-dataset"
    ds_id = "00886375-eb2a-4038-9032-efff60309896"
    SPLIT_SIZE = 3
    NUM_REPETITIONS = 4
    ds_examples = [_create_example(i) for i in range(10)]
    dev_split = random.sample(ds_examples, SPLIT_SIZE)
    tenant_id = str(uuid.uuid4())
    fake_request = FakeRequest(ds_id, ds_name, ds_examples, tenant_id)
    session.request = fake_request.request
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        session=session,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )
    client._tenant_id = tenant_id  # type: ignore
    # Records the interleaving of "predict" / "evaluate" events.
    ordering_of_stuff: List[str] = []
    locked = False
    lock = Lock()
    # Index in ordering_of_stuff at which the deliberately slow prediction
    # finished; used below to assert on scheduling behavior.
    slow_index = None

    def predict(inputs: dict) -> dict:
        # Exactly one prediction (the first to take the lock after two
        # events have been recorded) sleeps for 3s to create a stream gap.
        nonlocal locked
        nonlocal slow_index
        if len(ordering_of_stuff) > 2 and not locked:
            with lock:
                if len(ordering_of_stuff) > 2 and not locked:
                    locked = True
                    time.sleep(3)
                    slow_index = len(ordering_of_stuff)
                    ordering_of_stuff.append("predict")
                else:
                    ordering_of_stuff.append("predict")
        else:
            ordering_of_stuff.append("predict")
        return {"output": inputs["in"] + 1}

    if as_runnable:
        try:
            from langchain_core.runnables import RunnableLambda
        except ImportError:
            pytest.skip("langchain-core not installed.")
            return
        else:
            predict = RunnableLambda(predict)

    # Evaluator variants covering each supported signature / return shape.
    def score_value_first(run, example):
        ordering_of_stuff.append("evaluate")
        return {"score": 0.3}

    def score_unpacked_inputs_outputs(inputs, outputs):
        ordering_of_stuff.append("evaluate")
        return {"score": outputs["output"]}

    def score_unpacked_inputs_outputs_reference(inputs, outputs, reference_outputs):
        ordering_of_stuff.append("evaluate")
        return {"score": reference_outputs["answer"]}

    def eval_float(run, example):
        ordering_of_stuff.append("evaluate")
        return 0.2

    def eval_str(run, example):
        ordering_of_stuff.append("evaluate")
        return "good"

    def eval_list(run, example):
        ordering_of_stuff.append("evaluate")
        return [
            {"score": True, "key": "list_eval_bool"},
            {"score": 1, "key": "list_eval_int"},
        ]

    def summary_eval_runs_examples(runs_, examples_):
        return {"score": len(runs_[0].dotted_order)}

    def summary_eval_inputs_outputs(inputs, outputs):
        return [{"score": len([x["in"] for x in inputs])}]

    def summary_eval_outputs_reference(outputs, reference_outputs):
        return len([x["answer"] for x in reference_outputs])

    evaluators = [
        score_value_first,
        score_unpacked_inputs_outputs,
        score_unpacked_inputs_outputs_reference,
        eval_float,
        eval_str,
        eval_list,
    ]

    summary_evaluators = [
        summary_eval_runs_examples,
        summary_eval_inputs_outputs,
        summary_eval_outputs_reference,
    ]

    results = evaluate(
        predict,
        client=client,
        data=dev_split,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        num_repetitions=NUM_REPETITIONS,
        blocking=blocking,
        upload_results=upload_results,
        max_concurrency=None,
    )
    if not blocking:
        # Streaming mode: measure inter-result gaps; the single slow
        # prediction should produce exactly one large gap.
        deltas = []
        last = None
        start = time.time()
        now = start
        for _ in results:
            now = time.time()
            deltas.append((now - last) if last is not None else 0)  # type: ignore
            last = now
        assert now - start > 1.5
        # Essentially we want to check that 1 delay is > 1.5s and the rest are < 0.1s
        assert len(deltas) == SPLIT_SIZE * NUM_REPETITIONS
        assert slow_index is not None

        total_quick = sum([d < 0.5 for d in deltas])
        total_slow = sum([d > 0.5 for d in deltas])
        tolerance = 3
        assert total_slow < tolerance
        assert total_quick > (SPLIT_SIZE * NUM_REPETITIONS - 1) - tolerance

    # Each result exposes its run, example, and one entry per evaluator
    # plus one extra for the two-entry list evaluator.
    for r in results:
        assert r["run"].outputs["output"] == r["example"].inputs["in"] + 1  # type: ignore
        assert set(r["run"].outputs.keys()) == {"output"}  # type: ignore
        assert len(r["evaluation_results"]["results"]) == len(evaluators) + 1
        assert all(
            er.score is not None or er.value is not None
            for er in r["evaluation_results"]["results"]
        )
    assert len(results._summary_results["results"]) == len(summary_evaluators)

    N_PREDS = SPLIT_SIZE * NUM_REPETITIONS
    if upload_results:
        assert fake_request.created_session
        _wait_until(lambda: fake_request.runs)
        _wait_until(lambda: len(ordering_of_stuff) == (N_PREDS * (len(evaluators) + 1)))
        _wait_until(lambda: slow_index is not None)
        # Want it to be interleaved
        assert ordering_of_stuff[:N_PREDS] != ["predict"] * N_PREDS
    else:
        assert not fake_request.created_session

    # It's delayed, so it'll be the penultimate event
    # Will run all other preds and evals, then this, then the last eval
    assert slow_index == (len(evaluators) + 1) * (N_PREDS - 1)

    if upload_results:
        # Re-evaluate the uploaded experiment by name and verify the
        # results iterator is replayable and stable.
        def score_value(run, example):
            return {"score": 0.7}

        ex_results = evaluate(
            fake_request.created_session["name"],
            evaluators=[score_value],
            client=client,
            blocking=blocking,
        )
        second_item = next(itertools.islice(iter(ex_results), 1, 2))
        first_list = list(ex_results)
        second_list = list(ex_results)
        second_item_after = next(itertools.islice(iter(ex_results), 1, 2))
        assert len(first_list) == len(second_list) == SPLIT_SIZE * NUM_REPETITIONS
        assert first_list == second_list
        assert second_item == second_item_after
        dev_xample_ids = [e.id for e in dev_split]
        for r in ex_results:
            assert r["example"].id in dev_xample_ids
            assert r["evaluation_results"]["results"][0].score == 0.7
            assert r["run"].reference_example_id in dev_xample_ids
        assert not fake_request.should_fail

        ex_results2 = evaluate(
            fake_request.created_session["name"],
            evaluators=[score_value],
            client=client,
            blocking=blocking,
        )
        assert [x["evaluation_results"]["results"][0].score for x in ex_results2] == [
            x["evaluation_results"]["results"][0].score for x in ex_results
        ]

    # Returning list of non-dicts not supported.
    def bad_eval_list(run, example):
        ordering_of_stuff.append("evaluate")
        return ["foo", 1]

    results = evaluate(
        predict,
        client=client,
        data=dev_split,
        evaluators=[bad_eval_list],
        num_repetitions=NUM_REPETITIONS,
        blocking=blocking,
    )
    for r in results:
        assert r["evaluation_results"]["results"][0].extra == {"error": True}

    # test invalid evaluators
    # args need to be positional
    def eval1(*, inputs, outputs):
        pass

    # if more than 2 positional args, they must all have default arg names
    # (run, example, ...)
    def eval2(x, y, inputs):
        pass

    evaluators = [eval1, eval2]

    for eval_ in evaluators:
        with pytest.raises(ValueError, match="Invalid evaluator function."):
            _normalize_evaluator_func(eval_)

        with pytest.raises(ValueError, match="Invalid evaluator function."):
            evaluate((lambda x: x), data=ds_examples, evaluators=[eval_], client=client)
def test_evaluate_raises_for_async():
    """evaluate() rejects coroutine targets (aevaluate must be used instead)."""
    match = "Async functions are not supported by"

    async def my_func(inputs: dict):
        pass

    async def my_other_func(inputs: dict, other_val: int):
        pass

    with pytest.raises(ValueError, match=match):
        evaluate(my_func, data="foo")
    with pytest.raises(ValueError, match=match):
        evaluate(functools.partial(my_other_func, other_val=3), data="foo")

    # The runnable.ainvoke variant needs py>=3.10 and langchain-core.
    if sys.version_info < (3, 10):
        return
    try:
        from langchain_core.runnables import RunnableLambda
    except ImportError:
        pytest.skip("langchain-core not installed.")
        return
    with pytest.raises(ValueError, match=match):
        evaluate(
            functools.partial(RunnableLambda(my_func).ainvoke, inputs={"foo": "bar"}),
            data="foo",
        )
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
@pytest.mark.parametrize("blocking", [False, True])
@pytest.mark.parametrize("as_runnable", [False, True])
@pytest.mark.parametrize("upload_results", [False, True])
async def test_aevaluate_results(
    blocking: bool, as_runnable: bool, upload_results: bool
) -> None:
    """Async twin of test_evaluate_results, driving aevaluate().

    Covers the same ground: evaluator signature/return variants, summary
    evaluators, repetitions, blocking vs. async-streaming iteration,
    optional upload, re-evaluating an uploaded experiment, rejection of
    list-of-non-dict returns, and invalid evaluator signatures. A single
    slow prediction again verifies interleaved scheduling.
    """
    session = mock.Mock()
    ds_name = "my-dataset"
    ds_id = "00886375-eb2a-4038-9032-efff60309896"
    SPLIT_SIZE = 3
    NUM_REPETITIONS = 4
    ds_examples = [_create_example(i) for i in range(10)]
    dev_split = random.sample(ds_examples, SPLIT_SIZE)
    tenant_id = str(uuid.uuid4())
    fake_request = FakeRequest(ds_id, ds_name, ds_examples, tenant_id)
    session.request = fake_request.request
    client = Client(
        api_url="http://localhost:1984",
        api_key="123",
        session=session,
        info=ls_schemas.LangSmithInfo(
            batch_ingest_config=ls_schemas.BatchIngestConfig(
                size_limit_bytes=None,  # Note this field is not used here
                size_limit=100,
                scale_up_nthreads_limit=16,
                scale_up_qsize_trigger=1000,
                scale_down_nempty_trigger=4,
            )
        ),
    )
    client._tenant_id = tenant_id  # type: ignore
    # Records interleaving of "predict" / "evaluate" events.
    ordering_of_stuff: List[str] = []
    locked = False
    lock = asyncio.Lock()
    # Index in ordering_of_stuff at which the slow prediction finished.
    slow_index = None

    async def predict(inputs: dict) -> dict:
        # Exactly one prediction sleeps for 3s to create a stream gap.
        nonlocal locked
        nonlocal slow_index
        if len(ordering_of_stuff) > 2 and not locked:
            async with lock:
                if len(ordering_of_stuff) > 2 and not locked:
                    locked = True
                    await asyncio.sleep(3)
                    slow_index = len(ordering_of_stuff)
                    ordering_of_stuff.append("predict")
                else:
                    ordering_of_stuff.append("predict")
        else:
            ordering_of_stuff.append("predict")
        return {"output": inputs["in"] + 1}

    if as_runnable:
        try:
            from langchain_core.runnables import RunnableLambda
        except ImportError:
            pytest.skip("langchain-core not installed.")
            return
        else:
            predict = RunnableLambda(predict)

    # Async evaluator variants covering each supported signature / return shape.
    async def score_value_first(run, example):
        ordering_of_stuff.append("evaluate")
        return {"score": 0.3}

    async def score_unpacked_inputs_outputs(inputs, outputs):
        ordering_of_stuff.append("evaluate")
        return {"score": outputs["output"]}

    async def score_unpacked_inputs_outputs_reference(
        inputs, outputs, reference_outputs
    ):
        ordering_of_stuff.append("evaluate")
        return {"score": reference_outputs["answer"]}

    async def eval_float(run, example):
        ordering_of_stuff.append("evaluate")
        return 0.2

    async def eval_str(run, example):
        ordering_of_stuff.append("evaluate")
        return "good"

    async def eval_list(run, example):
        ordering_of_stuff.append("evaluate")
        return [
            {"score": True, "key": "list_eval_bool"},
            {"score": 1, "key": "list_eval_int"},
        ]

    def summary_eval_runs_examples(runs_, examples_):
        return {"score": len(runs_[0].dotted_order)}

    def summary_eval_inputs_outputs(inputs, outputs):
        return {"score": len([x["in"] for x in inputs])}

    def summary_eval_outputs_reference(outputs, reference_outputs):
        return {"score": len([x["answer"] for x in reference_outputs])}

    evaluators = [
        score_value_first,
        score_unpacked_inputs_outputs,
        score_unpacked_inputs_outputs_reference,
        eval_float,
        eval_str,
        eval_list,
    ]

    summary_evaluators = [
        summary_eval_runs_examples,
        summary_eval_inputs_outputs,
        summary_eval_outputs_reference,
    ]

    results = await aevaluate(
        predict,
        client=client,
        data=dev_split,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        num_repetitions=NUM_REPETITIONS,
        blocking=blocking,
        upload_results=upload_results,
        max_concurrency=None,
    )
    if not blocking:
        # Streaming mode: inter-result gaps should show exactly one large
        # delay (from the slow prediction) and otherwise stream promptly.
        deltas = []
        last = None
        start = time.time()
        now = None
        async for _ in results:
            now = time.time()
            if last is None:
                elapsed = now - start
                assert elapsed < 3
            deltas.append((now - last) if last is not None else 0)  # type: ignore
            last = now
        total = now - start  # type: ignore
        assert total > 1.5
        # Essentially we want to check that 1 delay is > 1.5s and the rest are < 0.1s
        assert len(deltas) == SPLIT_SIZE * NUM_REPETITIONS
        total_quick = sum([d < 0.5 for d in deltas])
        total_slow = sum([d > 0.5 for d in deltas])
        tolerance = 3
        assert total_slow < tolerance
        assert total_quick > (SPLIT_SIZE * NUM_REPETITIONS - 1) - tolerance
        assert any([d > 1 for d in deltas])

    async for r in results:
        assert r["run"].outputs["output"] == r["example"].inputs["in"] + 1  # type: ignore
        assert set(r["run"].outputs.keys()) == {"output"}  # type: ignore
        assert all(
            er.score is not None or er.value is not None
            for er in r["evaluation_results"]["results"]
        )
    assert len(results._summary_results["results"]) == len(summary_evaluators)

    N_PREDS = SPLIT_SIZE * NUM_REPETITIONS
    if upload_results:
        assert fake_request.created_session
        _wait_until(lambda: fake_request.runs)
        _wait_until(lambda: len(ordering_of_stuff) == N_PREDS * (len(evaluators) + 1))
        _wait_until(lambda: slow_index is not None)
        # Want it to be interleaved
        assert ordering_of_stuff[:N_PREDS] != ["predict"] * N_PREDS
        assert slow_index is not None
        # It's delayed, so it'll be the penultimate event
        # Will run all other preds and evals, then this, then the last eval
        assert slow_index == (N_PREDS - 1) * (len(evaluators) + 1)

        assert fake_request.created_session["name"]
    else:
        assert not fake_request.created_session

    async def score_value(run, example):
        return {"score": 0.7}

    if upload_results:
        # Re-evaluate the uploaded experiment by name; scores are stable
        # across iterations and re-runs.
        ex_results = await aevaluate(
            fake_request.created_session["name"],
            evaluators=[score_value],
            client=client,
            blocking=blocking,
        )
        all_results = [r async for r in ex_results]
        assert len(all_results) == SPLIT_SIZE * NUM_REPETITIONS
        dev_xample_ids = [e.id for e in dev_split]
        async for r in ex_results:
            assert r["example"].id in dev_xample_ids
            assert r["evaluation_results"]["results"][0].score == 0.7
            assert r["run"].reference_example_id in dev_xample_ids
        assert not fake_request.should_fail
        ex_results2 = await aevaluate(
            fake_request.created_session["name"],
            evaluators=[score_value],
            client=client,
            blocking=blocking,
        )
        assert [
            x["evaluation_results"]["results"][0].score async for x in ex_results2
        ] == [x["evaluation_results"]["results"][0].score for x in all_results]

    # Returning list of non-dicts not supported.
    async def bad_eval_list(run, example):
        ordering_of_stuff.append("evaluate")
        return ["foo", 1]

    results = await aevaluate(
        predict,
        client=client,
        data=dev_split,
        evaluators=[bad_eval_list],
        num_repetitions=NUM_REPETITIONS,
        blocking=blocking,
        upload_results=upload_results,
    )
    async for r in results:
        assert r["evaluation_results"]["results"][0].extra == {"error": True}

    # test invalid evaluators
    # args need to be positional
    async def eval1(*, inputs, outputs):
        pass

    # if more than 2 positional args, they must all have default arg names
    # (run, example, ...)
    async def eval2(x, y, inputs):
        pass

    evaluators = [eval1, eval2]

    async def atarget(x):
        return x

    for eval_ in evaluators:
        with pytest.raises(ValueError, match="Invalid evaluator function."):
            _normalize_evaluator_func(eval_)

        with pytest.raises(ValueError, match="Invalid evaluator function."):
            await aevaluate(
                atarget,
                data=ds_examples,
                evaluators=[eval_],
                client=client,
                upload_results=upload_results,
                blocking=blocking,
            )
def summary_eval_runs_examples(runs_, examples_):
    """Summary evaluator with the (runs, examples) signature.

    Scores by the length of the first run's dotted order.
    """
    first_run = runs_[0]
    return {"score": len(first_run.dotted_order)}


def summary_eval_inputs_outputs(inputs, outputs):
    """Summary evaluator with the (inputs, outputs) signature.

    Scores by the longest "in" field across all inputs.
    """
    in_lengths = (len(row["in"]) for row in inputs)
    return {"score": max(in_lengths)}


def summary_eval_outputs_reference(outputs, reference_outputs):
    """Summary evaluator with the (outputs, reference_outputs) signature.

    Returns a bare score: the shortest response length.
    """
    return min(len(row["response"]) for row in outputs)
@pytest.mark.parametrize(
    "evaluator",
    [
        summary_eval_runs_examples,
        summary_eval_inputs_outputs,
        summary_eval_outputs_reference,
    ],
)
def test__normalize_summary_evaluator(evaluator: Callable) -> None:
    """Every supported summary-evaluator signature normalizes to (runs, examples).

    All string fields are built with length 12, so each variant — whichever
    field it measures — must produce a score of exactly 12.
    """
    normalized = _normalize_summary_evaluator(evaluator)
    runs = [
        ls_schemas.Run(
            name="foo",
            start_time=datetime.now(),
            run_type="chain",
            id=uuid.uuid4(),
            dotted_order="a" * 12,
            outputs={"response": "c" * 12},
        )
    ]
    examples = [
        ls_schemas.Example(
            id=uuid.uuid4(),
            inputs={"in": "b" * 12},
        )
    ]
    assert normalized(runs, examples)["score"] == 12
def summary_eval_kwargs(*, runs, examples):
    """Invalid summary evaluator: keyword-only parameters must be rejected."""
    return None


def summary_eval_unknown_positional_args(runs, examples, foo):
    """Invalid summary evaluator: an unrecognized positional arg must be rejected."""
    return None
@pytest.mark.parametrize(
    "evaluator",
    [summary_eval_kwargs, summary_eval_unknown_positional_args],
)
def test__normalize_summary_evaluator_invalid(evaluator: Callable) -> None:
    """Keyword-only or unrecognized positional parameters must be rejected."""
    with pytest.raises(ValueError, match="Invalid evaluator function."):
        _normalize_summary_evaluator(evaluator)
def comparison_eval(runs, example):
    """Comparison evaluator with the (runs, example) signature."""
    return [len(run.outputs["response"]) for run in runs]


def comparison_eval_simple(inputs, outputs, reference_outputs):
    """Comparison evaluator with the (inputs, outputs, reference_outputs) signature."""
    scores = []
    for out in outputs:
        scores.append(len(out["response"]))
    return scores


def comparison_eval_no_inputs(outputs, reference_outputs):
    """Comparison evaluator with the (outputs, reference_outputs) signature."""
    ref_len = len(reference_outputs["answer"])
    return [min(len(out["response"]), ref_len) for out in outputs]
@pytest.mark.parametrize(
    "evaluator",
    [comparison_eval, comparison_eval_simple, comparison_eval_no_inputs],
)
def test__normalize_comparison_evaluator(evaluator: Callable) -> None:
    """Every supported comparison-evaluator signature normalizes to (runs, example).

    Responses have lengths 2 and 3 and the reference answer has length 4, so
    every variant must score the pair of runs as [2, 3].
    """
    runs = [
        ls_schemas.Run(
            name="foo",
            start_time=datetime.now(),
            run_type="chain",
            id=uuid.uuid4(),
            dotted_order="a",
            outputs={"response": "c" * 2},
        ),
        ls_schemas.Run(
            name="foo",
            start_time=datetime.now(),
            run_type="chain",
            id=uuid.uuid4(),
            dotted_order="d",
            outputs={"response": "e" * 3},
        ),
    ]
    example = ls_schemas.Example(
        id=uuid.uuid4(), inputs={"in": "b"}, outputs={"answer": "f" * 4}
    )
    normalized = _normalize_comparison_evaluator_func(evaluator)
    assert normalized(runs, example) == [2, 3]
async def acomparison_eval(runs, example):
    """Async comparison evaluator with the (runs, example) signature."""
    return [len(run.outputs["response"]) for run in runs]


async def acomparison_eval_simple(inputs, outputs, reference_outputs):
    """Async comparison evaluator with the (inputs, outputs, reference_outputs) signature."""
    return [len(out["response"]) for out in outputs]


async def acomparison_eval_no_inputs(outputs, reference_outputs):
    """Async comparison evaluator with the (outputs, reference_outputs) signature."""
    ref_len = len(reference_outputs["answer"])
    return [min(len(out["response"]), ref_len) for out in outputs]
@pytest.mark.parametrize(
    "evaluator",
    [acomparison_eval, acomparison_eval_simple, acomparison_eval_no_inputs],
)
async def test__normalize_comparison_evaluator_async(evaluator: Callable) -> None:
    """Coroutine comparison evaluators normalize to an awaitable (runs, example).

    Mirrors test__normalize_comparison_evaluator for async functions.
    """
    runs = [
        ls_schemas.Run(
            name="foo",
            start_time=datetime.now(),
            run_type="chain",
            id=uuid.uuid4(),
            dotted_order="a",
            outputs={"response": "c" * 2},
        ),
        ls_schemas.Run(
            name="foo",
            start_time=datetime.now(),
            run_type="chain",
            id=uuid.uuid4(),
            dotted_order="d",
            outputs={"response": "e" * 3},
        ),
    ]
    example = ls_schemas.Example(
        id=uuid.uuid4(), inputs={"in": "b"}, outputs={"answer": "f" * 4}
    )
    normalized = _normalize_comparison_evaluator_func(evaluator)
    assert await normalized(runs, example) == [2, 3]
def comparison_eval_kwargs(*, runs, example):
    """Invalid comparison evaluator: keyword-only parameters must be rejected."""
    return None


def comparison_eval_unknown_positional_args(runs, example, foo):
    """Invalid comparison evaluator: an unrecognized positional arg must be rejected."""
    return None
@pytest.mark.parametrize(
    "evaluator",
    [comparison_eval_kwargs, comparison_eval_unknown_positional_args],
)
def test__normalize_comparison_evaluator_invalid(evaluator: Callable) -> None:
    """Keyword-only or unrecognized positional parameters must be rejected."""
    with pytest.raises(ValueError, match="Invalid evaluator function."):
        _normalize_comparison_evaluator_func(evaluator)
def test_invalid_evaluate_args() -> None:
    """evaluate() must reject option combos that conflict with its target type."""
    # Options that make no sense when the target is one existing experiment.
    for kwargs in [
        {"num_repetitions": 2},
        {"experiment": "foo"},
        {"upload_results": False},
        {"experiment_prefix": "foo"},
        {"data": "data"},
    ]:
        with pytest.raises(
            ValueError,
            match=(
                "Received invalid arguments. .* when target is an existing experiment."
            ),
        ):
            evaluate("foo", **kwargs)

    # Options that make no sense for a two-experiment comparison target.
    for kwargs in [
        {"num_repetitions": 2},
        {"experiment": "foo"},
        {"upload_results": False},
        {"summary_evaluators": [(lambda a, b: 2)]},
        {"data": "data"},
    ]:
        with pytest.raises(
            ValueError,
            match=(
                "Received invalid arguments. .* when target is two existing "
                "experiments."
            ),
        ):
            evaluate(("foo", "bar"), **kwargs)

    # Tuples of any length other than two are not a valid comparison target.
    with pytest.raises(
        ValueError, match="Received invalid target. If a tuple is specified"
    ):
        evaluate(("foo", "bar", "baz"))

    # Unknown keyword arguments are rejected outright.
    with pytest.raises(ValueError, match="Received unsupported arguments"):
        evaluate((lambda x: x), data="data", load_nested=True)
async def test_invalid_aevaluate_args() -> None:
    """aevaluate() must reject option combos that conflict with its target type."""
    # Options that make no sense when the target is one existing experiment.
    for kwargs in [
        {"num_repetitions": 2},
        {"experiment": "foo"},
        {"upload_results": False},
        {"experiment_prefix": "foo"},
        {"data": "data"},
    ]:
        with pytest.raises(
            ValueError,
            match=(
                "Received invalid arguments. .* when target is an existing experiment."
            ),
        ):
            await aevaluate("foo", **kwargs)

    # Unknown keyword arguments are rejected outright.
    with pytest.raises(ValueError, match="Received unsupported arguments"):
        await aevaluate((lambda x: x), data="data", load_nested=True)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/external/test_instructor_evals.py | from enum import Enum
from itertools import product
from typing import Literal
import instructor # type: ignore
import pytest
from anthropic import AsyncAnthropic # type: ignore
from openai import AsyncOpenAI
from pydantic import BaseModel
from langsmith import test
class Models(str, Enum):
    """Model identifiers used to parametrize the evaluation clients below."""

    GPT35TURBO = "gpt-3.5-turbo"
    GPT4TURBO = "gpt-4-turbo"
    CLAUDE3_SONNET = "claude-3-sonnet-20240229"
    CLAUDE3_OPUS = "claude-3-opus-20240229"
    CLAUDE3_HAIKU = "claude-3-haiku-20240307"
# One instructor-patched async client per model under test; the Anthropic
# clients also need an explicit max_tokens cap.
clients = (
    instructor.from_openai(
        AsyncOpenAI(),
        model=Models.GPT35TURBO,
    ),
    instructor.from_openai(
        AsyncOpenAI(),
        model=Models.GPT4TURBO,
    ),
    instructor.from_anthropic(
        AsyncAnthropic(),
        model=Models.CLAUDE3_OPUS,
        max_tokens=4000,
    ),
    instructor.from_anthropic(
        AsyncAnthropic(),
        model=Models.CLAUDE3_SONNET,
        max_tokens=4000,
    ),
    instructor.from_anthropic(
        AsyncAnthropic(),
        model=Models.CLAUDE3_HAIKU,
        max_tokens=4000,
    ),
)


class ClassifySpam(BaseModel):
    """Structured output schema: the model must pick exactly one label."""

    label: Literal["spam", "not_spam"]


# (text, expected_label) pairs for the classification check.
data = [
    ("I am a spammer who sends many emails every day", "spam"),
    ("I am a responsible person who does not spam", "not_spam"),
]

# Cartesian product: every client paired with every example.
d = list(product(clients, data))
@pytest.mark.asyncio_cooperative
@test()
@pytest.mark.parametrize("client, data", d[:3])
async def test_classification(client, data):
    """Each (client, example) pair must classify the text correctly.

    Only the first three (client, example) pairs run, to keep CI cost down.
    """
    input, expected = data
    prediction = await client.create(
        response_model=ClassifySpam,
        messages=[
            {
                "role": "system",
                "content": "Classify this text as 'spam' or 'not_spam'.",
            },
            {
                "role": "user",
                "content": input,
            },
        ],
    )
    assert prediction.label == expected
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/evaluation/__init__.py | """LangSmith Evaluations.
This module provides a comprehensive suite of tools for evaluating language models and their outputs using LangSmith.
Key Features:
- Robust evaluation framework for assessing model performance across diverse tasks
- Flexible configuration options for customizing evaluation criteria and metrics
- Seamless integration with LangSmith's platform for end-to-end evaluation workflows
- Advanced analytics and reporting capabilities for actionable insights
Usage:
1. Import the necessary components from this module
2. Configure your evaluation parameters and criteria
3. Run your language model through the evaluation pipeline
4. Analyze the results using our built-in tools or export for further processing
Example:
from langsmith.evaluation import RunEvaluator, MetricCalculator
evaluator = RunEvaluator(model="gpt-3.5-turbo", dataset_name="customer_support")
results = evaluator.run()
metrics = MetricCalculator(results).calculate()
print(metrics.summary())
For detailed API documentation and advanced usage scenarios, visit:
https://docs.langsmith.com/evaluation
Note: This module is designed to work seamlessly with the LangSmith platform.
Ensure you have the necessary credentials and permissions set up before use.
"""
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/evaluation/test_evaluation.py | import asyncio
import functools
import logging
import time
from contextlib import contextmanager
from typing import Callable, Sequence, Tuple, TypeVar
import pytest
from langsmith import Client, aevaluate, evaluate, expect, test
from langsmith.evaluation import EvaluationResult, EvaluationResults
from langsmith.schemas import Example, Run
T = TypeVar("T")
@contextmanager
def suppress_warnings():
    """Temporarily raise the root logger threshold to CRITICAL.

    The previous level is restored on exit, even if the body raises.
    """
    root = logging.getLogger()
    previous_level = root.level
    root.setLevel(logging.CRITICAL)
    try:
        yield
    finally:
        root.setLevel(previous_level)
def wait_for(
    condition: Callable[[], Tuple[T, bool]],
    max_sleep_time: int = 120,
    sleep_time: int = 3,
) -> T:
    """Wait for a condition to be true.

    ``condition`` returns ``(result, done)``; ``result`` is returned as soon
    as ``done`` is true. Exceptions raised by ``condition`` are swallowed
    while polling; if the deadline expires, the most recent one is re-raised,
    otherwise a ValueError is raised.
    """
    start = time.time()
    last_exc = None
    while time.time() - start < max_sleep_time:
        try:
            result, done = condition()
            if done:
                return result
        except Exception as exc:  # remember the failure and keep polling
            last_exc = exc
        time.sleep(sleep_time)
    elapsed = time.time() - start
    if last_exc is not None:
        raise last_exc
    raise ValueError(f"Callable did not return within {elapsed}")
async def test_error_handling_evaluators():
    """Evaluators that raise must surface as error feedback, not crash the run.

    Runs one always-failing evaluator for every supported return shape (dict,
    EvaluationResult, EvaluationResults, key-less dict, dict-style results)
    through both evaluate() and aevaluate(), then checks that each produced
    feedback row carries the error message and extra["error"] is True.
    """
    client = Client()
    _ = client.clone_public_dataset(
        "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
    )
    dataset_name = "Evaluate Examples"

    # Case 1: Normal dictionary return
    def error_dict_evaluator(run: Run, example: Example):
        if True:  # This condition ensures the error is always raised
            raise ValueError("Error in dict evaluator")
        return {"key": "dict_key", "score": 1}

    # Case 2: EvaluationResult return
    def error_evaluation_result(run: Run, example: Example):
        if True:  # This condition ensures the error is always raised
            raise ValueError("Error in EvaluationResult evaluator")
        return EvaluationResult(key="eval_result_key", score=1)

    # Case 3: EvaluationResults return
    def error_evaluation_results(run: Run, example: Example):
        if True:  # This condition ensures the error is always raised
            raise ValueError("Error in EvaluationResults evaluator")
        return EvaluationResults(
            results=[
                EvaluationResult(key="eval_results_key1", score=1),
                EvaluationResult(key="eval_results_key2", score=2),
            ]
        )

    # Case 4: Dictionary without 'key' field
    def error_dict_no_key(run: Run, example: Example):
        if True:  # This condition ensures the error is always raised
            raise ValueError("Error in dict without key evaluator")
        return {"score": 1}

    # Case 5: dict-style results
    def error_evaluation_results_dict(run: Run, example: Example):
        if True:  # This condition ensures the error is always raised
            raise ValueError("Error in EvaluationResults dict evaluator")
        return {
            "results": [
                dict(key="eval_results_dict_key1", score=1),
                {"key": "eval_results_dict_key2", "score": 2},
                EvaluationResult(key="eval_results_dict_key3", score=3),
            ]
        }

    def predict(inputs: dict) -> dict:
        return {"output": "Yes"}

    with suppress_warnings():
        sync_results = evaluate(
            predict,
            data=client.list_examples(
                dataset_name=dataset_name,
                as_of="test_version",
            ),
            evaluators=[
                error_dict_evaluator,
                error_evaluation_result,
                error_evaluation_results,
                error_dict_no_key,
                error_evaluation_results_dict,
            ],
            max_concurrency=1,  # To ensure deterministic order
        )

    assert len(sync_results) == 10  # Assuming 10 examples in the dataset

    def check_results(results):
        # 8 feedback rows per example: one each for cases 1, 2 and 4, plus
        # two (case 3) and three (case 5) from the multi-result evaluators.
        # The key-less case falls back to the evaluator's function name.
        for result in results:
            eval_results = result["evaluation_results"]["results"]
            assert len(eval_results) == 8

            # Check error handling for each evaluator
            assert eval_results[0].key == "dict_key"
            assert "Error in dict evaluator" in eval_results[0].comment
            assert eval_results[0].extra.get("error") is True
            assert eval_results[1].key == "eval_result_key"
            assert "Error in EvaluationResult evaluator" in eval_results[1].comment
            assert eval_results[1].extra.get("error") is True
            assert eval_results[2].key == "eval_results_key1"
            assert "Error in EvaluationResults evaluator" in eval_results[2].comment
            assert eval_results[2].extra.get("error") is True
            assert eval_results[3].key == "eval_results_key2"
            assert "Error in EvaluationResults evaluator" in eval_results[3].comment
            assert eval_results[3].extra.get("error") is True
            assert eval_results[4].key == "error_dict_no_key"
            assert "Error in dict without key evaluator" in eval_results[4].comment
            assert eval_results[4].extra.get("error") is True
            assert eval_results[5].key == "eval_results_dict_key1"
            assert (
                "Error in EvaluationResults dict evaluator" in eval_results[5].comment
            )
            assert eval_results[5].extra.get("error") is True
            assert eval_results[6].key == "eval_results_dict_key2"
            assert (
                "Error in EvaluationResults dict evaluator" in eval_results[6].comment
            )
            assert eval_results[6].extra.get("error") is True
            assert eval_results[7].key == "eval_results_dict_key3"
            assert (
                "Error in EvaluationResults dict evaluator" in eval_results[7].comment
            )
            assert eval_results[7].extra.get("error") is True

    check_results(sync_results)

    async def apredict(inputs: dict):
        return predict(inputs)

    # Repeat the same checks through the async entry point.
    with suppress_warnings():
        async_results = await aevaluate(
            apredict,
            data=list(
                client.list_examples(
                    dataset_name=dataset_name,
                    as_of="test_version",
                )
            ),
            evaluators=[
                error_dict_evaluator,
                error_evaluation_result,
                error_evaluation_results,
                error_dict_no_key,
                error_evaluation_results_dict,
            ],
            max_concurrency=1,  # To ensure deterministic order
        )

    assert len(async_results) == 10  # Assuming 10 examples in the dataset
    check_results([res async for res in async_results])
@functools.lru_cache(maxsize=1)
def _has_pandas() -> bool:
try:
import pandas # noqa
return True
except Exception:
return False
def test_evaluate():
    """End-to-end evaluate(): row/summary evaluators, repetitions, and reruns.

    Clones a public dataset, runs a constant predictor three times per
    example, then re-runs against the same experiment referenced by name,
    object, and id.
    """
    client = Client()
    _ = client.clone_public_dataset(
        "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
    )
    dataset_name = "Evaluate Examples"

    def accuracy(run: Run, example: Example):
        # Row-level evaluator: case-insensitive exact match.
        pred = run.outputs["output"]  # type: ignore
        expected = example.outputs["answer"]  # type: ignore
        return {"score": expected.lower() == pred.lower()}

    def precision(runs: Sequence[Run], examples: Sequence[Example]):
        # Summary evaluator: precision of the "yes" predictions.
        predictions = [run.outputs["output"].lower() for run in runs]  # type: ignore
        expected = [example.outputs["answer"].lower() for example in examples]  # type: ignore
        tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        return {"score": tp / (tp + fp)}

    def predict(inputs: dict) -> dict:
        return {"output": "Yes"}

    results = evaluate(
        predict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        description="My sync experiment",
        metadata={
            "my-prompt-version": "abcd-1234",
            "function": "evaluate",
        },
        num_repetitions=3,
    )
    # 10 examples x 3 repetitions.
    assert len(results) == 30
    if _has_pandas():
        df = results.to_pandas()
        assert len(df) == 30
        assert set(df.columns) == {
            "inputs.context",
            "inputs.question",
            "outputs.output",
            "error",
            "reference.answer",
            "feedback.accuracy",
            "execution_time",
            "example_id",
            "id",
        }
    examples = client.list_examples(dataset_name=dataset_name, as_of="test_version")
    for example in examples:
        assert len([r for r in results if r["example"].id == example.id]) == 3

    # Run it again with the existing project
    results2 = evaluate(
        predict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=results.experiment_name,
    )
    assert len(results2) == 10

    # ... and again with the object
    experiment = client.read_project(project_name=results.experiment_name)
    results3 = evaluate(
        predict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=experiment,
    )
    assert len(results3) == 10

    # ... and again with the ID
    results4 = evaluate(
        predict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=str(experiment.id),
    )
    assert len(results4) == 10
async def test_aevaluate():
    """End-to-end aevaluate(): async target, slow evaluator, repetitions, reruns."""
    client = Client()
    _ = client.clone_public_dataset(
        "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
    )
    dataset_name = "Evaluate Examples"

    def accuracy(run: Run, example: Example):
        # Row-level evaluator: case-insensitive exact match.
        pred = run.outputs["output"]  # type: ignore
        expected = example.outputs["answer"]  # type: ignore
        return {"score": expected.lower() == pred.lower()}

    async def slow_accuracy(run: Run, example: Example):
        # Same check, but slow: exercises concurrent evaluator scheduling.
        pred = run.outputs["output"]  # type: ignore
        expected = example.outputs["answer"]  # type: ignore
        await asyncio.sleep(5)
        return {"score": expected.lower() == pred.lower()}

    def precision(runs: Sequence[Run], examples: Sequence[Example]):
        # Summary evaluator: precision of the "yes" predictions.
        predictions = [run.outputs["output"].lower() for run in runs]  # type: ignore
        expected = [example.outputs["answer"].lower() for example in examples]  # type: ignore
        tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        return {"score": tp / (tp + fp)}

    async def apredict(inputs: dict) -> dict:
        await asyncio.sleep(0.1)
        return {"output": "Yes"}

    results = await aevaluate(
        apredict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy, slow_accuracy],
        summary_evaluators=[precision],
        experiment_prefix="My Experiment",
        description="My Experiment Description",
        metadata={
            "my-prompt-version": "abcd-1234",
            "function": "aevaluate",
        },
        num_repetitions=2,
    )
    # 10 examples x 2 repetitions.
    assert len(results) == 20
    if _has_pandas():
        df = results.to_pandas()
        assert len(df) == 20
    examples = client.list_examples(dataset_name=dataset_name, as_of="test_version")
    all_results = [r async for r in results]
    all_examples = []
    for example in examples:
        count = 0
        for r in all_results:
            if r["run"].reference_example_id == example.id:
                count += 1
        assert count == 2
        all_examples.append(example)

    # Wait for there to be 2x runs vs. examples
    def check_run_count():
        current_runs = list(
            client.list_runs(project_name=results.experiment_name, is_root=True)
        )
        for r in current_runs:
            assert "accuracy" in r.feedback_stats
            assert "slow_accuracy" in r.feedback_stats
        return current_runs, len(current_runs) == 2 * len(all_examples)

    final_runs = wait_for(check_run_count, max_sleep_time=60, sleep_time=2)

    assert len(final_runs) == 2 * len(
        all_examples
    ), f"Expected {2 * len(all_examples)} runs, but got {len(final_runs)}"

    # Run it again with the existing project
    results2 = await aevaluate(
        apredict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=results.experiment_name,
    )
    assert len(results2) == 10

    # ... and again with the object
    experiment = client.read_project(project_name=results.experiment_name)
    results3 = await aevaluate(
        apredict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=experiment,
    )
    assert len(results3) == 10

    # ... and again with the ID
    results4 = await aevaluate(
        apredict,
        data=client.list_examples(dataset_name=dataset_name, as_of="test_version"),
        evaluators=[accuracy],
        summary_evaluators=[precision],
        experiment=str(experiment.id),
    )
    assert len(results4) == 10
@test
def test_foo():
    """Sanity check: expect() assertions work under the @test decorator."""
    expect(3 + 4).to_equal(7)


@pytest.fixture
def some_input():
    # Pytest fixture consumed by test_bar below.
    return "Some input"


@pytest.fixture
def expected_output():
    # Pytest fixture consumed by test_bar below.
    return "input"


@test(output_keys=["expected_output"])
def test_bar(some_input: str, expected_output: str):
    # output_keys marks which fixtures are logged as reference outputs.
    expect(some_input).to_contain(expected_output)


@test
async def test_baz():
    # Coroutine test functions are supported by @test as well.
    await asyncio.sleep(0.1)
    expect(3 + 4).to_equal(7)
    return 7


@test
@pytest.mark.parametrize("x, y", [(1, 2), (2, 3)])
def test_foo_parametrized(x, y):
    # @test composes with pytest.mark.parametrize.
    expect(x + y).to_be_greater_than(0)
    return x + y


@test(output_keys=["z"])
@pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)])
def test_bar_parametrized(x, y, z):
    expect(x + y).to_equal(z)
    return {"z": x + y}


@test(test_suite_name="tests.evaluation.test_evaluation::test_foo_async_parametrized")
@pytest.mark.parametrize("x, y", [(1, 2), (2, 3)])
async def test_foo_async_parametrized(x, y):
    # An explicit test_suite_name overrides the default suite naming.
    await asyncio.sleep(0.1)
    expect(x + y).to_be_greater_than(0)
    return x + y


@test(output_keys=["z"])
@pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)])
async def test_bar_async_parametrized(x, y, z):
    await asyncio.sleep(0.1)
    expect(x + y).to_equal(z)
    return {"z": x + y}


@test
def test_pytest_skip():
    # pytest.skip must propagate through the @test wrapper.
    pytest.skip("Skip this test")


@test
async def test_async_pytest_skip():
    pytest.skip("Skip this test")
async def test_aevaluate_good_error():
    """aevaluate() should raise clear ValueErrors for empty or missing data."""
    client = Client()
    ds_name = "__Empty Dataset Do Not Modify"
    if not client.has_dataset(dataset_name=ds_name):
        client.create_dataset(dataset_name=ds_name)

    async def predict(inputs: dict):
        return {}

    match_val = "No examples found in the dataset."
    # A named-but-empty dataset is surfaced as "no examples found".
    with pytest.raises(ValueError, match=match_val):
        await aevaluate(
            predict,
            data=ds_name,
        )
    # An empty list is rejected up front as missing data.
    with pytest.raises(ValueError, match="Must specify 'data'"):
        await aevaluate(
            predict,
            data=[],
        )
    # An empty generator is only detected once iterated: same error as above.
    with pytest.raises(ValueError, match=match_val):
        await aevaluate(
            predict,
            data=(_ for _ in range(0)),
        )
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/tracing_client_via_pyo3.py | import os
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Dict
from uuid import uuid4
from tracing_client_bench import create_run_data
from langsmith.client import Client
def amend_run_data_in_place(
    run: Dict[str, Any],
    run_id: str,
    start_time: str,
    end_time: str,
    dotted_order: str,
):
    """Rewrite a run payload's identity and timing fields in place.

    Lets the benchmark reuse one large payload across many create_run()
    calls instead of rebuilding the JSON body each time.
    """
    run.update(
        {
            "id": run_id,
            "trace_id": run_id,
            "dotted_order": dotted_order,
            "start_time": start_time,
            "end_time": end_time,
        }
    )
def benchmark_run_creation(json_size, num_runs) -> None:
    """Benchmark the creation of runs.

    Posts ``num_runs`` runs (each carrying two ``json_size``-element JSON
    payloads) against the beta API and reports throughput with and without
    the time spent draining the background queue. Requires LANGSMITH_API_KEY;
    set LANGSMITH_USE_PYO3_CLIENT to exercise the Rust (PyO3) code path.
    """
    if os.environ.get("LANGSMITH_USE_PYO3_CLIENT") is None:
        print(
            "LANGSMITH_USE_PYO3_CLIENT is not set, so this run will not use PyO3.\n"
            " It will use only the pure Python code paths."
        )

    api_key = os.environ["LANGSMITH_API_KEY"]
    if not api_key:
        raise Exception("No API key configured")

    client = Client(
        api_url="https://beta.api.smith.langchain.com",
        api_key=api_key,
    )

    # Unique project name per invocation so runs don't pile into one project.
    project_name = "__tracing_client_bench_pyo3_" + datetime.now().strftime(
        "%Y%m%dT%H%M%S"
    )

    bench_start_time = datetime.now(timezone.utc)
    # Pre-build all payloads so payload construction isn't part of the timing.
    runs = [
        create_run_data(
            str(uuid4()), json_size, bench_start_time + timedelta(milliseconds=i * 2)
        )
        for i in range(num_runs)
    ]

    start = time.perf_counter()
    for run in runs:
        client.create_run(**run, project_name=project_name)
    end = time.perf_counter()

    # Alternative timing mode (disabled): reuse one payload and amend it in
    # place, isolating client overhead from payload construction.
    # data = []
    # run = create_run_data(str(uuid4()), json_size, bench_start_time)
    # for i in range(num_runs):
    #     run_id = str(uuid4())
    #     start_time = bench_start_time + timedelta(milliseconds=i * 2)
    #     end_time = start_time + timedelta(milliseconds=1)
    #     dotted_order = f"{start_time.strftime('%Y%m%dT%H%M%S%fZ')}{run_id}"
    #     data.append((run_id, start_time, end_time, dotted_order))
    #
    # start = time.perf_counter()
    # for data_tuple in data:
    #     amend_run_data_in_place(run, *data_tuple)
    #     client.create_run(**run, project_name=project_name)
    # end = time.perf_counter()

    if client._pyo3_client:
        # Wait for the queue to drain.
        client._pyo3_client.drain()
    else:
        client.tracing_queue.join()

    total = time.perf_counter() - start
    just_create_run = end - start
    queue_drain_time = total - just_create_run
    throughput = num_runs / just_create_run
    throughput_including_drain = num_runs / total

    print(f"Made {num_runs} create_run() calls in {just_create_run:.2f}s")
    print(f"Spent {queue_drain_time:.2f} waiting for the queue to drain")
    print(f"Total time: {num_runs} runs in {total:.2f}s")
    print(f"Throughput: {throughput:.2f} req/s")
    print(f"Throughput (incl. drain): {throughput_including_drain:.2f} req/s")
def main():
    """
    Run benchmarks with different combinations of parameters and report results.
    """
    payload_size = 3_000
    run_count = 1000
    benchmark_run_creation(payload_size, run_count)


if __name__ == "__main__":
    main()
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/create_run.py | import logging
import statistics
import time
from queue import PriorityQueue
from typing import Dict
from unittest.mock import Mock
from uuid import uuid4
from langsmith._internal._background_thread import (
_tracing_thread_drain_queue,
_tracing_thread_handle_batch,
)
from langsmith.client import Client
def create_large_json(length: int) -> Dict:
    """Create a large JSON object for benchmarking purposes.

    The payload is an array of ``length`` small records plus fixed metadata.
    """
    elements = []
    for i in range(length):
        elements.append(
            {
                "index": i,
                "data": f"This is element number {i}",
                "nested": {"id": i, "value": f"Nested value for element {i}"},
            }
        )
    return {
        "name": "Huge JSON",
        "description": "This is a very large JSON object for benchmarking purposes.",
        "array": elements,
        "metadata": {
            "created_at": "2024-10-22T19:00:00Z",
            "author": "Python Program",
            "version": 1.0,
        },
    }
def create_run_data(run_id: str, json_size: int) -> Dict:
    """Create a single run payload with large, independent input/output bodies."""
    run = {
        "name": "Run Name",
        "id": run_id,
        "run_type": "chain",
    }
    # Two separate payloads on purpose: inputs and outputs must not alias.
    run["inputs"] = create_large_json(json_size)
    run["outputs"] = create_large_json(json_size)
    run.update(
        {
            "extra": {"extra_data": "value"},
            "trace_id": "trace_id",
            "dotted_order": "1.1",
            "tags": ["tag1", "tag2"],
            "session_name": "Session Name",
        }
    )
    return run
def mock_session() -> Mock:
    """Build a fake HTTP session whose requests always return 202 Accepted."""
    response = Mock()
    response.status_code = 202
    response.text = "Accepted"
    response.json.return_value = {"status": "success"}
    session = Mock()
    session.request.return_value = response
    return session
def create_dummy_data(json_size, num_runs) -> list:
    """Generate ``num_runs`` run payloads, each with a fresh random UUID."""
    payloads = []
    for _ in range(num_runs):
        payloads.append(create_run_data(str(uuid4()), json_size))
    return payloads


def create_runs(runs: list, client: Client) -> None:
    """Submit every run payload to the client (enqueues for batch tracing)."""
    for run_payload in runs:
        client.create_run(**run_payload)
def process_queue(client: Client) -> None:
    """Synchronously drain and submit all batches on the client's tracing queue.

    Mirrors the background tracing thread's loop so the benchmark can time
    batch handling inline instead of in a daemon thread.

    Raises:
        ValueError: If the client has no tracing queue configured.
    """
    if client.tracing_queue is None:
        raise ValueError("Tracing queue is None")
    # Drain in batches of up to 100 without blocking; stop once empty.
    while next_batch := _tracing_thread_drain_queue(
        client.tracing_queue, limit=100, block=False
    ):
        _tracing_thread_handle_batch(
            client, client.tracing_queue, next_batch, use_multipart=True
        )
def benchmark_run_creation(
    *, num_runs: int, json_size: int, samples: int, benchmark_thread: bool
) -> Dict:
    """
    Benchmark run creation with specified parameters.

    Returns timing statistics (mean/median/stdev/min/max in seconds).

    When ``benchmark_thread`` is true, background batch tracing is disabled
    and the queue is drained inline so only batch handling is timed;
    otherwise the timing includes waiting for the background thread to
    empty the queue. The HTTP session is mocked, so no network is involved.
    """
    timings = []

    if benchmark_thread:
        client = Client(session=mock_session(), api_key="xxx", auto_batch_tracing=False)
        client.tracing_queue = PriorityQueue()
    else:
        client = Client(session=mock_session(), api_key="xxx")

    if client.tracing_queue is None:
        raise ValueError("Tracing queue is None")

    for _ in range(samples):
        runs = create_dummy_data(json_size, num_runs)
        start = time.perf_counter()
        create_runs(runs, client)
        # wait for client.tracing_queue to be empty
        if benchmark_thread:
            # reset the timer
            start = time.perf_counter()
            process_queue(client)
        else:
            client.tracing_queue.join()
        elapsed = time.perf_counter() - start
        del runs
        timings.append(elapsed)

    return {
        "mean": statistics.mean(timings),
        "median": statistics.median(timings),
        "stdev": statistics.stdev(timings) if len(timings) > 1 else 0,
        "min": min(timings),
        "max": max(timings),
    }
def test_benchmark_runs(
    *, json_size: int, num_runs: int, samples: int, benchmark_thread: bool
):
    """
    Run benchmarks with different combinations of parameters and report results.
    """
    results = benchmark_run_creation(
        num_repetitions=num_runs,
        json_size=json_size,
        samples=samples,
        benchmark_thread=benchmark_thread,
    ) if False else benchmark_run_creation(
        num_runs=num_runs,
        json_size=json_size,
        samples=samples,
        benchmark_thread=benchmark_thread,
    )

    # Human-readable summary of the collected timing statistics.
    print(f"\nBenchmark Results for {num_runs} runs with JSON size {json_size}:")
    print(f"Mean time: {results['mean']:.4f} seconds")
    print(f"Median time: {results['median']:.4f} seconds")
    print(f"Std Dev: {results['stdev']:.4f} seconds")
    print(f"Min time: {results['min']:.4f} seconds")
    print(f"Max time: {results['max']:.4f} seconds")
    print(f"Throughput: {num_runs / results['mean']:.2f} runs/second")
if __name__ == "__main__":
    # DEBUG logging surfaces the client's internal queue/batch activity.
    logging.basicConfig(level=logging.DEBUG)
    test_benchmark_runs(json_size=5000, num_runs=1000, samples=1, benchmark_thread=True)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/tracing_client_bench.py | import statistics
import time
from datetime import datetime, timedelta, timezone
from typing import Dict, Optional
from unittest.mock import Mock
from uuid import uuid4
from langsmith.client import Client
def create_large_json(length: int) -> Dict:
    """Create a large JSON object for benchmarking purposes.

    The payload is an array of ``length`` small records plus fixed metadata.
    """
    elements = []
    for i in range(length):
        elements.append(
            {
                "index": i,
                "data": f"This is element number {i}",
                "nested": {"id": i, "value": f"Nested value for element {i}"},
            }
        )
    return {
        "name": "Huge JSON",
        "description": "This is a very large JSON object for benchmarking purposes.",
        "array": elements,
        "metadata": {
            "created_at": "2024-10-22T19:00:00Z",
            "author": "Python Program",
            "version": 1.0,
        },
    }
def create_run_data(
    run_id: str, json_size: int, start_time: Optional[datetime] = None
) -> Dict:
    """Create a single run data object.

    Defaults the start time to "now" (UTC); the end time is always 1 ms
    later, and the dotted order is derived from the start time and run id.
    """
    begin = start_time if start_time is not None else datetime.now(timezone.utc)
    finish = begin + timedelta(milliseconds=1)
    order_key = f"{begin.strftime('%Y%m%dT%H%M%S%fZ')}{run_id}"
    return {
        "name": "Run Name",
        "id": run_id,
        "run_type": "chain",
        "inputs": create_large_json(json_size),
        "outputs": create_large_json(json_size),
        "extra": {"extra_data": "value"},
        "trace_id": run_id,
        "dotted_order": order_key,
        "tags": ["tag1", "tag2"],
        "session_name": "Session Name",
        "start_time": begin.isoformat(),
        "end_time": finish.isoformat(),
    }
def benchmark_run_creation(num_runs: int, json_size: int, samples: int = 1) -> Dict:
    """
    Benchmark run creation with specified parameters.

    Returns timing statistics (mean/median/stdev/min/max in seconds).

    Uses a mocked HTTP session that always answers 202 Accepted, so the
    numbers measure client-side serialization and queueing only.
    """
    timings = []
    # Unique project name per invocation so runs don't pile into one project.
    project_name = "__tracing_client_bench_python" + datetime.now().strftime(
        "%Y%m%dT%H%M%S"
    )

    for _ in range(samples):
        # Payloads are pre-built so construction isn't part of the timing.
        runs = [create_run_data(str(uuid4()), json_size) for i in range(num_runs)]

        mock_session = Mock()
        mock_response = Mock()
        mock_response.status_code = 202
        mock_response.text = "Accepted"
        mock_response.json.return_value = {"status": "success"}
        mock_session.request.return_value = mock_response
        client = Client(session=mock_session, api_key="xxx")

        start = time.perf_counter()
        for run in runs:
            client.create_run(**run, project_name=project_name)
        # wait for client.tracing_queue to be empty
        client.tracing_queue.join()
        elapsed = time.perf_counter() - start
        timings.append(elapsed)

    return {
        "mean": statistics.mean(timings),
        "median": statistics.median(timings),
        "stdev": statistics.stdev(timings) if len(timings) > 1 else 0,
        "min": min(timings),
        "max": max(timings),
    }
# Default workload: 1k runs, each carrying two ~3k-element JSON payloads.
json_size = 3_000
num_runs = 1000


def main(json_size: int, num_runs: int):
    """
    Run benchmarks with different combinations of parameters and report results.
    """
    results = benchmark_run_creation(num_runs=num_runs, json_size=json_size)

    print(f"\nBenchmark Results for {num_runs} runs with JSON size {json_size}:")
    print(f"Mean time: {results['mean']:.4f} seconds")
    print(f"Median time: {results['median']:.4f} seconds")
    print(f"Std Dev: {results['stdev']:.4f} seconds")
    print(f"Min time: {results['min']:.4f} seconds")
    print(f"Max time: {results['max']:.4f} seconds")
    print(f"Throughput: {num_runs / results['mean']:.2f} runs/second")


if __name__ == "__main__":
    main(json_size, num_runs)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/dumps_json.py | import uuid
from dataclasses import dataclass, field
from datetime import datetime
from decimal import Decimal
from typing import Any, Callable, Dict, Optional
import numpy as np
from pydantic import BaseModel, Field
from pydantic.v1 import BaseModel as BaseModelV1
from pydantic.v1 import Field as FieldV1
def _default():
    """Build a payload dict covering common serializer edge cases.

    Includes emoji text, a UUID, a datetime, large ints, Decimal,
    set/tuple/bytes values, and a NumPy array.
    """
    big_int = 238928376271863487
    return dict(
        some_val="😈",
        uuid_val=uuid.uuid4(),
        datetime_val=datetime.now(),
        list_val=[big_int for _ in range(5)],
        decimal_val=Decimal("3.14"),
        set_val={1, 2, 3},
        tuple_val=(4, 5, 6),
        bytes_val=b"hello world",
        arr=np.random.random(10),
    )
@dataclass
class DeeplyNested:
    """Dataclass node used to build deeply nested benchmark structures."""
    # Arbitrary payload; defaults to a dict of serializer edge-case values.
    vals: Dict[str, Any] = field(default_factory=_default)
class DeeplyNestedModel(BaseModel):
    """Pydantic (v2) counterpart of DeeplyNested, for serializer benchmarks."""
    vals: Dict[str, Any] = Field(default_factory=_default)
class DeeplyNestedModelV1(BaseModelV1):
    """Pydantic v1 counterpart of DeeplyNested, for serializer benchmarks."""
    vals: Dict[str, Any] = FieldV1(default_factory=_default)
def create_nested_instance(
    depth: int = 5,
    width: int = 5,
    branch_constructor: Optional[Callable] = DeeplyNested,
    leaf_constructor: Optional[Callable] = None,
) -> DeeplyNested:
    """Build a tree of nested objects for serialization benchmarks.

    Interior levels (i < depth-1) are built with ``branch_constructor``;
    the deepest level uses ``leaf_constructor`` (falling back to DeeplyNested).
    Each level stores ``width`` children in its ``vals`` dict, and traversal
    descends along each level's first child.
    """
    top_level = DeeplyNested()
    current_level = top_level
    # Despite the name, this constructor is used for the LEAF level (i == depth-1).
    root_constructor = leaf_constructor or DeeplyNested
    for i in range(depth):
        for j in range(width):
            key = f"key_{i}_{j}"
            if i < depth - 1:
                value = branch_constructor()
                current_level.vals[key] = value
                if j == 0:
                    # Remember the first child: it becomes the next level down.
                    next_level = value
            else:
                current_level.vals[key] = root_constructor()
        if i < depth - 1:
            # NOTE(review): assumes width >= 1; with width == 0, next_level is
            # unbound here and this raises UnboundLocalError — confirm callers.
            current_level = next_level
    return top_level
if __name__ == "__main__":
    import time
    from langsmith.client import _dumps_json
    # Minimal plain object with a dict payload (no dataclass/pydantic machinery),
    # used as the leaf type in the ad-hoc benchmark below.
    class MyClass:
        def __init__(self):
            self.vals = {}
    def run():
        # Build a 200-deep x 150-wide tree with MyClass leaves, then time
        # serialization of the whole structure and report size + duration.
        res = create_nested_instance(200, 150, leaf_constructor=MyClass)
        start_time = time.time()
        res = _dumps_json({"input": res})
        end_time = time.time()
        print(f"Size: {len(res) / 1024:.2f} KB")
        print(f"Time taken: {end_time - start_time:.2f} seconds")
    run()
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/tracing_rust_client_bench.py | import datetime
import statistics
import time
from typing import Dict
from uuid import uuid4
from langsmith_pyo3 import BlockingTracingClient
from tracing_client_bench import create_run_data
def benchmark_run_creation(num_runs: int, json_size: int, samples: int = 1) -> Dict:
    """
    Benchmark run creation with specified parameters.
    Returns timing statistics.
    """
    timings = []
    bench_start_time = datetime.datetime.now(datetime.timezone.utc)
    for _ in range(samples):
        print("creating data")
        # Pre-generate all payloads (start times spaced 2 ms apart) so data
        # construction isn't included in the timed section below.
        runs = [
            create_run_data(
                str(uuid4()),
                json_size,
                bench_start_time + datetime.timedelta(milliseconds=i * 2),
            )
            for i in range(num_runs)
        ]
        # NOTE(review): placeholder endpoint — must be filled in before running.
        endpoint = "http://localhost:1234/FILL_ME_IN"
        queue_capacity = 1_000_000
        batch_size = 100
        batch_timeout_millis = 1000
        worker_threads = 1
        print("initializing client")
        client = BlockingTracingClient(
            endpoint,
            "mock-api-key",
            queue_capacity,
            batch_size,
            batch_timeout_millis,
            worker_threads,
        )
        print("beginning runs")
        start = time.perf_counter()
        for run in runs:
            client.create_run(run)
        # wait for client queues to be empty
        client.drain()
        elapsed = time.perf_counter() - start
        print(f"runs complete: {elapsed:.3f}s")
        timings.append(elapsed)
    return {
        "mean": statistics.mean(timings),
        "median": statistics.median(timings),
        "stdev": statistics.stdev(timings) if len(timings) > 1 else 0,
        "min": min(timings),
        "max": max(timings),
    }
# Default benchmark parameters, consumed by the __main__ entry point below.
json_size = 3_000
num_runs = 1000
def main(json_size: int, num_runs: int):
    """
    Run benchmarks with different combinations of parameters and report results.
    """
    stats = benchmark_run_creation(num_runs=num_runs, json_size=json_size)
    print(f"\nBenchmark Results for {num_runs} runs with JSON size {json_size}:")
    for label, key in (
        ("Mean time", "mean"),
        ("Median time", "median"),
        ("Std Dev", "stdev"),
        ("Min time", "min"),
        ("Max time", "max"),
    ):
        print(f"{label}: {stats[key]:.4f} seconds")
    print(f"Throughput: {num_runs / stats['mean']:.2f} runs/second")
if __name__ == "__main__":
    main(json_size, num_runs)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/json_serialization.py | import statistics
import time
import zlib
from concurrent.futures import ThreadPoolExecutor
import orjson
def create_json_with_large_array(length):
    """Create a large JSON-serializable object for benchmarking.

    The payload is a list of ``length`` records, each carrying a flat field
    and one nested object, plus fixed top-level metadata.
    """
    records = []
    for i in range(length):
        records.append(
            {
                "index": i,
                "data": f"This is element number {i}",
                "nested": {"id": i, "value": f"Nested value for element {i}"},
            }
        )
    return {
        "name": "Huge JSON",
        "description": "This is a very large JSON object for benchmarking purposes.",
        "array": records,
        "metadata": {
            "created_at": "2024-10-22T19:00:00Z",
            "author": "Python Program",
            "version": 1.0,
        },
    }
def create_json_with_large_strings(length: int) -> dict:
    """Build a benchmark payload dominated by three copies of one large string."""
    blob = "a" * length
    doc = {
        "name": "Huge JSON",
        "description": "This is a very large JSON object for benchmarking purposes.",
    }
    # Same large string under three keys (insertion order matters for dumps).
    for key in ("key1", "key2", "key3"):
        doc[key] = blob
    doc["metadata"] = {
        "created_at": "2024-10-22T19:00:00Z",
        "author": "Python Program",
        "version": 1.0,
    }
    return doc
def serialize_sequential(data):
    """Serialize every object in ``data`` one after another with orjson."""
    return list(map(orjson.dumps, data))
def serialize_parallel(data):
    """Serialize all objects concurrently on a thread pool (order preserved)."""
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(orjson.dumps, obj) for obj in data]
        return [f.result() for f in futures]
def serialize_sequential_gz(data):
    """Serialize each object with orjson, then zlib-compress it (level 1).

    Level 1 favors speed over compression ratio.
    """
    return [zlib.compress(orjson.dumps(obj), level=1) for obj in data]
def serialize_parallel_gz(data):
    """Serialize + zlib-compress (level 1) every object on a thread pool."""
    def _serialize_and_compress(obj):
        # Same per-item work as the sequential variant, run concurrently.
        return zlib.compress(orjson.dumps(obj), level=1)

    with ThreadPoolExecutor() as pool:
        return list(pool.map(_serialize_and_compress, data))
def gzip_parallel(serialized_data):
    """Compress already-serialized blobs concurrently with zlib (default level)."""
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(zlib.compress, blob) for blob in serialized_data]
        return [f.result() for f in futures]
def gzip_sequential(serialized_data):
    """Compress each already-serialized blob with zlib, one at a time."""
    return list(map(zlib.compress, serialized_data))
def benchmark_serialization(data, func, samples=10):
    """Time ``func(data)`` over ``samples`` runs and return summary statistics."""
    timings = []
    for _ in range(samples):
        t0 = time.perf_counter()
        func(data)
        timings.append(time.perf_counter() - t0)
    # stdev is undefined for a single sample; report 0 in that case.
    spread = statistics.stdev(timings) if len(timings) > 1 else 0
    return {
        "mean": statistics.mean(timings),
        "median": statistics.median(timings),
        "stdev": spread,
        "min": min(timings),
        "max": max(timings),
    }
def main():
    """Benchmark each (de)serialization strategy on the same dataset and print stats."""
    num_json_objects = 2000
    json_length = 5000
    data = [create_json_with_large_array(json_length) for _ in range(num_json_objects)]
    # Pre-serialized input for the gzip_* benchmarks, which compress bytes.
    serialized_data = serialize_sequential(data)
    for func in [
        serialize_sequential,
        serialize_parallel,
        serialize_sequential_gz,
        serialize_parallel_gz,
        gzip_sequential,
        gzip_parallel,
    ]:
        # data = [
        #     create_json_with_large_strings(json_length)
        #     for _ in range(num_json_objects)
        # ]
        print(
            f"\nBenchmarking {func.__name__} with {num_json_objects} JSON objects "
            f"of length {json_length}..."
        )
        # gzip_* functions take already-serialized bytes; the rest take objects.
        results_seq = (
            benchmark_serialization(data, func)
            if not func.__name__.startswith("gzip")
            else benchmark_serialization(serialized_data, func)
        )
        print(f"Mean time: {results_seq['mean']:.4f} seconds")
        print(f"Median time: {results_seq['median']:.4f} seconds")
        print(f"Std Dev: {results_seq['stdev']:.4f} seconds")
        print(f"Min time: {results_seq['min']:.4f} seconds")
        print(f"Max time: {results_seq['max']:.4f} seconds")
if __name__ == "__main__":
    main()
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/__main__.py | from pyperf._runner import Runner
from bench.create_run_tree import create_run_trees
from bench.dumps_json import (
DeeplyNestedModel,
DeeplyNestedModelV1,
create_nested_instance,
)
from langsmith.client import _dumps_json
class MyClass:
    """Minimal plain object with a dict payload, used as a benchmark node type."""
    def __init__(self):
        self.vals = {}
# Each benchmark entry: (name, callable, single argument passed to the callable).
# The nested instances are built once here so construction isn't benchmarked.
benchmarks = (
    (
        "create_5_000_run_trees",
        create_run_trees,
        5_000,
    ),
    (
        "create_10_000_run_trees",
        create_run_trees,
        10_000,
    ),
    (
        "create_20_000_run_trees",
        create_run_trees,
        20_000,  # fixed: was 10_000, contradicting the benchmark's name
    ),
    (
        "dumps_class_nested_py_branch_and_leaf_200x400",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(
            200, 400, branch_constructor=MyClass, leaf_constructor=MyClass
        ),
    ),
    (
        "dumps_class_nested_py_leaf_50x100",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(50, 100, leaf_constructor=MyClass),
    ),
    (
        "dumps_class_nested_py_leaf_100x200",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(100, 200, leaf_constructor=MyClass),
    ),
    (
        "dumps_dataclass_nested_50x100",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(50, 100),
    ),
    (
        "dumps_pydantic_nested_50x100",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(50, 100, branch_constructor=DeeplyNestedModel),
    ),
    (
        "dumps_pydanticv1_nested_50x100",
        lambda x: _dumps_json({"input": x}),
        create_nested_instance(50, 100, branch_constructor=DeeplyNestedModelV1),
    ),
)
r = Runner()
for name, fn, input_ in benchmarks:
    r.bench_func(name, fn, input_)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/bench/create_run_tree.py | import os
from unittest.mock import patch
from langsmith import RunTree
# Dummy API key so RunTree construction doesn't require real credentials.
os.environ["LANGSMITH_API_KEY"] = "fake"
def create_run_trees(N: int):
    """Create and post N RunTrees against a mocked HTTP session (no network)."""
    with patch("langsmith.client.requests.Session", autospec=True):
        for i in range(N):
            RunTree(name=str(i)).post()
|
0 | lc_public_repos | lc_public_repos/langchain-academy/requirements.txt | langgraph
langgraph-sdk
langgraph-checkpoint-sqlite
langsmith
langchain-community
langchain-core
langchain-openai
notebook
tavily-python
wikipedia
trustcall
langgraph-cli |
0 | lc_public_repos | lc_public_repos/langchain-academy/README.md | # LangChain Academy
## Introduction
Welcome to LangChain Academy!
This is a growing set of modules focused on foundational concepts within the LangChain ecosystem.
Module 0 is basic setup and Modules 1 - 4 focus on LangGraph, progressively adding more advanced themes.
In each module folder, you'll see a set of notebooks. A LangChain Academy lesson accompanies each notebook
to guide you through the topic. Each module also has a `studio` subdirectory, with a set of relevant
graphs that we will explore using the LangGraph API and Studio.
## Setup
### Python version
To get the most out of this course, please ensure you're using Python 3.11 or later.
This version is required for optimal compatibility with LangGraph. If you're on an older version,
upgrading will ensure everything runs smoothly.
```
python3 --version
```
### Clone repo
```
$ git clone https://github.com/langchain-ai/langchain-academy.git
$ cd langchain-academy
```
### Create an environment and install dependencies
#### Mac/Linux/WSL
```
$ python3 -m venv lc-academy-env
$ source lc-academy-env/bin/activate
$ pip install -r requirements.txt
```
#### Windows Powershell
```
PS> python3 -m venv lc-academy-env
PS> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope Process
PS> lc-academy-env\scripts\activate
PS> pip install -r requirements.txt
```
### Running notebooks
If you don't have Jupyter set up, follow installation instructions [here](https://jupyter.org/install).
```
$ jupyter notebook
```
### Setting up env variables
Here is a brief overview of how to set up environment variables. You can also
use a `.env` file with `python-dotenv` library.
#### Mac/Linux/WSL
```
$ export API_ENV_VAR="your-api-key-here"
```
#### Windows Powershell
```
PS> $env:API_ENV_VAR = "your-api-key-here"
```
### Set OpenAI API key
* If you don't have an OpenAI API key, you can sign up [here](https://openai.com/index/openai-api/).
* Set `OPENAI_API_KEY` in your environment
### Sign up and Set LangSmith API
* Sign up for LangSmith [here](https://smith.langchain.com/), find out more about LangSmith
* and how to use it within your workflow [here](https://www.langchain.com/langsmith), and relevant library [docs](https://docs.smith.langchain.com/)!
* Set `LANGCHAIN_API_KEY`, `LANGCHAIN_TRACING_V2=true` in your environment
### Set up Tavily API for web search
* Tavily Search API is a search engine optimized for LLMs and RAG, aimed at efficient,
quick, and persistent search results.
* You can sign up for an API key [here](https://tavily.com/).
It's easy to sign up and offers a very generous free tier. Some lessons (in Module 4) will use Tavily.
* Set `TAVILY_API_KEY` in your environment.
### Set up LangGraph Studio
* Currently, Studio only has macOS support and needs Docker Desktop running.
* Download the latest `.dmg` file [here](https://github.com/langchain-ai/langgraph-studio?tab=readme-ov-file#download)
* Install Docker desktop for Mac [here](https://docs.docker.com/engine/install/)
### Running Studio
Graphs for LangGraph Studio are in the `module-x/studio/` folders.
* To use Studio, you will need to create a .env file with the relevant API keys
* Run this from the command line to create these files for module 1 to 4, as an example:
```
$ for i in {1..4}; do
cp module-$i/studio/.env.example module-$i/studio/.env
echo "OPENAI_API_KEY=\"$OPENAI_API_KEY\"" >> module-$i/studio/.env
done
$ echo "TAVILY_API_KEY=\"$TAVILY_API_KEY\"" >> module-4/studio/.env
```
|
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-5/memoryschema_profile.ipynb | import os, getpass
def _set_env(var: str):
    """Ensure ``var`` is set in os.environ, prompting once via getpass if absent."""
    value = os.environ.get(var) or getpass.getpass(f"{var}: ")
    os.environ[var] = value
_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from typing import TypedDict, List
class UserProfile(TypedDict):
"""User profile schema with typed fields"""
user_name: str # The user's preferred name
interests: List[str] # A list of the user's interests# TypedDict instance
user_profile: UserProfile = {
"user_name": "Lance",
"interests": ["biking", "technology", "coffee"]
}
user_profileimport uuid
from langgraph.store.memory import InMemoryStore
# Initialize the in-memory store
in_memory_store = InMemoryStore()
# Namespace for the memory to save
user_id = "1"
namespace_for_memory = (user_id, "memory")
# Save a memory to namespace as key and value
key = "user_profile"
value = user_profile
in_memory_store.put(namespace_for_memory, key, value)# Search
for m in in_memory_store.search(namespace_for_memory):
print(m.dict())# Get the memory by namespace and key
profile = in_memory_store.get(namespace_for_memory, "user_profile")
profile.value_set_env("OPENAI_API_KEY")from pydantic import BaseModel, Field
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Bind schema to model
model_with_structure = model.with_structured_output(UserProfile)
# Invoke the model to produce structured output that matches the schema
structured_output = model_with_structure.invoke([HumanMessage("My name is Lance, I like to bike.")])
structured_outputfrom IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.runnables.config import RunnableConfig
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful assistant with memory that provides information about the user.
If you have memory for this user, use it to personalize your responses.
Here is the memory (it may be empty): {memory}"""
# Create new memory from the chat history and any existing memory
CREATE_MEMORY_INSTRUCTION = """Create or update a user profile memory based on the user's chat history.
This will be saved for long-term memory. If there is an existing memory, simply update it.
Here is the existing memory (it may be empty): {memory}"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memory from the store and use it to personalize the chatbot's response.

    Reads the per-user profile from the ("memory", user_id) namespace, renders
    it into the system prompt, and invokes the model with the chat history.
    Returns {"messages": <model response>}.
    """
    # Get the user ID from the config
    user_id = config["configurable"]["user_id"]
    # Retrieve memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Format the memories for the system prompt
    if existing_memory and existing_memory.value:
        # Missing fields fall back to 'Unknown' / empty list.
        memory_dict = existing_memory.value
        formatted_memory = (
            f"Name: {memory_dict.get('user_name', 'Unknown')}\n"
            f"Interests: {', '.join(memory_dict.get('interests', []))}"
        )
    else:
        formatted_memory = None
    # Format the memory in the system prompt
    system_msg = MODEL_SYSTEM_MESSAGE.format(memory=formatted_memory)
    # Respond using memory as well as the chat history
    response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and save a memory to the store.

    Renders the existing memory into the extraction instruction, asks the
    structured-output model for an updated profile, and overwrites the stored
    "user_memory" entry for this user.
    """
    # Get the user ID from the config
    user_id = config["configurable"]["user_id"]
    # Retrieve existing memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Format the memories for the system prompt
    if existing_memory and existing_memory.value:
        memory_dict = existing_memory.value
        formatted_memory = (
            f"Name: {memory_dict.get('user_name', 'Unknown')}\n"
            f"Interests: {', '.join(memory_dict.get('interests', []))}"
        )
    else:
        formatted_memory = None
    # Format the existing memory in the instruction
    system_msg = CREATE_MEMORY_INSTRUCTION.format(memory=formatted_memory)
    # Invoke the model to produce structured output that matches the schema
    new_memory = model_with_structure.invoke([SystemMessage(content=system_msg)]+state['messages'])
    # Overwrite the existing user profile memory
    # NOTE(review): new_memory is stored as-is — call_model reads it with
    # .get(...), so it must be dict-like; confirm the structured-output type.
    key = "user_memory"
    store.put(namespace, key, new_memory)
# Define the graph
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
# Store for long-term (across-thread) memory
across_thread_memory = InMemoryStore()
# Checkpointer for short-term (within-thread) memory
within_thread_memory = MemorySaver()
# Compile the graph with the checkpointer and store
graph = builder.compile(checkpointer=within_thread_memory, store=across_thread_memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="Hi, my name is Lance and I like to bike around San Francisco and eat at bakeries.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Namespace for the memory to save
user_id = "1"
namespace = ("memory", user_id)
existing_memory = across_thread_memory.get(namespace, "user_memory")
existing_memory.valuefrom typing import List, Optional
class OutputFormat(BaseModel):
preference: str
sentence_preference_revealed: str
class TelegramPreferences(BaseModel):
preferred_encoding: Optional[List[OutputFormat]] = None
favorite_telegram_operators: Optional[List[OutputFormat]] = None
preferred_telegram_paper: Optional[List[OutputFormat]] = None
class MorseCode(BaseModel):
preferred_key_type: Optional[List[OutputFormat]] = None
favorite_morse_abbreviations: Optional[List[OutputFormat]] = None
class Semaphore(BaseModel):
preferred_flag_color: Optional[List[OutputFormat]] = None
semaphore_skill_level: Optional[List[OutputFormat]] = None
class TrustFallPreferences(BaseModel):
preferred_fall_height: Optional[List[OutputFormat]] = None
trust_level: Optional[List[OutputFormat]] = None
preferred_catching_technique: Optional[List[OutputFormat]] = None
class CommunicationPreferences(BaseModel):
telegram: TelegramPreferences
morse_code: MorseCode
semaphore: Semaphore
class UserPreferences(BaseModel):
communication_preferences: CommunicationPreferences
trust_fall_preferences: TrustFallPreferences
class TelegramAndTrustFallPreferences(BaseModel):
pertinent_user_preferences: UserPreferencesfrom pydantic import ValidationError
# Bind schema to model
model_with_structure = model.with_structured_output(TelegramAndTrustFallPreferences)
# Conversation
conversation = """Operator: How may I assist with your telegram, sir?
Customer: I need to send a message about our trust fall exercise.
Operator: Certainly. Morse code or standard encoding?
Customer: Morse, please. I love using a straight key.
Operator: Excellent. What's your message?
Customer: Tell him I'm ready for a higher fall, and I prefer the diamond formation for catching.
Operator: Done. Shall I use our "Daredevil" paper for this daring message?
Customer: Perfect! Send it by your fastest carrier pigeon.
Operator: It'll be there within the hour, sir."""
# Invoke the model
try:
model_with_structure.invoke(f"""Extract the preferences from the following conversation:
<convo>
{conversation}
</convo>""")
except ValidationError as e:
print(e)# Conversation
conversation = [HumanMessage(content="Hi, I'm Lance."),
AIMessage(content="Nice to meet you, Lance."),
HumanMessage(content="I really like biking around San Francisco.")]from trustcall import create_extractor
# Schema
class UserProfile(BaseModel):
"""User profile schema with typed fields"""
user_name: str = Field(description="The user's preferred name")
interests: List[str] = Field(description="A list of the user's interests")
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Create the extractor
trustcall_extractor = create_extractor(
model,
tools=[UserProfile],
tool_choice="UserProfile"
)
# Instruction
system_msg = "Extract the user profile from the following conversation"
# Invoke the extractor
result = trustcall_extractor.invoke({"messages": [SystemMessage(content=system_msg)]+conversation})for m in result["messages"]:
m.pretty_print()schema = result["responses"]
schemaschema[0].model_dump()result["response_metadata"]# Update the conversation
updated_conversation = [HumanMessage(content="Hi, I'm Lance."),
AIMessage(content="Nice to meet you, Lance."),
HumanMessage(content="I really like biking around San Francisco."),
AIMessage(content="San Francisco is a great city! Where do you go after biking?"),
HumanMessage(content="I really like to go to a bakery after biking."),]
# Update the instruction
system_msg = f"""Update the memory (JSON doc) to incorporate new information from the following conversation"""
# Invoke the extractor with the updated instruction and existing profile with the corresponding tool name (UserProfile)
result = trustcall_extractor.invoke({"messages": [SystemMessage(content=system_msg)]+updated_conversation},
{"existing": {"UserProfile": schema[0].model_dump()}}) for m in result["messages"]:
m.pretty_print()result["response_metadata"]updated_schema = result["responses"][0]
updated_schema.model_dump()bound = create_extractor(
model,
tools=[TelegramAndTrustFallPreferences],
tool_choice="TelegramAndTrustFallPreferences",
)
# Conversation
conversation = """Operator: How may I assist with your telegram, sir?
Customer: I need to send a message about our trust fall exercise.
Operator: Certainly. Morse code or standard encoding?
Customer: Morse, please. I love using a straight key.
Operator: Excellent. What's your message?
Customer: Tell him I'm ready for a higher fall, and I prefer the diamond formation for catching.
Operator: Done. Shall I use our "Daredevil" paper for this daring message?
Customer: Perfect! Send it by your fastest carrier pigeon.
Operator: It'll be there within the hour, sir."""
result = bound.invoke(
f"""Extract the preferences from the following conversation:
<convo>
{conversation}
</convo>"""
)
# Extract the preferences
result["responses"][0]from IPython.display import Image, display
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import StateGraph, MessagesState, START, END
from langchain_core.runnables.config import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.base import BaseStore
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Schema
class UserProfile(BaseModel):
""" Profile of a user """
user_name: str = Field(description="The user's preferred name")
user_location: str = Field(description="The user's location")
interests: list = Field(description="A list of the user's interests")
# Create the extractor
trustcall_extractor = create_extractor(
model,
tools=[UserProfile],
tool_choice="UserProfile", # Enforces use of the UserProfile tool
)
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful assistant with memory that provides information about the user.
If you have memory for this user, use it to personalize your responses.
Here is the memory (it may be empty): {memory}"""
# Extraction instruction
TRUSTCALL_INSTRUCTION = """Create or update the memory (JSON doc) to incorporate information from the following conversation:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memory from the store and use it to personalize the chatbot's response.

    Reads the per-user profile (name, location, interests) from the
    ("memory", user_id) namespace and injects it into the system prompt
    before invoking the model with the chat history.
    """
    # Get the user ID from the config
    user_id = config["configurable"]["user_id"]
    # Retrieve memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Format the memories for the system prompt
    if existing_memory and existing_memory.value:
        # Missing fields fall back to 'Unknown' / empty list.
        memory_dict = existing_memory.value
        formatted_memory = (
            f"Name: {memory_dict.get('user_name', 'Unknown')}\n"
            f"Location: {memory_dict.get('user_location', 'Unknown')}\n"
            f"Interests: {', '.join(memory_dict.get('interests', []))}"
        )
    else:
        formatted_memory = None
    # Format the memory in the system prompt
    system_msg = MODEL_SYSTEM_MESSAGE.format(memory=formatted_memory)
    # Respond using memory as well as the chat history
    response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and save a memory to the store.

    Feeds the existing profile (if any) plus the chat history to the
    trustcall extractor and persists the updated profile as a plain dict.
    """
    # Get the user ID from the config
    user_id = config["configurable"]["user_id"]
    # Retrieve existing memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Get the profile as the value from the list, and convert it to a JSON doc
    existing_profile = {"UserProfile": existing_memory.value} if existing_memory else None
    # Invoke the extractor
    result = trustcall_extractor.invoke({"messages": [SystemMessage(content=TRUSTCALL_INSTRUCTION)]+state["messages"], "existing": existing_profile})
    # Get the updated profile as a JSON object
    updated_profile = result["responses"][0].model_dump()
    # Save the updated profile
    key = "user_memory"
    store.put(namespace, key, updated_profile)
# Define the graph
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
# Store for long-term (across-thread) memory
across_thread_memory = InMemoryStore()
# Checkpointer for short-term (within-thread) memory
within_thread_memory = MemorySaver()
# Compile the graph with the checkpointer and store
graph = builder.compile(checkpointer=within_thread_memory, store=across_thread_memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="Hi, my name is Lance")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input
input_messages = [HumanMessage(content="I like to bike around San Francisco")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Namespace for the memory to save
user_id = "1"
namespace = ("memory", user_id)
existing_memory = across_thread_memory.get(namespace, "user_memory")
existing_memory.dict()# The user profile saved as a JSON object
existing_memory.value# User input
input_messages = [HumanMessage(content="I also enjoy going to bakeries")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "2", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="What bakeries do you recommend for me?")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-5/memory_store.ipynb | import os, getpass
def _set_env(var: str):
    """Prompt for and store ``var`` unless it is already set in the environment."""
    current = os.environ.get(var)
    if current:
        return
    os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"import uuid
from langgraph.store.memory import InMemoryStore
in_memory_store = InMemoryStore()# Namespace for the memory to save
user_id = "1"
namespace_for_memory = (user_id, "memories")
# Save a memory to namespace as key and value
key = str(uuid.uuid4())
# The value needs to be a dictionary
value = {"food_preference" : "I like pizza"}
# Save the memory
in_memory_store.put(namespace_for_memory, key, value)# Search
memories = in_memory_store.search(namespace_for_memory)
type(memories)# Metatdata
memories[0].dict()# The key, value
print(memories[0].key, memories[0].value)# Get the memory by namespace and key
memory = in_memory_store.get(namespace_for_memory, key)
memory.dict()_set_env("OPENAI_API_KEY")# Chat model
from langchain_openai import ChatOpenAI
# Initialize the LLM
model = ChatOpenAI(model="gpt-4o", temperature=0) from IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables.config import RunnableConfig
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful assistant with memory that provides information about the user.
If you have memory for this user, use it to personalize your responses.
Here is the memory (it may be empty): {memory}"""
# Create new memory from the chat history and any existing memory
CREATE_MEMORY_INSTRUCTION = """"You are collecting information about the user to personalize your responses.
CURRENT USER INFORMATION:
{memory}
INSTRUCTIONS:
1. Review the chat history below carefully
2. Identify new information about the user, such as:
- Personal details (name, location)
- Preferences (likes, dislikes)
- Interests and hobbies
- Past experiences
- Goals or future plans
3. Merge any new information with existing memory
4. Format the memory as a clear, bulleted list
5. If new information conflicts with existing memory, keep the most recent version
Remember: Only include factual information directly stated by the user. Do not make assumptions or inferences.
Based on the chat history below, please update the user information:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Load memory from the store and use it to personalize the chatbot's response."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Retrieve memory from the store
namespace = ("memory", user_id)
key = "user_memory"
existing_memory = store.get(namespace, key)
# Extract the actual memory content if it exists and add a prefix
if existing_memory:
# Value is a dictionary with a memory key
existing_memory_content = existing_memory.value.get('memory')
else:
existing_memory_content = "No existing memory found."
# Format the memory in the system prompt
system_msg = MODEL_SYSTEM_MESSAGE.format(memory=existing_memory_content)
# Respond using memory as well as the chat history
response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Reflect on the chat history and save a memory to the store."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Retrieve existing memory from the store
namespace = ("memory", user_id)
existing_memory = store.get(namespace, "user_memory")
# Extract the memory
if existing_memory:
existing_memory_content = existing_memory.value.get('memory')
else:
existing_memory_content = "No existing memory found."
# Format the memory in the system prompt
system_msg = CREATE_MEMORY_INSTRUCTION.format(memory=existing_memory_content)
new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'])
# Overwrite the existing memory in the store
key = "user_memory"
# Write value as a dictionary with a memory key
store.put(namespace, key, {"memory": new_memory.content})
# Define the graph
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
# Store for long-term (across-thread) memory
across_thread_memory = InMemoryStore()
# Checkpointer for short-term (within-thread) memory
within_thread_memory = MemorySaver()
# Compile the graph with the checkpointer fir and store
graph = builder.compile(checkpointer=within_thread_memory, store=across_thread_memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="Hi, my name is Lance")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input
input_messages = [HumanMessage(content="I like to bike around San Francisco")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()thread = {"configurable": {"thread_id": "1"}}
state = graph.get_state(thread).values
for m in state["messages"]:
m.pretty_print()# Namespace for the memory to save
user_id = "1"
namespace = ("memory", user_id)
existing_memory = across_thread_memory.get(namespace, "user_memory")
existing_memory.dict()# We supply a user ID for across-thread memory as well as a new thread ID
config = {"configurable": {"thread_id": "2", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="Hi! Where would you recommend that I go biking?")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input
input_messages = [HumanMessage(content="Great, are there any bakeries nearby that I can check out? I like a croissant after biking.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-5/memory_agent.ipynb | import os, getpass
def _set_env(var: str):
# Check if the variable is set in the OS environment
env_value = os.environ.get(var)
if not env_value:
# If not set, prompt the user for input
env_value = getpass.getpass(f"{var}: ")
# Set the environment variable for the current process
os.environ[var] = env_value
_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from pydantic import BaseModel, Field
class Memory(BaseModel):
content: str = Field(description="The main content of the memory. For example: User expressed interest in learning about French.")
class MemoryCollection(BaseModel):
memories: list[Memory] = Field(description="A list of memories about the user.")from trustcall import create_extractor
from langchain_openai import ChatOpenAI
# Inspect the tool calls made by Trustcall
class Spy:
def __init__(self):
self.called_tools = []
def __call__(self, run):
# Collect information about the tool calls made by the extractor.
q = [run]
while q:
r = q.pop()
if r.child_runs:
q.extend(r.child_runs)
if r.run_type == "chat_model":
self.called_tools.append(
r.outputs["generations"][0][0]["message"]["kwargs"]["tool_calls"]
)
# Initialize the spy
spy = Spy()
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Create the extractor
trustcall_extractor = create_extractor(
model,
tools=[Memory],
tool_choice="Memory",
enable_inserts=True,
)
# Add the spy as a listener
trustcall_extractor_see_all_tool_calls = trustcall_extractor.with_listeners(on_end=spy)from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
# Instruction
instruction = """Extract memories from the following conversation:"""
# Conversation
conversation = [HumanMessage(content="Hi, I'm Lance."),
AIMessage(content="Nice to meet you, Lance."),
HumanMessage(content="This morning I had a nice bike ride in San Francisco.")]
# Invoke the extractor
result = trustcall_extractor.invoke({"messages": [SystemMessage(content=instruction)] + conversation})# Messages contain the tool calls
for m in result["messages"]:
m.pretty_print()# Responses contain the memories that adhere to the schema
for m in result["responses"]:
print(m)# Metadata contains the tool call
for m in result["response_metadata"]:
print(m)# Update the conversation
updated_conversation = [AIMessage(content="That's great, did you do after?"),
HumanMessage(content="I went to Tartine and ate a croissant."),
AIMessage(content="What else is on your mind?"),
HumanMessage(content="I was thinking about my Japan, and going back this winter!"),]
# Update the instruction
system_msg = """Update existing memories and create new ones based on the following conversation:"""
# We'll save existing memories, giving them an ID, key (tool name), and value
tool_name = "Memory"
existing_memories = [(str(i), tool_name, memory.model_dump()) for i, memory in enumerate(result["responses"])] if result["responses"] else None
existing_memories# Invoke the extractor with our updated conversation and existing memories
result = trustcall_extractor_see_all_tool_calls.invoke({"messages": updated_conversation,
"existing": existing_memories})# Metadata contains the tool call
for m in result["response_metadata"]:
print(m)# Messages contain the tool calls
for m in result["messages"]:
m.pretty_print()# Parsed responses
for m in result["responses"]:
print(m)# Inspect the tool calls made by Trustcall
spy.called_toolsdef extract_tool_info(tool_calls, schema_name="Memory"):
"""Extract information from tool calls for both patches and new memories.
Args:
tool_calls: List of tool calls from the model
schema_name: Name of the schema tool (e.g., "Memory", "ToDo", "Profile")
"""
# Initialize list of changes
changes = []
for call_group in tool_calls:
for call in call_group:
if call['name'] == 'PatchDoc':
changes.append({
'type': 'update',
'doc_id': call['args']['json_doc_id'],
'planned_edits': call['args']['planned_edits'],
'value': call['args']['patches'][0]['value']
})
elif call['name'] == schema_name:
changes.append({
'type': 'new',
'value': call['args']
})
# Format results as a single string
result_parts = []
for change in changes:
if change['type'] == 'update':
result_parts.append(
f"Document {change['doc_id']} updated:\n"
f"Plan: {change['planned_edits']}\n"
f"Added content: {change['value']}"
)
else:
result_parts.append(
f"New {schema_name} created:\n"
f"Content: {change['value']}"
)
return "\n\n".join(result_parts)
# Inspect spy.called_tools to see exactly what happened during the extraction
schema_name = "Memory"
changes = extract_tool_info(spy.called_tools, schema_name)
print(changes)from typing import TypedDict, Literal
# Update memory tool
class UpdateMemory(TypedDict):
""" Decision on what memory type to update """
update_type: Literal['user', 'todo', 'instructions']_set_env("OPENAI_API_KEY")import uuid
from IPython.display import Image, display
from datetime import datetime
from trustcall import create_extractor
from typing import Optional
from pydantic import BaseModel, Field
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import merge_message_runs, HumanMessage, SystemMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, END, START
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
from langchain_openai import ChatOpenAI
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# User profile schema
class Profile(BaseModel):
"""This is the profile of the user you are chatting with"""
name: Optional[str] = Field(description="The user's name", default=None)
location: Optional[str] = Field(description="The user's location", default=None)
job: Optional[str] = Field(description="The user's job", default=None)
connections: list[str] = Field(
description="Personal connection of the user, such as family members, friends, or coworkers",
default_factory=list
)
interests: list[str] = Field(
description="Interests that the user has",
default_factory=list
)
# ToDo schema
class ToDo(BaseModel):
task: str = Field(description="The task to be completed.")
time_to_complete: Optional[int] = Field(description="Estimated time to complete the task (minutes).")
deadline: Optional[datetime] = Field(
description="When the task needs to be completed by (if applicable)",
default=None
)
solutions: list[str] = Field(
description="List of specific, actionable solutions (e.g., specific ideas, service providers, or concrete options relevant to completing the task)",
min_items=1,
default_factory=list
)
status: Literal["not started", "in progress", "done", "archived"] = Field(
description="Current status of the task",
default="not started"
)
# Create the Trustcall extractor for updating the user profile
profile_extractor = create_extractor(
model,
tools=[Profile],
tool_choice="Profile",
)
# Chatbot instruction for choosing what to update and what tools to call
MODEL_SYSTEM_MESSAGE = """You are a helpful chatbot.
You are designed to be a companion to a user, helping them keep track of their ToDo list.
You have a long term memory which keeps track of three things:
1. The user's profile (general information about them)
2. The user's ToDo list
3. General instructions for updating the ToDo list
Here is the current User Profile (may be empty if no information has been collected yet):
<user_profile>
{user_profile}
</user_profile>
Here is the current ToDo List (may be empty if no tasks have been added yet):
<todo>
{todo}
</todo>
Here are the current user-specified preferences for updating the ToDo list (may be empty if no preferences have been specified yet):
<instructions>
{instructions}
</instructions>
Here are your instructions for reasoning about the user's messages:
1. Reason carefully about the user's messages as presented below.
2. Decide whether any of the your long-term memory should be updated:
- If personal information was provided about the user, update the user's profile by calling UpdateMemory tool with type `user`
- If tasks are mentioned, update the ToDo list by calling UpdateMemory tool with type `todo`
- If the user has specified preferences for how to update the ToDo list, update the instructions by calling UpdateMemory tool with type `instructions`
3. Tell the user that you have updated your memory, if appropriate:
- Do not tell the user you have updated the user's profile
- Tell the user them when you update the todo list
- Do not tell the user that you have updated instructions
4. Err on the side of updating the todo list. No need to ask for explicit permission.
5. Respond naturally to user user after a tool call was made to save memories, or if no tool call was made."""
# Trustcall instruction
TRUSTCALL_INSTRUCTION = """Reflect on following interaction.
Use the provided tools to retain any necessary memories about the user.
Use parallel tool calling to handle updates and insertions simultaneously.
System Time: {time}"""
# Instructions for updating the ToDo list
CREATE_INSTRUCTIONS = """Reflect on the following interaction.
Based on this interaction, update your instructions for how to update ToDo list items.
Use any feedback from the user to update how they like to have items added, etc.
Your current instructions are:
<current_instructions>
{current_instructions}
</current_instructions>"""
# Node definitions
def task_mAIstro(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Load memories from the store and use them to personalize the chatbot's response."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Retrieve profile memory from the store
namespace = ("profile", user_id)
memories = store.search(namespace)
if memories:
user_profile = memories[0].value
else:
user_profile = None
# Retrieve task memory from the store
namespace = ("todo", user_id)
memories = store.search(namespace)
todo = "\n".join(f"{mem.value}" for mem in memories)
# Retrieve custom instructions
namespace = ("instructions", user_id)
memories = store.search(namespace)
if memories:
instructions = memories[0].value
else:
instructions = ""
system_msg = MODEL_SYSTEM_MESSAGE.format(user_profile=user_profile, todo=todo, instructions=instructions)
# Respond using memory as well as the chat history
response = model.bind_tools([UpdateMemory], parallel_tool_calls=False).invoke([SystemMessage(content=system_msg)]+state["messages"])
return {"messages": [response]}
def update_profile(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Reflect on the chat history and update the memory collection."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Define the namespace for the memories
namespace = ("profile", user_id)
# Retrieve the most recent memories for context
existing_items = store.search(namespace)
# Format the existing memories for the Trustcall extractor
tool_name = "Profile"
existing_memories = ([(existing_item.key, tool_name, existing_item.value)
for existing_item in existing_items]
if existing_items
else None
)
# Merge the chat history and the instruction
TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
# Invoke the extractor
result = profile_extractor.invoke({"messages": updated_messages,
"existing": existing_memories})
# Save the memories from Trustcall to the store
for r, rmeta in zip(result["responses"], result["response_metadata"]):
store.put(namespace,
rmeta.get("json_doc_id", str(uuid.uuid4())),
r.model_dump(mode="json"),
)
tool_calls = state['messages'][-1].tool_calls
return {"messages": [{"role": "tool", "content": "updated profile", "tool_call_id":tool_calls[0]['id']}]}
def update_todos(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Reflect on the chat history and update the memory collection."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Define the namespace for the memories
namespace = ("todo", user_id)
# Retrieve the most recent memories for context
existing_items = store.search(namespace)
# Format the existing memories for the Trustcall extractor
tool_name = "ToDo"
existing_memories = ([(existing_item.key, tool_name, existing_item.value)
for existing_item in existing_items]
if existing_items
else None
)
# Merge the chat history and the instruction
TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
# Initialize the spy for visibility into the tool calls made by Trustcall
spy = Spy()
# Create the Trustcall extractor for updating the ToDo list
todo_extractor = create_extractor(
model,
tools=[ToDo],
tool_choice=tool_name,
enable_inserts=True
).with_listeners(on_end=spy)
# Invoke the extractor
result = todo_extractor.invoke({"messages": updated_messages,
"existing": existing_memories})
# Save the memories from Trustcall to the store
for r, rmeta in zip(result["responses"], result["response_metadata"]):
store.put(namespace,
rmeta.get("json_doc_id", str(uuid.uuid4())),
r.model_dump(mode="json"),
)
# Respond to the tool call made in task_mAIstro, confirming the update
tool_calls = state['messages'][-1].tool_calls
# Extract the changes made by Trustcall and add the the ToolMessage returned to task_mAIstro
todo_update_msg = extract_tool_info(spy.called_tools, tool_name)
return {"messages": [{"role": "tool", "content": todo_update_msg, "tool_call_id":tool_calls[0]['id']}]}
def update_instructions(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Reflect on the chat history and update the memory collection."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
namespace = ("instructions", user_id)
existing_memory = store.get(namespace, "user_instructions")
# Format the memory in the system prompt
system_msg = CREATE_INSTRUCTIONS.format(current_instructions=existing_memory.value if existing_memory else None)
new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'][:-1] + [HumanMessage(content="Please update the instructions based on the conversation")])
# Overwrite the existing memory in the store
key = "user_instructions"
store.put(namespace, key, {"memory": new_memory.content})
tool_calls = state['messages'][-1].tool_calls
return {"messages": [{"role": "tool", "content": "updated instructions", "tool_call_id":tool_calls[0]['id']}]}
# Conditional edge
def route_message(state: MessagesState, config: RunnableConfig, store: BaseStore) -> Literal[END, "update_todos", "update_instructions", "update_profile"]:
"""Reflect on the memories and chat history to decide whether to update the memory collection."""
message = state['messages'][-1]
if len(message.tool_calls) ==0:
return END
else:
tool_call = message.tool_calls[0]
if tool_call['args']['update_type'] == "user":
return "update_profile"
elif tool_call['args']['update_type'] == "todo":
return "update_todos"
elif tool_call['args']['update_type'] == "instructions":
return "update_instructions"
else:
raise ValueError
# Create the graph + all nodes
builder = StateGraph(MessagesState)
# Define the flow of the memory extraction process
builder.add_node(task_mAIstro)
builder.add_node(update_todos)
builder.add_node(update_profile)
builder.add_node(update_instructions)
builder.add_edge(START, "task_mAIstro")
builder.add_conditional_edges("task_mAIstro", route_message)
builder.add_edge("update_todos", "task_mAIstro")
builder.add_edge("update_profile", "task_mAIstro")
builder.add_edge("update_instructions", "task_mAIstro")
# Store for long-term (across-thread) memory
across_thread_memory = InMemoryStore()
# Checkpointer for short-term (within-thread) memory
within_thread_memory = MemorySaver()
# We compile the graph with the checkpointer and store
graph = builder.compile(checkpointer=within_thread_memory, store=across_thread_memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "1", "user_id": "Lance"}}
# User input to create a profile memory
input_messages = [HumanMessage(content="My name is Lance. I live in SF with my wife. I have a 1 year old daughter.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input for a ToDo
input_messages = [HumanMessage(content="My wife asked me to book swim lessons for the baby.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input to update instructions for creating ToDos
input_messages = [HumanMessage(content="When creating or updating ToDo items, include specific local businesses / vendors.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Check for updated instructions
user_id = "Lance"
# Search
for memory in across_thread_memory.search(("instructions", user_id)):
print(memory.value)# User input for a ToDo
input_messages = [HumanMessage(content="I need to fix the jammed electric Yale lock on the door.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Namespace for the memory to save
user_id = "Lance"
# Search
for memory in across_thread_memory.search(("todo", user_id)):
print(memory.value)# User input to update an existing ToDo
input_messages = [HumanMessage(content="For the swim lessons, I need to get that done by end of November.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input for a ToDo
input_messages = [HumanMessage(content="Need to call back City Toyota to schedule car service.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Namespace for the memory to save
user_id = "Lance"
# Search
for memory in across_thread_memory.search(("todo", user_id)):
print(memory.value)# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "2", "user_id": "Lance"}}
# Chat with the chatbot
input_messages = [HumanMessage(content="I have 30 minutes, what tasks can I get done?")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Chat with the chatbot
input_messages = [HumanMessage(content="Yes, give me some options to call for swim lessons.")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-5/memoryschema_collection.ipynb | import os, getpass
def _set_env(var: str):
# Check if the variable is set in the OS environment
env_value = os.environ.get(var)
if not env_value:
# If not set, prompt the user for input
env_value = getpass.getpass(f"{var}: ")
# Set the environment variable for the current process
os.environ[var] = env_value
_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from pydantic import BaseModel, Field
class Memory(BaseModel):
content: str = Field(description="The main content of the memory. For example: User expressed interest in learning about French.")
class MemoryCollection(BaseModel):
memories: list[Memory] = Field(description="A list of memories about the user.")_set_env("OPENAI_API_KEY")from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Bind schema to model
model_with_structure = model.with_structured_output(MemoryCollection)
# Invoke the model to produce structured output that matches the schema
memory_collection = model_with_structure.invoke([HumanMessage("My name is Lance. I like to bike.")])
memory_collection.memoriesmemory_collection.memories[0].model_dump()import uuid
from langgraph.store.memory import InMemoryStore
# Initialize the in-memory store
in_memory_store = InMemoryStore()
# Namespace for the memory to save
user_id = "1"
namespace_for_memory = (user_id, "memories")
# Save a memory to namespace as key and value
key = str(uuid.uuid4())
value = memory_collection.memories[0].model_dump()
in_memory_store.put(namespace_for_memory, key, value)
key = str(uuid.uuid4())
value = memory_collection.memories[1].model_dump()
in_memory_store.put(namespace_for_memory, key, value)# Search
for m in in_memory_store.search(namespace_for_memory):
print(m.dict())from trustcall import create_extractor
# Create the extractor
trustcall_extractor = create_extractor(
model,
tools=[Memory],
tool_choice="Memory",
enable_inserts=True,
)from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
# Instruction
instruction = """Extract memories from the following conversation:"""
# Conversation
conversation = [HumanMessage(content="Hi, I'm Lance."),
AIMessage(content="Nice to meet you, Lance."),
HumanMessage(content="This morning I had a nice bike ride in San Francisco.")]
# Invoke the extractor
result = trustcall_extractor.invoke({"messages": [SystemMessage(content=instruction)] + conversation})# Messages contain the tool calls
for m in result["messages"]:
m.pretty_print()# Responses contain the memories that adhere to the schema
for m in result["responses"]:
print(m)# Metadata contains the tool call
for m in result["response_metadata"]:
print(m)# Update the conversation
updated_conversation = [AIMessage(content="That's great, did you do after?"),
HumanMessage(content="I went to Tartine and ate a croissant."),
AIMessage(content="What else is on your mind?"),
HumanMessage(content="I was thinking about my Japan, and going back this winter!"),]
# Update the instruction
system_msg = """Update existing memories and create new ones based on the following conversation:"""
# We'll save existing memories, giving them an ID, key (tool name), and value
tool_name = "Memory"
existing_memories = [(str(i), tool_name, memory.model_dump()) for i, memory in enumerate(result["responses"])] if result["responses"] else None
existing_memories# Invoke the extractor with our updated conversation and existing memories
result = trustcall_extractor.invoke({"messages": updated_conversation,
"existing": existing_memories})# Messages from the model indicate two tool calls were made
for m in result["messages"]:
m.pretty_print()# Responses contain the memories that adhere to the schema
for m in result["responses"]:
print(m)# Metadata contains the tool call
for m in result["response_metadata"]:
print(m)from IPython.display import Image, display
import uuid
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.memory import InMemoryStore
from langchain_core.messages import merge_message_runs
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables.config import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.base import BaseStore
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Memory schema
class Memory(BaseModel):
content: str = Field(description="The main content of the memory. For example: User expressed interest in learning about French.")
# Create the Trustcall extractor
trustcall_extractor = create_extractor(
model,
tools=[Memory],
tool_choice="Memory",
# This allows the extractor to insert new memories
enable_inserts=True,
)
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful chatbot. You are designed to be a companion to a user.
You have a long term memory which keeps track of information you learn about the user over time.
Current Memory (may include updated memories from this conversation):
{memory}"""
# Trustcall instruction
TRUSTCALL_INSTRUCTION = """Reflect on following interaction.
Use the provided tools to retain any necessary memories about the user.
Use parallel tool calling to handle updates and insertions simultaneously:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Load memories from the store and use them to personalize the chatbot's response."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Retrieve memory from the store
namespace = ("memories", user_id)
memories = store.search(namespace)
# Format the memories for the system prompt
info = "\n".join(f"- {mem.value['content']}" for mem in memories)
system_msg = MODEL_SYSTEM_MESSAGE.format(memory=info)
# Respond using memory as well as the chat history
response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
"""Reflect on the chat history and update the memory collection."""
# Get the user ID from the config
user_id = config["configurable"]["user_id"]
# Define the namespace for the memories
namespace = ("memories", user_id)
# Retrieve the most recent memories for context
existing_items = store.search(namespace)
# Format the existing memories for the Trustcall extractor
tool_name = "Memory"
existing_memories = ([(existing_item.key, tool_name, existing_item.value)
for existing_item in existing_items]
if existing_items
else None
)
# Merge the chat history and the instruction
updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION)] + state["messages"]))
# Invoke the extractor
result = trustcall_extractor.invoke({"messages": updated_messages,
"existing": existing_memories})
# Save the memories from Trustcall to the store
for r, rmeta in zip(result["responses"], result["response_metadata"]):
store.put(namespace,
rmeta.get("json_doc_id", str(uuid.uuid4())),
r.model_dump(mode="json"),
)
# Define the graph
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
# Store for long-term (across-thread) memory
across_thread_memory = InMemoryStore()
# Checkpointer for short-term (within-thread) memory
within_thread_memory = MemorySaver()
# Compile the graph with the checkpointer fir and store
graph = builder.compile(checkpointer=within_thread_memory, store=across_thread_memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="Hi, my name is Lance")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# User input
input_messages = [HumanMessage(content="I like to bike around San Francisco")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# Namespace for the memory to save
user_id = "1"
namespace = ("memories", user_id)
memories = across_thread_memory.search(namespace)
for m in memories:
print(m.dict())# User input
input_messages = [HumanMessage(content="I also enjoy going to bakeries")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print()# We supply a thread ID for short-term (within-thread) memory
# We supply a user ID for long-term (across-thread) memory
config = {"configurable": {"thread_id": "2", "user_id": "1"}}
# User input
input_messages = [HumanMessage(content="What bakeries do you recommend for me?")]
# Run the graph
for chunk in graph.stream({"messages": input_messages}, config, stream_mode="values"):
chunk["messages"][-1].pretty_print() |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/memory_store.py | from langchain_core.messages import SystemMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
import configuration
# Initialize the LLM
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful assistant with memory that provides information about the user.
If you have memory for this user, use it to personalize your responses.
Here is the memory (it may be empty): {memory}"""
# Create new memory from the chat history and any existing memory.
# NOTE: the original had a stray fourth quote (`""""You...`) which injected a
# literal `"` character at the start of the prompt sent to the model.
CREATE_MEMORY_INSTRUCTION = """You are collecting information about the user to personalize your responses.
CURRENT USER INFORMATION:
{memory}
INSTRUCTIONS:
1. Review the chat history below carefully
2. Identify new information about the user, such as:
- Personal details (name, location)
- Preferences (likes, dislikes)
- Interests and hobbies
- Past experiences
- Goals or future plans
3. Merge any new information with existing memory
4. Format the memory as a clear, bulleted list
5. If new information conflicts with existing memory, keep the most recent version
Remember: Only include factual information directly stated by the user. Do not make assumptions or inferences.
Based on the chat history below, please update the user information:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Personalize the chatbot's reply using any memory saved for this user.

    Loads the user's stored memory (if present), injects it into the system
    prompt, and invokes the LLM with that prompt plus the chat history.
    """
    # Resolve the user ID from the runnable configuration.
    conf = configuration.Configuration.from_runnable_config(config)
    # Memory is stored per-user under a fixed key.
    record = store.get(("memory", conf.user_id), "user_memory")
    # Stored value is a dict of the form {"memory": <text>}.
    memory_text = (
        record.value.get('memory') if record else "No existing memory found."
    )
    prompt = MODEL_SYSTEM_MESSAGE.format(memory=memory_text)
    reply = model.invoke([SystemMessage(content=prompt)] + state["messages"])
    return {"messages": reply}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and save an updated memory to the store.

    Reads the user's existing memory, asks the model to merge it with the
    current conversation, and overwrites the stored value with the result.
    """
    # Get configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    # Get the user ID from the config
    user_id = configurable.user_id
    # Retrieve existing memory from the store (per-user namespace, fixed key)
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Extract the memory
    if existing_memory:
        # Value is a dictionary with a memory key
        existing_memory_content = existing_memory.value.get('memory')
    else:
        existing_memory_content = "No existing memory found."
    # Format the memory in the system prompt
    system_msg = CREATE_MEMORY_INSTRUCTION.format(memory=existing_memory_content)
    # Ask the model to produce the merged memory from prompt + chat history
    new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'])
    # Overwrite the existing memory in the store
    key = "user_memory"
    store.put(namespace, key, {"memory": new_memory.content})
# Define the graph
builder = StateGraph(MessagesState,config_schema=configuration.Configuration)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
graph = builder.compile() |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/configuration.py | import os
from dataclasses import dataclass, field, fields
from typing import Any, Optional
from langchain_core.runnables import RunnableConfig
from typing_extensions import Annotated
from dataclasses import dataclass
@dataclass(kw_only=True)
class Configuration:
    """The configurable fields for the chatbot."""
    # Identifies whose memories are read/written; shared fallback if unset.
    user_id: str = "default-user"
    @classmethod
    def from_runnable_config(
        cls, config: Optional[RunnableConfig] = None
    ) -> "Configuration":
        """Create a Configuration instance from a RunnableConfig.

        Resolution order per field: environment variable (upper-cased field
        name) first, then config["configurable"], then the dataclass default.
        Falsy values (None, "") are dropped so the default still applies.
        """
        configurable = (
            config["configurable"] if config and "configurable" in config else {}
        )
        # Env var (e.g. USER_ID) takes precedence over the configurable dict.
        values: dict[str, Any] = {
            f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
            for f in fields(cls)
            if f.init
        }
        # Drop falsy entries so dataclass defaults take effect for them.
        return cls(**{k: v for k, v in values.items() if v})
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/memory_agent.py | import uuid
from datetime import datetime
from pydantic import BaseModel, Field
from trustcall import create_extractor
from typing import Literal, Optional, TypedDict
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import merge_message_runs
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
import configuration
## Utilities
# Inspect the tool calls for Trustcall
class Spy:
    """Run-tree listener that records the tool calls made by chat-model runs.

    Invoked as an ``on_end`` listener: walks the run tree depth-first and
    appends each chat-model run's ``tool_calls`` payload to ``called_tools``.
    """

    def __init__(self):
        # One entry per chat-model run, in traversal order.
        self.called_tools = []

    def __call__(self, run):
        """Traverse *run* and all descendants, collecting chat-model tool calls."""
        stack = [run]
        while stack:
            node = stack.pop()
            children = node.child_runs
            if children:
                stack.extend(children)
            if node.run_type == "chat_model":
                payload = node.outputs["generations"][0][0]["message"]["kwargs"]["tool_calls"]
                self.called_tools.append(payload)
# Extract information from tool calls for both patches and new memories in Trustcall
def extract_tool_info(tool_calls, schema_name="Memory"):
"""Extract information from tool calls for both patches and new memories.
Args:
tool_calls: List of tool calls from the model
schema_name: Name of the schema tool (e.g., "Memory", "ToDo", "Profile")
"""
# Initialize list of changes
changes = []
for call_group in tool_calls:
for call in call_group:
if call['name'] == 'PatchDoc':
changes.append({
'type': 'update',
'doc_id': call['args']['json_doc_id'],
'planned_edits': call['args']['planned_edits'],
'value': call['args']['patches'][0]['value']
})
elif call['name'] == schema_name:
changes.append({
'type': 'new',
'value': call['args']
})
# Format results as a single string
result_parts = []
for change in changes:
if change['type'] == 'update':
result_parts.append(
f"Document {change['doc_id']} updated:\n"
f"Plan: {change['planned_edits']}\n"
f"Added content: {change['value']}"
)
else:
result_parts.append(
f"New {schema_name} created:\n"
f"Content: {change['value']}"
)
return "\n\n".join(result_parts)
## Schema definitions
# User profile schema
# NOTE: docstring and Field descriptions are sent to the LLM as the tool
# schema by Trustcall — treat them as prompt text, not just documentation.
class Profile(BaseModel):
    """This is the profile of the user you are chatting with"""
    # Optional scalar facts about the user.
    name: Optional[str] = Field(description="The user's name", default=None)
    location: Optional[str] = Field(description="The user's location", default=None)
    job: Optional[str] = Field(description="The user's job", default=None)
    # Open-ended lists; default to empty rather than None.
    connections: list[str] = Field(
        description="Personal connection of the user, such as family members, friends, or coworkers",
        default_factory=list
    )
    interests: list[str] = Field(
        description="Interests that the user has",
        default_factory=list
    )
# ToDo schema
class ToDo(BaseModel):
task: str = Field(description="The task to be completed.")
time_to_complete: Optional[int] = Field(description="Estimated time to complete the task (minutes).")
deadline: Optional[datetime] = Field(
description="When the task needs to be completed by (if applicable)",
default=None
)
solutions: list[str] = Field(
description="List of specific, actionable solutions (e.g., specific ideas, service providers, or concrete options relevant to completing the task)",
min_items=1,
default_factory=list
)
status: Literal["not started", "in progress", "done", "archived"] = Field(
description="Current status of the task",
default="not started"
)
## Initialize the model and tools
# Update memory tool
# Tool schema bound to the model in task_mAIstro; its docstring is the tool
# description the LLM sees, so it is left unchanged.
class UpdateMemory(TypedDict):
    """ Decision on what memory type to update """
    # Selects which update node route_message dispatches to.
    update_type: Literal['user', 'todo', 'instructions']
# Initialize the model
model = ChatOpenAI(model="gpt-4o", temperature=0)
## Create the Trustcall extractors for updating the user profile and ToDo list
profile_extractor = create_extractor(
model,
tools=[Profile],
tool_choice="Profile",
)
## Prompts
# Chatbot instruction for choosing what to update and what tools to call
MODEL_SYSTEM_MESSAGE = """You are a helpful chatbot.
You are designed to be a companion to a user, helping them keep track of their ToDo list.
You have a long term memory which keeps track of three things:
1. The user's profile (general information about them)
2. The user's ToDo list
3. General instructions for updating the ToDo list
Here is the current User Profile (may be empty if no information has been collected yet):
<user_profile>
{user_profile}
</user_profile>
Here is the current ToDo List (may be empty if no tasks have been added yet):
<todo>
{todo}
</todo>
Here are the current user-specified preferences for updating the ToDo list (may be empty if no preferences have been specified yet):
<instructions>
{instructions}
</instructions>
Here are your instructions for reasoning about the user's messages:
1. Reason carefully about the user's messages as presented below.
2. Decide whether any of the your long-term memory should be updated:
- If personal information was provided about the user, update the user's profile by calling UpdateMemory tool with type `user`
- If tasks are mentioned, update the ToDo list by calling UpdateMemory tool with type `todo`
- If the user has specified preferences for how to update the ToDo list, update the instructions by calling UpdateMemory tool with type `instructions`
3. Tell the user that you have updated your memory, if appropriate:
- Do not tell the user you have updated the user's profile
- Tell the user them when you update the todo list
- Do not tell the user that you have updated instructions
4. Err on the side of updating the todo list. No need to ask for explicit permission.
5. Respond naturally to user user after a tool call was made to save memories, or if no tool call was made."""
# Trustcall instruction
TRUSTCALL_INSTRUCTION = """Reflect on following interaction.
Use the provided tools to retain any necessary memories about the user.
Use parallel tool calling to handle updates and insertions simultaneously.
System Time: {time}"""
# Instructions for updating the ToDo list
CREATE_INSTRUCTIONS = """Reflect on the following interaction.
Based on this interaction, update your instructions for how to update ToDo list items. Use any feedback from the user to update how they like to have items added, etc.
Your current instructions are:
<current_instructions>
{current_instructions}
</current_instructions>"""
## Node definitions
def task_mAIstro(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memories from the store and use them to personalize the chatbot's response.

    Assembles the user's profile, todo list, and custom instructions into the
    system prompt, then invokes the model with the UpdateMemory tool bound so
    it can request a memory update.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    # Retrieve profile memory from the store
    namespace = ("profile", user_id)
    memories = store.search(namespace)
    if memories:
        user_profile = memories[0].value
    else:
        user_profile = None
    # Retrieve todo memories from the store
    namespace = ("todo", user_id)
    memories = store.search(namespace)
    todo = "\n".join(f"{mem.value}" for mem in memories)
    # Retrieve custom instructions
    namespace = ("instructions", user_id)
    memories = store.search(namespace)
    if memories:
        instructions = memories[0].value
    else:
        instructions = ""
    system_msg = MODEL_SYSTEM_MESSAGE.format(user_profile=user_profile, todo=todo, instructions=instructions)
    # Respond using memory as well as the chat history; one tool call at most
    response = model.bind_tools([UpdateMemory], parallel_tool_calls=False).invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": [response]}
def update_profile(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the user's profile memories.

    Runs the Trustcall profile extractor over the conversation (excluding the
    trailing tool-call message) and persists the results, then answers the
    pending tool call so the graph can return to task_mAIstro.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    # Define the namespace for the memories
    namespace = ("profile", user_id)
    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)
    # Format the existing memories for the Trustcall extractor
    tool_name = "Profile"
    existing_memories = ([(existing_item.key, tool_name, existing_item.value)
                          for existing_item in existing_items]
                          if existing_items
                          else None
                        )
    # Merge the chat history and the instruction
    TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
    # Invoke the extractor
    result = profile_extractor.invoke({"messages": updated_messages,
                                       "existing": existing_memories})
    # Save the memories from Trustcall to the store
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        store.put(namespace,
                  rmeta.get("json_doc_id", str(uuid.uuid4())),
                  r.model_dump(mode="json"),
            )
    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated profile", "tool_call_id":tool_calls[0]['id']}]}
def update_todos(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the user's ToDo memories.

    Builds a Trustcall extractor for the ToDo schema (with a Spy listener so
    the individual tool calls can be summarized), persists the results, and
    answers the pending tool call with a summary of what changed.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    # Define the namespace for the memories
    namespace = ("todo", user_id)
    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)
    # Format the existing memories for the Trustcall extractor
    tool_name = "ToDo"
    existing_memories = ([(existing_item.key, tool_name, existing_item.value)
                          for existing_item in existing_items]
                          if existing_items
                          else None
                        )
    # Merge the chat history and the instruction
    TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
    # Initialize the spy for visibility into the tool calls made by Trustcall
    spy = Spy()
    # Create the Trustcall extractor for updating the ToDo list
    todo_extractor = create_extractor(
    model,
    tools=[ToDo],
    tool_choice=tool_name,
    enable_inserts=True
    ).with_listeners(on_end=spy)
    # Invoke the extractor
    result = todo_extractor.invoke({"messages": updated_messages,
                                    "existing": existing_memories})
    # Save the memories from Trustcall to the store
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        store.put(namespace,
                  rmeta.get("json_doc_id", str(uuid.uuid4())),
                  r.model_dump(mode="json"),
            )
    # Respond to the tool call made in task_mAIstro, confirming the update
    tool_calls = state['messages'][-1].tool_calls
    # Extract the changes made by Trustcall and add them to the ToolMessage returned to task_mAIstro
    todo_update_msg = extract_tool_info(spy.called_tools, tool_name)
    return {"messages": [{"role": "tool", "content": todo_update_msg, "tool_call_id":tool_calls[0]['id']}]}
def update_instructions(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the user's custom instructions.

    Asks the model to revise the stored instructions based on the
    conversation, overwrites them in the store, and answers the pending
    tool call so the graph can return to task_mAIstro.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    namespace = ("instructions", user_id)
    existing_memory = store.get(namespace, "user_instructions")
    # Format the memory in the system prompt
    system_msg = CREATE_INSTRUCTIONS.format(current_instructions=existing_memory.value if existing_memory else None)
    # Exclude the trailing tool-call message; append an explicit update request
    new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'][:-1] + [HumanMessage(content="Please update the instructions based on the conversation")])
    # Overwrite the existing memory in the store
    key = "user_instructions"
    store.put(namespace, key, {"memory": new_memory.content})
    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated instructions", "tool_call_id":tool_calls[0]['id']}]}
# Conditional edge
def route_message(state: MessagesState, config: RunnableConfig, store: BaseStore) -> Literal[END, "update_todos", "update_instructions", "update_profile"]:
"""Reflect on the memories and chat history to decide whether to update the memory collection."""
message = state['messages'][-1]
if len(message.tool_calls) ==0:
return END
else:
tool_call = message.tool_calls[0]
if tool_call['args']['update_type'] == "user":
return "update_profile"
elif tool_call['args']['update_type'] == "todo":
return "update_todos"
elif tool_call['args']['update_type'] == "instructions":
return "update_instructions"
else:
raise ValueError
# Create the graph + all nodes
builder = StateGraph(MessagesState, config_schema=configuration.Configuration)
# Define the flow of the memory extraction process
builder.add_node(task_mAIstro)
builder.add_node(update_todos)
builder.add_node(update_profile)
builder.add_node(update_instructions)
# Define the flow: every update node loops back to the router node,
# which either answers the user or dispatches another update.
builder.add_edge(START, "task_mAIstro")
builder.add_conditional_edges("task_mAIstro", route_message)
builder.add_edge("update_todos", "task_mAIstro")
builder.add_edge("update_profile", "task_mAIstro")
builder.add_edge("update_instructions", "task_mAIstro")
# Compile the graph
graph = builder.compile()
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai
trustcall |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/memoryschema_collection.py | import uuid
from pydantic import BaseModel, Field
from trustcall import create_extractor
from langchain_core.messages import SystemMessage
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
import configuration
# Initialize the LLM
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Memory schema
# Single extracted memory; the Field description is prompt text for the LLM.
class Memory(BaseModel):
    content: str = Field(description="The main content of the memory. For example: User expressed interest in learning about French.")
# Create the Trustcall extractor
trustcall_extractor = create_extractor(
model,
tools=[Memory],
tool_choice="Memory",
# This allows the extractor to insert new memories
enable_inserts=True,
)
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful chatbot. You are designed to be a companion to a user.
You have a long term memory which keeps track of information you learn about the user over time.
Current Memory (may include updated memories from this conversation):
{memory}"""
# Trustcall instruction
TRUSTCALL_INSTRUCTION = """Reflect on following interaction.
Use the provided tools to retain any necessary memories about the user.
Use parallel tool calling to handle updates and insertions simultaneously:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memory from the store and use it to personalize the chatbot's response.

    Collects all memories in the user's namespace, renders them as a bulleted
    list in the system prompt, and invokes the model with the chat history.
    """
    # Get configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    # Get the user ID from the config
    user_id = configurable.user_id
    # Retrieve memory from the store
    namespace = ("memories", user_id)
    memories = store.search(namespace)
    # Format the memories for the system prompt (each value has a 'content' key)
    info = "\n".join(f"- {mem.value['content']}" for mem in memories)
    system_msg = MODEL_SYSTEM_MESSAGE.format(memory=info)
    # Respond using memory as well as the chat history
    response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and save memories to the store.

    Feeds the existing memories and conversation to the Trustcall extractor,
    which may patch existing documents or insert new ones; each result is
    persisted under its original doc id or a fresh UUID.
    """
    # Get configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    # Get the user ID from the config
    user_id = configurable.user_id
    # Define the namespace for the memories
    namespace = ("memories", user_id)
    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)
    # Format the existing memories for the Trustcall extractor
    tool_name = "Memory"
    existing_memories = ([(existing_item.key, tool_name, existing_item.value)
                          for existing_item in existing_items]
                          if existing_items
                          else None
                        )
    # Merge the chat history and the instruction
    updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION)] + state["messages"]))
    # Invoke the extractor
    result = trustcall_extractor.invoke({"messages": updated_messages,
                                         "existing": existing_memories})
    # Save the memories from Trustcall to the store
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        store.put(namespace,
                  rmeta.get("json_doc_id", str(uuid.uuid4())),
                  r.model_dump(mode="json"),
            )
# Define the graph
builder = StateGraph(MessagesState,config_schema=configuration.Configuration)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
graph = builder.compile() |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/.env.example | OPENAI_API_KEY=sk-xxx |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"chatbot_memory": "./memory_store.py:graph",
"chatbot_memory_profile": "./memoryschema_profile.py:graph",
"chatbot_memory_collection": "./memoryschema_collection.py:graph",
"memory_agent": "./memory_agent.py:graph"
},
"env": "./.env",
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy/module-5 | lc_public_repos/langchain-academy/module-5/studio/memoryschema_profile.py | from pydantic import BaseModel, Field
from trustcall import create_extractor
from langchain_core.messages import SystemMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
import configuration
# Initialize the LLM
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Schema
# Structured profile extracted by Trustcall; docstring and Field descriptions
# are sent to the LLM as the tool schema, so they are left unchanged.
class UserProfile(BaseModel):
    """ Profile of a user """
    user_name: str = Field(description="The user's preferred name")
    user_location: str = Field(description="The user's location")
    interests: list = Field(description="A list of the user's interests")
# Create the extractor
trustcall_extractor = create_extractor(
model,
tools=[UserProfile],
tool_choice="UserProfile", # Enforces use of the UserProfile tool
)
# Chatbot instruction
MODEL_SYSTEM_MESSAGE = """You are a helpful assistant with memory that provides information about the user.
If you have memory for this user, use it to personalize your responses.
Here is the memory (it may be empty): {memory}"""
# Extraction instruction
TRUSTCALL_INSTRUCTION = """Create or update the memory (JSON doc) to incorporate information from the following conversation:"""
def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memory from the store and use it to personalize the chatbot's response.

    Renders the stored UserProfile fields (if any) into the system prompt,
    then invokes the model with the chat history.
    """
    # Get configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    # Get the user ID from the config
    user_id = configurable.user_id
    # Retrieve memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Format the memories for the system prompt
    if existing_memory and existing_memory.value:
        # Stored value is the UserProfile model dumped to a dict
        memory_dict = existing_memory.value
        formatted_memory = (
            f"Name: {memory_dict.get('user_name', 'Unknown')}\n"
            f"Location: {memory_dict.get('user_location', 'Unknown')}\n"
            f"Interests: {', '.join(memory_dict.get('interests', []))}"
        )
    else:
        formatted_memory = None
    # Format the memory in the system prompt
    system_msg = MODEL_SYSTEM_MESSAGE.format(memory=formatted_memory)
    # Respond using memory as well as the chat history
    response = model.invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": response}
def write_memory(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and save the updated profile to the store.

    Passes the existing profile (if any) and the conversation to the
    Trustcall extractor, then overwrites the stored profile with the result.
    """
    # Get configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    # Get the user ID from the config
    user_id = configurable.user_id
    # Retrieve existing memory from the store
    namespace = ("memory", user_id)
    existing_memory = store.get(namespace, "user_memory")
    # Get the profile as the value from the list, and convert it to a JSON doc
    existing_profile = {"UserProfile": existing_memory.value} if existing_memory else None
    # Invoke the extractor
    result = trustcall_extractor.invoke({"messages": [SystemMessage(content=TRUSTCALL_INSTRUCTION)]+state["messages"], "existing": existing_profile})
    # Get the updated profile as a JSON object (first and only response)
    updated_profile = result["responses"][0].model_dump()
    # Save the updated profile
    key = "user_memory"
    store.put(namespace, key, updated_profile)
# Define the graph
builder = StateGraph(MessagesState,config_schema=configuration.Configuration)
builder.add_node("call_model", call_model)
builder.add_node("write_memory", write_memory)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "write_memory")
builder.add_edge("write_memory", END)
graph = builder.compile() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-4/parallelization.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from IPython.display import Image, display
from typing import Any
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
class State(TypedDict):
    # No reducer is attached here, so parallel branches writing `state`
    # in the same step raise InvalidUpdateError (demonstrated below).
    state: str
class ReturnNodeValue:
    """Callable graph node that contributes a fixed value to the state."""
    def __init__(self, node_secret: str):
        # Value this node contributes when invoked.
        self._value = node_secret
    def __call__(self, state: State) -> Any:
        # Log the current state, then return this node's update as a list
        # so list-reducers (e.g. operator.add) can append it.
        print(f"Adding {self._value} to {state['state']}")
        return {"state": [self._value]}
# Add nodes
builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("a", ReturnNodeValue("I'm A"))
builder.add_node("b", ReturnNodeValue("I'm B"))
builder.add_node("c", ReturnNodeValue("I'm C"))
builder.add_node("d", ReturnNodeValue("I'm D"))
# Flow
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("b", "c")
builder.add_edge("c", "d")
builder.add_edge("d", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"state": []})builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("a", ReturnNodeValue("I'm A"))
builder.add_node("b", ReturnNodeValue("I'm B"))
builder.add_node("c", ReturnNodeValue("I'm C"))
builder.add_node("d", ReturnNodeValue("I'm D"))
# Flow
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("a", "c")
builder.add_edge("b", "d")
builder.add_edge("c", "d")
builder.add_edge("d", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))from langgraph.errors import InvalidUpdateError
try:
graph.invoke({"state": []})
except InvalidUpdateError as e:
print(f"An error occurred: {e}")import operator
from typing import Annotated
class State(TypedDict):
# The operator.add reducer fn makes this append-only
state: Annotated[list, operator.add]
# Add nodes
builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("a", ReturnNodeValue("I'm A"))
builder.add_node("b", ReturnNodeValue("I'm B"))
builder.add_node("c", ReturnNodeValue("I'm C"))
builder.add_node("d", ReturnNodeValue("I'm D"))
# Flow
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("a", "c")
builder.add_edge("b", "d")
builder.add_edge("c", "d")
builder.add_edge("d", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"state": []})builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("a", ReturnNodeValue("I'm A"))
builder.add_node("b", ReturnNodeValue("I'm B"))
builder.add_node("b2", ReturnNodeValue("I'm B2"))
builder.add_node("c", ReturnNodeValue("I'm C"))
builder.add_node("d", ReturnNodeValue("I'm D"))
# Flow
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("a", "c")
builder.add_edge("b", "b2")
builder.add_edge(["b2", "c"], "d")
builder.add_edge("d", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"state": []})def sorting_reducer(left, right):
""" Combines and sorts the values in a list"""
if not isinstance(left, list):
left = [left]
if not isinstance(right, list):
right = [right]
return sorted(left + right, reverse=False)
class State(TypedDict):
# sorting_reducer will sort the values in state
state: Annotated[list, sorting_reducer]
# Add nodes
builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("a", ReturnNodeValue("I'm A"))
builder.add_node("b", ReturnNodeValue("I'm B"))
builder.add_node("b2", ReturnNodeValue("I'm B2"))
builder.add_node("c", ReturnNodeValue("I'm C"))
builder.add_node("d", ReturnNodeValue("I'm D"))
# Flow
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("a", "c")
builder.add_edge("b", "b2")
builder.add_edge(["b2", "c"], "d")
builder.add_edge("d", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"state": []})from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o", temperature=0) class State(TypedDict):
question: str
answer: str
context: Annotated[list, operator.add]import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("TAVILY_API_KEY")from langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.tools import TavilySearchResults
def search_web(state):
    """ Retrieve docs from web search and append them to state['context']. """
    # Search via Tavily (top 3 results)
    tavily_search = TavilySearchResults(max_results=3)
    search_docs = tavily_search.invoke(state['question'])
    # Format each result as an XML-ish <Document> block for the prompt
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
            for doc in search_docs
        ]
    )
    # Returned as a single-item list so the operator.add reducer appends it
    return {"context": [formatted_search_docs]}
def search_wikipedia(state):
""" Retrieve docs from wikipedia """
# Search
search_docs = WikipediaLoader(query=state['question'],
load_max_docs=2).load()
# Format
formatted_search_docs = "\n\n---\n\n".join(
[
f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
for doc in search_docs
]
)
return {"context": [formatted_search_docs]}
def generate_answer(state):
""" Node to answer a question """
# Get state
context = state["context"]
question = state["question"]
# Template
answer_template = """Answer the question {question} using this context: {context}"""
answer_instructions = answer_template.format(question=question,
context=context)
# Answer
answer = llm.invoke([SystemMessage(content=answer_instructions)]+[HumanMessage(content=f"Answer the question.")])
# Append it to state
return {"answer": answer}
# Add nodes
builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("search_web",search_web)
builder.add_node("search_wikipedia", search_wikipedia)
builder.add_node("generate_answer", generate_answer)
# Flow
builder.add_edge(START, "search_wikipedia")
builder.add_edge(START, "search_web")
builder.add_edge("search_wikipedia", "generate_answer")
builder.add_edge("search_web", "generate_answer")
builder.add_edge("generate_answer", END)
graph = builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))result = graph.invoke({"question": "How were Nvidia's Q2 2024 earnings"})
result['answer'].contentimport platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
client = get_client(url="http://localhost:63082")thread = await client.threads.create()
input_question = {"question": "How were Nvidia Q2 2024 earnings?"}
async for event in client.runs.stream(thread["thread_id"],
assistant_id="parallelization",
input=input_question,
stream_mode="values"):
# Check if answer has been added to state
answer = event.data.get('answer', None)
if answer:
print(answer['content']) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-4/sub-graph.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from operator import add
from typing_extensions import TypedDict
from typing import List, Optional, Annotated
# The structure of the logs
class Log(TypedDict):
id: str
question: str
docs: Optional[List]
answer: str
grade: Optional[int]
grader: Optional[str]
feedback: Optional[str]from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
# Failure Analysis Sub-graph
class FailureAnalysisState(TypedDict):
cleaned_logs: List[Log]
failures: List[Log]
fa_summary: str
processed_logs: List[str]
class FailureAnalysisOutputState(TypedDict):
fa_summary: str
processed_logs: List[str]
def get_failures(state):
""" Get logs that contain a failure """
cleaned_logs = state["cleaned_logs"]
failures = [log for log in cleaned_logs if "grade" in log]
return {"failures": failures}
def generate_summary(state):
""" Generate summary of failures """
failures = state["failures"]
# Add fxn: fa_summary = summarize(failures)
fa_summary = "Poor quality retrieval of Chroma documentation."
return {"fa_summary": fa_summary, "processed_logs": [f"failure-analysis-on-log-{failure['id']}" for failure in failures]}
fa_builder = StateGraph(input=FailureAnalysisState,output=FailureAnalysisOutputState)
fa_builder.add_node("get_failures", get_failures)
fa_builder.add_node("generate_summary", generate_summary)
fa_builder.add_edge(START, "get_failures")
fa_builder.add_edge("get_failures", "generate_summary")
fa_builder.add_edge("generate_summary", END)
graph = fa_builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))# Summarization subgraph
class QuestionSummarizationState(TypedDict):
cleaned_logs: List[Log]
qs_summary: str
report: str
processed_logs: List[str]
class QuestionSummarizationOutputState(TypedDict):
report: str
processed_logs: List[str]
def generate_summary(state):
cleaned_logs = state["cleaned_logs"]
# Add fxn: summary = summarize(generate_summary)
summary = "Questions focused on usage of ChatOllama and Chroma vector store."
return {"qs_summary": summary, "processed_logs": [f"summary-on-log-{log['id']}" for log in cleaned_logs]}
def send_to_slack(state):
qs_summary = state["qs_summary"]
# Add fxn: report = report_generation(qs_summary)
report = "foo bar baz"
return {"report": report}
qs_builder = StateGraph(input=QuestionSummarizationState,output=QuestionSummarizationOutputState)
qs_builder.add_node("generate_summary", generate_summary)
qs_builder.add_node("send_to_slack", send_to_slack)
qs_builder.add_edge(START, "generate_summary")
qs_builder.add_edge("generate_summary", "send_to_slack")
qs_builder.add_edge("send_to_slack", END)
graph = qs_builder.compile()
display(Image(graph.get_graph().draw_mermaid_png()))# Entry Graph
class EntryGraphState(TypedDict):
raw_logs: List[Log]
cleaned_logs: Annotated[List[Log], add] # This will be USED BY in BOTH sub-graphs
fa_summary: str # This will only be generated in the FA sub-graph
report: str # This will only be generated in the QS sub-graph
processed_logs: Annotated[List[int], add] # This will be generated in BOTH sub-graphs# Entry Graph
class EntryGraphState(TypedDict):
raw_logs: List[Log]
cleaned_logs: List[Log]
fa_summary: str # This will only be generated in the FA sub-graph
report: str # This will only be generated in the QS sub-graph
processed_logs: Annotated[List[int], add] # This will be generated in BOTH sub-graphs
def clean_logs(state):
# Get logs
raw_logs = state["raw_logs"]
# Data cleaning raw_logs -> docs
cleaned_logs = raw_logs
return {"cleaned_logs": cleaned_logs}
entry_builder = StateGraph(EntryGraphState)
entry_builder.add_node("clean_logs", clean_logs)
entry_builder.add_node("question_summarization", qs_builder.compile())
entry_builder.add_node("failure_analysis", fa_builder.compile())
entry_builder.add_edge(START, "clean_logs")
entry_builder.add_edge("clean_logs", "failure_analysis")
entry_builder.add_edge("clean_logs", "question_summarization")
entry_builder.add_edge("failure_analysis", END)
entry_builder.add_edge("question_summarization", END)
graph = entry_builder.compile()
from IPython.display import Image, display
# Setting xray to 1 will show the internal structure of the nested graph
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# Dummy logs
question_answer = Log(
id="1",
question="How can I import ChatOllama?",
answer="To import ChatOllama, use: 'from langchain_community.chat_models import ChatOllama.'",
)
question_answer_feedback = Log(
id="2",
question="How can I use Chroma vector store?",
answer="To use Chroma, define: rag_chain = create_retrieval_chain(retriever, question_answer_chain).",
grade=0,
grader="Document Relevance Recall",
feedback="The retrieved documents discuss vector stores in general, but not Chroma specifically",
)
raw_logs = [question_answer,question_answer_feedback]
graph.invoke({"raw_logs": raw_logs}) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-4/map-reduce.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from langchain_openai import ChatOpenAI
# Prompts we will use
subjects_prompt = """Generate a list of 3 sub-topics that are all related to this overall topic: {topic}."""
joke_prompt = """Generate a joke about {subject}"""
best_joke_prompt = """Below are a bunch of jokes about {topic}. Select the best one! Return the ID of the best one, starting 0 as the ID for the first joke. Jokes: \n\n {jokes}"""
# LLM
model = ChatOpenAI(model="gpt-4o", temperature=0) import operator
from typing import Annotated
from typing_extensions import TypedDict
from pydantic import BaseModel
class Subjects(BaseModel):
subjects: list[str]
class BestJoke(BaseModel):
id: int
class OverallState(TypedDict):
topic: str
subjects: list
jokes: Annotated[list, operator.add]
best_selected_joke: strdef generate_topics(state: OverallState):
prompt = subjects_prompt.format(topic=state["topic"])
response = model.with_structured_output(Subjects).invoke(prompt)
return {"subjects": response.subjects}from langgraph.constants import Send
def continue_to_jokes(state: OverallState):
return [Send("generate_joke", {"subject": s}) for s in state["subjects"]]class JokeState(TypedDict):
subject: str
class Joke(BaseModel):
joke: str
def generate_joke(state: JokeState):
prompt = joke_prompt.format(subject=state["subject"])
response = model.with_structured_output(Joke).invoke(prompt)
return {"jokes": [response.joke]}def best_joke(state: OverallState):
jokes = "\n\n".join(state["jokes"])
prompt = best_joke_prompt.format(topic=state["topic"], jokes=jokes)
response = model.with_structured_output(BestJoke).invoke(prompt)
return {"best_selected_joke": state["jokes"][response.id]}from IPython.display import Image
from langgraph.graph import END, StateGraph, START
# Construct the graph: here we put everything together to construct our graph
graph = StateGraph(OverallState)
graph.add_node("generate_topics", generate_topics)
graph.add_node("generate_joke", generate_joke)
graph.add_node("best_joke", best_joke)
graph.add_edge(START, "generate_topics")
graph.add_conditional_edges("generate_topics", continue_to_jokes, ["generate_joke"])
graph.add_edge("generate_joke", "best_joke")
graph.add_edge("best_joke", END)
# Compile the graph
app = graph.compile()
Image(app.get_graph().draw_mermaid_png())# Call the graph: here we call it to generate a list of jokes
for s in app.stream({"topic": "animals"}):
print(s) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-4/research-assistant.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o", temperature=0) _set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from typing import List
from typing_extensions import TypedDict
from pydantic import BaseModel, Field
class Analyst(BaseModel):
affiliation: str = Field(
description="Primary affiliation of the analyst.",
)
name: str = Field(
description="Name of the analyst."
)
role: str = Field(
description="Role of the analyst in the context of the topic.",
)
description: str = Field(
description="Description of the analyst focus, concerns, and motives.",
)
@property
def persona(self) -> str:
return f"Name: {self.name}\nRole: {self.role}\nAffiliation: {self.affiliation}\nDescription: {self.description}\n"
class Perspectives(BaseModel):
analysts: List[Analyst] = Field(
description="Comprehensive list of analysts with their roles and affiliations.",
)
class GenerateAnalystsState(TypedDict):
topic: str # Research topic
max_analysts: int # Number of analysts
human_analyst_feedback: str # Human feedback
analysts: List[Analyst] # Analyst asking questionsfrom IPython.display import Image, display
from langgraph.graph import START, END, StateGraph
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
analyst_instructions="""You are tasked with creating a set of AI analyst personas. Follow these instructions carefully:
1. First, review the research topic:
{topic}
2. Examine any editorial feedback that has been optionally provided to guide creation of the analysts:
{human_analyst_feedback}
3. Determine the most interesting themes based upon documents and / or feedback above.
4. Pick the top {max_analysts} themes.
5. Assign one analyst to each theme."""
def create_analysts(state: GenerateAnalystsState):
""" Create analysts """
topic=state['topic']
max_analysts=state['max_analysts']
human_analyst_feedback=state.get('human_analyst_feedback', '')
# Enforce structured output
structured_llm = llm.with_structured_output(Perspectives)
# System message
system_message = analyst_instructions.format(topic=topic,
human_analyst_feedback=human_analyst_feedback,
max_analysts=max_analysts)
# Generate question
analysts = structured_llm.invoke([SystemMessage(content=system_message)]+[HumanMessage(content="Generate the set of analysts.")])
# Write the list of analysis to state
return {"analysts": analysts.analysts}
def human_feedback(state: GenerateAnalystsState):
""" No-op node that should be interrupted on """
pass
def should_continue(state: GenerateAnalystsState):
""" Return the next node to execute """
# Check if human feedback
human_analyst_feedback=state.get('human_analyst_feedback', None)
if human_analyst_feedback:
return "create_analysts"
# Otherwise end
return END
# Add nodes and edges
builder = StateGraph(GenerateAnalystsState)
builder.add_node("create_analysts", create_analysts)
builder.add_node("human_feedback", human_feedback)
builder.add_edge(START, "create_analysts")
builder.add_edge("create_analysts", "human_feedback")
builder.add_conditional_edges("human_feedback", should_continue, ["create_analysts", END])
# Compile
memory = MemorySaver()
graph = builder.compile(interrupt_before=['human_feedback'], checkpointer=memory)
# View
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# Input
max_analysts = 3
topic = "The benefits of adopting LangGraph as an agent framework"
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream({"topic":topic,"max_analysts":max_analysts,}, thread, stream_mode="values"):
# Review
analysts = event.get('analysts', '')
if analysts:
for analyst in analysts:
print(f"Name: {analyst.name}")
print(f"Affiliation: {analyst.affiliation}")
print(f"Role: {analyst.role}")
print(f"Description: {analyst.description}")
print("-" * 50) # Get state and look at next node
state = graph.get_state(thread)
state.next# We now update the state as if we are the human_feedback node
graph.update_state(thread, {"human_analyst_feedback":
"Add in someone from a startup to add an entrepreneur perspective"}, as_node="human_feedback")# Continue the graph execution
for event in graph.stream(None, thread, stream_mode="values"):
# Review
analysts = event.get('analysts', '')
if analysts:
for analyst in analysts:
print(f"Name: {analyst.name}")
print(f"Affiliation: {analyst.affiliation}")
print(f"Role: {analyst.role}")
print(f"Description: {analyst.description}")
print("-" * 50) # If we are satisfied, then we simply supply no feedback
further_feedack = None
graph.update_state(thread, {"human_analyst_feedback":
further_feedack}, as_node="human_feedback")# Continue the graph execution to end
for event in graph.stream(None, thread, stream_mode="updates"):
print("--Node--")
node_name = next(iter(event.keys()))
print(node_name)final_state = graph.get_state(thread)
analysts = final_state.values.get('analysts')final_state.nextfor analyst in analysts:
print(f"Name: {analyst.name}")
print(f"Affiliation: {analyst.affiliation}")
print(f"Role: {analyst.role}")
print(f"Description: {analyst.description}")
print("-" * 50) import operator
from typing import Annotated
from langgraph.graph import MessagesState
class InterviewState(MessagesState):
max_num_turns: int # Number turns of conversation
context: Annotated[list, operator.add] # Source docs
analyst: Analyst # Analyst asking questions
interview: str # Interview transcript
sections: list # Final key we duplicate in outer state for Send() API
class SearchQuery(BaseModel):
search_query: str = Field(None, description="Search query for retrieval.")question_instructions = """You are an analyst tasked with interviewing an expert to learn about a specific topic.
Your goal is boil down to interesting and specific insights related to your topic.
1. Interesting: Insights that people will find surprising or non-obvious.
2. Specific: Insights that avoid generalities and include specific examples from the expert.
Here is your topic of focus and set of goals: {goals}
Begin by introducing yourself using a name that fits your persona, and then ask your question.
Continue to ask questions to drill down and refine your understanding of the topic.
When you are satisfied with your understanding, complete the interview with: "Thank you so much for your help!"
Remember to stay in character throughout your response, reflecting the persona and goals provided to you."""
def generate_question(state: InterviewState):
""" Node to generate a question """
# Get state
analyst = state["analyst"]
messages = state["messages"]
# Generate question
system_message = question_instructions.format(goals=analyst.persona)
question = llm.invoke([SystemMessage(content=system_message)]+messages)
# Write messages to state
return {"messages": [question]}def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("TAVILY_API_KEY")# Web search tool
from langchain_community.tools.tavily_search import TavilySearchResults
tavily_search = TavilySearchResults(max_results=3)# Wikipedia search tool
from langchain_community.document_loaders import WikipediaLoaderfrom langchain_core.messages import get_buffer_string
# Search query writing
search_instructions = SystemMessage(content=f"""You will be given a conversation between an analyst and an expert.
Your goal is to generate a well-structured query for use in retrieval and / or web-search related to the conversation.
First, analyze the full conversation.
Pay particular attention to the final question posed by the analyst.
Convert this final question into a well-structured web search query""")
def search_web(state: InterviewState):
""" Retrieve docs from web search """
# Search query
structured_llm = llm.with_structured_output(SearchQuery)
search_query = structured_llm.invoke([search_instructions]+state['messages'])
# Search
search_docs = tavily_search.invoke(search_query.search_query)
# Format
formatted_search_docs = "\n\n---\n\n".join(
[
f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
for doc in search_docs
]
)
return {"context": [formatted_search_docs]}
def search_wikipedia(state: InterviewState):
""" Retrieve docs from wikipedia """
# Search query
structured_llm = llm.with_structured_output(SearchQuery)
search_query = structured_llm.invoke([search_instructions]+state['messages'])
# Search
search_docs = WikipediaLoader(query=search_query.search_query,
load_max_docs=2).load()
# Format
formatted_search_docs = "\n\n---\n\n".join(
[
f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
for doc in search_docs
]
)
return {"context": [formatted_search_docs]}
answer_instructions = """You are an expert being interviewed by an analyst.
Here is analyst area of focus: {goals}.
You goal is to answer a question posed by the interviewer.
To answer question, use this context:
{context}
When answering questions, follow these guidelines:
1. Use only the information provided in the context.
2. Do not introduce external information or make assumptions beyond what is explicitly stated in the context.
3. The context contain sources at the topic of each individual document.
4. Include these sources your answer next to any relevant statements. For example, for source # 1 use [1].
5. List your sources in order at the bottom of your answer. [1] Source 1, [2] Source 2, etc
6. If the source is: <Document source="assistant/docs/llama3_1.pdf" page="7"/>' then just list:
[1] assistant/docs/llama3_1.pdf, page 7
And skip the addition of the brackets as well as the Document source preamble in your citation."""
def generate_answer(state: InterviewState):
""" Node to answer a question """
# Get state
analyst = state["analyst"]
messages = state["messages"]
context = state["context"]
# Answer question
system_message = answer_instructions.format(goals=analyst.persona, context=context)
answer = llm.invoke([SystemMessage(content=system_message)]+messages)
# Name the message as coming from the expert
answer.name = "expert"
# Append it to state
return {"messages": [answer]}
def save_interview(state: InterviewState):
""" Save interviews """
# Get messages
messages = state["messages"]
# Convert interview to a string
interview = get_buffer_string(messages)
# Save to interviews key
return {"interview": interview}
def route_messages(state: InterviewState,
name: str = "expert"):
""" Route between question and answer """
# Get messages
messages = state["messages"]
max_num_turns = state.get('max_num_turns',2)
# Check the number of expert answers
num_responses = len(
[m for m in messages if isinstance(m, AIMessage) and m.name == name]
)
# End if expert has answered more than the max turns
if num_responses >= max_num_turns:
return 'save_interview'
# This router is run after each question - answer pair
# Get the last question asked to check if it signals the end of discussion
last_question = messages[-2]
if "Thank you so much for your help" in last_question.content:
return 'save_interview'
return "ask_question"
section_writer_instructions = """You are an expert technical writer.
Your task is to create a short, easily digestible section of a report based on a set of source documents.
1. Analyze the content of the source documents:
- The name of each source document is at the start of the document, with the <Document tag.
2. Create a report structure using markdown formatting:
- Use ## for the section title
- Use ### for sub-section headers
3. Write the report following this structure:
a. Title (## header)
b. Summary (### header)
c. Sources (### header)
4. Make your title engaging based upon the focus area of the analyst:
{focus}
5. For the summary section:
- Set up summary with general background / context related to the focus area of the analyst
- Emphasize what is novel, interesting, or surprising about insights gathered from the interview
- Create a numbered list of source documents, as you use them
- Do not mention the names of interviewers or experts
- Aim for approximately 400 words maximum
- Use numbered sources in your report (e.g., [1], [2]) based on information from source documents
6. In the Sources section:
- Include all sources used in your report
- Provide full links to relevant websites or specific document paths
- Separate each source by a newline. Use two spaces at the end of each line to create a newline in Markdown.
- It will look like:
### Sources
[1] Link or Document name
[2] Link or Document name
7. Be sure to combine sources. For example this is not correct:
[3] https://ai.meta.com/blog/meta-llama-3-1/
[4] https://ai.meta.com/blog/meta-llama-3-1/
There should be no redundant sources. It should simply be:
[3] https://ai.meta.com/blog/meta-llama-3-1/
8. Final review:
- Ensure the report follows the required structure
- Include no preamble before the title of the report
- Check that all guidelines have been followed"""
def write_section(state: InterviewState):
""" Node to answer a question """
# Get state
interview = state["interview"]
context = state["context"]
analyst = state["analyst"]
# Write section using either the gathered source docs from interview (context) or the interview itself (interview)
system_message = section_writer_instructions.format(focus=analyst.description)
section = llm.invoke([SystemMessage(content=system_message)]+[HumanMessage(content=f"Use this source to write your section: {context}")])
# Append it to state
return {"sections": [section.content]}
# Add nodes and edges
interview_builder = StateGraph(InterviewState)
interview_builder.add_node("ask_question", generate_question)
interview_builder.add_node("search_web", search_web)
interview_builder.add_node("search_wikipedia", search_wikipedia)
interview_builder.add_node("answer_question", generate_answer)
interview_builder.add_node("save_interview", save_interview)
interview_builder.add_node("write_section", write_section)
# Flow
interview_builder.add_edge(START, "ask_question")
interview_builder.add_edge("ask_question", "search_web")
interview_builder.add_edge("ask_question", "search_wikipedia")
interview_builder.add_edge("search_web", "answer_question")
interview_builder.add_edge("search_wikipedia", "answer_question")
interview_builder.add_conditional_edges("answer_question", route_messages,['ask_question','save_interview'])
interview_builder.add_edge("save_interview", "write_section")
interview_builder.add_edge("write_section", END)
# Interview
memory = MemorySaver()
interview_graph = interview_builder.compile(checkpointer=memory).with_config(run_name="Conduct Interviews")
# View
display(Image(interview_graph.get_graph().draw_mermaid_png()))# Pick one analyst
analysts[0]from IPython.display import Markdown
messages = [HumanMessage(f"So you said you were writing an article on {topic}?")]
thread = {"configurable": {"thread_id": "1"}}
interview = interview_graph.invoke({"analyst": analysts[0], "messages": messages, "max_num_turns": 2}, thread)
Markdown(interview['sections'][0])import operator
from typing import List, Annotated
from typing_extensions import TypedDict
class ResearchGraphState(TypedDict):
topic: str # Research topic
max_analysts: int # Number of analysts
human_analyst_feedback: str # Human feedback
analysts: List[Analyst] # Analyst asking questions
sections: Annotated[list, operator.add] # Send() API key
introduction: str # Introduction for the final report
content: str # Content for the final report
conclusion: str # Conclusion for the final report
final_report: str # Final reportfrom langgraph.constants import Send
def initiate_all_interviews(state: ResearchGraphState):
""" This is the "map" step where we run each interview sub-graph using Send API """
# Check if human feedback
human_analyst_feedback=state.get('human_analyst_feedback')
if human_analyst_feedback:
# Return to create_analysts
return "create_analysts"
# Otherwise kick off interviews in parallel via Send() API
else:
topic = state["topic"]
return [Send("conduct_interview", {"analyst": analyst,
"messages": [HumanMessage(
content=f"So you said you were writing an article on {topic}?"
)
]}) for analyst in state["analysts"]]
report_writer_instructions = """You are a technical writer creating a report on this overall topic:
{topic}
You have a team of analysts. Each analyst has done two things:
1. They conducted an interview with an expert on a specific sub-topic.
2. They write up their finding into a memo.
Your task:
1. You will be given a collection of memos from your analysts.
2. Think carefully about the insights from each memo.
3. Consolidate these into a crisp overall summary that ties together the central ideas from all of the memos.
4. Summarize the central points in each memo into a cohesive single narrative.
To format your report:
1. Use markdown formatting.
2. Include no pre-amble for the report.
3. Use no sub-heading.
4. Start your report with a single title header: ## Insights
5. Do not mention any analyst names in your report.
6. Preserve any citations in the memos, which will be annotated in brackets, for example [1] or [2].
7. Create a final, consolidated list of sources and add to a Sources section with the `## Sources` header.
8. List your sources in order and do not repeat.
[1] Source 1
[2] Source 2
Here are the memos from your analysts to build your report from:
{context}"""
def write_report(state: ResearchGraphState):
# Full set of sections
sections = state["sections"]
topic = state["topic"]
# Concat all sections together
formatted_str_sections = "\n\n".join([f"{section}" for section in sections])
# Summarize the sections into a final report
system_message = report_writer_instructions.format(topic=topic, context=formatted_str_sections)
report = llm.invoke([SystemMessage(content=system_message)]+[HumanMessage(content=f"Write a report based upon these memos.")])
return {"content": report.content}
intro_conclusion_instructions = """You are a technical writer finishing a report on {topic}
You will be given all of the sections of the report.
You job is to write a crisp and compelling introduction or conclusion section.
The user will instruct you whether to write the introduction or conclusion.
Include no pre-amble for either section.
Target around 100 words, crisply previewing (for introduction) or recapping (for conclusion) all of the sections of the report.
Use markdown formatting.
For your introduction, create a compelling title and use the # header for the title.
For your introduction, use ## Introduction as the section header.
For your conclusion, use ## Conclusion as the section header.
Here are the sections to reflect on for writing: {formatted_str_sections}"""
def write_introduction(state: ResearchGraphState):
# Full set of sections
sections = state["sections"]
topic = state["topic"]
# Concat all sections together
formatted_str_sections = "\n\n".join([f"{section}" for section in sections])
# Summarize the sections into a final report
instructions = intro_conclusion_instructions.format(topic=topic, formatted_str_sections=formatted_str_sections)
intro = llm.invoke([instructions]+[HumanMessage(content=f"Write the report introduction")])
return {"introduction": intro.content}
def write_conclusion(state: ResearchGraphState):
# Full set of sections
sections = state["sections"]
topic = state["topic"]
# Concat all sections together
formatted_str_sections = "\n\n".join([f"{section}" for section in sections])
# Summarize the sections into a final report
instructions = intro_conclusion_instructions.format(topic=topic, formatted_str_sections=formatted_str_sections)
conclusion = llm.invoke([instructions]+[HumanMessage(content=f"Write the report conclusion")])
return {"conclusion": conclusion.content}
def finalize_report(state: ResearchGraphState):
""" The is the "reduce" step where we gather all the sections, combine them, and reflect on them to write the intro/conclusion """
# Save full final report
content = state["content"]
if content.startswith("## Insights"):
content = content.strip("## Insights")
if "## Sources" in content:
try:
content, sources = content.split("\n## Sources\n")
except:
sources = None
else:
sources = None
final_report = state["introduction"] + "\n\n---\n\n" + content + "\n\n---\n\n" + state["conclusion"]
if sources is not None:
final_report += "\n\n## Sources\n" + sources
return {"final_report": final_report}
# Add nodes and edges
builder = StateGraph(ResearchGraphState)
builder.add_node("create_analysts", create_analysts)
builder.add_node("human_feedback", human_feedback)
builder.add_node("conduct_interview", interview_builder.compile())
builder.add_node("write_report",write_report)
builder.add_node("write_introduction",write_introduction)
builder.add_node("write_conclusion",write_conclusion)
builder.add_node("finalize_report",finalize_report)
# Logic
builder.add_edge(START, "create_analysts")
builder.add_edge("create_analysts", "human_feedback")
builder.add_conditional_edges("human_feedback", initiate_all_interviews, ["create_analysts", "conduct_interview"])
builder.add_edge("conduct_interview", "write_report")
builder.add_edge("conduct_interview", "write_introduction")
builder.add_edge("conduct_interview", "write_conclusion")
builder.add_edge(["write_conclusion", "write_report", "write_introduction"], "finalize_report")
builder.add_edge("finalize_report", END)
# Compile
memory = MemorySaver()
graph = builder.compile(interrupt_before=['human_feedback'], checkpointer=memory)
display(Image(graph.get_graph(xray=1).draw_mermaid_png()))# Inputs
max_analysts = 3
topic = "The benefits of adopting LangGraph as an agent framework"
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream({"topic":topic,
"max_analysts":max_analysts},
thread,
stream_mode="values"):
analysts = event.get('analysts', '')
if analysts:
for analyst in analysts:
print(f"Name: {analyst.name}")
print(f"Affiliation: {analyst.affiliation}")
print(f"Role: {analyst.role}")
print(f"Description: {analyst.description}")
print("-" * 50) # We now update the state as if we are the human_feedback node
graph.update_state(thread, {"human_analyst_feedback":
"Add in the CEO of gen ai native startup"}, as_node="human_feedback")# Check
for event in graph.stream(None, thread, stream_mode="values"):
analysts = event.get('analysts', '')
if analysts:
for analyst in analysts:
print(f"Name: {analyst.name}")
print(f"Affiliation: {analyst.affiliation}")
print(f"Role: {analyst.role}")
print(f"Description: {analyst.description}")
print("-" * 50) # Confirm we are happy
graph.update_state(thread, {"human_analyst_feedback":
None}, as_node="human_feedback")# Continue
for event in graph.stream(None, thread, stream_mode="updates"):
print("--Node--")
node_name = next(iter(event.keys()))
print(node_name)from IPython.display import Markdown
final_state = graph.get_state(thread)
report = final_state.values.get('final_report')
Markdown(report) |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/research_assistant.py | import operator
from pydantic import BaseModel, Field
from typing import Annotated, List
from typing_extensions import TypedDict
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, get_buffer_string
from langchain_openai import ChatOpenAI
from langgraph.constants import Send
from langgraph.graph import END, MessagesState, START, StateGraph
### LLM
llm = ChatOpenAI(model="gpt-4o", temperature=0)
### Schema
class Analyst(BaseModel):
    """A generated analyst persona; each analyst drives one interview branch."""
    affiliation: str = Field(
        description="Primary affiliation of the analyst.",
    )
    name: str = Field(
        description="Name of the analyst."
    )
    role: str = Field(
        description="Role of the analyst in the context of the topic.",
    )
    description: str = Field(
        description="Description of the analyst focus, concerns, and motives.",
    )
    @property
    def persona(self) -> str:
        """Render the analyst as a multi-line persona string for prompt templates."""
        return f"Name: {self.name}\nRole: {self.role}\nAffiliation: {self.affiliation}\nDescription: {self.description}\n"
class Perspectives(BaseModel):
    """Structured-output wrapper holding the generated list of analysts."""
    analysts: List[Analyst] = Field(
        description="Comprehensive list of analysts with their roles and affiliations.",
    )
class GenerateAnalystsState(TypedDict):
    """State for the analyst-generation / human-feedback loop."""
    topic: str # Research topic
    max_analysts: int # Number of analysts
    human_analyst_feedback: str # Human feedback
    analysts: List[Analyst] # Analyst asking questions
class InterviewState(MessagesState):
    """Per-interview state; `context` merges docs from the parallel search nodes."""
    max_num_turns: int # Number turns of conversation
    context: Annotated[list, operator.add] # Source docs
    analyst: Analyst # Analyst asking questions
    interview: str # Interview transcript
    sections: list # Final key we duplicate in outer state for Send() API
class SearchQuery(BaseModel):
    """Structured output: one web-search query distilled from the conversation."""
    search_query: str = Field(None, description="Search query for retrieval.")
class ResearchGraphState(TypedDict):
    """Top-level graph state; `sections` gathers output from parallel interviews."""
    topic: str # Research topic
    max_analysts: int # Number of analysts
    human_analyst_feedback: str # Human feedback
    analysts: List[Analyst] # Analyst asking questions
    sections: Annotated[list, operator.add] # Send() API key
    introduction: str # Introduction for the final report
    content: str # Content for the final report
    conclusion: str # Conclusion for the final report
    final_report: str # Final report
### Nodes and edges
analyst_instructions="""You are tasked with creating a set of AI analyst personas. Follow these instructions carefully:
1. First, review the research topic:
{topic}
2. Examine any editorial feedback that has been optionally provided to guide creation of the analysts:
{human_analyst_feedback}
3. Determine the most interesting themes based upon documents and / or feedback above.
4. Pick the top {max_analysts} themes.
5. Assign one analyst to each theme."""
def create_analysts(state: GenerateAnalystsState):
    """Generate the analyst personas for the topic, honoring any human feedback."""
    topic=state['topic']
    max_analysts=state['max_analysts']
    # Empty on the first pass, before the human_feedback interrupt supplies it
    human_analyst_feedback=state.get('human_analyst_feedback', '')
    # Enforce structured output so the response parses into a Perspectives model
    structured_llm = llm.with_structured_output(Perspectives)
    # System message
    system_message = analyst_instructions.format(topic=topic,
                                                human_analyst_feedback=human_analyst_feedback,
                                                max_analysts=max_analysts)
    # Generate the analysts
    analysts = structured_llm.invoke([SystemMessage(content=system_message)]+[HumanMessage(content="Generate the set of analysts.")])
    # Write the list of analysts to state
    return {"analysts": analysts.analysts}
def human_feedback(state: GenerateAnalystsState):
    """ No-op node that should be interrupted on """
    # Intentionally empty: the graph is compiled with interrupt_before=['human_feedback'],
    # so execution pauses here and the state is updated externally via update_state().
    pass
# Generate analyst question
question_instructions = """You are an analyst tasked with interviewing an expert to learn about a specific topic.
Your goal is boil down to interesting and specific insights related to your topic.
1. Interesting: Insights that people will find surprising or non-obvious.
2. Specific: Insights that avoid generalities and include specific examples from the expert.
Here is your topic of focus and set of goals: {goals}
Begin by introducing yourself using a name that fits your persona, and then ask your question.
Continue to ask questions to drill down and refine your understanding of the topic.
When you are satisfied with your understanding, complete the interview with: "Thank you so much for your help!"
Remember to stay in character throughout your response, reflecting the persona and goals provided to you."""
def generate_question(state: InterviewState):
    """Have the analyst persona ask the next interview question.

    Formats the question prompt with the analyst's persona, prepends it as the
    system message ahead of the conversation so far, and appends the model's
    question to the message history.
    """
    persona_prompt = question_instructions.format(goals=state["analyst"].persona)
    convo = [SystemMessage(content=persona_prompt), *state["messages"]]
    next_question = llm.invoke(convo)
    return {"messages": [next_question]}
# Search query writing
search_instructions = SystemMessage(content=f"""You will be given a conversation between an analyst and an expert.
Your goal is to generate a well-structured query for use in retrieval and / or web-search related to the conversation.
First, analyze the full conversation.
Pay particular attention to the final question posed by the analyst.
Convert this final question into a well-structured web search query""")
def search_web(state: InterviewState):
    """Distill the conversation into a query and retrieve web docs via Tavily."""
    # Web search tool (top 3 results)
    tavily_search = TavilySearchResults(max_results=3)
    # Distill the conversation into a structured search query
    structured_llm = llm.with_structured_output(SearchQuery)
    search_query = structured_llm.invoke([search_instructions]+state['messages'])
    # Run the search
    search_docs = tavily_search.invoke(search_query.search_query)
    # Wrap each hit in a <Document> tag so sources can be cited downstream
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
            for doc in search_docs
        ]
    )
    # Appended to the shared context list via the operator.add reducer
    return {"context": [formatted_search_docs]}
def search_wikipedia(state: InterviewState):
    """Distill the conversation into a query and retrieve Wikipedia docs."""
    # Distill the conversation into a structured search query
    structured_llm = llm.with_structured_output(SearchQuery)
    search_query = structured_llm.invoke([search_instructions]+state['messages'])
    # Load up to two Wikipedia pages for the query
    search_docs = WikipediaLoader(query=search_query.search_query,
                                  load_max_docs=2).load()
    # Wrap each doc in a <Document> tag so sources can be cited downstream
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ]
    )
    # Appended to the shared context list via the operator.add reducer
    return {"context": [formatted_search_docs]}
# Generate expert answer
answer_instructions = """You are an expert being interviewed by an analyst.
Here is analyst area of focus: {goals}.
You goal is to answer a question posed by the interviewer.
To answer question, use this context:
{context}
When answering questions, follow these guidelines:
1. Use only the information provided in the context.
2. Do not introduce external information or make assumptions beyond what is explicitly stated in the context.
3. The context contain sources at the topic of each individual document.
4. Include these sources your answer next to any relevant statements. For example, for source # 1 use [1].
5. List your sources in order at the bottom of your answer. [1] Source 1, [2] Source 2, etc
6. If the source is: <Document source="assistant/docs/llama3_1.pdf" page="7"/>' then just list:
[1] assistant/docs/llama3_1.pdf, page 7
And skip the addition of the brackets as well as the Document source preamble in your citation."""
def generate_answer(state: InterviewState):
    """Answer the analyst's latest question as the expert, grounded in the retrieved context."""
    # Get state
    analyst = state["analyst"]
    messages = state["messages"]
    context = state["context"]
    # Answer using only the gathered source documents
    system_message = answer_instructions.format(goals=analyst.persona, context=context)
    answer = llm.invoke([SystemMessage(content=system_message)]+messages)
    # Tag the message so route_messages can count expert turns by name
    answer.name = "expert"
    # Append it to the message history
    return {"messages": [answer]}
def save_interview(state: InterviewState):
    """Flatten the interview messages into a single transcript string.

    Uses ``get_buffer_string`` on the message list and stores the result
    under the ``interview`` key of the state.
    """
    transcript = get_buffer_string(state["messages"])
    return {"interview": transcript}
def route_messages(state: InterviewState,
                   name: str = "expert"):
    """Decide whether the analyst asks another question or the interview ends.

    Routes to 'save_interview' once the expert (AI messages tagged `name`)
    has answered `max_num_turns` times, or when the analyst's last question
    contains the closing phrase; otherwise routes back to 'ask_question'.
    """
    # Get messages
    messages = state["messages"]
    max_num_turns = state.get('max_num_turns',2)
    # Count expert answers: AI messages tagged with the expert name
    num_responses = len(
        [m for m in messages if isinstance(m, AIMessage) and m.name == name]
    )
    # End if expert has answered more than the max turns
    if num_responses >= max_num_turns:
        return 'save_interview'
    # This router runs after each question-answer pair, so the question is the
    # second-to-last message; check if it signals the end of the discussion
    last_question = messages[-2]
    if "Thank you so much for your help" in last_question.content:
        return 'save_interview'
    return "ask_question"
# Write a summary (section of the final report) of the interview
section_writer_instructions = """You are an expert technical writer.
Your task is to create a short, easily digestible section of a report based on a set of source documents.
1. Analyze the content of the source documents:
- The name of each source document is at the start of the document, with the <Document tag.
2. Create a report structure using markdown formatting:
- Use ## for the section title
- Use ### for sub-section headers
3. Write the report following this structure:
a. Title (## header)
b. Summary (### header)
c. Sources (### header)
4. Make your title engaging based upon the focus area of the analyst:
{focus}
5. For the summary section:
- Set up summary with general background / context related to the focus area of the analyst
- Emphasize what is novel, interesting, or surprising about insights gathered from the interview
- Create a numbered list of source documents, as you use them
- Do not mention the names of interviewers or experts
- Aim for approximately 400 words maximum
- Use numbered sources in your report (e.g., [1], [2]) based on information from source documents
6. In the Sources section:
- Include all sources used in your report
- Provide full links to relevant websites or specific document paths
- Separate each source by a newline. Use two spaces at the end of each line to create a newline in Markdown.
- It will look like:
### Sources
[1] Link or Document name
[2] Link or Document name
7. Be sure to combine sources. For example this is not correct:
[3] https://ai.meta.com/blog/meta-llama-3-1/
[4] https://ai.meta.com/blog/meta-llama-3-1/
There should be no redundant sources. It should simply be:
[3] https://ai.meta.com/blog/meta-llama-3-1/
8. Final review:
- Ensure the report follows the required structure
- Include no preamble before the title of the report
- Check that all guidelines have been followed"""
def write_section(state: InterviewState):
    """Write this analyst's report section from the retrieved source documents."""
    # Get state
    interview = state["interview"]
    context = state["context"]
    analyst = state["analyst"]
    # Write section using either the gathered source docs from interview (context) or the interview itself (interview)
    system_message = section_writer_instructions.format(focus=analyst.description)
    section = llm.invoke([SystemMessage(content=system_message)]+[HumanMessage(content=f"Use this source to write your section: {context}")])
    # Appended to the outer graph's sections list (operator.add reducer)
    return {"sections": [section.content]}
# Add nodes and edges
interview_builder = StateGraph(InterviewState)
interview_builder.add_node("ask_question", generate_question)
interview_builder.add_node("search_web", search_web)
interview_builder.add_node("search_wikipedia", search_wikipedia)
interview_builder.add_node("answer_question", generate_answer)
interview_builder.add_node("save_interview", save_interview)
interview_builder.add_node("write_section", write_section)
# Flow
interview_builder.add_edge(START, "ask_question")
interview_builder.add_edge("ask_question", "search_web")
interview_builder.add_edge("ask_question", "search_wikipedia")
interview_builder.add_edge("search_web", "answer_question")
interview_builder.add_edge("search_wikipedia", "answer_question")
interview_builder.add_conditional_edges("answer_question", route_messages,['ask_question','save_interview'])
interview_builder.add_edge("save_interview", "write_section")
interview_builder.add_edge("write_section", END)
def initiate_all_interviews(state: ResearchGraphState):
    """Conditional edge: re-run analyst creation or fan out the interviews.

    If the human feedback is anything other than "approve", route back to
    ``create_analysts``; otherwise launch one ``conduct_interview`` branch
    per analyst in parallel via the Send() API.
    """
    feedback = state.get('human_analyst_feedback', 'approve')
    if feedback.lower() != 'approve':
        return "create_analysts"
    topic = state["topic"]
    opener = f"So you said you were writing an article on {topic}?"
    return [
        Send("conduct_interview", {"analyst": one_analyst,
                                   "messages": [HumanMessage(content=opener)]})
        for one_analyst in state["analysts"]
    ]
# Write a report based on the interviews
report_writer_instructions = """You are a technical writer creating a report on this overall topic:
{topic}
You have a team of analysts. Each analyst has done two things:
1. They conducted an interview with an expert on a specific sub-topic.
2. They write up their finding into a memo.
Your task:
1. You will be given a collection of memos from your analysts.
2. Think carefully about the insights from each memo.
3. Consolidate these into a crisp overall summary that ties together the central ideas from all of the memos.
4. Summarize the central points in each memo into a cohesive single narrative.
To format your report:
1. Use markdown formatting.
2. Include no pre-amble for the report.
3. Use no sub-heading.
4. Start your report with a single title header: ## Insights
5. Do not mention any analyst names in your report.
6. Preserve any citations in the memos, which will be annotated in brackets, for example [1] or [2].
7. Create a final, consolidated list of sources and add to a Sources section with the `## Sources` header.
8. List your sources in order and do not repeat.
[1] Source 1
[2] Source 2
Here are the memos from your analysts to build your report from:
{context}"""
def write_report(state: ResearchGraphState):
    """Write the final report body by consolidating all interview sections.

    Concatenates every section memo into one context string, then asks the
    LLM to merge them into a single narrative stored under ``content``.
    """
    # Coerce each section to str defensively, then join the memos as context
    formatted_str_sections = "\n\n".join(str(section) for section in state["sections"])
    system_message = report_writer_instructions.format(
        topic=state["topic"], context=formatted_str_sections
    )
    report = llm.invoke(
        [SystemMessage(content=system_message)]
        # Plain literal: the previous f-string had no placeholders (lint F541)
        + [HumanMessage(content="Write a report based upon these memos.")]
    )
    return {"content": report.content}
# Write the introduction or conclusion
intro_conclusion_instructions = """You are a technical writer finishing a report on {topic}
You will be given all of the sections of the report.
You job is to write a crisp and compelling introduction or conclusion section.
The user will instruct you whether to write the introduction or conclusion.
Include no pre-amble for either section.
Target around 100 words, crisply previewing (for introduction) or recapping (for conclusion) all of the sections of the report.
Use markdown formatting.
For your introduction, create a compelling title and use the # header for the title.
For your introduction, use ## Introduction as the section header.
For your conclusion, use ## Conclusion as the section header.
Here are the sections to reflect on for writing: {formatted_str_sections}"""
def write_introduction(state: ResearchGraphState):
    """ Node to write the report introduction from all section memos """
    # Full set of sections
    sections = state["sections"]
    topic = state["topic"]
    # Concat all sections together as prompt context
    formatted_str_sections = "\n\n".join([f"{section}" for section in sections])
    # Ask the LLM for a ~100 word introduction previewing the sections
    instructions = intro_conclusion_instructions.format(topic=topic, formatted_str_sections=formatted_str_sections)
    intro = llm.invoke([instructions]+[HumanMessage(content=f"Write the report introduction")])
    return {"introduction": intro.content}
def write_conclusion(state: ResearchGraphState):
    """ Node to write the report conclusion from all section memos """
    # Full set of sections
    sections = state["sections"]
    topic = state["topic"]
    # Concat all sections together as prompt context
    formatted_str_sections = "\n\n".join([f"{section}" for section in sections])
    # Ask the LLM for a ~100 word conclusion recapping the sections
    instructions = intro_conclusion_instructions.format(topic=topic, formatted_str_sections=formatted_str_sections)
    conclusion = llm.invoke([instructions]+[HumanMessage(content=f"Write the report conclusion")])
    return {"conclusion": conclusion.content}
def finalize_report(state: ResearchGraphState):
    """Reduce step: combine the report body with the intro and conclusion.

    Removes the "## Insights" header from the body, splits off the
    consolidated "## Sources" section so it can be re-appended after the
    conclusion, and joins introduction, body, and conclusion with
    horizontal rules. Returns the assembled ``final_report`` string.
    """
    content = state["content"]
    if content.startswith("## Insights"):
        # str.removeprefix drops the exact header; the previous
        # str.strip("## Insights") stripped any of those *characters* from
        # both ends and could eat real text (e.g. a trailing "...insight").
        content = content.removeprefix("## Insights")
    if "## Sources" in content:
        try:
            content, sources = content.split("\n## Sources\n")
        except ValueError:
            # Zero or multiple separators: keep the body as-is, no sources tail
            sources = None
    else:
        sources = None
    final_report = state["introduction"] + "\n\n---\n\n" + content + "\n\n---\n\n" + state["conclusion"]
    if sources is not None:
        final_report += "\n\n## Sources\n" + sources
    return {"final_report": final_report}
# Add nodes and edges
builder = StateGraph(ResearchGraphState)
builder.add_node("create_analysts", create_analysts)
builder.add_node("human_feedback", human_feedback)
builder.add_node("conduct_interview", interview_builder.compile())
builder.add_node("write_report",write_report)
builder.add_node("write_introduction",write_introduction)
builder.add_node("write_conclusion",write_conclusion)
builder.add_node("finalize_report",finalize_report)
# Logic
builder.add_edge(START, "create_analysts")
builder.add_edge("create_analysts", "human_feedback")
builder.add_conditional_edges("human_feedback", initiate_all_interviews, ["create_analysts", "conduct_interview"])
builder.add_edge("conduct_interview", "write_report")
builder.add_edge("conduct_interview", "write_introduction")
builder.add_edge("conduct_interview", "write_conclusion")
builder.add_edge(["write_conclusion", "write_report", "write_introduction"], "finalize_report")
builder.add_edge("finalize_report", END)
# Compile
graph = builder.compile(interrupt_before=['human_feedback']) |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai
tavily-python
wikipedia |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/sub_graphs.py | from operator import add
from typing import List, Optional, Annotated
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
# The structure of the logs
class Log(TypedDict):
    """The structure of a single run log entry; grading fields are optional."""
    id: str
    question: str
    docs: Optional[List]
    answer: str
    grade: Optional[int]
    grader: Optional[str]
    feedback: Optional[str]
# Failure Analysis Sub-graph
class FailureAnalysisState(TypedDict):
    """Full internal state of the failure-analysis sub-graph."""
    cleaned_logs: List[Log]
    failures: List[Log]
    fa_summary: str
    processed_logs: List[str]
class FailureAnalysisOutputState(TypedDict):
    """Keys the failure-analysis sub-graph exposes to the parent graph."""
    fa_summary: str
    processed_logs: List[str]
def get_failures(state):
    """Select the cleaned logs that carry a failure grade.

    A log entry counts as a failure when it has a "grade" key present at all.
    """
    flagged = []
    for entry in state["cleaned_logs"]:
        if "grade" in entry:
            flagged.append(entry)
    return {"failures": flagged}
def generate_summary(state):
    """Produce a summary of the failure logs (placeholder implementation).

    TODO: replace the canned text with a real call, e.g.
    ``fa_summary = summarize(state["failures"])``.
    """
    processed = ["failure-analysis-on-log-" + entry["id"] for entry in state["failures"]]
    return {
        "fa_summary": "Poor quality retrieval of Chroma documentation.",
        "processed_logs": processed,
    }
fa_builder = StateGraph(input=FailureAnalysisState,output=FailureAnalysisOutputState)
fa_builder.add_node("get_failures", get_failures)
fa_builder.add_node("generate_summary", generate_summary)
fa_builder.add_edge(START, "get_failures")
fa_builder.add_edge("get_failures", "generate_summary")
fa_builder.add_edge("generate_summary", END)
# Summarization subgraph
class QuestionSummarizationState(TypedDict):
    """Full internal state of the question-summarization sub-graph."""
    cleaned_logs: List[Log]
    qs_summary: str
    report: str
    processed_logs: List[str]
class QuestionSummarizationOutputState(TypedDict):
    """Keys the question-summarization sub-graph exposes to the parent graph."""
    report: str
    processed_logs: List[str]
def generate_summary(state):
    """Summarize the cleaned question logs (placeholder implementation).

    TODO: swap the canned text for a real summarization over
    ``state["cleaned_logs"]``.
    """
    tags = ["summary-on-log-" + entry["id"] for entry in state["cleaned_logs"]]
    return {
        "qs_summary": "Questions focused on usage of ChatOllama and Chroma vector store.",
        "processed_logs": tags,
    }
def send_to_slack(state):
    """Turn the question summary into a report payload (placeholder).

    TODO: generate a real report from ``state["qs_summary"]`` and post it.
    """
    _ = state["qs_summary"]  # read now to mirror the eventual report_generation(qs_summary) call
    return {"report": "foo bar baz"}
qs_builder = StateGraph(input=QuestionSummarizationState,output=QuestionSummarizationOutputState)
qs_builder.add_node("generate_summary", generate_summary)
qs_builder.add_node("send_to_slack", send_to_slack)
qs_builder.add_edge(START, "generate_summary")
qs_builder.add_edge("generate_summary", "send_to_slack")
qs_builder.add_edge("send_to_slack", END)
# Entry Graph
class EntryGraphState(TypedDict):
    """State for the entry graph that fans out to both sub-graphs."""
    raw_logs: List[Log]
    cleaned_logs: List[Log]
    fa_summary: str # This will only be generated in the FA sub-graph
    report: str # This will only be generated in the QS sub-graph
    # Both sub-graphs append human-readable log identifiers (strings, e.g.
    # "summary-on-log-<id>"), so the element type is str — not int as before;
    # `add` merges the lists coming from the parallel branches.
    processed_logs: Annotated[List[str], add] # This will be generated in BOTH sub-graphs
def clean_logs(state):
    """Pass raw logs through as cleaned logs.

    Placeholder for a real data-cleaning step; currently an identity mapping.
    """
    return {"cleaned_logs": state["raw_logs"]}
entry_builder = StateGraph(EntryGraphState)
entry_builder.add_node("clean_logs", clean_logs)
entry_builder.add_node("question_summarization", qs_builder.compile())
entry_builder.add_node("failure_analysis", fa_builder.compile())
entry_builder.add_edge(START, "clean_logs")
entry_builder.add_edge("clean_logs", "failure_analysis")
entry_builder.add_edge("clean_logs", "question_summarization")
entry_builder.add_edge("failure_analysis", END)
entry_builder.add_edge("question_summarization", END)
graph = entry_builder.compile() |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/.env.example | OPENAI_API_KEY=sk-xxx
TAVILY_API_KEY="tvly-xxxx" |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/map_reduce.py | import operator
from typing import Annotated
from typing_extensions import TypedDict
from pydantic import BaseModel
from langchain_openai import ChatOpenAI
from langgraph.constants import Send
from langgraph.graph import END, StateGraph, START
# Prompts we will use
subjects_prompt = """Generate a list of 3 sub-topics that are all related to this overall topic: {topic}."""
joke_prompt = """Generate a joke about {subject}"""
best_joke_prompt = """Below are a bunch of jokes about {topic}. Select the best one! Return the ID of the best one, starting 0 as the ID for the first joke. Jokes: \n\n {jokes}"""
# LLM
model = ChatOpenAI(model="gpt-4o", temperature=0)
# Define the state
class Subjects(BaseModel):
    """Structured output: the list of generated sub-topics."""
    subjects: list[str]
class BestJoke(BaseModel):
    """Structured output: index of the best joke (0-based)."""
    id: int
class OverallState(TypedDict):
    """Main graph state; `jokes` accumulates results from parallel generate_joke branches."""
    topic: str
    subjects: list
    jokes: Annotated[list, operator.add]
    best_selected_joke: str
def generate_topics(state: OverallState):
    """Ask the LLM for 3 sub-topics of the overall topic (map-step setup)."""
    prompt = subjects_prompt.format(topic=state["topic"])
    response = model.with_structured_output(Subjects).invoke(prompt)
    return {"subjects": response.subjects}
class JokeState(TypedDict):
    """Per-branch state handed to generate_joke via Send()."""
    subject: str
class Joke(BaseModel):
    """Structured output: a single generated joke."""
    joke: str
def generate_joke(state: JokeState):
    """Map step: generate one joke for a single subject (one run per Send branch)."""
    prompt = joke_prompt.format(subject=state["subject"])
    response = model.with_structured_output(Joke).invoke(prompt)
    # Returned as a one-element list so operator.add merges branches into `jokes`
    return {"jokes": [response.joke]}
def best_joke(state: OverallState):
    """Reduce step: have the LLM pick the best joke by 0-based index."""
    jokes = "\n\n".join(state["jokes"])
    prompt = best_joke_prompt.format(topic=state["topic"], jokes=jokes)
    response = model.with_structured_output(BestJoke).invoke(prompt)
    return {"best_selected_joke": state["jokes"][response.id]}
def continue_to_jokes(state: OverallState):
    """Conditional edge: fan out one generate_joke branch per subject via Send()."""
    return [Send("generate_joke", {"subject": s}) for s in state["subjects"]]
# Construct the graph: here we put everything together to construct our graph
graph_builder = StateGraph(OverallState)
graph_builder.add_node("generate_topics", generate_topics)
graph_builder.add_node("generate_joke", generate_joke)
graph_builder.add_node("best_joke", best_joke)
graph_builder.add_edge(START, "generate_topics")
graph_builder.add_conditional_edges("generate_topics", continue_to_jokes, ["generate_joke"])
graph_builder.add_edge("generate_joke", "best_joke")
graph_builder.add_edge("best_joke", END)
# Compile the graph
graph = graph_builder.compile()
|
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"parallelization": "./parallelization.py:graph",
"sub_graphs": "./sub_graphs.py:graph",
"map_reduce": "./map_reduce.py:graph",
"research_assistant": "./research_assistant.py:graph"
},
"env": "./.env",
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy/module-4 | lc_public_repos/langchain-academy/module-4/studio/parallelization.py | import operator
from typing import Annotated
from typing_extensions import TypedDict
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.tools import TavilySearchResults
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
llm = ChatOpenAI(model="gpt-4o", temperature=0)
class State(TypedDict):
    """Graph state; `context` merges results from the parallel search branches."""
    question: str
    answer: str
    context: Annotated[list, operator.add]
def search_web(state):
    """ Retrieve docs from web search (Tavily, top 3 results) """
    # Search
    tavily_search = TavilySearchResults(max_results=3)
    search_docs = tavily_search.invoke(state['question'])
    # Wrap each hit in a <Document> tag for citation downstream
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
            for doc in search_docs
        ]
    )
    # Merged into shared context via the operator.add reducer
    return {"context": [formatted_search_docs]}
def search_wikipedia(state):
    """ Retrieve up to two Wikipedia docs for the question """
    # Search
    search_docs = WikipediaLoader(query=state['question'],
                                  load_max_docs=2).load()
    # Wrap each doc in a <Document> tag for citation downstream
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ]
    )
    # Merged into shared context via the operator.add reducer
    return {"context": [formatted_search_docs]}
def generate_answer(state):
    """ Node to answer the question using the combined search context """
    # Get state
    context = state["context"]
    question = state["question"]
    # Template
    answer_template = """Answer the question {question} using this context: {context}"""
    answer_instructions = answer_template.format(question=question,
                                                 context=context)
    # Answer
    answer = llm.invoke([SystemMessage(content=answer_instructions)]+[HumanMessage(content=f"Answer the question.")])
    # Note: stores the full message object (not just .content) under "answer"
    return {"answer": answer}
# Add nodes
builder = StateGraph(State)
# Initialize each node with node_secret
builder.add_node("search_web",search_web)
builder.add_node("search_wikipedia", search_wikipedia)
builder.add_node("generate_answer", generate_answer)
# Flow
builder.add_edge(START, "search_wikipedia")
builder.add_edge(START, "search_web")
builder.add_edge("search_wikipedia", "generate_answer")
builder.add_edge("search_web", "generate_answer")
builder.add_edge("generate_answer", END)
graph = builder.compile()
|
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-6/connecting.ipynb | from langgraph_sdk import get_client
# Connect via SDK
url_for_cli_deployment = "http://localhost:8123"
client = get_client(url=url_for_cli_deployment)from langgraph.pregel.remote import RemoteGraph
from langchain_core.messages import convert_to_messages
from langchain_core.messages import HumanMessage, SystemMessage
# Connect via remote graph
url = "http://localhost:8123"
graph_name = "task_maistro"
remote_graph = RemoteGraph(graph_name, url=url)# Create a thread
thread = await client.threads.create()
thread# Check any existing runs on a thread
thread = await client.threads.create()
runs = await client.runs.list(thread["thread_id"])
print(runs)# Ensure we've created some ToDos and saved them to my user_id
user_input = "Add a ToDo to finish booking travel to Hong Kong by end of next week. Also, add a ToDo to call parents back about Thanksgiving plans."
config = {"configurable": {"user_id": "Test"}}
graph_name = "task_maistro"
run = await client.runs.create(thread["thread_id"], graph_name, input={"messages": [HumanMessage(content=user_input)]}, config=config)# Kick off a new thread and a new run
thread = await client.threads.create()
user_input = "Give me a summary of all ToDos."
config = {"configurable": {"user_id": "Test"}}
graph_name = "task_maistro"
run = await client.runs.create(thread["thread_id"], graph_name, input={"messages": [HumanMessage(content=user_input)]}, config=config)# Check the run status
print(await client.runs.get(thread["thread_id"], run["run_id"]))# Wait until the run completes
await client.runs.join(thread["thread_id"], run["run_id"])
print(await client.runs.get(thread["thread_id"], run["run_id"]))user_input = "What ToDo should I focus on first."
async for chunk in client.runs.stream(thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input)]},
config=config,
stream_mode="messages-tuple"):
if chunk.event == "messages":
print("".join(data_item['content'] for data_item in chunk.data if 'content' in data_item), end="", flush=True)thread_state = await client.threads.get_state(thread['thread_id'])
for m in convert_to_messages(thread_state['values']['messages']):
m.pretty_print()# Copy the thread
copied_thread = await client.threads.copy(thread['thread_id'])# Check the state of the copied thread
copied_thread_state = await client.threads.get_state(copied_thread['thread_id'])
for m in convert_to_messages(copied_thread_state['values']['messages']):
m.pretty_print()# Get the history of the thread
states = await client.threads.get_history(thread['thread_id'])
# Pick a state update to fork
to_fork = states[-2]
to_fork['values']to_fork['values']['messages'][0]['id']to_fork['next']to_fork['checkpoint_id']forked_input = {"messages": HumanMessage(content="Give me a summary of all ToDos that need to be done in the next week.",
id=to_fork['values']['messages'][0]['id'])}
# Update the state, creating a new checkpoint in the thread
forked_config = await client.threads.update_state(
thread["thread_id"],
forked_input,
checkpoint_id=to_fork['checkpoint_id']
)# Run the graph from the new checkpoint in the thread
async for chunk in client.runs.stream(thread["thread_id"],
graph_name,
input=None,
config=config,
checkpoint_id=forked_config['checkpoint_id'],
stream_mode="messages-tuple"):
if chunk.event == "messages":
print("".join(data_item['content'] for data_item in chunk.data if 'content' in data_item), end="", flush=True)items = await client.store.search_items(
("todo", "general", "Test"),
limit=5,
offset=0
)
items['items']from uuid import uuid4
await client.store.put_item(
("testing", "Test"),
key=str(uuid4()),
value={"todo": "Test SDK put_item"},
)items = await client.store.search_items(
("testing", "Test"),
limit=5,
offset=0
)
items['items'][item['key'] for item in items['items']]await client.store.delete_item(
("testing", "Test"),
key='3de441ba-8c79-4beb-8f52-00e4dcba68d4',
)items = await client.store.search_items(
("testing", "Test"),
limit=5,
offset=0
)
items['items'] |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-6/assistant.ipynb | from langgraph_sdk import get_client
url_for_cli_deployment = "http://localhost:8123"
client = get_client(url=url_for_cli_deployment)personal_assistant = await client.assistants.create(
# "task_maistro" is the name of a graph we deployed
"task_maistro",
config={"configurable": {"todo_category": "personal"}}
)
print(personal_assistant)task_maistro_role = """You are a friendly and organized personal task assistant. Your main focus is helping users stay on top of their personal tasks and commitments. Specifically:
- Help track and organize personal tasks
- When providing a 'todo summary':
1. List all current tasks grouped by deadline (overdue, today, this week, future)
2. Highlight any tasks missing deadlines and gently encourage adding them
3. Note any tasks that seem important but lack time estimates
- Proactively ask for deadlines when new tasks are added without them
- Maintain a supportive tone while helping the user stay accountable
- Help prioritize tasks based on deadlines and importance
Your communication style should be encouraging and helpful, never judgmental.
When tasks are missing deadlines, respond with something like "I notice [task] doesn't have a deadline yet. Would you like to add one to help us track it better?"""
configurations = {"todo_category": "personal",
"user_id": "lance",
"task_maistro_role": task_maistro_role}
personal_assistant = await client.assistants.update(
personal_assistant["assistant_id"],
config={"configurable": configurations}
)
print(personal_assistant)task_maistro_role = """You are a focused and efficient work task assistant.
Your main focus is helping users manage their work commitments with realistic timeframes.
Specifically:
- Help track and organize work tasks
- When providing a 'todo summary':
1. List all current tasks grouped by deadline (overdue, today, this week, future)
2. Highlight any tasks missing deadlines and gently encourage adding them
3. Note any tasks that seem important but lack time estimates
- When discussing new tasks, suggest that the user provide realistic time-frames based on task type:
• Developer Relations features: typically 1 day
• Course lesson reviews/feedback: typically 2 days
• Documentation sprints: typically 3 days
- Help prioritize tasks based on deadlines and team dependencies
- Maintain a professional tone while helping the user stay accountable
Your communication style should be supportive but practical.
When tasks are missing deadlines, respond with something like "I notice [task] doesn't have a deadline yet. Based on similar tasks, this might take [suggested timeframe]. Would you like to set a deadline with this in mind?"""
configurations = {"todo_category": "work",
"user_id": "lance",
"task_maistro_role": task_maistro_role}
work_assistant = await client.assistants.create(
# "task_maistro" is the name of a graph we deployed
"task_maistro",
config={"configurable": configurations}
)
print(work_assistant)assistants = await client.assistants.search()
for assistant in assistants:
print({
'assistant_id': assistant['assistant_id'],
'version': assistant['version'],
'config': assistant['config']
})await client.assistants.delete("assistant_id")work_assistant_id = assistants[0]['assistant_id']
personal_assistant_id = assistants[1]['assistant_id']from langchain_core.messages import HumanMessage
from langchain_core.messages import convert_to_messages
user_input = "Create or update few ToDos: 1) Re-film Module 6, lesson 5 by end of day today. 2) Update audioUX by next Monday."
thread = await client.threads.create()
async for chunk in client.runs.stream(thread["thread_id"],
work_assistant_id,
input={"messages": [HumanMessage(content=user_input)]},
stream_mode="values"):
if chunk.event == 'values':
state = chunk.data
convert_to_messages(state["messages"])[-1].pretty_print()user_input = "Create another ToDo: Finalize set of report generation tutorials."
thread = await client.threads.create()
async for chunk in client.runs.stream(thread["thread_id"],
work_assistant_id,
input={"messages": [HumanMessage(content=user_input)]},
stream_mode="values"):
if chunk.event == 'values':
state = chunk.data
convert_to_messages(state["messages"])[-1].pretty_print()user_input = "OK, for this task let's get it done by next Tuesday."
async for chunk in client.runs.stream(thread["thread_id"],
work_assistant_id,
input={"messages": [HumanMessage(content=user_input)]},
stream_mode="values"):
if chunk.event == 'values':
state = chunk.data
convert_to_messages(state["messages"])[-1].pretty_print()user_input = "Create ToDos: 1) Check on swim lessons for the baby this weekend. 2) For winter travel, check AmEx points."
thread = await client.threads.create()
async for chunk in client.runs.stream(thread["thread_id"],
personal_assistant_id,
input={"messages": [HumanMessage(content=user_input)]},
stream_mode="values"):
if chunk.event == 'values':
state = chunk.data
convert_to_messages(state["messages"])[-1].pretty_print()user_input = "Give me a todo summary."
thread = await client.threads.create()
async for chunk in client.runs.stream(thread["thread_id"],
personal_assistant_id,
input={"messages": [HumanMessage(content=user_input)]},
stream_mode="values"):
if chunk.event == 'values':
state = chunk.data
convert_to_messages(state["messages"])[-1].pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-6/double-texting.ipynb | from langgraph_sdk import get_client
url_for_cli_deployment = "http://localhost:8123"
client = get_client(url=url_for_cli_deployment)import httpx
from langchain_core.messages import HumanMessage
# Create a thread
thread = await client.threads.create()
# Create to dos
user_input_1 = "Add a ToDo to follow-up with DI Repairs."
user_input_2 = "Add a ToDo to mount dresser to the wall."
config = {"configurable": {"user_id": "Test-Double-Texting"}}
graph_name = "task_maistro"
run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_1)]},
config=config,
)
try:
await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_2)]},
config=config,
multitask_strategy="reject",
)
except httpx.HTTPStatusError as e:
print("Failed to start concurrent run", e)from langchain_core.messages import convert_to_messages
# Wait until the original run completes
await client.runs.join(thread["thread_id"], run["run_id"])
# Get the state of the thread
state = await client.threads.get_state(thread["thread_id"])
for m in convert_to_messages(state["values"]["messages"]):
m.pretty_print()# Create a new thread
thread = await client.threads.create()
# Create new ToDos
user_input_1 = "Send Erik his t-shirt gift this weekend."
user_input_2 = "Get cash and pay nanny for 2 weeks. Do this by Friday."
config = {"configurable": {"user_id": "Test-Double-Texting"}}
graph_name = "task_maistro"
first_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_1)]},
config=config,
)
second_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_2)]},
config=config,
multitask_strategy="enqueue",
)
# Wait until the second run completes
await client.runs.join(thread["thread_id"], second_run["run_id"])
# Get the state of the thread
state = await client.threads.get_state(thread["thread_id"])
for m in convert_to_messages(state["values"]["messages"]):
m.pretty_print()import asyncio
# Create a new thread
thread = await client.threads.create()
# Create new ToDos
user_input_1 = "Give me a summary of my ToDos due tomrrow."
user_input_2 = "Never mind, create a ToDo to Order Ham for Thanksgiving by next Friday."
config = {"configurable": {"user_id": "Test-Double-Texting"}}
graph_name = "task_maistro"
interrupted_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_1)]},
config=config,
)
# Wait for some of run 1 to complete so that we can see it in the thread
await asyncio.sleep(1)
second_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_2)]},
config=config,
multitask_strategy="interrupt",
)
# Wait until the second run completes
await client.runs.join(thread["thread_id"], second_run["run_id"])
# Get the state of the thread
state = await client.threads.get_state(thread["thread_id"])
for m in convert_to_messages(state["values"]["messages"]):
m.pretty_print()# Confirm that the first run was interrupted
print((await client.runs.get(thread["thread_id"], interrupted_run["run_id"]))["status"])# Create a new thread
thread = await client.threads.create()
# Create new ToDos
user_input_1 = "Add a ToDo to call to make appointment at Yoga."
user_input_2 = "Actually, add a ToDo to drop by Yoga in person on Sunday."
config = {"configurable": {"user_id": "Test-Double-Texting"}}
graph_name = "task_maistro"
rolled_back_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_1)]},
config=config,
)
second_run = await client.runs.create(
thread["thread_id"],
graph_name,
input={"messages": [HumanMessage(content=user_input_2)]},
config=config,
multitask_strategy="rollback",
)
# Wait until the second run completes
await client.runs.join(thread["thread_id"], second_run["run_id"])
# Get the state of the thread
state = await client.threads.get_state(thread["thread_id"])
for m in convert_to_messages(state["values"]["messages"]):
m.pretty_print()# Confirm that the original run was deleted
try:
await client.runs.get(thread["thread_id"], rolled_back_run["run_id"])
except httpx.HTTPStatusError as _:
print("Original run was correctly deleted") |
0 | lc_public_repos/langchain-academy/module-6 | lc_public_repos/langchain-academy/module-6/deployment/docker-compose-example.yml | volumes:
langgraph-data:
driver: local
services:
langgraph-redis:
image: redis:6
healthcheck:
test: redis-cli ping
interval: 5s
timeout: 1s
retries: 5
langgraph-postgres:
image: postgres:16
ports:
- "5433:5432"
environment:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
volumes:
- langgraph-data:/var/lib/postgresql/data
healthcheck:
test: pg_isready -U postgres
start_period: 10s
timeout: 1s
retries: 5
interval: 5s
langgraph-api:
image: "my-image"
ports:
- "8123:8000"
depends_on:
langgraph-redis:
condition: service_healthy
langgraph-postgres:
condition: service_healthy
environment:
REDIS_URI: redis://langgraph-redis:6379
OPENAI_API_KEY: "xxx"
LANGSMITH_API_KEY: "xxx"
POSTGRES_URI: postgres://postgres:postgres@langgraph-postgres:5432/postgres?sslmode=disable
|
0 | lc_public_repos/langchain-academy/module-6 | lc_public_repos/langchain-academy/module-6/deployment/configuration.py | import os
from dataclasses import dataclass, field, fields
from typing import Any, Optional
from langchain_core.runnables import RunnableConfig
from typing_extensions import Annotated
from dataclasses import dataclass
@dataclass(kw_only=True)
class Configuration:
    """The configurable fields for the chatbot.

    Values are resolved per field, with environment variables (upper-cased
    field name) taking precedence over the run's ``configurable`` mapping,
    and the dataclass defaults below used as a final fallback.
    """

    # Namespaces this user's memories in the store.
    user_id: str = "default-user"
    # Namespaces ToDo items (e.g. "work", "personal").
    todo_category: str = "general"
    # Persona text injected at the top of the system prompt.
    task_maistro_role: str = "You are a helpful task management assistant. You help you create, organize, and manage the user's ToDo list."

    @classmethod
    def from_runnable_config(
        cls, config: Optional[RunnableConfig] = None
    ) -> "Configuration":
        """Create a Configuration instance from a RunnableConfig."""
        configurable = config.get("configurable", {}) if config else {}
        resolved: dict[str, Any] = {}
        for spec in fields(cls):
            if not spec.init:
                continue
            value = os.environ.get(spec.name.upper(), configurable.get(spec.name))
            # Drop falsy values so the dataclass defaults apply instead.
            if value:
                resolved[spec.name] = value
        return cls(**resolved)
0 | lc_public_repos/langchain-academy/module-6 | lc_public_repos/langchain-academy/module-6/deployment/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai
trustcall |
0 | lc_public_repos/langchain-academy/module-6 | lc_public_repos/langchain-academy/module-6/deployment/task_maistro.py | import uuid
from datetime import datetime
from pydantic import BaseModel, Field
from trustcall import create_extractor
from typing import Literal, Optional, TypedDict
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import merge_message_runs
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
import configuration
## Utilities
# Inspect the tool calls for Trustcall
class Spy:
    """Listener that records the tool calls made by chat-model runs.

    Attach via ``.with_listeners(on_end=spy)``; after invocation,
    ``called_tools`` holds one tool-call list per chat_model run found
    in the run tree.
    """

    def __init__(self):
        # Accumulates the tool-call payloads from every chat_model run.
        self.called_tools = []

    def __call__(self, run):
        # Depth-first walk over the run tree rooted at `run`.
        stack = [run]
        while stack:
            node = stack.pop()
            children = node.child_runs
            if children:
                stack.extend(children)
            if node.run_type == "chat_model":
                message_kwargs = node.outputs["generations"][0][0]["message"]["kwargs"]
                self.called_tools.append(message_kwargs["tool_calls"])
# Extract information from tool calls for both patches and new memories in Trustcall
def extract_tool_info(tool_calls, schema_name="Memory"):
    """Summarize Trustcall tool calls as a human-readable report.

    Recognizes two kinds of calls: ``PatchDoc`` (an update — or a no-op
    when the patch list is empty — to an existing document) and calls
    whose name matches ``schema_name`` (a newly created document).

    Args:
        tool_calls: List of tool-call groups from the model.
        schema_name: Name of the schema tool (e.g., "Memory", "ToDo", "Profile")

    Returns:
        One paragraph per change, joined by blank lines, in call order.
    """
    report_sections = []
    for call_group in tool_calls:
        for call in call_group:
            args = call['args']
            if call['name'] == 'PatchDoc':
                if args['patches']:
                    report_sections.append(
                        f"Document {args['json_doc_id']} updated:\n"
                        f"Plan: {args['planned_edits']}\n"
                        f"Added content: {args['patches'][0]['value']}"
                    )
                else:
                    # The extractor decided no edits were needed.
                    report_sections.append(
                        f"Document {args['json_doc_id']} unchanged:\n"
                        f"{args['planned_edits']}"
                    )
            elif call['name'] == schema_name:
                report_sections.append(
                    f"New {schema_name} created:\n"
                    f"Content: {args}"
                )
    return "\n\n".join(report_sections)
## Schema definitions
# User profile schema
# Structured user profile extracted/patched by the trustcall extractor
# (see profile_extractor below). NOTE(review): the class docstring likely
# feeds the tool description shown to the model — verify before rewording.
class Profile(BaseModel):
    """This is the profile of the user you are chatting with"""
    # Every field is optional: the profile is filled in incrementally as
    # the user reveals information about themselves.
    name: Optional[str] = Field(description="The user's name", default=None)
    location: Optional[str] = Field(description="The user's location", default=None)
    job: Optional[str] = Field(description="The user's job", default=None)
    connections: list[str] = Field(
        description="Personal connection of the user, such as family members, friends, or coworkers",
        default_factory=list
    )
    interests: list[str] = Field(
        description="Interests that the user has",
        default_factory=list
    )
# ToDo schema
# A single task on the user's ToDo list (intentionally no class docstring,
# matching the original; field descriptions carry the schema semantics).
class ToDo(BaseModel):
    task: str = Field(description="The task to be completed.")
    # default=None makes the estimate truly optional; in pydantic v2 an
    # Optional annotation without a default is still a required field.
    time_to_complete: Optional[int] = Field(
        description="Estimated time to complete the task (minutes).",
        default=None
    )
    deadline: Optional[datetime] = Field(
        description="When the task needs to be completed by (if applicable)",
        default=None
    )
    solutions: list[str] = Field(
        description="List of specific, actionable solutions (e.g., specific ideas, service providers, or concrete options relevant to completing the task)",
        # min_length is the pydantic v2 name for the deprecated v1 alias
        # min_items. Defaults are not validated, so the empty default list
        # remains allowed, as before.
        min_length=1,
        default_factory=list
    )
    status: Literal["not started", "in progress", "done", "archived"] = Field(
        description="Current status of the task",
        default="not started"
    )
## Initialize the model and tools
# Update memory tool
# Tool schema the chat model calls to request a memory write; route_message
# reads update_type from the tool call to pick the follow-up node.
class UpdateMemory(TypedDict):
    """ Decision on what memory type to update """
    update_type: Literal['user', 'todo', 'instructions']
# Initialize the model
# temperature=0 keeps routing and extraction output deterministic.
model = ChatOpenAI(model="gpt-4o", temperature=0)
## Create the Trustcall extractors for updating the user profile and ToDo list
# tool_choice="Profile" forces the extractor to emit Profile tool calls.
# The ToDo extractor is built per-invocation inside update_todos, since it
# needs a fresh Spy listener each time.
profile_extractor = create_extractor(
    model,
    tools=[Profile],
    tool_choice="Profile",
)
## Prompts
# Chatbot instruction for choosing what to update and what tools to call
# System prompt for task_mAIstro; placeholders are filled per turn.
# Fixes grammatical slips in the original prompt text ("any of the your",
# "Tell the user them", "to user user") that read ambiguously to the model.
MODEL_SYSTEM_MESSAGE = """{task_maistro_role}
You have a long term memory which keeps track of three things:
1. The user's profile (general information about them)
2. The user's ToDo list
3. General instructions for updating the ToDo list
Here is the current User Profile (may be empty if no information has been collected yet):
<user_profile>
{user_profile}
</user_profile>
Here is the current ToDo List (may be empty if no tasks have been added yet):
<todo>
{todo}
</todo>
Here are the current user-specified preferences for updating the ToDo list (may be empty if no preferences have been specified yet):
<instructions>
{instructions}
</instructions>
Here are your instructions for reasoning about the user's messages:
1. Reason carefully about the user's messages as presented below.
2. Decide whether any of your long-term memory should be updated:
- If personal information was provided about the user, update the user's profile by calling UpdateMemory tool with type `user`
- If tasks are mentioned, update the ToDo list by calling UpdateMemory tool with type `todo`
- If the user has specified preferences for how to update the ToDo list, update the instructions by calling UpdateMemory tool with type `instructions`
3. Tell the user that you have updated your memory, if appropriate:
- Do not tell the user you have updated the user's profile
- Tell the user when you update the todo list
- Do not tell the user that you have updated instructions
4. Err on the side of updating the todo list. No need to ask for explicit permission.
5. Respond naturally to the user after a tool call was made to save memories, or if no tool call was made."""
# Trustcall instruction (fixes the missing article in "Reflect on
# following interaction"); {time} is filled with the current ISO timestamp.
TRUSTCALL_INSTRUCTION = """Reflect on the following interaction.
Use the provided tools to retain any necessary memories about the user.
Use parallel tool calling to handle updates and insertions simultaneously.
System Time: {time}"""
# Instructions for updating the ToDo list; {current_instructions} is the
# previously stored preference text (or None on first use).
CREATE_INSTRUCTIONS = """Reflect on the following interaction.
Based on this interaction, update your instructions for how to update ToDo list items. Use any feedback from the user to update how they like to have items added, etc.
Your current instructions are:
<current_instructions>
{current_instructions}
</current_instructions>"""
## Node definitions
def task_mAIstro(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Load memories from the store and use them to personalize the chatbot's response.

    Reads three store namespaces — ("profile"|"todo"|"instructions",
    todo_category, user_id) — injects their contents into the system
    prompt, and invokes the model with the UpdateMemory tool bound.

    Returns:
        Dict with a "messages" list holding the model's response, which may
        contain an UpdateMemory tool call that route_message dispatches on.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    task_maistro_role = configurable.task_maistro_role
    # Retrieve profile memory from the store (first item only, if any)
    namespace = ("profile", todo_category, user_id)
    memories = store.search(namespace)
    if memories:
        user_profile = memories[0].value
    else:
        user_profile = None
    # Retrieve ToDo memory from the store, one line per item
    namespace = ("todo", todo_category, user_id)
    memories = store.search(namespace)
    todo = "\n".join(f"{mem.value}" for mem in memories)
    # Retrieve custom instructions
    namespace = ("instructions", todo_category, user_id)
    memories = store.search(namespace)
    if memories:
        instructions = memories[0].value
    else:
        instructions = ""
    system_msg = MODEL_SYSTEM_MESSAGE.format(task_maistro_role=task_maistro_role, user_profile=user_profile, todo=todo, instructions=instructions)
    # Respond using memory as well as the chat history.
    # parallel_tool_calls=False so downstream nodes only ever have a single
    # UpdateMemory tool call to respond to.
    response = model.bind_tools([UpdateMemory], parallel_tool_calls=False).invoke([SystemMessage(content=system_msg)]+state["messages"])
    return {"messages": [response]}
def update_profile(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the stored user profile.

    Runs the trustcall profile extractor over the conversation (minus the
    final tool-calling message) and persists the resulting Profile
    documents, then acknowledges the pending UpdateMemory tool call.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    # Define the namespace for the memories
    namespace = ("profile", todo_category, user_id)
    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)
    # Format the existing memories for the Trustcall extractor
    tool_name = "Profile"
    existing_memories = ([(existing_item.key, tool_name, existing_item.value)
                          for existing_item in existing_items]
                         if existing_items
                         else None
                         )
    # Merge the chat history and the instruction; [:-1] drops the AI message
    # carrying the UpdateMemory tool call we are responding to.
    TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
    # Invoke the extractor
    result = profile_extractor.invoke({"messages": updated_messages,
                                       "existing": existing_memories})
    # Save the memories from Trustcall to the store; fall back to a fresh
    # UUID key when the result is a new document rather than a patch.
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        store.put(namespace,
                  rmeta.get("json_doc_id", str(uuid.uuid4())),
                  r.model_dump(mode="json"),
                  )
    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated profile", "tool_call_id":tool_calls[0]['id']}]}
def update_todos(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the stored ToDo collection.

    Builds a trustcall ToDo extractor (with a Spy listener so the changes
    can be reported back), runs it over the conversation, persists the
    results, and replies to the pending UpdateMemory tool call with a
    summary of what changed.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    # Define the namespace for the memories
    namespace = ("todo", todo_category, user_id)
    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)
    # Format the existing memories for the Trustcall extractor
    tool_name = "ToDo"
    existing_memories = ([(existing_item.key, tool_name, existing_item.value)
                          for existing_item in existing_items]
                         if existing_items
                         else None
                         )
    # Merge the chat history and the instruction; [:-1] drops the AI message
    # carrying the UpdateMemory tool call we are responding to.
    TRUSTCALL_INSTRUCTION_FORMATTED=TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages=list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))
    # Initialize the spy for visibility into the tool calls made by Trustcall
    spy = Spy()
    # Create the Trustcall extractor for updating the ToDo list;
    # enable_inserts allows new ToDo documents, not just patches.
    todo_extractor = create_extractor(
        model,
        tools=[ToDo],
        tool_choice=tool_name,
        enable_inserts=True
    ).with_listeners(on_end=spy)
    # Invoke the extractor
    result = todo_extractor.invoke({"messages": updated_messages,
                                    "existing": existing_memories})
    # Save the memories from Trustcall to the store; fall back to a fresh
    # UUID key when the result is a new document rather than a patch.
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        store.put(namespace,
                  rmeta.get("json_doc_id", str(uuid.uuid4())),
                  r.model_dump(mode="json"),
                  )
    # Respond to the tool call made in task_mAIstro, confirming the update
    tool_calls = state['messages'][-1].tool_calls
    # Extract the changes made by Trustcall and add them to the ToolMessage
    # returned to task_mAIstro
    todo_update_msg = extract_tool_info(spy.called_tools, tool_name)
    return {"messages": [{"role": "tool", "content": todo_update_msg, "tool_call_id":tool_calls[0]['id']}]}
def update_instructions(state: MessagesState, config: RunnableConfig, store: BaseStore):
    """Reflect on the chat history and update the ToDo-update instructions.

    Asks the model to rewrite the stored preference text based on the
    conversation, overwrites the single "user_instructions" entry, and
    acknowledges the pending UpdateMemory tool call.
    """
    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    namespace = ("instructions", todo_category, user_id)
    # Single-entry memory keyed by "user_instructions" (None on first use)
    existing_memory = store.get(namespace, "user_instructions")
    # Format the memory in the system prompt
    system_msg = CREATE_INSTRUCTIONS.format(current_instructions=existing_memory.value if existing_memory else None)
    # [:-1] drops the AI message carrying the UpdateMemory tool call.
    new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'][:-1] + [HumanMessage(content="Please update the instructions based on the conversation")])
    # Overwrite the existing memory in the store
    key = "user_instructions"
    store.put(namespace, key, {"memory": new_memory.content})
    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated instructions", "tool_call_id":tool_calls[0]['id']}]}
# Conditional edge
def route_message(state: MessagesState, config: RunnableConfig, store: BaseStore) -> Literal[END, "update_todos", "update_instructions", "update_profile"]:
    """Route to the memory-update node requested by the model's tool call.

    Inspects the last message: if it carries no tool calls, the turn is
    complete (END); otherwise the UpdateMemory call's ``update_type``
    selects which update node runs next.

    Raises:
        ValueError: If the tool call carries an unrecognized update_type
            (original raised a bare ValueError with no message).
    """
    message = state['messages'][-1]
    if len(message.tool_calls) == 0:
        return END
    tool_call = message.tool_calls[0]
    update_type = tool_call['args']['update_type']
    if update_type == "user":
        return "update_profile"
    elif update_type == "todo":
        return "update_todos"
    elif update_type == "instructions":
        return "update_instructions"
    else:
        raise ValueError(f"Unknown UpdateMemory update_type: {update_type!r}")
# Create the graph + all nodes; config_schema exposes Configuration's
# fields as configurable values on every run.
builder = StateGraph(MessagesState, config_schema=configuration.Configuration)
# Define the flow of the memory extraction process
builder.add_node(task_mAIstro)
builder.add_node(update_todos)
builder.add_node(update_profile)
builder.add_node(update_instructions)
# Define the flow
builder.add_edge(START, "task_mAIstro")
# route_message sends control to one of the update nodes, or END,
# based on the UpdateMemory tool call emitted by task_mAIstro.
builder.add_conditional_edges("task_mAIstro", route_message)
# Each update node loops back so the assistant can respond to the user.
builder.add_edge("update_todos", "task_mAIstro")
builder.add_edge("update_profile", "task_mAIstro")
builder.add_edge("update_instructions", "task_mAIstro")
# Compile the graph
graph = builder.compile()
0 | lc_public_repos/langchain-academy/module-6 | lc_public_repos/langchain-academy/module-6/deployment/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"task_maistro": "./task_maistro.py:graph"
},
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/chain.ipynb | from pprint import pprint
from langchain_core.messages import AIMessage, HumanMessage
messages = [AIMessage(content=f"So you said you were researching ocean mammals?", name="Model")]
messages.append(HumanMessage(content=f"Yes, that's right.",name="Lance"))
messages.append(AIMessage(content=f"Great, what would you like to learn about.", name="Model"))
messages.append(HumanMessage(content=f"I want to learn about the best place to see Orcas in the US.", name="Lance"))
for m in messages:
m.pretty_print()import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o")
result = llm.invoke(messages)
type(result)resultresult.response_metadatadef multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
llm_with_tools = llm.bind_tools([multiply])tool_call = llm_with_tools.invoke([HumanMessage(content=f"What is 2 multiplied by 3", name="Lance")])
tool_calltool_call.additional_kwargs['tool_calls']from typing_extensions import TypedDict
from langchain_core.messages import AnyMessage
class MessagesState(TypedDict):
messages: list[AnyMessage]from typing import Annotated
from langgraph.graph.message import add_messages
class MessagesState(TypedDict):
messages: Annotated[list[AnyMessage], add_messages]from langgraph.graph import MessagesState
class MessagesState(MessagesState):
# Add any keys needed beyond messages, which is pre-built
pass# Initial state
initial_messages = [AIMessage(content="Hello! How can I assist you?", name="Model"),
HumanMessage(content="I'm looking for information on marine biology.", name="Lance")
]
# New message to add
new_message = AIMessage(content="Sure, I can help with that. What specifically are you interested in?", name="Model")
# Test
add_messages(initial_messages , new_message)from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
# Node
def tool_calling_llm(state: MessagesState):
return {"messages": [llm_with_tools.invoke(state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("tool_calling_llm", tool_calling_llm)
builder.add_edge(START, "tool_calling_llm")
builder.add_edge("tool_calling_llm", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))messages = graph.invoke({"messages": HumanMessage(content="Hello!")})
for m in messages['messages']:
m.pretty_print()messages = graph.invoke({"messages": HumanMessage(content="Multiply 2 and 3")})
for m in messages['messages']:
m.pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/deployment.ipynb | import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client# Replace this with the URL of your own deployed graph
URL = "http://localhost:56091"
client = get_client(url=URL)
# Search all hosted graphs
assistants = await client.assistants.search()assistants[-3]# We create a thread for tracking the state of our run
thread = await client.threads.create()from langchain_core.messages import HumanMessage
# Input
input = {"messages": [HumanMessage(content="Multiply 3 by 2.")]}
# Stream
async for chunk in client.runs.stream(
thread['thread_id'],
"agent",
input=input,
stream_mode="values",
):
if chunk.data and chunk.event != "metadata":
print(chunk.data['messages'][-1])import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("LANGCHAIN_API_KEY")# Replace this with the URL of your deployed graph
URL = "https://langchain-academy-8011c561878d50b1883f7ed11b32d720.default.us.langgraph.app"
client = get_client(url=URL)
# Search all hosted graphs
assistants = await client.assistants.search()# Select the agent
agent = assistants[0]agentfrom langchain_core.messages import HumanMessage
# We create a thread for tracking the state of our run
thread = await client.threads.create()
# Input
input = {"messages": [HumanMessage(content="Multiply 3 by 2.")]}
# Stream
async for chunk in client.runs.stream(
thread['thread_id'],
"agent",
input=input,
stream_mode="values",
):
if chunk.data and chunk.event != "metadata":
print(chunk.data['messages'][-1]) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/agent-memory.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
# This will be a tool
def add(a: int, b: int) -> int:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a + b
def divide(a: int, b: int) -> float:
"""Divide a and b.
Args:
a: first int
b: second int
"""
return a / b
tools = [add, multiply, divide]
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)from langgraph.graph import MessagesState
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition, ToolNode
from IPython.display import Image, display
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
react_graph = builder.compile()
# Show
display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))messages = [HumanMessage(content="Add 3 and 4.")]
messages = react_graph.invoke({"messages": messages})
for m in messages['messages']:
m.pretty_print()messages = [HumanMessage(content="Multiply that by 2.")]
messages = react_graph.invoke({"messages": messages})
for m in messages['messages']:
m.pretty_print()from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
react_graph_memory = builder.compile(checkpointer=memory)# Specify a thread
config = {"configurable": {"thread_id": "1"}}
# Specify an input
messages = [HumanMessage(content="Add 3 and 4.")]
# Run
messages = react_graph_memory.invoke({"messages": messages},config)
for m in messages['messages']:
m.pretty_print()messages = [HumanMessage(content="Multiply that by 2.")]
messages = react_graph_memory.invoke({"messages": messages}, config)
for m in messages['messages']:
m.pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/agent.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
# This will be a tool
def add(a: int, b: int) -> int:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a + b
def divide(a: int, b: int) -> float:
"""Divide a and b.
Args:
a: first int
b: second int
"""
return a / b
tools = [add, multiply, divide]
llm = ChatOpenAI(model="gpt-4o")
# For this ipynb we set parallel tool calling to false as math generally is done sequentially, and this time we have 3 tools that can do math
# the OpenAI model specifically defaults to parallel tool calling for efficiency, see https://python.langchain.com/docs/how_to/tool_calling_parallel/
# play around with it and see how the model behaves with math equations!
llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)from langgraph.graph import MessagesState
from langchain_core.messages import HumanMessage, SystemMessage
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from IPython.display import Image, display
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
react_graph = builder.compile()
# Show
display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))messages = [HumanMessage(content="Add 3 and 4. Multiply the output by 2. Divide the output by 5")]
messages = react_graph.invoke({"messages": messages})for m in messages['messages']:
m.pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/router.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools([multiply])from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
from langgraph.graph import MessagesState
from langgraph.prebuilt import ToolNode
from langgraph.prebuilt import tools_condition
# Node
def tool_calling_llm(state: MessagesState):
return {"messages": [llm_with_tools.invoke(state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("tool_calling_llm", tool_calling_llm)
builder.add_node("tools", ToolNode([multiply]))
builder.add_edge(START, "tool_calling_llm")
builder.add_conditional_edges(
"tool_calling_llm",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))from langchain_core.messages import HumanMessage
messages = [HumanMessage(content="Hello world.")]
messages = graph.invoke({"messages": messages})
for m in messages['messages']:
m.pretty_print() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-1/simple-graph.ipynb | from typing_extensions import TypedDict
class State(TypedDict):
graph_state: strdef node_1(state):
print("---Node 1---")
return {"graph_state": state['graph_state'] +" I am"}
def node_2(state):
print("---Node 2---")
return {"graph_state": state['graph_state'] +" happy!"}
def node_3(state):
print("---Node 3---")
return {"graph_state": state['graph_state'] +" sad!"}import random
from typing import Literal
def decide_mood(state) -> Literal["node_2", "node_3"]:
# Often, we will use state to decide on the next node to visit
user_input = state['graph_state']
# Here, let's just do a 50 / 50 split between nodes 2, 3
if random.random() < 0.5:
# 50% of the time, we return Node 2
return "node_2"
# 50% of the time, we return Node 3
return "node_3"from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"graph_state" : "Hi, this is Lance."}) |
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai
|
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/simple.py | import random
from typing import Literal
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
# State
class State(TypedDict):
graph_state: str
# Conditional edge
def decide_mood(state) -> Literal["node_2", "node_3"]:
# Often, we will use state to decide on the next node to visit
user_input = state['graph_state']
# Here, let's just do a 50 / 50 split between nodes 2, 3
if random.random() < 0.5:
# 50% of the time, we return Node 2
return "node_2"
# 50% of the time, we return Node 3
return "node_3"
# Nodes
def node_1(state):
print("---Node 1---")
return {"graph_state":state['graph_state'] +" I am"}
def node_2(state):
print("---Node 2---")
return {"graph_state":state['graph_state'] +" happy!"}
def node_3(state):
print("---Node 3---")
return {"graph_state":state['graph_state'] +" sad!"}
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Compile graph
graph = builder.compile() |
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/agent.py | from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
def add(a: int, b: int) -> int:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a + b
def multiply(a: int, b: int) -> int:
"""Multiplies a and b.
Args:
a: first int
b: second int
"""
return a * b
def divide(a: int, b: int) -> float:
"""Divide a and b.
Args:
a: first int
b: second int
"""
return a / b
tools = [add, multiply, divide]
# Define LLM with bound tools
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with writing performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile graph
graph = builder.compile()
|
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/.env.example | OPENAI_API_KEY=sk-xxx |
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"simple_graph": "./simple.py:graph",
"router": "./router.py:graph",
"agent": "./agent.py:graph"
},
"env": "./.env",
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy/module-1 | lc_public_repos/langchain-academy/module-1/studio/router.py | from langchain_openai import ChatOpenAI
from langgraph.graph import MessagesState
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode, tools_condition
# Tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b.
Args:
a: first int
b: second int
"""
return a * b
# LLM with bound tool
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools([multiply])
# Node
def tool_calling_llm(state: MessagesState):
return {"messages": [llm_with_tools.invoke(state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("tool_calling_llm", tool_calling_llm)
builder.add_node("tools", ToolNode([multiply]))
builder.add_edge(START, "tool_calling_llm")
builder.add_conditional_edges(
"tool_calling_llm",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", END)
# Compile graph
graph = builder.compile() |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-3/breakpoints.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
# This will be a tool
def add(a: int, b: int) -> int:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a + b
def divide(a: int, b: int) -> float:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a / b
tools = [add, multiply, divide]
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)from IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine the control flow
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
memory = MemorySaver()
graph = builder.compile(interrupt_before=["tools"], checkpointer=memory)
# Show
display(Image(graph.get_graph(xray=True).draw_mermaid_png()))# Input
initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
# Thread
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
event['messages'][-1].pretty_print()state = graph.get_state(thread)
state.nextfor event in graph.stream(None, thread, stream_mode="values"):
event['messages'][-1].pretty_print()# Input
initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
# Thread
thread = {"configurable": {"thread_id": "2"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
event['messages'][-1].pretty_print()
# Get user feedback
user_approval = input("Do you want to call the tool? (yes/no): ")
# Check approval
if user_approval.lower() == "yes":
# If approved, continue the graph execution
for event in graph.stream(None, thread, stream_mode="values"):
event['messages'][-1].pretty_print()
else:
print("Operation cancelled by user.")import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
client = get_client(url="http://localhost:56091")initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
thread = await client.threads.create()
async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=initial_input,
stream_mode="values",
interrupt_before=["tools"],
):
print(f"Receiving new event of type: {chunk.event}...")
messages = chunk.data.get('messages', [])
if messages:
print(messages[-1])
print("-" * 50)async for chunk in client.runs.stream(
thread["thread_id"],
"agent",
input=None,
stream_mode="values",
interrupt_before=["tools"],
):
print(f"Receiving new event of type: {chunk.event}...")
messages = chunk.data.get('messages', [])
if messages:
print(messages[-1])
print("-" * 50) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-3/dynamic-breakpoints.ipynb | from IPython.display import Image, display
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.errors import NodeInterrupt
from langgraph.graph import START, END, StateGraph
class State(TypedDict):
input: str
def step_1(state: State) -> State:
print("---Step 1---")
return state
def step_2(state: State) -> State:
# Let's optionally raise a NodeInterrupt if the length of the input is longer than 5 characters
if len(state['input']) > 5:
raise NodeInterrupt(f"Received input that is longer than 5 characters: {state['input']}")
print("---Step 2---")
return state
def step_3(state: State) -> State:
print("---Step 3---")
return state
builder = StateGraph(State)
builder.add_node("step_1", step_1)
builder.add_node("step_2", step_2)
builder.add_node("step_3", step_3)
builder.add_edge(START, "step_1")
builder.add_edge("step_1", "step_2")
builder.add_edge("step_2", "step_3")
builder.add_edge("step_3", END)
# Set up memory
memory = MemorySaver()
# Compile the graph with memory
graph = builder.compile(checkpointer=memory)
# View
display(Image(graph.get_graph().draw_mermaid_png()))initial_input = {"input": "hello world"}
thread_config = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread_config, stream_mode="values"):
print(event)state = graph.get_state(thread_config)
print(state.next)print(state.tasks)for event in graph.stream(None, thread_config, stream_mode="values"):
print(event)state = graph.get_state(thread_config)
print(state.next)graph.update_state(
thread_config,
{"input": "hi"},
)for event in graph.stream(None, thread_config, stream_mode="values"):
print(event)import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
# Replace this with the URL of your own deployed graph
URL = "http://localhost:62575"
client = get_client(url=URL)
# Search all hosted graphs
assistants = await client.assistants.search()thread = await client.threads.create()
input_dict = {"input": "hello world"}
async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="dynamic_breakpoints",
input=input_dict,
stream_mode="values",):
print(f"Receiving new event of type: {chunk.event}...")
print(chunk.data)
print("\n\n")current_state = await client.threads.get_state(thread['thread_id'])current_state['next']await client.threads.update_state(thread['thread_id'], {"input": "hi!"})async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="dynamic_breakpoints",
input=None,
stream_mode="values",):
print(f"Receiving new event of type: {chunk.event}...")
print(chunk.data)
print("\n\n")current_state = await client.threads.get_state(thread['thread_id'])
current_state |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-3/time-travel.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
"""Multiply a and b.
Args:
a: first int
b: second int
"""
return a * b
# This will be a tool
def add(a: int, b: int) -> int:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a + b
def divide(a: int, b: int) -> float:
"""Adds a and b.
Args:
a: first int
b: second int
"""
return a / b
tools = [add, multiply, divide]
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)from IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState
from langgraph.graph import START, END, StateGraph
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine the control flow
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
memory = MemorySaver()
graph = builder.compile(checkpointer=MemorySaver())
# Show
display(Image(graph.get_graph(xray=True).draw_mermaid_png()))# Input
initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
# Thread
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
event['messages'][-1].pretty_print()graph.get_state({'configurable': {'thread_id': '1'}})all_states = [s for s in graph.get_state_history(thread)]len(all_states)all_states[-2]to_replay = all_states[-2]to_replayto_replay.valuesto_replay.nextto_replay.configfor event in graph.stream(None, to_replay.config, stream_mode="values"):
event['messages'][-1].pretty_print()to_fork = all_states[-2]
to_fork.values["messages"]to_fork.configfork_config = graph.update_state(
to_fork.config,
{"messages": [HumanMessage(content='Multiply 5 and 3',
id=to_fork.values["messages"][0].id)]},
)fork_configall_states = [state for state in graph.get_state_history(thread) ]
all_states[0].values["messages"]graph.get_state({'configurable': {'thread_id': '1'}})for event in graph.stream(None, fork_config, stream_mode="values"):
event['messages'][-1].pretty_print()graph.get_state({'configurable': {'thread_id': '1'}})import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
client = get_client(url="http://localhost:62780")initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
thread = await client.threads.create()
async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id = "agent",
input=initial_input,
stream_mode="updates",
):
if chunk.data:
assisant_node = chunk.data.get('assistant', {}).get('messages', [])
tool_node = chunk.data.get('tools', {}).get('messages', [])
if assisant_node:
print("-" * 20+"Assistant Node"+"-" * 20)
print(assisant_node[-1])
elif tool_node:
print("-" * 20+"Tools Node"+"-" * 20)
print(tool_node[-1])states = await client.threads.get_history(thread['thread_id'])
to_replay = states[-2]
to_replayasync for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=None,
stream_mode="values",
checkpoint_id=to_replay['checkpoint_id']
):
print(f"Receiving new event of type: {chunk.event}...")
print(chunk.data)
print("\n\n")async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=None,
stream_mode="updates",
checkpoint_id=to_replay['checkpoint_id']
):
if chunk.data:
assisant_node = chunk.data.get('assistant', {}).get('messages', [])
tool_node = chunk.data.get('tools', {}).get('messages', [])
if assisant_node:
print("-" * 20+"Assistant Node"+"-" * 20)
print(assisant_node[-1])
elif tool_node:
print("-" * 20+"Tools Node"+"-" * 20)
print(tool_node[-1])initial_input = {"messages": HumanMessage(content="Multiply 2 and 3")}
thread = await client.threads.create()
async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=initial_input,
stream_mode="updates",
):
if chunk.data:
assisant_node = chunk.data.get('assistant', {}).get('messages', [])
tool_node = chunk.data.get('tools', {}).get('messages', [])
if assisant_node:
print("-" * 20+"Assistant Node"+"-" * 20)
print(assisant_node[-1])
elif tool_node:
print("-" * 20+"Tools Node"+"-" * 20)
print(tool_node[-1])states = await client.threads.get_history(thread['thread_id'])
to_fork = states[-2]
to_fork['values']to_fork['values']['messages'][0]['id']to_fork['next']to_fork['checkpoint_id']forked_input = {"messages": HumanMessage(content="Multiply 3 and 3",
id=to_fork['values']['messages'][0]['id'])}
forked_config = await client.threads.update_state(
thread["thread_id"],
forked_input,
checkpoint_id=to_fork['checkpoint_id']
)forked_configstates = await client.threads.get_history(thread['thread_id'])
states[0]async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=None,
stream_mode="updates",
checkpoint_id=forked_config['checkpoint_id']
):
if chunk.data:
assisant_node = chunk.data.get('assistant', {}).get('messages', [])
tool_node = chunk.data.get('tools', {}).get('messages', [])
if assisant_node:
print("-" * 20+"Assistant Node"+"-" * 20)
print(assisant_node[-1])
elif tool_node:
print("-" * 20+"Tools Node"+"-" * 20)
print(tool_node[-1]) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-3/streaming-interruption.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from IPython.display import Image, display
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage, RemoveMessage
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.graph import MessagesState
# LLM
model = ChatOpenAI(model="gpt-4o", temperature=0)
# State
class State(MessagesState):
summary: str
# Define the logic to call the model
def call_model(state: State, config: RunnableConfig):
# Get summary if it exists
summary = state.get("summary", "")
# If there is summary, then we add it
if summary:
# Add summary to system message
system_message = f"Summary of conversation earlier: {summary}"
# Append summary to any newer messages
messages = [SystemMessage(content=system_message)] + state["messages"]
else:
messages = state["messages"]
response = model.invoke(messages, config)
return {"messages": response}
def summarize_conversation(state: State):
# First, we get any existing summary
summary = state.get("summary", "")
# Create our summarization prompt
if summary:
# A summary already exists
summary_message = (
f"This is summary of the conversation to date: {summary}\n\n"
"Extend the summary by taking into account the new messages above:"
)
else:
summary_message = "Create a summary of the conversation above:"
# Add prompt to our history
messages = state["messages"] + [HumanMessage(content=summary_message)]
response = model.invoke(messages)
# Delete all but the 2 most recent messages
delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]]
return {"summary": response.content, "messages": delete_messages}
# Determine whether to end or summarize the conversation
def should_continue(state: State):
"""Return the next node to execute."""
messages = state["messages"]
# If there are more than six messages, then we summarize the conversation
if len(messages) > 6:
return "summarize_conversation"
# Otherwise we can just end
return END
# Define a new graph
workflow = StateGraph(State)
workflow.add_node("conversation", call_model)
workflow.add_node(summarize_conversation)
# Set the entrypoint as conversation
workflow.add_edge(START, "conversation")
workflow.add_conditional_edges("conversation", should_continue)
workflow.add_edge("summarize_conversation", END)
# Compile
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
display(Image(graph.get_graph().draw_mermaid_png()))# Create a thread
config = {"configurable": {"thread_id": "1"}}
# Start conversation
for chunk in graph.stream({"messages": [HumanMessage(content="hi! I'm Lance")]}, config, stream_mode="updates"):
print(chunk)# Start conversation
for chunk in graph.stream({"messages": [HumanMessage(content="hi! I'm Lance")]}, config, stream_mode="updates"):
chunk['conversation']["messages"].pretty_print()# Start conversation, again
config = {"configurable": {"thread_id": "2"}}
# Start conversation
input_message = HumanMessage(content="hi! I'm Lance")
for event in graph.stream({"messages": [input_message]}, config, stream_mode="values"):
for m in event['messages']:
m.pretty_print()
print("---"*25)config = {"configurable": {"thread_id": "3"}}
input_message = HumanMessage(content="Tell me about the 49ers NFL team")
async for event in graph.astream_events({"messages": [input_message]}, config, version="v2"):
print(f"Node: {event['metadata'].get('langgraph_node','')}. Type: {event['event']}. Name: {event['name']}")node_to_stream = 'conversation'
config = {"configurable": {"thread_id": "4"}}
input_message = HumanMessage(content="Tell me about the 49ers NFL team")
async for event in graph.astream_events({"messages": [input_message]}, config, version="v2"):
# Get chat model tokens from a particular node
if event["event"] == "on_chat_model_stream" and event['metadata'].get('langgraph_node','') == node_to_stream:
print(event["data"])config = {"configurable": {"thread_id": "5"}}
input_message = HumanMessage(content="Tell me about the 49ers NFL team")
async for event in graph.astream_events({"messages": [input_message]}, config, version="v2"):
# Get chat model tokens from a particular node
if event["event"] == "on_chat_model_stream" and event['metadata'].get('langgraph_node','') == node_to_stream:
data = event["data"]
print(data["chunk"].content, end="|")import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
# Replace this with the URL of your own deployed graph
URL = "http://localhost:56091"
client = get_client(url=URL)
# Search all hosted graphs
assistants = await client.assistants.search()# Create a new thread
thread = await client.threads.create()
# Input message
input_message = HumanMessage(content="Multiply 2 and 3")
async for event in client.runs.stream(thread["thread_id"],
assistant_id="agent",
input={"messages": [input_message]},
stream_mode="values"):
print(event)from langchain_core.messages import convert_to_messages
thread = await client.threads.create()
input_message = HumanMessage(content="Multiply 2 and 3")
async for event in client.runs.stream(thread["thread_id"], assistant_id="agent", input={"messages": [input_message]}, stream_mode="values"):
messages = event.data.get('messages',None)
if messages:
print(convert_to_messages(messages)[-1])
print('='*25)thread = await client.threads.create()
input_message = HumanMessage(content="Multiply 2 and 3")
async for event in client.runs.stream(thread["thread_id"],
assistant_id="agent",
input={"messages": [input_message]},
stream_mode="messages"):
print(event.event)thread = await client.threads.create()
input_message = HumanMessage(content="Multiply 2 and 3")
def format_tool_calls(tool_calls):
    """Render a list of tool-call dicts as a human-readable string.

    Args:
        tool_calls (list): Dicts each carrying 'id', 'name', and 'args' keys.

    Returns:
        str: One formatted line per call, or "No tool calls" when the list
        is empty or None.
    """
    if not tool_calls:
        return "No tool calls"
    lines = [
        f"Tool Call ID: {call['id']}, Function: {call['name']}, Arguments: {call['args']}"
        for call in tool_calls
    ]
    return "\n".join(lines)
async for event in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input={"messages": [input_message]},
stream_mode="messages",):
# Handle metadata events
if event.event == "metadata":
print(f"Metadata: Run ID - {event.data['run_id']}")
print("-" * 50)
# Handle partial message events
elif event.event == "messages/partial":
for data_item in event.data:
# Process user messages
if "role" in data_item and data_item["role"] == "user":
print(f"Human: {data_item['content']}")
else:
# Extract relevant data from the event
tool_calls = data_item.get("tool_calls", [])
invalid_tool_calls = data_item.get("invalid_tool_calls", [])
content = data_item.get("content", "")
response_metadata = data_item.get("response_metadata", {})
if content:
print(f"AI: {content}")
if tool_calls:
print("Tool Calls:")
print(format_tool_calls(tool_calls))
if invalid_tool_calls:
print("Invalid Tool Calls:")
print(format_tool_calls(invalid_tool_calls))
if response_metadata:
finish_reason = response_metadata.get("finish_reason", "N/A")
print(f"Response Metadata: Finish Reason - {finish_reason}")
print("-" * 50) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-3/edit-state-human-feedback.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
def multiply(a: int, b: int) -> int:
    """Return the product of a and b.

    Args:
        a: first int
        b: second int
    """
    return a * b
# This will be a tool
def add(a: int, b: int) -> int:
    """Return the sum of a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b
def divide(a: int, b: int) -> float:
    """Divide a by b.

    Args:
        a: numerator int
        b: denominator int (must be non-zero)

    Raises:
        ZeroDivisionError: if b is 0.
    """
    # Docstring previously said "Adds a and b." (copy-paste from add()).
    return a / b
tools = [add, multiply, divide]
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)from IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import HumanMessage, SystemMessage
# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# Node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine the control flow
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
memory = MemorySaver()
graph = builder.compile(interrupt_before=["assistant"], checkpointer=memory)
# Show
display(Image(graph.get_graph(xray=True).draw_mermaid_png()))# Input
initial_input = {"messages": "Multiply 2 and 3"}
# Thread
thread = {"configurable": {"thread_id": "1"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
event['messages'][-1].pretty_print()state = graph.get_state(thread)
stategraph.update_state(
thread,
{"messages": [HumanMessage(content="No, actually multiply 3 and 3!")]},
)new_state = graph.get_state(thread).values
for m in new_state['messages']:
m.pretty_print()for event in graph.stream(None, thread, stream_mode="values"):
event['messages'][-1].pretty_print()for event in graph.stream(None, thread, stream_mode="values"):
event['messages'][-1].pretty_print()import platform
if 'google.colab' in str(get_ipython()) or platform.system() != 'Darwin':
raise Exception("Unfortunately LangGraph Studio is currently not supported on Google Colab or requires a Mac")from langgraph_sdk import get_client
client = get_client(url="http://localhost:56091")initial_input = {"messages": "Multiply 2 and 3"}
thread = await client.threads.create()
async for chunk in client.runs.stream(
thread["thread_id"],
"agent",
input=initial_input,
stream_mode="values",
interrupt_before=["assistant"],
):
print(f"Receiving new event of type: {chunk.event}...")
messages = chunk.data.get('messages', [])
if messages:
print(messages[-1])
print("-" * 50)current_state = await client.threads.get_state(thread['thread_id'])
current_statelast_message = current_state['values']['messages'][-1]
last_messagelast_message['content'] = "No, actually multiply 3 and 3!"
last_messagelast_messageawait client.threads.update_state(thread['thread_id'], {"messages": last_message})async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=None,
stream_mode="values",
interrupt_before=["assistant"],
):
print(f"Receiving new event of type: {chunk.event}...")
messages = chunk.data.get('messages', [])
if messages:
print(messages[-1])
print("-" * 50)async for chunk in client.runs.stream(
thread["thread_id"],
assistant_id="agent",
input=None,
stream_mode="values",
interrupt_before=["assistant"],
):
print(f"Receiving new event of type: {chunk.event}...")
messages = chunk.data.get('messages', [])
if messages:
print(messages[-1])
print("-" * 50)# System message
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")
# no-op node that should be interrupted on
def human_feedback(state: MessagesState):
pass
# Assistant node
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_node("human_feedback", human_feedback)
# Define edges: these determine the control flow
builder.add_edge(START, "human_feedback")
builder.add_edge("human_feedback", "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "human_feedback")
memory = MemorySaver()
graph = builder.compile(interrupt_before=["human_feedback"], checkpointer=memory)
display(Image(graph.get_graph().draw_mermaid_png()))# Input
initial_input = {"messages": "Multiply 2 and 3"}
# Thread
thread = {"configurable": {"thread_id": "5"}}
# Run the graph until the first interruption
for event in graph.stream(initial_input, thread, stream_mode="values"):
event["messages"][-1].pretty_print()
# Get user input
user_input = input("Tell me how you want to update the state: ")
# We now update the state as if we are the human_feedback node
graph.update_state(thread, {"messages": user_input}, as_node="human_feedback")
# Continue the graph execution
for event in graph.stream(None, thread, stream_mode="values"):
event["messages"][-1].pretty_print()# Continue the graph execution
for event in graph.stream(None, thread, stream_mode="values"):
event["messages"][-1].pretty_print() |
0 | lc_public_repos/langchain-academy/module-3 | lc_public_repos/langchain-academy/module-3/studio/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai |
0 | lc_public_repos/langchain-academy/module-3 | lc_public_repos/langchain-academy/module-3/studio/agent.py | from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
def add(a: int, b: int) -> int:
    """Return the sum of a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b
def multiply(a: int, b: int) -> int:
    """Return the product of a and b.

    Args:
        a: first int
        b: second int
    """
    return a * b
def divide(a: int, b: int) -> float:
    """Divide a by b.

    Args:
        a: numerator int
        b: denominator int (must be non-zero)

    Raises:
        ZeroDivisionError: if b is 0.
    """
    # Docstring previously said "Adds a and b." (copy-paste from add()).
    return a / b
tools = [add, multiply, divide]
# Define LLM with bound tools
llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools)
# System message; other copies of this prompt in the course read
# "tasked with performing arithmetic" -- the stray "writing" was a typo.
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic on a set of inputs.")

# Node
def assistant(state: MessagesState):
    """LLM node: invoke the tool-bound model on the system prompt plus history."""
    return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
# If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile graph
graph = builder.compile()
|
0 | lc_public_repos/langchain-academy/module-3 | lc_public_repos/langchain-academy/module-3/studio/dynamic_breakpoints.py | from typing_extensions import TypedDict
from langgraph.errors import NodeInterrupt
from langgraph.graph import START, END, StateGraph
class State(TypedDict):
input: str
def step_1(state: State) -> State:
    """First graph node: log progress and pass the state through unchanged."""
    print("---Step 1---")
    return state
def step_2(state: State) -> State:
    """Second graph node: pass the state through, but interrupt on long input."""
    # Dynamic breakpoint: halt the graph when the input exceeds 5 characters.
    too_long = len(state['input']) > 5
    if too_long:
        raise NodeInterrupt(f"Received input that is longer than 5 characters: {state['input']}")
    print("---Step 2---")
    return state
def step_3(state: State) -> State:
    """Third graph node: log progress and pass the state through unchanged."""
    print("---Step 3---")
    return state
builder = StateGraph(State)
builder.add_node("step_1", step_1)
builder.add_node("step_2", step_2)
builder.add_node("step_3", step_3)
builder.add_edge(START, "step_1")
builder.add_edge("step_1", "step_2")
builder.add_edge("step_2", "step_3")
builder.add_edge("step_3", END)
graph = builder.compile() |
0 | lc_public_repos/langchain-academy/module-3 | lc_public_repos/langchain-academy/module-3/studio/.env.example | OPENAI_API_KEY=sk-xxx |
0 | lc_public_repos/langchain-academy/module-3 | lc_public_repos/langchain-academy/module-3/studio/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"agent": "./agent.py:graph",
"dynamic_breakpoints": "./dynamic_breakpoints.py:graph"
},
"env": "./.env",
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/trim-filter-messages.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from pprint import pprint
from langchain_core.messages import AIMessage, HumanMessage
messages = [AIMessage(f"So you said you were researching ocean mammals?", name="Bot")]
messages.append(HumanMessage(f"Yes, I know about whales. But what others should I learn about?", name="Lance"))
for m in messages:
m.pretty_print()from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o")
llm.invoke(messages)from IPython.display import Image, display
from langgraph.graph import MessagesState
from langgraph.graph import StateGraph, START, END
# Node
def chat_model_node(state: MessagesState):
return {"messages": llm.invoke(state["messages"])}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("chat_model", chat_model_node)
builder.add_edge(START, "chat_model")
builder.add_edge("chat_model", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))output = graph.invoke({'messages': messages})
for m in output['messages']:
m.pretty_print()from langchain_core.messages import RemoveMessage
# Nodes
def filter_messages(state: MessagesState):
# Delete all but the 2 most recent messages
delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]]
return {"messages": delete_messages}
def chat_model_node(state: MessagesState):
return {"messages": [llm.invoke(state["messages"])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("filter", filter_messages)
builder.add_node("chat_model", chat_model_node)
builder.add_edge(START, "filter")
builder.add_edge("filter", "chat_model")
builder.add_edge("chat_model", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))# Message list with a preamble
messages = [AIMessage("Hi.", name="Bot", id="1")]
messages.append(HumanMessage("Hi.", name="Lance", id="2"))
messages.append(AIMessage("So you said you were researching ocean mammals?", name="Bot", id="3"))
messages.append(HumanMessage("Yes, I know about whales. But what others should I learn about?", name="Lance", id="4"))
# Invoke
output = graph.invoke({'messages': messages})
for m in output['messages']:
m.pretty_print()# Node
def chat_model_node(state: MessagesState):
return {"messages": [llm.invoke(state["messages"][-1:])]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("chat_model", chat_model_node)
builder.add_edge(START, "chat_model")
builder.add_edge("chat_model", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))messages.append(output['messages'][-1])
messages.append(HumanMessage(f"Tell me more about Narwhals!", name="Lance"))for m in messages:
m.pretty_print()# Invoke, using message filtering
output = graph.invoke({'messages': messages})
for m in output['messages']:
m.pretty_print()from langchain_core.messages import trim_messages
# Node
def chat_model_node(state: MessagesState):
messages = trim_messages(
state["messages"],
max_tokens=100,
strategy="last",
token_counter=ChatOpenAI(model="gpt-4o"),
allow_partial=False,
)
return {"messages": [llm.invoke(messages)]}
# Build graph
builder = StateGraph(MessagesState)
builder.add_node("chat_model", chat_model_node)
builder.add_edge(START, "chat_model")
builder.add_edge("chat_model", END)
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))messages.append(output['messages'][-1])
messages.append(HumanMessage(f"Tell me where Orcas live!", name="Lance"))# Example of trimming messages
trim_messages(
messages,
max_tokens=100,
strategy="last",
token_counter=ChatOpenAI(model="gpt-4o"),
allow_partial=False
)# Invoke, using message trimming in the chat_model_node
messages_out_trim = graph.invoke({'messages': messages}) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/chatbot-summarization.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")_set_env("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "langchain-academy"from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4o",temperature=0)from langgraph.graph import MessagesState
class State(MessagesState):
summary: strfrom langchain_core.messages import SystemMessage, HumanMessage, RemoveMessage
# Define the logic to call the model
def call_model(state: State):
# Get summary if it exists
summary = state.get("summary", "")
# If there is summary, then we add it
if summary:
# Add summary to system message
system_message = f"Summary of conversation earlier: {summary}"
# Append summary to any newer messages
messages = [SystemMessage(content=system_message)] + state["messages"]
else:
messages = state["messages"]
response = model.invoke(messages)
return {"messages": response}def summarize_conversation(state: State):
# First, we get any existing summary
summary = state.get("summary", "")
# Create our summarization prompt
if summary:
# A summary already exists
summary_message = (
f"This is summary of the conversation to date: {summary}\n\n"
"Extend the summary by taking into account the new messages above:"
)
else:
summary_message = "Create a summary of the conversation above:"
# Add prompt to our history
messages = state["messages"] + [HumanMessage(content=summary_message)]
response = model.invoke(messages)
# Delete all but the 2 most recent messages
delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]]
return {"summary": response.content, "messages": delete_messages}from langgraph.graph import END
# Determine whether to end or summarize the conversation
def should_continue(state: State):
"""Return the next node to execute."""
messages = state["messages"]
# If there are more than six messages, then we summarize the conversation
if len(messages) > 6:
return "summarize_conversation"
# Otherwise we can just end
return ENDfrom IPython.display import Image, display
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START
# Define a new graph
workflow = StateGraph(State)
workflow.add_node("conversation", call_model)
workflow.add_node(summarize_conversation)
# Set the entrypoint as conversation
workflow.add_edge(START, "conversation")
workflow.add_conditional_edges("conversation", should_continue)
workflow.add_edge("summarize_conversation", END)
# Compile
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
display(Image(graph.get_graph().draw_mermaid_png()))# Create a thread
config = {"configurable": {"thread_id": "1"}}
# Start conversation
input_message = HumanMessage(content="hi! I'm Lance")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()
input_message = HumanMessage(content="what's my name?")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()
input_message = HumanMessage(content="i like the 49ers!")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()graph.get_state(config).values.get("summary","")input_message = HumanMessage(content="i like Nick Bosa, isn't he the highest paid defensive player?")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()graph.get_state(config).values.get("summary","") |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/multiple-schemas.ipynb | from typing_extensions import TypedDict
from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
class OverallState(TypedDict):
foo: int
class PrivateState(TypedDict):
baz: int
def node_1(state: OverallState) -> PrivateState:
print("---Node 1---")
return {"baz": state['foo'] + 1}
def node_2(state: PrivateState) -> OverallState:
print("---Node 2---")
return {"foo": state['baz'] + 1}
# Build graph
builder = StateGraph(OverallState)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", "node_2")
builder.add_edge("node_2", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"foo" : 1})class OverallState(TypedDict):
question: str
answer: str
notes: str
def thinking_node(state: OverallState):
    """Intermediate node: drafts an answer and records internal notes."""
    # Fixed typo in the notes string ("his is name" -> "his name").
    return {"answer": "bye", "notes": "... his name is Lance"}
def answer_node(state: OverallState):
return {"answer": "bye Lance"}
graph = StateGraph(OverallState)
graph.add_node("answer_node", answer_node)
graph.add_node("thinking_node", thinking_node)
graph.add_edge(START, "thinking_node")
graph.add_edge("thinking_node", "answer_node")
graph.add_edge("answer_node", END)
graph = graph.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"question":"hi"})class InputState(TypedDict):
question: str
class OutputState(TypedDict):
answer: str
class OverallState(TypedDict):
question: str
answer: str
notes: str
def thinking_node(state: InputState):
    """Intermediate node: drafts an answer and records internal notes."""
    # Fixed typo in the notes string ("his is name" -> "his name").
    return {"answer": "bye", "notes": "... his name is Lance"}
def answer_node(state: OverallState) -> OutputState:
return {"answer": "bye Lance"}
graph = StateGraph(OverallState, input=InputState, output=OutputState)
graph.add_node("answer_node", answer_node)
graph.add_node("thinking_node", thinking_node)
graph.add_edge(START, "thinking_node")
graph.add_edge("thinking_node", "answer_node")
graph.add_edge("answer_node", END)
graph = graph.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))
graph.invoke({"question":"hi"}) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/state-schema.ipynb | from typing_extensions import TypedDict
class TypedDictState(TypedDict):
foo: str
bar: strfrom typing import Literal
class TypedDictState(TypedDict):
name: str
mood: Literal["happy","sad"]import random
from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
def node_1(state):
print("---Node 1---")
return {"name": state['name'] + " is ... "}
def node_2(state):
print("---Node 2---")
return {"mood": "happy"}
def node_3(state):
print("---Node 3---")
return {"mood": "sad"}
def decide_mood(state) -> Literal["node_2", "node_3"]:
    """Route to node_2 or node_3 with equal probability (coin flip)."""
    # 50/50 split between the two downstream nodes.
    return "node_2" if random.random() < 0.5 else "node_3"
# Build graph
builder = StateGraph(TypedDictState)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"name":"Lance"})from dataclasses import dataclass
@dataclass
class DataclassState:
name: str
mood: Literal["happy","sad"]def node_1(state):
print("---Node 1---")
return {"name": state.name + " is ... "}
# Build graph
builder = StateGraph(DataclassState)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke(DataclassState(name="Lance",mood="sad"))dataclass_instance = DataclassState(name="Lance", mood="mad")from pydantic import BaseModel, field_validator, ValidationError
class PydanticState(BaseModel):
name: str
mood: str # "happy" or "sad"
@field_validator('mood')
@classmethod
def validate_mood(cls, value):
# Ensure the mood is either "happy" or "sad"
if value not in ["happy", "sad"]:
raise ValueError("Each mood must be either 'happy' or 'sad'")
return value
try:
state = PydanticState(name="John Doe", mood="mad")
except ValidationError as e:
print("Validation Error:", e)# Build graph
builder = StateGraph(PydanticState)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_conditional_edges("node_1", decide_mood)
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke(PydanticState(name="Lance",mood="sad")) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/state-reducers.ipynb | from typing_extensions import TypedDict
from IPython.display import Image, display
from langgraph.graph import StateGraph, START, END
class State(TypedDict):
foo: int
def node_1(state):
print("---Node 1---")
return {"foo": state['foo'] + 1}
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"foo" : 1})class State(TypedDict):
foo: int
def node_1(state):
print("---Node 1---")
return {"foo": state['foo'] + 1}
def node_2(state):
print("---Node 2---")
return {"foo": state['foo'] + 1}
def node_3(state):
print("---Node 3---")
return {"foo": state['foo'] + 1}
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", "node_2")
builder.add_edge("node_1", "node_3")
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))from langgraph.errors import InvalidUpdateError
try:
graph.invoke({"foo" : 1})
except InvalidUpdateError as e:
print(f"InvalidUpdateError occurred: {e}")
from operator import add
from typing import Annotated
class State(TypedDict):
foo: Annotated[list[int], add]
def node_1(state):
print("---Node 1---")
return {"foo": [state['foo'][0] + 1]}
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"foo" : [1]})def node_1(state):
print("---Node 1---")
return {"foo": [state['foo'][-1] + 1]}
def node_2(state):
print("---Node 2---")
return {"foo": [state['foo'][-1] + 1]}
def node_3(state):
print("---Node 3---")
return {"foo": [state['foo'][-1] + 1]}
# Build graph
builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", "node_2")
builder.add_edge("node_1", "node_3")
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))graph.invoke({"foo" : [1]})try:
graph.invoke({"foo" : None})
except TypeError as e:
print(f"TypeError occurred: {e}")def reduce_list(left: list | None, right: list | None) -> list:
"""Safely combine two lists, handling cases where either or both inputs might be None.
Args:
left (list | None): The first list to combine, or None.
right (list | None): The second list to combine, or None.
Returns:
list: A new list containing all elements from both input lists.
If an input is None, it's treated as an empty list.
"""
if not left:
left = []
if not right:
right = []
return left + right
class DefaultState(TypedDict):
foo: Annotated[list[int], add]
class CustomReducerState(TypedDict):
foo: Annotated[list[int], reduce_list]def node_1(state):
print("---Node 1---")
return {"foo": [2]}
# Build graph
builder = StateGraph(DefaultState)
builder.add_node("node_1", node_1)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))
try:
print(graph.invoke({"foo" : None}))
except TypeError as e:
print(f"TypeError occurred: {e}")# Build graph
builder = StateGraph(CustomReducerState)
builder.add_node("node_1", node_1)
# Logic
builder.add_edge(START, "node_1")
builder.add_edge("node_1", END)
# Add
graph = builder.compile()
# View
display(Image(graph.get_graph().draw_mermaid_png()))
try:
print(graph.invoke({"foo" : None}))
except TypeError as e:
print(f"TypeError occurred: {e}")from typing import Annotated
from langgraph.graph import MessagesState
from langchain_core.messages import AnyMessage
from langgraph.graph.message import add_messages
# Define a custom TypedDict that includes a list of messages with add_messages reducer
class CustomMessagesState(TypedDict):
messages: Annotated[list[AnyMessage], add_messages]
added_key_1: str
added_key_2: str
# etc
# Use MessagesState, which includes the messages key with add_messages reducer
class ExtendedMessagesState(MessagesState):
# Add any keys needed beyond messages, which is pre-built
added_key_1: str
added_key_2: str
# etcfrom langgraph.graph.message import add_messages
from langchain_core.messages import AIMessage, HumanMessage
# Initial state
initial_messages = [AIMessage(content="Hello! How can I assist you?", name="Model"),
HumanMessage(content="I'm looking for information on marine biology.", name="Lance")
]
# New message to add
new_message = AIMessage(content="Sure, I can help with that. What specifically are you interested in?", name="Model")
# Test
add_messages(initial_messages , new_message)# Initial state
initial_messages = [AIMessage(content="Hello! How can I assist you?", name="Model", id="1"),
HumanMessage(content="I'm looking for information on marine biology.", name="Lance", id="2")
]
# New message to add
new_message = HumanMessage(content="I'm looking for information on whales, specifically", name="Lance", id="2")
# Test
add_messages(initial_messages , new_message)from langchain_core.messages import RemoveMessage
# Message list
messages = [AIMessage("Hi.", name="Bot", id="1")]
messages.append(HumanMessage("Hi.", name="Lance", id="2"))
messages.append(AIMessage("So you said you were researching ocean mammals?", name="Bot", id="3"))
messages.append(HumanMessage("Yes, I know about whales. But what others should I learn about?", name="Lance", id="4"))
# Isolate messages to delete
delete_messages = [RemoveMessage(id=m.id) for m in messages[:-2]]
print(delete_messages)add_messages(messages , delete_messages) |
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-2/chatbot-external-memory.ipynb | import os, getpass
def _set_env(var: str):
if not os.environ.get(var):
os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")import sqlite3
# In memory
conn = sqlite3.connect(":memory:", check_same_thread = False)# pull file if it doesn't exist and connect to local db
!mkdir -p state_db && [ ! -f state_db/example.db ] && wget -P state_db https://github.com/langchain-ai/langchain-academy/raw/main/module-2/state_db/example.db
db_path = "state_db/example.db"
conn = sqlite3.connect(db_path, check_same_thread=False)# Here is our checkpointer
from langgraph.checkpoint.sqlite import SqliteSaver
memory = SqliteSaver(conn)from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage, RemoveMessage
from langgraph.graph import END
from langgraph.graph import MessagesState
model = ChatOpenAI(model="gpt-4o",temperature=0)
class State(MessagesState):
summary: str
# Define the logic to call the model
def call_model(state: State):
    """Chat node: invoke the model on the message history, prefixed by any
    running summary, and return the reply under the "messages" key."""
    # Get summary if it exists (empty string when no summary yet)
    summary = state.get("summary", "")
    # If there is summary, then we add it
    if summary:
        # Add summary to system message
        system_message = f"Summary of conversation earlier: {summary}"
        # Append summary to any newer messages
        messages = [SystemMessage(content=system_message)] + state["messages"]
    else:
        messages = state["messages"]
    response = model.invoke(messages)
    return {"messages": response}
def summarize_conversation(state: State):
    """Summarize the conversation and prune all but the two newest messages.

    Extends the existing summary when one is present; otherwise creates a
    fresh one. Returns {"summary": ..., "messages": [RemoveMessage, ...]}
    so the messages reducer deletes the older entries from state.
    """
    # First, we get any existing summary
    summary = state.get("summary", "")
    # Create our summarization prompt
    if summary:
        # A summary already exists
        summary_message = (
            f"This is summary of the conversation to date: {summary}\n\n"
            "Extend the summary by taking into account the new messages above:"
        )
    else:
        summary_message = "Create a summary of the conversation above:"
    # Add prompt to our history
    messages = state["messages"] + [HumanMessage(content=summary_message)]
    response = model.invoke(messages)
    # Delete all but the 2 most recent messages
    delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]]
    return {"summary": response.content, "messages": delete_messages}
# Determine whether to end or summarize the conversation
def should_continue(state: State):
"""Return the next node to execute."""
messages = state["messages"]
# If there are more than six messages, then we summarize the conversation
if len(messages) > 6:
return "summarize_conversation"
# Otherwise we can just end
return ENDfrom IPython.display import Image, display
from langgraph.graph import StateGraph, START
# Define a new graph
workflow = StateGraph(State)
workflow.add_node("conversation", call_model)
workflow.add_node(summarize_conversation)
# Set the entrypoint as conversation
workflow.add_edge(START, "conversation")
workflow.add_conditional_edges("conversation", should_continue)
workflow.add_edge("summarize_conversation", END)
# Compile
graph = workflow.compile(checkpointer=memory)
display(Image(graph.get_graph().draw_mermaid_png()))# Create a thread
config = {"configurable": {"thread_id": "1"}}
# Start conversation
input_message = HumanMessage(content="hi! I'm Lance")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()
input_message = HumanMessage(content="what's my name?")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()
input_message = HumanMessage(content="i like the 49ers!")
output = graph.invoke({"messages": [input_message]}, config)
for m in output['messages'][-1:]:
m.pretty_print()config = {"configurable": {"thread_id": "1"}}
graph_state = graph.get_state(config)
graph_state# Create a thread
config = {"configurable": {"thread_id": "1"}}
graph_state = graph.get_state(config)
graph_state |
0 | lc_public_repos/langchain-academy/module-2 | lc_public_repos/langchain-academy/module-2/studio/requirements.txt | langgraph
langchain-core
langchain-community
langchain-openai |
0 | lc_public_repos/langchain-academy/module-2 | lc_public_repos/langchain-academy/module-2/studio/.env.example | OPENAI_API_KEY=sk-xxx |
0 | lc_public_repos/langchain-academy/module-2 | lc_public_repos/langchain-academy/module-2/studio/langgraph.json | {
"dockerfile_lines": [],
"graphs": {
"chatbot": "./chatbot.py:graph"
},
"env": "./.env",
"python_version": "3.11",
"dependencies": [
"."
]
} |
0 | lc_public_repos/langchain-academy/module-2 | lc_public_repos/langchain-academy/module-2/studio/chatbot.py | from langchain_core.messages import HumanMessage, SystemMessage, RemoveMessage
from langgraph.graph import MessagesState
from langgraph.graph import StateGraph, START, END
# We will use this model for both the conversation and the summarization
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4o", temperature=0)
# State class to store messages and summary
class State(MessagesState):
    # Running summary of pruned conversation history; absent/empty until
    # the first summarization pass writes it.
    summary: str
# Define the logic to call the model
def call_model(state: State):
    """Invoke the chat model, prepending any running summary as a system message."""
    existing_summary = state.get("summary", "")

    if not existing_summary:
        prompt_messages = state["messages"]
    else:
        # Surface the summary of pruned history so the model keeps long-term context.
        prompt_messages = [
            SystemMessage(content=f"Summary of conversation earlier: {existing_summary}")
        ] + state["messages"]

    return {"messages": model.invoke(prompt_messages)}
# Determine whether to end or summarize the conversation
def should_continue(state: State):
    """Return the next node to execute."""
    # Summarize once the history grows beyond six messages; otherwise stop.
    return "summarize_conversation" if len(state["messages"]) > 6 else END
def summarize_conversation(state: State):
    """Summarize the conversation so far and prune all but the two newest messages."""
    prior_summary = state.get("summary", "")

    # Build the summarization instruction, extending any existing summary.
    if prior_summary:
        instruction = (
            f"This is summary of the conversation to date: {prior_summary}\n\n"
            "Extend the summary by taking into account the new messages above:"
        )
    else:
        instruction = "Create a summary of the conversation above:"

    response = model.invoke(state["messages"] + [HumanMessage(content=instruction)])

    # Keep only the two most recent messages; flag everything older for removal.
    stale = state["messages"][:-2]
    return {
        "summary": response.content,
        "messages": [RemoveMessage(id=msg.id) for msg in stale],
    }
# Define a new graph
workflow = StateGraph(State)
workflow.add_node("conversation", call_model)
# Node name defaults to the function's name: "summarize_conversation"
workflow.add_node(summarize_conversation)
# Set the entrypoint as conversation
workflow.add_edge(START, "conversation")
# should_continue routes to either "summarize_conversation" or END
workflow.add_conditional_edges("conversation", should_continue)
workflow.add_edge("summarize_conversation", END)
# Compile
graph = workflow.compile()
0 | lc_public_repos/langchain-academy | lc_public_repos/langchain-academy/module-0/basics.ipynb | import os, getpass
def _set_env(var: str):
    # Only prompt when the variable is missing or empty in the environment.
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")
_set_env("OPENAI_API_KEY")from langchain_openai import ChatOpenAI
gpt4o_chat = ChatOpenAI(model="gpt-4o", temperature=0)
gpt35_chat = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)from langchain_core.messages import HumanMessage
# Create a message
msg = HumanMessage(content="Hello world", name="Lance")
# Message list
messages = [msg]
# Invoke the model with a list of messages
gpt4o_chat.invoke(messages)gpt4o_chat.invoke("hello world")gpt35_chat.invoke("hello world")_set_env("TAVILY_API_KEY")from langchain_community.tools.tavily_search import TavilySearchResults
tavily_search = TavilySearchResults(max_results=3)
search_docs = tavily_search.invoke("What is LangGraph?")search_docs |
0 | lc_public_repos | lc_public_repos/langchain-teacher/main.py | from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage
import streamlit as st
from langsmith import Client
client = Client()
st.set_page_config(page_title="LangChain: Getting Started Class", page_icon="🦜")
st.title("🦜 LangChain: Getting Started Class")
button_css =""".stButton>button {
color: #4F8BF9;
border-radius: 50%;
height: 2em;
width: 2em;
font-size: 4px;
}"""
st.markdown(f'<style>{button_css}</style>', unsafe_allow_html=True)
class StreamHandler(BaseCallbackHandler):
    """Callback that streams LLM tokens into a Streamlit container as they arrive."""

    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Accumulate the new token and re-render the full text so far.
        self.text = self.text + token
        self.container.markdown(self.text)
# Load the getting-started guide the tutor walks the user through.
with open("guide.txt", "r") as f:
    guide = f.read()

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferMemory

# System prompt: constrains the tutor to the guide, one concept at a time.
# Fix: typo "congragulate" -> "congratulate" in the instruction text.
template = """The below is a "Getting Started" guide for LangChain. You are an expert educator, and are responsible for walking the user through this getting started guide. You should make sure to guide them along, encouraging them to progress when appropriate. If they ask questions not related to this getting started guide, you should politely decline to answer and resume trying to teach them about LangChain!
Please limit any responses to only one concept or step at a time. Make sure they fully understand that before moving on to the next. This is an interactive lesson - do not lecture them, but rather engage and guide them along!
When they have finished the guide, congratulate them and tell them to move onto the next section.
-----------------
{content}""".format(content=guide)

# System prompt + prior chat turns + the user's latest input.
prompt_template = ChatPromptTemplate(messages = [SystemMessage(content=template), MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template("{input}")])
from langchain.chains import LLMChain
def send_feedback(run_id, score):
    """Record a user rating (1 = thumbs up, 0 = thumbs down) for a run in LangSmith."""
    client.create_feedback(run_id, "user_score", score=score)
# Seed the conversation with a welcome message on first load.
# Fix: "with help you started" -> "will help you get started" and the
# missing space after "LLMChains." in the user-facing welcome text.
if "messages" not in st.session_state:
    st.session_state["messages"] = [AIMessage(content="Welcome! This short course will help you get started with LangChain, and will cover LLMs, prompts, output parsers, and LLMChains. Before doing this, you should have a Python environment set up. Do you have that done?")]

# Replay the conversation history into the chat UI.
for msg in st.session_state["messages"]:
    if isinstance(msg, HumanMessage):
        st.chat_message("user").write(msg.content)
    else:
        st.chat_message("assistant").write(msg.content)

if prompt := st.chat_input():
    st.chat_message("user").write(prompt)
    with st.chat_message("assistant"):
        # Stream tokens into the assistant bubble as they are generated.
        stream_handler = StreamHandler(st.empty())
        model = ChatOpenAI(streaming=True, callbacks=[stream_handler], model="gpt-4")
        chain = LLMChain(prompt=prompt_template, llm=model)
        # Cap the history sent to the model at the last 20 messages.
        response = chain({"input":prompt, "chat_history":st.session_state.messages[-20:]}, include_run_info=True)
        st.session_state.messages.append(HumanMessage(content=prompt))
        st.session_state.messages.append(AIMessage(content=response[chain.output_key]))
        run_id = response["__run"].run_id

        # Feedback buttons wired to LangSmith via send_feedback.
        col_blank, col_text, col1, col2 = st.columns([10, 2,1,1])
        with col_text:
            st.text("Feedback:")
        with col1:
            st.button("👍", on_click=send_feedback, args=(run_id, 1))
        with col2:
            st.button("👎", on_click=send_feedback, args=(run_id, 0))
|
0 | lc_public_repos | lc_public_repos/langchain-teacher/get_prompt.py | from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferMemory
def load_prompt(content):
    """Build a lesson-tutor ChatPromptTemplate around *content* (the lesson text).

    The system message instructs the model to teach interactively, one
    concept at a time, with at most one short code snippet per message.
    Fix: prompt typo "Each step show only be" -> "Each step should only be".
    """
    # NOTE(review): .format will raise if the lesson text contains literal
    # braces — confirm lesson files are brace-free or switch to str.replace.
    template = """You are an expert educator, and are responsible for walking the user \
through this lesson plan. You should make sure to guide them along, \
encouraging them to progress when appropriate. \
If they ask questions not related to this getting started guide, \
you should politely decline to answer and remind them to stay on topic.
Please limit any responses to only one concept or step at a time. \
Each step should only be ~5 lines of code at MOST. \
Only include 1 code snippet per message - make sure they can run that before giving them any more. \
Make sure they fully understand that before moving on to the next. \
This is an interactive lesson - do not lecture them, but rather engage and guide them along!
-----------------
{content}
-----------------
End of Content.
Now remember short response with only 1 code snippet per message.""".format(content=content)
    prompt_template = ChatPromptTemplate(messages = [
        SystemMessage(content=template),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}")
    ])
    return prompt_template
def load_prompt_with_questions(content):
    """Build a quiz-style tutor ChatPromptTemplate around *content*.

    Like load_prompt, but instructs the model to ask comprehension
    questions after each short lesson and verify the answers.

    Fixes to the rendered prompt text:
    - "Each step show only be" -> "Each step should only be"
    - "after each instructions" -> "after each instruction"
    - removed stray line-continuation backslashes that fused words in the
      rendered string ("topic.You", "along!-----", "questionsto test").
    """
    template = """You are an expert educator, and are responsible for walking the user \
through this lesson plan. You should make sure to guide them along, \
encouraging them to progress when appropriate. \
If they ask questions not related to this getting started guide, \
you should politely decline to answer and remind them to stay on topic.
You should ask them questions about the instructions after each instruction \
and verify their response is correct before proceeding to make sure they understand \
the lesson. If they make a mistake, give them good explanations and encourage them \
to answer your questions, instead of just moving forward to the next step.
Please limit any responses to only one concept or step at a time. \
Each step should only be ~5 lines of code at MOST. \
Only include 1 code snippet per message - make sure they can run that before giving them any more. \
Make sure they fully understand that before moving on to the next. \
This is an interactive lesson - do not lecture them, but rather engage and guide them along!
-----------------
{content}
-----------------
End of Content.
Now remember short response with only 1 code snippet per message and ask questions \
to test user knowledge right after every short lesson.
Your teaching should be in the following interactive format:

Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)

Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)
...
""".format(content=content)
    prompt_template = ChatPromptTemplate(messages = [
        SystemMessage(content=template),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}")
    ])
    return prompt_template
|
0 | lc_public_repos | lc_public_repos/langchain-teacher/requirements.txt | openai
langchain>=0.0.315
streamlit
anthropic
pydantic<2
|
0 | lc_public_repos | lc_public_repos/langchain-teacher/README.md |
# LangChain-Teacher

## Description
LangChain-Teacher's goal is to facilitate interactive learning of LangChain, enabling users to begin with the Python-based LangChain through a chat-based learning interface. The app offers two teaching styles: Instructional, which provides step-by-step instructions, and Interactive lessons with questions, which prompts users with questions to assess their understanding.
The hosted version of the app is on Streamlit Cloud at [lang-teacher.streamlit.app](https://lang-teacher.streamlit.app/)
## How Does This Work?
The core of the teaching process is driven by the prompts defined in `get_prompt.py`. This module creates lessons based on the content available in the `lc_guides` folder, where lessons are stored as `.txt` files.
To give a bit more context:
- The prompt, combined with the lesson content from the `.txt` file, is sent to a Language Learning Model (LLM) to assist in generating bite-sized lessons.
- The chat memory helps the LLM retain information about previous instructions and add new ones to the conversation.
- This showcases the power of prompt templates and how prompt engineering could be used in the development of LLM applications.
## Getting Started
This Streamlit app guides users through lessons using a chat-based interface. To get started, follow these steps:
### Prerequisites
- Python 3.10 or higher
### Installation
1. Clone the repository from GitHub or create a GitHub Codespace:
```
git clone https://github.com/hwchase17/langchain-teacher.git
```
Change directory to the langchain-teacher directory
```
cd langchain-teacher
```
2. Install the required dependencies listed in `requirements.txt`:
```
pip install -r requirements.txt
```
3. Create a `.env` file in the root directory and add the following environment variables:
```
OPENAI_API_KEY=
LANGCHAIN_ENDPOINT=
LANGCHAIN_API_KEY=
LANGCHAIN_TRACING_V2=
LANGCHAIN_PROJECT=
```
An example `.env` file is provided as `.env-example`. If you're not using LangSmith, you only need to set the `OPENAI_API_KEY` variable.
4. Run the Streamlit app using the command:
```
streamlit run lc_main.py
```
If using `dotenv` to manage environment variables, use the following command:
```
dotenv streamlit run lc_main.py
```
## Additional Files and Branches
- The initial version of the app used a getting started guide at guide.txt together with the main.py file to run the streamlit app. You can also run the initial version of the app using the command:
```
streamlit run main.py
```
- There is also a tutor for LangChain expression language with lesson files in the `lcel` folder and the `lcel.py` file to run the streamlit app.
- The `supervisor-model` branch in this repository implements a `SequentialChain` to supervise responses from students and teachers. This approach aims to ensure that questions are on-topic by the students and that the responses are accordingly as well by the teacher model.
## Future Work
- [ ] **Integration with LangSmith Hub**: Integrate prompts directly into the [LangSmith Hub](https://smith.langchain.com/).
- [ ] **Expanding Lesson Library**: Continuously add new lessons to create a comprehensive learning resource.
- [ ] **Token Usage Improvement**: Currently the prompt sent to the LLM is quite large as it takes the prompt and the lesson. Could be improved further.
## Contributions
Please feel free to add more lessons/examples/use cases. We would love for langchain-teacher to be the first stop for any new learner. You can contribute by creating pull requests or raising issues.
## License
This project is licensed under the [MIT License](LICENSE).
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.