0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py | from functools import partial
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
def _create_mock_client(*args: Any, use_async: bool = False, **kwargs: Any) -> Any:
client = AsyncMock() if use_async else MagicMock()
mock_assistant = MagicMock()
mock_assistant.id = "abc123"
client.beta.assistants.create.return_value = mock_assistant # type: ignore
return client
@pytest.mark.requires("openai")
def test_user_supplied_client() -> None:
import openai
client = openai.AzureOpenAI(
azure_endpoint="azure_endpoint",
api_key="api_key",
api_version="api_version",
)
assistant = OpenAIAssistantRunnable(
assistant_id="assistant_id",
client=client,
)
assert assistant.client == client
@pytest.mark.requires("openai")
@patch(
"langchain.agents.openai_assistant.base._get_openai_client",
new=partial(_create_mock_client, use_async=False),
)
def test_create_assistant() -> None:
assistant = OpenAIAssistantRunnable.create_assistant(
name="name",
instructions="instructions",
tools=[{"type": "code_interpreter"}],
model="",
)
assert isinstance(assistant, OpenAIAssistantRunnable)
@pytest.mark.requires("openai")
@patch(
"langchain.agents.openai_assistant.base._get_openai_async_client",
new=partial(_create_mock_client, use_async=True),
)
async def test_acreate_assistant() -> None:
assistant = await OpenAIAssistantRunnable.acreate_assistant(
name="name",
instructions="instructions",
tools=[{"type": "code_interpreter"}],
model="",
client=_create_mock_client(),
)
assert isinstance(assistant, OpenAIAssistantRunnable)
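# Illustrative sketch, not part of the original test module: it shows why the
# patched factory above is enough for create_assistant() to work offline. The
# MagicMock client resolves beta.assistants.create(...) to an object whose id
# is the hard-coded "abc123". The names below (demo_client, created) are ours.
def _demo_mock_client_behavior() -> None:
    demo_client = _create_mock_client()
    created = demo_client.beta.assistants.create(name="demo", model="any-model")
    assert created.id == "abc123"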
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_openai_functions_multi.py | import json
import pytest
from langchain_core.agents import AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.openai_functions_multi_agent.base import (
_FunctionsAgentAction,
_parse_ai_message,
)
# Test: _parse_ai_message() function.
class TestParseAIMessage:
# Test: Pass Non-AIMessage.
def test_not_an_ai(self) -> None:
err = f"Expected an AI message got {str(SystemMessage)}"
with pytest.raises(TypeError, match=err):
_parse_ai_message(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response(self) -> None:
msg = AIMessage(content="Model response.")
result = _parse_ai_message(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"param": 42}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == {"param": 42}
assert action.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"__arg1": "42"}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == "42"
assert action.log == (
"\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid(self) -> None:
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
_parse_ai_message(msg)
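# Small illustrative helper, not part of the original suite: it builds the same
# multi-action payload that the tests above assemble by hand, making the
# expected "actions" JSON shape explicit. The helper name and example values
# are ours.
def _demo_multi_action_message() -> AIMessage:
    actions = [{"action_name": "foo", "action": {"param": 42}}]
    return AIMessage(
        content="LLM thoughts.",
        additional_kwargs={
            "function_call": {
                "name": "foo",
                "arguments": json.dumps({"actions": actions}),
            }
        },
    )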
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_public_api.py | from langchain.agents import __all__ as agents_all
_EXPECTED = [
"Agent",
"AgentExecutor",
"AgentExecutorIterator",
"AgentOutputParser",
"AgentType",
"BaseMultiActionAgent",
"BaseSingleActionAgent",
"ConversationalAgent",
"ConversationalChatAgent",
"LLMSingleActionAgent",
"MRKLChain",
"OpenAIFunctionsAgent",
"OpenAIMultiFunctionsAgent",
"ReActChain",
"ReActTextWorldAgent",
"SelfAskWithSearchChain",
"StructuredChatAgent",
"Tool",
"XMLAgent",
"ZeroShotAgent",
"create_json_agent",
"create_openapi_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"get_all_tool_names",
"initialize_agent",
"load_agent",
"load_huggingface_tool",
"load_tools",
"tool",
"create_openai_functions_agent",
"create_xml_agent",
"create_react_agent",
"create_openai_tools_agent",
"create_self_ask_with_search_agent",
"create_json_chat_agent",
"create_structured_chat_agent",
"create_tool_calling_agent",
]
def test_public_api() -> None:
"""Test for regressions or changes in the agents public API."""
assert sorted(agents_all) == sorted(_EXPECTED)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_chat.py | """Unittests for langchain.agents.chat package."""
from typing import Tuple
from langchain_core.agents import AgentAction
from langchain.agents.chat.output_parser import ChatOutputParser
output_parser = ChatOutputParser()
def get_action_and_input(text: str) -> Tuple[str, str]:
output = output_parser.parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
else:
return "Final Answer", output.return_values["output"]
def test_parse_with_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_without_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
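# Minimal companion sketch (not in the original file) for the AgentFinish
# branch of get_action_and_input above; it assumes ChatOutputParser treats a
# "Final Answer:" line with no action block as a finish, like the MRKL parser.
def _demo_parse_final_answer() -> None:
    llm_output = "I now know the answer.\nFinal Answer: 42"
    action, action_input = get_action_and_input(llm_output)
    assert action == "Final Answer"
    assert action_input == "42"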
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_mrkl.py | """Test MRKL functionality."""
from typing import Tuple
import pytest
from langchain_core.agents import AgentAction
from langchain_core.exceptions import OutputParserException
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import Tool
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from tests.unit_tests.llms.fake_llm import FakeLLM
def get_action_and_input(text: str) -> Tuple[str, str]:
output = MRKLOutputParser().parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
else:
return "Final Answer", output.return_values["output"]
def test_get_action_and_input() -> None:
"""Test getting an action from text."""
llm_output = (
"Thought: I need to search for NBA\n" "Action: Search\n" "Action Input: NBA"
)
action, action_input = get_action_and_input(llm_output)
assert action == "Search"
assert action_input == "NBA"
def test_get_action_and_input_whitespace() -> None:
"""Test getting an action from text."""
llm_output = "Thought: I need to search for NBA\nAction: Search \nAction Input: NBA"
action, action_input = get_action_and_input(llm_output)
assert action == "Search"
assert action_input == "NBA"
def test_get_action_and_input_newline() -> None:
"""Test getting an action from text where Action Input is a code snippet."""
llm_output = (
"Now I need to write a unittest for the function.\n\n"
"Action: Python\nAction Input:\n```\nimport unittest\n\nunittest.main()\n```"
)
action, action_input = get_action_and_input(llm_output)
assert action == "Python"
assert action_input == "```\nimport unittest\n\nunittest.main()\n```"
def test_get_action_and_input_newline_after_keyword() -> None:
"""Test getting an action and action input from the text
when there is a new line before the action
(after the keywords "Action:" and "Action Input:")
"""
llm_output = """
I can use the `ls` command to list the contents of the directory \
and `grep` to search for the specific file.
Action:
Terminal
Action Input:
ls -l ~/.bashrc.d/
"""
action, action_input = get_action_and_input(llm_output)
assert action == "Terminal"
assert action_input == "ls -l ~/.bashrc.d/\n"
def test_get_action_and_input_sql_query() -> None:
"""Test getting the action and action input from the text
when the LLM output is a well formed SQL query
"""
llm_output = """
I should query for the largest single shift payment for every unique user.
Action: query_sql_db
Action Input: \
SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName" """
action, action_input = get_action_and_input(llm_output)
assert action == "query_sql_db"
assert (
action_input
== 'SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName"'
)
def test_get_final_answer() -> None:
"""Test getting final answer."""
llm_output = "Thought: I can now answer the question\n" "Final Answer: 1994"
action, action_input = get_action_and_input(llm_output)
assert action == "Final Answer"
assert action_input == "1994"
def test_get_final_answer_new_line() -> None:
"""Test getting final answer."""
llm_output = "Thought: I can now answer the question\n" "Final Answer:\n1994"
action, action_input = get_action_and_input(llm_output)
assert action == "Final Answer"
assert action_input == "1994"
def test_get_final_answer_multiline() -> None:
"""Test getting final answer that is multiline."""
llm_output = "Thought: I can now answer the question\n" "Final Answer: 1994\n1993"
action, action_input = get_action_and_input(llm_output)
assert action == "Final Answer"
assert action_input == "1994\n1993"
def test_bad_action_input_line() -> None:
"""Test handling when no action input found."""
llm_output = "Thought: I need to search for NBA\n" "Action: Search\n" "Thought: NBA"
with pytest.raises(OutputParserException) as e_info:
get_action_and_input(llm_output)
assert e_info.value.observation is not None
def test_bad_action_line() -> None:
"""Test handling when no action found."""
llm_output = (
"Thought: I need to search for NBA\n" "Thought: Search\n" "Action Input: NBA"
)
with pytest.raises(OutputParserException) as e_info:
get_action_and_input(llm_output)
assert e_info.value.observation is not None
def test_valid_action_and_answer_raises_exception() -> None:
"""Test handling when both an action and answer are found."""
llm_output = (
"Thought: I need to search for NBA\n"
"Action: Search\n"
"Action Input: NBA\n"
"Observation: founded in 1994\n"
"Thought: I can now answer the question\n"
"Final Answer: 1994"
)
with pytest.raises(OutputParserException):
get_action_and_input(llm_output)
def test_from_chains() -> None:
"""Test initializing from chains."""
chain_configs = [
Tool(name="foo", func=lambda x: "foo", description="foobar1"),
Tool(name="bar", func=lambda x: "bar", description="foobar2"),
]
agent = ZeroShotAgent.from_llm_and_tools(FakeLLM(), chain_configs)
expected_tools_prompt = "foo(x) - foobar1\nbar(x) - foobar2"
expected_tool_names = "foo, bar"
expected_template = "\n\n".join(
[
PREFIX,
expected_tools_prompt,
FORMAT_INSTRUCTIONS.format(tool_names=expected_tool_names),
SUFFIX,
]
)
prompt = agent.llm_chain.prompt
assert isinstance(prompt, PromptTemplate)
assert prompt.template == expected_template
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_imports.py | from langchain import agents
EXPECTED_ALL = [
"Agent",
"AgentExecutor",
"AgentExecutorIterator",
"AgentOutputParser",
"AgentType",
"BaseMultiActionAgent",
"BaseSingleActionAgent",
"ConversationalAgent",
"ConversationalChatAgent",
"LLMSingleActionAgent",
"MRKLChain",
"OpenAIFunctionsAgent",
"OpenAIMultiFunctionsAgent",
"ReActChain",
"ReActTextWorldAgent",
"SelfAskWithSearchChain",
"StructuredChatAgent",
"Tool",
"ZeroShotAgent",
"create_json_agent",
"create_openapi_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"get_all_tool_names",
"initialize_agent",
"load_agent",
"load_huggingface_tool",
"load_tools",
"tool",
"XMLAgent",
"create_openai_functions_agent",
"create_xml_agent",
"create_react_agent",
"create_openai_tools_agent",
"create_self_ask_with_search_agent",
"create_json_chat_agent",
"create_structured_chat_agent",
"create_tool_calling_agent",
]
def test_all_imports() -> None:
assert set(agents.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py | from uuid import UUID
import pytest
from langchain_core.language_models import FakeListLLM
from langchain_core.tools import Tool
from langchain_core.tracers.context import collect_runs
from langchain.agents import (
AgentExecutor,
AgentExecutorIterator,
AgentType,
initialize_agent,
)
from langchain.schema import RUN_KEY
from tests.unit_tests.agents.test_agent import _get_agent
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_agent_iterator_bad_action() -> None:
"""Test react chain iterator when bad action given."""
agent = _get_agent()
agent_iter = agent.iter(inputs="when was langchain made")
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]["output"] == "curses foiled again"
def test_agent_iterator_stopped_early() -> None:
"""
Test react chain iterator when max iterations or
max execution time is exceeded.
"""
# iteration limit
agent = _get_agent(max_iterations=1)
agent_iter = agent.iter(inputs="when was langchain made")
outputs = []
for step in agent_iter:
outputs.append(step)
# NOTE: we don't use agent.run like in the test for the regular agent executor,
# so the dict structure for outputs stays intact
assert isinstance(outputs[-1], dict)
assert (
outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit."
)
# execution time limit
agent = _get_agent(max_execution_time=1e-5)
agent_iter = agent.iter(inputs="when was langchain made")
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert (
outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit."
)
async def test_agent_async_iterator_stopped_early() -> None:
"""
Test react chain async iterator when max iterations or
max execution time is exceeded.
"""
# iteration limit
agent = _get_agent(max_iterations=1)
agent_async_iter = agent.iter(inputs="when was langchain made")
outputs = []
assert isinstance(agent_async_iter, AgentExecutorIterator)
async for step in agent_async_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert (
outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit."
)
# execution time limit
agent = _get_agent(max_execution_time=1e-5)
agent_async_iter = agent.iter(inputs="when was langchain made")
assert isinstance(agent_async_iter, AgentExecutorIterator)
outputs = []
async for step in agent_async_iter:
outputs.append(step)
assert (
outputs[-1]["output"] == "Agent stopped due to iteration limit or time limit."
)
def test_agent_iterator_with_callbacks() -> None:
"""Test react chain iterator with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[handler2])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
Tool(
name="Lookup",
func=lambda x: x,
description="Useful for looking up things in a table",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_iter = agent.iter(
inputs="when was langchain made", callbacks=[handler1], include_run_info=True
)
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]["output"] == "curses foiled again"
assert isinstance(outputs[-1][RUN_KEY].run_id, UUID)
    # 1 top level chain run, 2 LLMChain runs, 2 LLM runs, 1 tool run
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
# 1 extra agent action
assert handler1.starts == 7
# 1 extra agent end
assert handler1.ends == 7
print("h:", handler1) # noqa: T201
assert handler1.errors == 0
# during LLMChain
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert (
handler2.chain_starts
== handler2.tool_starts
== handler2.tool_ends
== handler2.chain_ends
== 0
)
async def test_agent_async_iterator_with_callbacks() -> None:
"""Test react chain async iterator with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(cache=False, responses=responses, callbacks=[handler2])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
Tool(
name="Lookup",
func=lambda x: x,
description="Useful for looking up things in a table",
),
]
agent = initialize_agent(
tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_async_iter = agent.iter(
inputs="when was langchain made",
callbacks=[handler1],
include_run_info=True,
)
assert isinstance(agent_async_iter, AgentExecutorIterator)
outputs = []
async for step in agent_async_iter:
outputs.append(step)
assert outputs[-1]["output"] == "curses foiled again"
assert isinstance(outputs[-1][RUN_KEY].run_id, UUID)
    # 1 top level chain run, 2 LLMChain runs, 2 LLM runs, 1 tool run
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
# 1 extra agent action
assert handler1.starts == 7
# 1 extra agent end
assert handler1.ends == 7
assert handler1.errors == 0
# during LLMChain
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert (
handler2.chain_starts
== handler2.tool_starts
== handler2.tool_ends
== handler2.chain_ends
== 0
)
def test_agent_iterator_properties_and_setters() -> None:
"""Test properties and setters of AgentExecutorIterator."""
agent = _get_agent()
agent.tags = None
agent_iter = agent.iter(inputs="when was langchain made")
assert isinstance(agent_iter, AgentExecutorIterator)
assert isinstance(agent_iter.inputs, dict)
assert isinstance(agent_iter.callbacks, type(None))
assert isinstance(agent_iter.tags, type(None))
assert isinstance(agent_iter.agent_executor, AgentExecutor)
agent_iter.inputs = "New input" # type: ignore
assert isinstance(agent_iter.inputs, dict)
agent_iter.callbacks = [FakeCallbackHandler()]
assert isinstance(agent_iter.callbacks, list)
agent_iter.tags = ["test"]
assert isinstance(agent_iter.tags, list)
new_agent = _get_agent()
agent_iter.agent_executor = new_agent
assert isinstance(agent_iter.agent_executor, AgentExecutor)
def test_agent_iterator_manual_run_id() -> None:
"""Test react chain iterator with manually specified run_id."""
agent = _get_agent()
run_id = UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")
with collect_runs() as cb:
agent_iter = agent.stream("when was langchain made", {"run_id": run_id})
list(agent_iter)
run = cb.traced_runs[0]
assert run.id == run_id
async def test_manually_specify_rid_async() -> None:
agent = _get_agent()
run_id = UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")
with collect_runs() as cb:
res = agent.astream("bar", {"run_id": run_id})
async for _ in res:
pass
run = cb.traced_runs[0]
assert run.id == run_id
def test_agent_iterator_reset() -> None:
"""Test reset functionality of AgentExecutorIterator."""
agent = _get_agent()
agent_iter = agent.iter(inputs="when was langchain made")
assert isinstance(agent_iter, AgentExecutorIterator)
# Perform one iteration
iterator = iter(agent_iter)
next(iterator)
# Check if properties are updated
assert agent_iter.iterations == 1
assert agent_iter.time_elapsed > 0.0
assert agent_iter.intermediate_steps
# Reset the iterator
agent_iter.reset()
# Check if properties are reset
assert agent_iter.iterations == 0
assert agent_iter.time_elapsed == 0.0
assert not agent_iter.intermediate_steps
def test_agent_iterator_output_structure() -> None:
"""Test the output structure of AgentExecutorIterator."""
agent = _get_agent()
agent_iter = agent.iter(inputs="when was langchain made")
for step in agent_iter:
assert isinstance(step, dict)
if "intermediate_step" in step:
assert isinstance(step["intermediate_step"], list)
elif "output" in step:
assert isinstance(step["output"], str)
else:
assert False, "Unexpected output structure"
async def test_agent_async_iterator_output_structure() -> None:
"""Test the async output structure of AgentExecutorIterator."""
agent = _get_agent()
agent_async_iter = agent.iter(inputs="when was langchain made", async_=True)
assert isinstance(agent_async_iter, AgentExecutorIterator)
async for step in agent_async_iter:
assert isinstance(step, dict)
if "intermediate_step" in step:
assert isinstance(step["intermediate_step"], list)
elif "output" in step:
assert isinstance(step["output"], str)
else:
assert False, "Unexpected output structure"
def test_agent_iterator_empty_input() -> None:
"""Test AgentExecutorIterator with empty input."""
agent = _get_agent()
agent_iter = agent.iter(inputs="")
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]["output"] # Check if there is an output
def test_agent_iterator_custom_stopping_condition() -> None:
"""Test AgentExecutorIterator with a custom stopping condition."""
agent = _get_agent()
class CustomAgentExecutorIterator(AgentExecutorIterator):
def _should_continue(self) -> bool:
return self.iterations < 2 # Custom stopping condition
agent_iter = CustomAgentExecutorIterator(agent, inputs="when was langchain made")
outputs = []
for step in agent_iter:
outputs.append(step)
assert len(outputs) == 2 # Check if the custom stopping condition is respected
def test_agent_iterator_failing_tool() -> None:
"""Test AgentExecutorIterator with a tool that raises an exception."""
# Get agent for testing.
bad_action_name = "FailingTool"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="FailingTool",
func=lambda x: 1 / 0, # This tool will raise a ZeroDivisionError
description="A tool that fails",
),
]
agent = initialize_agent(
tools, fake_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_iter = agent.iter(inputs="when was langchain made")
assert isinstance(agent_iter, AgentExecutorIterator)
# initialize iterator
iterator = iter(agent_iter)
with pytest.raises(ZeroDivisionError):
next(iterator)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_mrkl_output_parser.py | import pytest
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.mrkl.output_parser import (
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
MRKLOutputParser,
)
mrkl_output_parser = MRKLOutputParser()
def test_valid_action_and_action_input_parse() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse(llm_output) # type: ignore
assert agent_action.tool == "foo"
assert agent_action.tool_input == "bar"
def test_valid_final_answer_parse() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta """
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_missing_action() -> None:
llm_output = """I can use the `foo` tool to achieve the goal."""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation == MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE
)
def test_missing_action_input() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation
== MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE
)
def test_final_answer_before_parsable_action() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta
Action: foo
Action Input: bar
"""
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_final_answer_after_parsable_action() -> None:
llm_output = """
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
"Parsing LLM output produced both a final answer and a parse-able action"
in exception_info.value.args[0]
)
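# Illustrative extra check, not part of the original file: the parser also
# advertises the prompt format that produces the "Action:" / "Final Answer:"
# layout exercised above. This assumes get_format_instructions() simply returns
# langchain.agents.mrkl.prompt.FORMAT_INSTRUCTIONS.
def _demo_format_instructions() -> None:
    instructions = mrkl_output_parser.get_format_instructions()
    assert "Action:" in instructions
    assert "Final Answer:" in instructions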
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/__init__.py | """Test agent functionality."""
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_structured_chat.py | """Unittests for langchain.agents.structured_chat package."""
from textwrap import dedent
from typing import Any, Tuple
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.tools import Tool
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.agents.structured_chat.output_parser import StructuredChatOutputParser
output_parser = StructuredChatOutputParser()
def get_action_and_input(text: str) -> Tuple[str, str]:
output = output_parser.parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
elif isinstance(output, AgentFinish):
return output.return_values["output"], output.log
else:
raise ValueError("Unexpected output type")
def test_parse_with_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_without_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_with_language_and_spaces() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_without_language_without_a_new_line() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```{"action": "foo", "action_input": "bar"}```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_with_language_without_a_new_line() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json{"action": "foo", "action_input": "bar"}```
"""
# TODO: How should this be handled?
output, log = get_action_and_input(llm_output)
assert output == llm_output
assert log == llm_output
def test_parse_case_matched_and_final_answer() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "Final Answer",
"action_input": "This is the final answer"
}
```
"""
output, log = get_action_and_input(llm_output)
assert output == "This is the final answer"
assert log == llm_output
# TODO: add more tests.
# Test: StructuredChatAgent.create_prompt() method.
class TestCreatePrompt:
# Test: Output should be a ChatPromptTemplate with sys and human messages.
def test_create_prompt_output(self) -> None:
prompt = StructuredChatAgent.create_prompt(
[Tool(name="foo", description="Test tool FOO", func=lambda x: x)]
)
assert isinstance(prompt, ChatPromptTemplate)
assert len(prompt.messages) == 2
assert isinstance(prompt.messages[0], SystemMessagePromptTemplate)
assert isinstance(prompt.messages[1], HumanMessagePromptTemplate)
# Test: Format with a single tool.
def test_system_message_single_tool(self) -> None:
prompt: Any = StructuredChatAgent.create_prompt(
[Tool(name="foo", description="Test tool FOO", func=lambda x: x)]
)
actual = prompt.messages[0].prompt.format()
expected = dedent(
"""
Respond to the human as helpfully and accurately as possible. You have access to the following tools:
foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or foo
Provide only ONE action per $JSON_BLOB, as shown:
```
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{
"action": "Final Answer",
"action_input": "Final response to human"
}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:
""" # noqa: E501
).strip()
assert actual == expected
# Test: Format with multiple tools.
#
# Check:
#
# You have access to the following tools:
# ...
#
# and
#
# Valid "action" values: "Final Answer" or ...
#
def test_system_message_multiple_tools(self) -> None:
prompt: Any = StructuredChatAgent.create_prompt(
[
Tool(name="foo", description="Test tool FOO", func=lambda x: x),
Tool(name="bar", description="Test tool BAR", func=lambda x: x),
]
)
actual = prompt.messages[0].prompt.format()
expected = dedent(
"""
Respond to the human as helpfully and accurately as possible. You have access to the following tools:
foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
bar: Test tool BAR, args: {'tool_input': {'type': 'string'}}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or foo, bar
Provide only ONE action per $JSON_BLOB, as shown:
```
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{
"action": "Final Answer",
"action_input": "Final response to human"
}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:
""" # noqa: E501
).strip()
assert actual == expected
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_agent.py | """Unit tests for agents."""
import json
from itertools import cycle
from typing import Any, Dict, List, Optional, Union, cast
from langchain_core.agents import (
AgentAction,
AgentFinish,
AgentStep,
)
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
FunctionMessage,
HumanMessage,
ToolCall,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.utils import add
from langchain_core.tools import Tool, tool
from langchain_core.tracers import RunLog, RunLogPatch
from langchain.agents import (
AgentExecutor,
AgentType,
create_openai_functions_agent,
create_openai_tools_agent,
create_tool_calling_agent,
initialize_agent,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolAgentAction
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
from tests.unit_tests.llms.fake_chat_model import GenericFakeChatModel
from tests.unit_tests.stubs import (
_AnyIdAIMessageChunk,
)
class FakeListLLM(LLM):
"""Fake LLM for testing that outputs elements of a list."""
responses: List[str]
i: int = -1
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Increment counter, and then return response in that index."""
self.i += 1
print(f"=== Mock Response #{self.i} ===") # noqa: T201
print(self.responses[self.i]) # noqa: T201
return self.responses[self.i]
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens in text."""
return len(text.split())
async def _acall(self, *args: Any, **kwargs: Any) -> str:
return self._call(*args, **kwargs)
@property
def _identifying_params(self) -> Dict[str, Any]:
return {}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake_list"
def _get_agent(**kwargs: Any) -> AgentExecutor:
"""Get agent for testing."""
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(cache=False, responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
Tool(
name="Lookup",
func=lambda x: x,
description="Useful for looking up things in a table",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
**kwargs,
)
return agent
def test_agent_bad_action() -> None:
"""Test react chain when bad action given."""
agent = _get_agent()
output = agent.run("when was langchain made")
assert output == "curses foiled again"
def test_agent_stopped_early() -> None:
"""Test react chain when max iterations or max execution time is exceeded."""
# iteration limit
agent = _get_agent(max_iterations=0)
output = agent.run("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
# execution time limit
agent = _get_agent(max_execution_time=0.0)
output = agent.run("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
def test_agent_with_callbacks() -> None:
"""Test react chain with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
# Only fake LLM gets callbacks for handler2
fake_llm = FakeListLLM(responses=responses, callbacks=[handler2])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = agent.run("when was langchain made", callbacks=[handler1])
assert output == "curses foiled again"
    # 1 top level chain run, 2 LLMChain runs, 2 LLM runs, 1 tool run
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
# 1 extra agent action
assert handler1.starts == 7
# 1 extra agent end
assert handler1.ends == 7
assert handler1.errors == 0
# during LLMChain
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert (
handler2.chain_starts
== handler2.tool_starts
== handler2.tool_ends
== handler2.chain_ends
== 0
)
def test_agent_stream() -> None:
"""Test react chain with callbacks by setting verbose globally."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
f"FooBarBaz\nAction: {tool}\nAction Input: something else",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: f"Results for: {x}",
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = [a for a in agent.stream("when was langchain made")]
assert output == [
{
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: misalignment"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
)
],
"messages": [HumanMessage(content="Results for: misalignment")],
},
{
"actions": [
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
)
],
"messages": [HumanMessage(content="Results for: something else")],
},
{
"output": "curses foiled again",
"messages": [
AIMessage(content="Oh well\nFinal Answer: curses foiled again")
],
},
]
assert add(output) == {
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [
AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"),
HumanMessage(content="Results for: misalignment"),
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
),
HumanMessage(content="Results for: something else"),
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
"output": "curses foiled again",
}
def test_agent_tool_return_direct() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = agent.run("when was langchain made")
assert output == "misalignment"
def test_agent_tool_return_direct_in_intermediate_steps() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
)
resp = agent("when was langchain made")
assert isinstance(resp, dict)
assert resp["output"] == "misalignment"
assert len(resp["intermediate_steps"]) == 1
    action, _action_input = resp["intermediate_steps"][0]
assert action.tool == "Search"
def test_agent_with_new_prefix_suffix() -> None:
"""Test agent initialization kwargs with new prefix and suffix."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
prefix = "FooBarBaz"
suffix = "Begin now!\nInput: {input}\nThought: {agent_scratchpad}"
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
agent_kwargs={"prefix": prefix, "suffix": suffix},
)
# avoids "BasePromptTemplate" has no attribute "template" error
assert hasattr(agent.agent.llm_chain.prompt, "template") # type: ignore
prompt_str = agent.agent.llm_chain.prompt.template # type: ignore
assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
assert prompt_str.endswith(suffix), "Prompt does not end with suffix"
def test_agent_lookup_tool() -> None:
"""Test agent lookup tool."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
assert agent.lookup_tool("Search") == tools[0]
def test_agent_invalid_tool() -> None:
"""Test agent invalid tool and correct suggestions."""
fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
max_iterations=1,
)
resp = agent("when was langchain made")
resp["intermediate_steps"][0][1] == "Foo is not a valid tool, try one of [Search]."
async def test_runnable_agent() -> None:
"""Simple test to verify that an agent built with LCEL works."""
    # The fake model always responds with the same "hello world!" message
infinite_cycle = cycle([AIMessage(content="hello world!")])
# When streaming GenericFakeChatModel breaks AIMessage into chunks based on spaces
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
)
def fake_parse(inputs: dict) -> Union[AgentFinish, AgentAction]:
"""A parser."""
return AgentFinish(return_values={"foo": "meow"}, log="hard-coded-message")
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[]) # type: ignore[arg-type]
# Invoke
result = executor.invoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# Batch
result = executor.batch( # type: ignore[assignment]
[{"question": "hello"}, {"question": "hello"}]
)
assert result == [
{"foo": "meow", "question": "hello"},
{"foo": "meow", "question": "hello"},
]
# abatch
result = await executor.abatch( # type: ignore[assignment]
[{"question": "hello"}, {"question": "hello"}]
)
assert result == [
{"foo": "meow", "question": "hello"},
{"foo": "meow", "question": "hello"},
]
# Stream
results = list(executor.stream({"question": "hello"}))
assert results == [
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]}
]
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"foo": "meow",
"messages": [
AIMessage(content="hard-coded-message"),
],
}
]
# stream log
results: List[RunLogPatch] = [ # type: ignore[no-redef]
r async for r in executor.astream_log({"question": "hello"})
]
    # Let's stream just the LLM tokens.
messages = []
for log_record in results:
for op in log_record.ops: # type: ignore[attr-defined]
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
messages.append(op["value"])
assert messages != []
# Aggregate state
run_log = None
for result in results:
if run_log is None:
run_log = result
else:
# `+` is defined for RunLogPatch
run_log = run_log + result # type: ignore[union-attr]
assert isinstance(run_log, RunLog)
assert run_log.state["final_output"] == {
"foo": "meow",
"messages": [AIMessage(content="hard-coded-message")],
}
async def test_runnable_agent_with_function_calls() -> None:
"""Test agent with intermediate agent actions."""
    # Will alternate between "looking for pet..." and "Found Pet"
infinite_cycle = cycle(
[AIMessage(content="looking for pet..."), AIMessage(content="Found Pet")]
)
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
)
parser_responses = cycle(
[
AgentAction(
tool="find_pet",
tool_input={
"pet": "cat",
},
log="find_pet()",
),
AgentFinish(
return_values={"foo": "meow"},
log="hard-coded-message",
),
],
)
def fake_parse(inputs: dict) -> Union[AgentFinish, AgentAction]:
"""A parser."""
return cast(Union[AgentFinish, AgentAction], next(parser_responses))
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
raise ValueError("Only cats allowed")
return "Spying from under the bed."
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]
# Invoke
result = executor.invoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"actions": [
AgentAction(
tool="find_pet", tool_input={"pet": "cat"}, log="find_pet()"
)
],
"messages": [AIMessage(content="find_pet()")],
},
{
"messages": [HumanMessage(content="Spying from under the bed.")],
"steps": [
AgentStep(
action=AgentAction(
tool="find_pet", tool_input={"pet": "cat"}, log="find_pet()"
),
observation="Spying from under the bed.",
)
],
},
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]},
]
# astream log
messages = []
async for patch in executor.astream_log({"question": "hello"}):
for op in patch.ops:
if op["op"] != "add":
continue
value = op["value"]
if not isinstance(value, AIMessageChunk):
continue
if value.content == "": # Then it's a function invocation message
continue
messages.append(value.content)
assert messages == ["looking", " ", "for", " ", "pet...", "Found", " ", "Pet"]
async def test_runnable_with_multi_action_per_step() -> None:
"""Test an agent that can make multiple function calls at once."""
    # Will alternate between "looking for pet..." and "Found Pet"
infinite_cycle = cycle(
[AIMessage(content="looking for pet..."), AIMessage(content="Found Pet")]
)
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
)
parser_responses = cycle(
[
[
AgentAction(
tool="find_pet",
tool_input={
"pet": "cat",
},
log="find_pet()",
),
AgentAction(
tool="pet_pet", # A function that allows you to pet the given pet.
tool_input={
"pet": "cat",
},
log="pet_pet()",
),
],
AgentFinish(
return_values={"foo": "meow"},
log="hard-coded-message",
),
],
)
def fake_parse(inputs: dict) -> Union[AgentFinish, AgentAction]:
"""A parser."""
return cast(Union[AgentFinish, AgentAction], next(parser_responses))
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
raise ValueError("Only cats allowed")
return "Spying from under the bed."
@tool
def pet_pet(pet: str) -> str:
"""Pet the given pet."""
if pet != "cat":
raise ValueError("Only cats should be petted.")
return "purrrr"
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]
# Invoke
result = executor.invoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"actions": [
AgentAction(
tool="find_pet", tool_input={"pet": "cat"}, log="find_pet()"
)
],
"messages": [AIMessage(content="find_pet()")],
},
{
"actions": [
AgentAction(tool="pet_pet", tool_input={"pet": "cat"}, log="pet_pet()")
],
"messages": [AIMessage(content="pet_pet()")],
},
{
            # By default, the observation gets converted into a human message.
"messages": [HumanMessage(content="Spying from under the bed.")],
"steps": [
AgentStep(
action=AgentAction(
tool="find_pet", tool_input={"pet": "cat"}, log="find_pet()"
),
observation="Spying from under the bed.",
)
],
},
{
"messages": [
HumanMessage(
content="pet_pet is not a valid tool, try one of [find_pet]."
)
],
"steps": [
AgentStep(
action=AgentAction(
tool="pet_pet", tool_input={"pet": "cat"}, log="pet_pet()"
),
observation="pet_pet is not a valid tool, try one of [find_pet].",
)
],
},
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]},
]
# astream log
messages = []
async for patch in executor.astream_log({"question": "hello"}):
for op in patch.ops:
if op["op"] != "add":
continue
value = op["value"]
if not isinstance(value, AIMessageChunk):
continue
if value.content == "": # Then it's a function invocation message
continue
messages.append(value.content)
assert messages == ["looking", " ", "for", " ", "pet...", "Found", " ", "Pet"]
def _make_func_invocation(name: str, **kwargs: Any) -> AIMessage:
"""Create an AIMessage that represents a function invocation.
Args:
name: Name of the function to invoke.
kwargs: Keyword arguments to pass to the function.
Returns:
AIMessage that represents a request to invoke a function.
"""
return AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": name,
"arguments": json.dumps(kwargs),
}
},
)
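# Quick illustrative check (the values are ours): the helper above packs the
# keyword arguments as a JSON string under additional_kwargs["function_call"],
# i.e. the OpenAI functions wire format that the streaming test below consumes.
def _demo_func_invocation_shape() -> None:
    msg = _make_func_invocation("find_pet", pet="cat")
    assert msg.additional_kwargs["function_call"] == {
        "name": "find_pet",
        "arguments": '{"pet": "cat"}',
    }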
def _recursive_dump(obj: Any) -> Any:
"""Recursively dump the object if encountering any pydantic models."""
if isinstance(obj, dict):
return {
k: _recursive_dump(v)
for k, v in obj.items()
if k != "id" # Remove the id field for testing purposes
}
if isinstance(obj, list):
return [_recursive_dump(v) for v in obj]
if hasattr(obj, "dict"):
# if the object contains an ID field, we'll remove it for testing purposes
if hasattr(obj, "id"):
d = obj.model_dump()
d.pop("id")
return _recursive_dump(d)
return _recursive_dump(obj.model_dump())
return obj
async def test_openai_agent_with_streaming() -> None:
"""Test openai agent with streaming."""
infinite_cycle = cycle(
[
_make_func_invocation("find_pet", pet="cat"),
AIMessage(content="The cat is spying from under the bed."),
]
)
model = GenericFakeChatModel(messages=infinite_cycle)
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
raise ValueError("Only cats allowed")
return "Spying from under the bed."
template = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful AI bot. Your name is kitty power meow."),
("human", "{question}"),
MessagesPlaceholder(
variable_name="agent_scratchpad",
),
]
)
# type error due to base tool type below -- would need to be adjusted on tool
# decorator.
agent = create_openai_functions_agent(
model,
[find_pet], # type: ignore[list-item]
template,
)
executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]
# Invoke
result = executor.invoke({"question": "hello"})
assert result == {
"output": "The cat is spying from under the bed.",
"question": "hello",
}
# astream
chunks = [chunk async for chunk in executor.astream({"question": "hello"})]
assert _recursive_dump(chunks) == [
{
"actions": [
{
"log": "\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
"message_log": [
{
"additional_kwargs": {
"function_call": {
"arguments": '{"pet": ' '"cat"}',
"name": "find_pet",
}
},
"content": "",
"name": None,
"response_metadata": {},
"type": "AIMessageChunk",
}
],
"tool": "find_pet",
"tool_input": {"pet": "cat"},
"type": "AgentActionMessageLog",
}
],
"messages": [
{
"additional_kwargs": {
"function_call": {
"arguments": '{"pet": ' '"cat"}',
"name": "find_pet",
}
},
"content": "",
"example": False,
"invalid_tool_calls": [],
"name": None,
"response_metadata": {},
"tool_call_chunks": [],
"tool_calls": [],
"type": "AIMessageChunk",
"usage_metadata": None,
}
],
},
{
"messages": [
{
"additional_kwargs": {},
"content": "Spying from under the bed.",
"name": "find_pet",
"response_metadata": {},
"type": "function",
}
],
"steps": [
{
"action": {
"log": "\n"
"Invoking: `find_pet` with `{'pet': 'cat'}`\n"
"\n"
"\n",
"tool": "find_pet",
"tool_input": {"pet": "cat"},
"type": "AgentActionMessageLog",
},
"observation": "Spying from under the bed.",
}
],
},
{
"messages": [
{
"additional_kwargs": {},
"content": "The cat is spying from under the bed.",
"example": False,
"invalid_tool_calls": [],
"name": None,
"response_metadata": {},
"tool_calls": [],
"type": "ai",
"usage_metadata": None,
}
],
"output": "The cat is spying from under the bed.",
},
]
    # astream_log
log_patches = [
log_patch async for log_patch in executor.astream_log({"question": "hello"})
]
messages = []
for log_patch in log_patches:
for op in log_patch.ops:
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
value = op["value"]
if value.content: # Filter out function call messages
messages.append(value.content)
assert messages == [
"The",
" ",
"cat",
" ",
"is",
" ",
"spying",
" ",
"from",
" ",
"under",
" ",
"the",
" ",
"bed.",
]
def _make_tools_invocation(name_to_arguments: Dict[str, Dict[str, Any]]) -> AIMessage:
"""Create an AIMessage that represents a tools invocation.
Args:
name_to_arguments: A dictionary mapping tool names to an invocation.
Returns:
AIMessage that represents a request to invoke a tool.
"""
raw_tool_calls = [
{"function": {"name": name, "arguments": json.dumps(arguments)}, "id": str(idx)}
for idx, (name, arguments) in enumerate(name_to_arguments.items())
]
tool_calls = [
ToolCall(name=name, args=args, id=str(idx))
for idx, (name, args) in enumerate(name_to_arguments.items())
]
return AIMessage(
content="",
additional_kwargs={
"tool_calls": raw_tool_calls,
},
tool_calls=tool_calls, # type: ignore[arg-type]
)
async def test_openai_agent_tools_agent() -> None:
"""Test OpenAI tools agent."""
infinite_cycle = cycle(
[
_make_tools_invocation(
{
"find_pet": {"pet": "cat"},
"check_time": {},
}
),
AIMessage(content="The cat is spying from under the bed."),
]
)
GenericFakeChatModel.bind_tools = lambda self, x: self # type: ignore
model = GenericFakeChatModel(messages=infinite_cycle)
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
raise ValueError("Only cats allowed")
return "Spying from under the bed."
@tool
def check_time() -> str:
"""Find the given pet."""
return "It's time to pet the cat."
template = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful AI bot. Your name is kitty power meow."),
("human", "{question}"),
MessagesPlaceholder(
variable_name="agent_scratchpad",
),
]
)
# type error due to base tool type below -- would need to be adjusted on tool
# decorator.
openai_agent = create_openai_tools_agent(
model,
[find_pet], # type: ignore[list-item]
template,
)
tool_calling_agent = create_tool_calling_agent(
model,
[find_pet], # type: ignore[list-item]
template,
)
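    # Both agent constructions should behave identically; exercise each one.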
for agent in [openai_agent, tool_calling_agent]:
executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]
# Invoke
result = executor.invoke({"question": "hello"})
assert result == {
"output": "The cat is spying from under the bed.",
"question": "hello",
}
# astream
chunks = [chunk async for chunk in executor.astream({"question": "hello"})]
assert (
chunks
== [
{
"actions": [
OpenAIToolAgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
tool_call_id="0",
)
],
"messages": [
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
},
{
"actions": [
OpenAIToolAgentAction(
tool="check_time",
tool_input={},
log="\nInvoking: `check_time` with `{}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
tool_call_id="1",
)
],
"messages": [
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
},
{
"messages": [
FunctionMessage(
content="Spying from under the bed.", name="find_pet"
)
],
"steps": [
AgentStep(
action=OpenAIToolAgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n", # noqa: E501
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
tool_call_id="0",
),
observation="Spying from under the bed.",
)
],
},
{
"messages": [
FunctionMessage(
content="check_time is not a valid tool, try one of [find_pet].", # noqa: E501
name="check_time",
)
],
"steps": [
AgentStep(
action=OpenAIToolAgentAction(
tool="check_time",
tool_input={},
log="\nInvoking: `check_time` with `{}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
]
},
)
],
tool_call_id="1",
),
observation="check_time is not a valid tool, "
"try one of [find_pet].",
)
],
},
{
"messages": [
AIMessage(content="The cat is spying from under the bed.")
],
"output": "The cat is spying from under the bed.",
},
]
)
# astream_log
log_patches = [
log_patch async for log_patch in executor.astream_log({"question": "hello"})
]
# Get the tokens from the astream log response.
messages = []
for log_patch in log_patches:
for op in log_patch.ops:
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
value = op["value"]
if value.content: # Filter out function call messages
messages.append(value.content)
assert messages == [
"The",
" ",
"cat",
" ",
"is",
" ",
"spying",
" ",
"from",
" ",
"under",
" ",
"the",
" ",
"bed.",
]
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_agent_async.py | """Unit tests for agents."""
from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentStep
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables.utils import add
from langchain_core.tools import Tool
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeListLLM(LLM):
"""Fake LLM for testing that outputs elements of a list."""
responses: List[str]
i: int = -1
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Increment counter, and then return response in that index."""
self.i += 1
print(f"=== Mock Response #{self.i} ===") # noqa: T201
print(self.responses[self.i]) # noqa: T201
return self.responses[self.i]
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens in text."""
return len(text.split())
async def _acall(self, *args: Any, **kwargs: Any) -> str:
return self._call(*args, **kwargs)
@property
def _identifying_params(self) -> Dict[str, Any]:
return {}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake_list"
def _get_agent(**kwargs: Any) -> AgentExecutor:
"""Get agent for testing."""
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(cache=False, responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
Tool(
name="Lookup",
func=lambda x: x,
description="Useful for looking up things in a table",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
**kwargs,
)
return agent
async def test_agent_bad_action() -> None:
"""Test react chain when bad action given."""
agent = _get_agent()
output = await agent.arun("when was langchain made")
assert output == "curses foiled again"
async def test_agent_stopped_early() -> None:
"""Test react chain when max iterations or max execution time is exceeded."""
# iteration limit
agent = _get_agent(max_iterations=0)
output = await agent.arun("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
# execution time limit
agent = _get_agent(max_execution_time=0.0)
output = await agent.arun("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
async def test_agent_with_callbacks() -> None:
"""Test react chain with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
# Only fake LLM gets callbacks for handler2
fake_llm = FakeListLLM(responses=responses, callbacks=[handler2])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = await agent.arun("when was langchain made", callbacks=[handler1])
assert output == "curses foiled again"
    # 1 top-level chain run, 2 LLMChain runs, 2 LLM runs, 1 tool run
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
# 1 extra agent action
assert handler1.starts == 7
# 1 extra agent end
assert handler1.ends == 7
assert handler1.errors == 0
# during LLMChain
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert (
handler2.chain_starts
== handler2.tool_starts
== handler2.tool_ends
== handler2.chain_ends
== 0
)
async def test_agent_stream() -> None:
"""Test react chain with callbacks by setting verbose globally."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
f"FooBarBaz\nAction: {tool}\nAction Input: something else",
"Oh well\nFinal Answer: curses foiled again",
]
    fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: f"Results for: {x}",
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = [a async for a in agent.astream("when was langchain made")]
assert output == [
{
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: misalignment"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
)
],
"messages": [HumanMessage(content="Results for: misalignment")],
},
{
"actions": [
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
)
],
"messages": [HumanMessage(content="Results for: something else")],
},
{
"output": "curses foiled again",
"messages": [
AIMessage(content="Oh well\nFinal Answer: curses foiled again")
],
},
]
assert add(output) == {
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [
AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"),
HumanMessage(content="Results for: misalignment"),
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
),
HumanMessage(content="Results for: something else"),
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
"output": "curses foiled again",
}
async def test_agent_tool_return_direct() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = await agent.arun("when was langchain made")
assert output == "misalignment"
async def test_agent_tool_return_direct_in_intermediate_steps() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
)
resp = await agent.acall("when was langchain made")
assert isinstance(resp, dict)
assert resp["output"] == "misalignment"
assert len(resp["intermediate_steps"]) == 1
    action, _action_input = resp["intermediate_steps"][0]
assert action.tool == "Search"
async def test_agent_invalid_tool() -> None:
"""Test agent invalid tool and correct suggestions."""
fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
max_iterations=1,
)
resp = await agent.acall("when was langchain made")
resp["intermediate_steps"][0][1] == "Foo is not a valid tool, try one of [Search]."
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/test_initialize.py | """Test the initialize module."""
from langchain_core.tools import tool
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent
from tests.unit_tests.llms.fake_llm import FakeLLM
@tool
def my_tool(query: str) -> str:
"""A fake tool."""
return "fake tool"
def test_initialize_agent_with_str_agent_type() -> None:
"""Test initialize_agent with a string."""
fake_llm = FakeLLM()
agent_executor = initialize_agent(
[my_tool], # type: ignore[list-item]
fake_llm,
"zero-shot-react-description", # type: ignore[arg-type]
)
assert (
agent_executor._action_agent._agent_type
== AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
assert isinstance(agent_executor.tags, list)
assert "zero-shot-react-description" in agent_executor.tags
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py | from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import (
parse_ai_message_to_openai_tool_action,
)
def test_calls_convert_agent_action_to_messages() -> None:
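    """Parse tool-call AIMessages into actions, then format them back into messages."""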
additional_kwargs1 = {
"tool_calls": [
{
"id": "call_abcd12345",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "add"},
"type": "function",
}
],
}
message1 = AIMessage(content="", additional_kwargs=additional_kwargs1)
actions1 = parse_ai_message_to_openai_tool_action(message1)
additional_kwargs2 = {
"tool_calls": [
{
"id": "call_abcd54321",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "subtract"},
"type": "function",
}
],
}
message2 = AIMessage(content="", additional_kwargs=additional_kwargs2)
actions2 = parse_ai_message_to_openai_tool_action(message2)
additional_kwargs3 = {
"tool_calls": [
{
"id": "call_abcd67890",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "multiply"},
"type": "function",
},
{
"id": "call_abcd09876",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "divide"},
"type": "function",
},
],
}
message3 = AIMessage(content="", additional_kwargs=additional_kwargs3)
actions3 = parse_ai_message_to_openai_tool_action(message3)
message4 = AIMessage(
content="",
tool_calls=[
ToolCall(name="exponentiate", args={"a": 3, "b": 5}, id="call_abc02468")
],
)
actions4 = parse_ai_message_to_openai_tool_action(message4)
# for mypy
assert isinstance(actions1, list)
assert isinstance(actions2, list)
assert isinstance(actions3, list)
assert isinstance(actions4, list)
intermediate_steps = [
(actions1[0], "observation1"),
(actions2[0], "observation2"),
(actions3[0], "observation3"),
(actions3[1], "observation4"),
(actions4[0], "observation4"),
]
expected_messages = [
message1,
ToolMessage(
tool_call_id="call_abcd12345",
content="observation1",
additional_kwargs={"name": "add"},
),
message2,
ToolMessage(
tool_call_id="call_abcd54321",
content="observation2",
additional_kwargs={"name": "subtract"},
),
message3,
ToolMessage(
tool_call_id="call_abcd67890",
content="observation3",
additional_kwargs={"name": "multiply"},
),
ToolMessage(
tool_call_id="call_abcd09876",
content="observation4",
additional_kwargs={"name": "divide"},
),
message4,
ToolMessage(
tool_call_id="call_abc02468",
content="observation4",
additional_kwargs={"name": "exponentiate"},
),
]
output = format_to_openai_tool_messages(intermediate_steps)
assert output == expected_messages
def test_handles_empty_input_list() -> None:
output = format_to_openai_tool_messages([])
assert output == []
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_functions.py | from langchain_core.agents import AgentActionMessageLog
from langchain_core.messages import AIMessage, FunctionMessage
from langchain.agents.format_scratchpad.openai_functions import (
format_to_openai_function_messages,
)
def test_calls_convert_agent_action_to_messages() -> None:
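    """Format agent actions and observations into OpenAI function messages."""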
additional_kwargs1 = {
"function_call": {
"name": "tool1",
"arguments": "input1",
}
}
message1 = AIMessage(content="", additional_kwargs=additional_kwargs1)
action1 = AgentActionMessageLog(
tool="tool1", tool_input="input1", log="log1", message_log=[message1]
)
additional_kwargs2 = {
"function_call": {
"name": "tool2",
"arguments": "input2",
}
}
message2 = AIMessage(content="", additional_kwargs=additional_kwargs2)
action2 = AgentActionMessageLog(
tool="tool2", tool_input="input2", log="log2", message_log=[message2]
)
additional_kwargs3 = {
"function_call": {
"name": "tool3",
"arguments": "input3",
}
}
message3 = AIMessage(content="", additional_kwargs=additional_kwargs3)
action3 = AgentActionMessageLog(
tool="tool3", tool_input="input3", log="log3", message_log=[message3]
)
intermediate_steps = [
(action1, "observation1"),
(action2, "observation2"),
(action3, "observation3"),
]
expected_messages = [
message1,
FunctionMessage(name="tool1", content="observation1"),
message2,
FunctionMessage(name="tool2", content="observation2"),
message3,
FunctionMessage(name="tool3", content="observation3"),
]
output = format_to_openai_function_messages(intermediate_steps)
assert output == expected_messages
def test_handles_empty_input_list() -> None:
output = format_to_openai_function_messages([])
assert output == []
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_xml.py | from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.xml import format_xml
def test_single_agent_action_observation() -> None:
# Arrange
agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
observation = "Observation1"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps)
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation>"""
# Assert
assert result == expected_result
def test_multiple_agent_actions_observations() -> None:
# Arrange
agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2")
observation1 = "Observation1"
observation2 = "Observation2"
intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)]
# Act
result = format_xml(intermediate_steps)
# Assert
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation><tool>\
Tool2</tool><tool_input>Input2</tool_input><observation>\
Observation2</observation>"""
assert result == expected_result
def test_empty_list_agent_actions() -> None:
result = format_xml([])
assert result == ""
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_log_to_messages.py | from langchain_core.agents import AgentAction
from langchain_core.messages import AIMessage, HumanMessage
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
def test_single_intermediate_step_default_response() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
expected_result = [AIMessage(content="Log1"), HumanMessage(content="Observation1")]
assert format_log_to_messages(intermediate_steps) == expected_result
def test_multiple_intermediate_steps_default_response() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
(AgentAction(tool="Tool2", tool_input="input2", log="Log2"), "Observation2"),
(AgentAction(tool="Tool3", tool_input="input3", log="Log3"), "Observation3"),
]
expected_result = [
AIMessage(content="Log1"),
HumanMessage(content="Observation1"),
AIMessage(content="Log2"),
HumanMessage(content="Observation2"),
AIMessage(content="Log3"),
HumanMessage(content="Observation3"),
]
assert format_log_to_messages(intermediate_steps) == expected_result
def test_custom_template_tool_response() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
template_tool_response = "Response: {observation}"
expected_result = [
AIMessage(content="Log1"),
HumanMessage(content="Response: Observation1"),
]
assert (
format_log_to_messages(
intermediate_steps, template_tool_response=template_tool_response
)
== expected_result
)
def test_empty_steps() -> None:
assert format_log_to_messages([]) == []
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_log.py | from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.log import format_log_to_str
def test_single_agent_action_observation() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
expected_result = "Log1\nObservation: Observation1\nThought: "
assert format_log_to_str(intermediate_steps) == expected_result
def test_multiple_agent_actions_observations() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
(AgentAction(tool="Tool2", tool_input="input2", log="Log2"), "Observation2"),
(AgentAction(tool="Tool3", tool_input="input3", log="Log3"), "Observation3"),
]
expected_result = """Log1\nObservation: Observation1\nThought: \
Log2\nObservation: Observation2\nThought: Log3\nObservation: \
Observation3\nThought: """
assert format_log_to_str(intermediate_steps) == expected_result
def test_custom_prefixes() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
observation_prefix = "Custom Observation: "
llm_prefix = "Custom Thought: "
expected_result = "Log1\nCustom Observation: Observation1\nCustom Thought: "
assert (
format_log_to_str(intermediate_steps, observation_prefix, llm_prefix)
== expected_result
)
def test_empty_intermediate_steps() -> None:
output = format_log_to_str([])
assert output == ""
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/agent_toolkits/test_imports.py | from langchain.agents import agent_toolkits
EXPECTED_ALL = [
"AINetworkToolkit",
"AmadeusToolkit",
"AzureCognitiveServicesToolkit",
"FileManagementToolkit",
"GmailToolkit",
"JiraToolkit",
"JsonToolkit",
"MultionToolkit",
"NasaToolkit",
"NLAToolkit",
"O365Toolkit",
"OpenAPIToolkit",
"PlayWrightBrowserToolkit",
"PowerBIToolkit",
"SlackToolkit",
"SteamToolkit",
"SQLDatabaseToolkit",
"SparkSQLToolkit",
"VectorStoreInfo",
"VectorStoreRouterToolkit",
"VectorStoreToolkit",
"ZapierToolkit",
"create_json_agent",
"create_openapi_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"create_conversational_retrieval_agent",
"create_retriever_tool",
]
def test_imports() -> None:
assert sorted(agent_toolkits.__all__) == sorted(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_react_single_input.py | import pytest
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.output_parsers.react_single_input import (
ReActSingleInputOutputParser,
)
def test_action() -> None:
"""Test standard parsing of action/action input."""
parser = ReActSingleInputOutputParser()
_input = """Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?"""
output = parser.invoke(_input)
expected_output = AgentAction(
tool="search", tool_input="what is the temperature in SF?", log=_input
)
assert output == expected_output
def test_finish() -> None:
"""Test standard parsing of agent finish."""
parser = ReActSingleInputOutputParser()
_input = """Thought: agent thought here
Final Answer: The temperature is 100"""
output = parser.invoke(_input)
expected_output = AgentFinish(
return_values={"output": "The temperature is 100"}, log=_input
)
assert output == expected_output
def test_action_with_finish() -> None:
"""Test that if final thought is in action/action input, error is raised."""
parser = ReActSingleInputOutputParser()
_input = """Thought: agent thought here
Action: search Final Answer:
Action Input: what is the temperature in SF?"""
with pytest.raises(OutputParserException):
parser.invoke(_input)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_self_ask.py | from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
def test_follow_up() -> None:
"""Test follow up parsing."""
parser = SelfAskOutputParser()
_input = "Follow up: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
    # Test that the one-word "Followup:" prefix is also handled by default
_input = "Followup: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
def test_follow_up_custom() -> None:
"""Test follow up parsing for custom followups."""
parser = SelfAskOutputParser(followups=("Now:",))
_input = "Now: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
def test_finish() -> None:
"""Test standard finish."""
parser = SelfAskOutputParser()
_input = "So the final answer is: 4"
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
def test_finish_custom() -> None:
"""Test custom finish."""
parser = SelfAskOutputParser(finish_string="Finally: ")
_input = "Finally: 4"
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_react_json_single_input.py | from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.react_json_single_input import (
ReActJsonSingleInputOutputParser,
)
def test_action() -> None:
"""Test standard parsing of action/action input."""
parser = ReActJsonSingleInputOutputParser()
_input = """Thought: agent thought here
```
{
"action": "search",
"action_input": "what is the temperature in SF?"
}
```
"""
output = parser.invoke(_input)
expected_output = AgentAction(
tool="search", tool_input="what is the temperature in SF?", log=_input
)
assert output == expected_output
def test_finish() -> None:
"""Test standard parsing of agent finish."""
parser = ReActJsonSingleInputOutputParser()
_input = """Thought: agent thought here
Final Answer: The temperature is 100"""
output = parser.invoke(_input)
expected_output = AgentFinish(
return_values={"output": "The temperature is 100"}, log=_input
)
assert output == expected_output
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_openai_functions.py | import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
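# Test: Pass a non-AI message.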
def test_not_an_ai() -> None:
parser = OpenAIFunctionsAgentOutputParser()
err = f"Expected an AI message got {str(SystemMessage)}"
with pytest.raises(TypeError, match=err):
parser.invoke(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content="Model response.")
result = parser.invoke(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"param": 42}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {"param": 42}
assert result.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert result.message_log == [msg]
# Test: Model response with a function call for a function taking no arguments
def test_func_call_no_args() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": ""}},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {}
assert result.log == ("\nInvoking: `foo` with `{}`\nresponded: LLM thoughts.\n\n")
assert result.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"__arg1": "42"}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == "42"
assert result.log == "\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
assert result.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
parser.invoke(msg)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_xml.py | from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
def test_tool_usage() -> None:
parser = XMLAgentOutputParser()
# Test when final closing </tool_input> is included
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
# Test when final closing </tool_input> is NOT included
# This happens when it's used as a stop token
_input = """<tool>search</tool><tool_input>foo</tool_input>"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = XMLAgentOutputParser()
# Test when final closing <final_answer> is included
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
# Test when final closing <final_answer> is NOT included
# This happens when it's used as a stop token
_input = """<final_answer>bar</final_answer>"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
assert output == expected_output
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_json.py | from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.json import JSONAgentOutputParser
def test_tool_usage() -> None:
parser = JSONAgentOutputParser()
_input = """ ```
{
"action": "search",
"action_input": "2+2"
}
```"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="2+2", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = JSONAgentOutputParser()
_input = """```
{
"action": "Final Answer",
"action_input": "4"
}
```"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents | lc_public_repos/langchain/libs/langchain/tests/unit_tests/agents/output_parsers/test_convo_output_parser.py | from langchain_core.agents import AgentAction
from langchain.agents.conversational.output_parser import ConvoOutputParser
def test_normal_output_parsing() -> None:
_test_convo_output(
"""
Action: my_action
Action Input: my action input
""",
"my_action",
"my action input",
)
def test_multiline_output_parsing() -> None:
_test_convo_output(
"""
Thought: Do I need to use a tool? Yes
Action: evaluate_code
Action Input: Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```
""",
"evaluate_code",
"""
Evaluate Code with the following Python content:
```python
print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201
```""".lstrip(),
)
def _test_convo_output(
input: str, expected_tool: str, expected_tool_input: str
) -> None:
result = ConvoOutputParser().parse(input.strip())
assert isinstance(result, AgentAction)
assert result.tool == expected_tool
assert result.tool_input == expected_tool_input
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/vectorstores/test_public_api.py | """Test the public API of the vectorstores package."""
from langchain.vectorstores import __all__ as public_api
_EXPECTED = [
"AlibabaCloudOpenSearch",
"AlibabaCloudOpenSearchSettings",
"AnalyticDB",
"Annoy",
"AstraDB",
"AtlasDB",
"AwaDB",
"AzureCosmosDBVectorSearch",
"AzureSearch",
"Bagel",
"Cassandra",
"Chroma",
"Clarifai",
"Clickhouse",
"ClickhouseSettings",
"DashVector",
"DatabricksVectorSearch",
"DeepLake",
"Dingo",
"DocArrayHnswSearch",
"DocArrayInMemorySearch",
"DuckDB",
"EcloudESVectorStore",
"ElasticKnnSearch",
"ElasticsearchStore",
"ElasticVectorSearch",
"Epsilla",
"FAISS",
"Hologres",
"LanceDB",
"LLMRails",
"Marqo",
"MatchingEngine",
"Meilisearch",
"Milvus",
"MomentoVectorIndex",
"MongoDBAtlasVectorSearch",
"MyScale",
"MyScaleSettings",
"Neo4jVector",
"NeuralDBClientVectorStore",
"NeuralDBVectorStore",
"OpenSearchVectorSearch",
"PGEmbedding",
"PGVector",
"Pinecone",
"Qdrant",
"Redis",
"Rockset",
"ScaNN",
"SemaDB",
"SingleStoreDB",
"SKLearnVectorStore",
"SQLiteVSS",
"StarRocks",
"SupabaseVectorStore",
"Tair",
"TencentVectorDB",
"Tigris",
"TileDB",
"TimescaleVector",
"Typesense",
"USearch",
"Vald",
"Vearch",
"Vectara",
"VectorStore",
"VespaStore",
"Weaviate",
"Yellowbrick",
"ZepVectorStore",
"Zilliz",
]
def test_public_api() -> None:
"""Test for regressions or changes in the public API."""
# Check that the public API is as expected
assert set(public_api) == set(_EXPECTED)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/runnables/test_hub.py | from typing import Any
from unittest.mock import Mock, patch
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.base import ConfigurableField
from langchain.runnables.hub import HubRunnable
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_value = ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "b")]
)
basic: HubRunnable = HubRunnable("efriis/my-prompt")
bound = basic.bound
assert isinstance(bound, ChatPromptTemplate)
assert len(bound.messages) == 2
repo_dict = {
"efriis/my-prompt-1": ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "1")]
),
"efriis/my-prompt-2": ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "2")]
),
}
def repo_lookup(owner_repo_commit: str, **kwargs: Any) -> ChatPromptTemplate:
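    """Return the canned prompt for a hub id; used as the mocked hub.pull side effect."""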
return repo_dict[owner_repo_commit]
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_alternative(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_a1 = original.configurable_alternatives(
ConfigurableField(id="owner_repo_commit", name="Hub ID"),
default_key="a1",
a2=HubRunnable("efriis/my-prompt-2"),
)
obj_a2 = obj_a1.with_config(configurable={"owner_repo_commit": "a2"})
templated = obj_a1.invoke({})
message_a1 = templated.messages[1]
assert message_a1.content == "1"
templated_2 = obj_a2.invoke({})
message_a2 = templated_2.messages[1]
assert message_a2.content == "2"
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_fields(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_configurable = original.configurable_fields(
owner_repo_commit=ConfigurableField(id="owner_repo_commit", name="Hub ID"),
)
templated_1 = obj_configurable.invoke({})
assert templated_1.messages[1].content == "1"
templated_2 = obj_configurable.with_config(
configurable={"owner_repo_commit": "efriis/my-prompt-2"}
).invoke({})
assert templated_2.messages[1].content == "2"
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/runnables/test_openai_functions.py | from typing import Any, List, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f'Revised draft: no more {kw["notes"]}!'
)
accept = mocker.Mock(side_effect=lambda kw: f'Accepted draft: {kw["draft"]}!')
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
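    # Bind the router's function definitions to the model, then pipe its
    # function_call output into the router.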
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/runnables | lc_public_repos/langchain/libs/langchain/tests/unit_tests/runnables/__snapshots__/test_openai_functions.ambr | # serializer version: 1
# name: test_openai_functions_router
list([
dict({
'description': 'Sends the draft for revision.',
'name': 'revise',
'parameters': dict({
'properties': dict({
'notes': dict({
'description': "The editor's notes to guide the revision.",
'type': 'string',
}),
}),
'type': 'object',
}),
}),
dict({
'description': 'Accepts the draft.',
'name': 'accept',
'parameters': dict({
'properties': dict({
'draft': dict({
'description': 'The draft to accept.',
'type': 'string',
}),
}),
'type': 'object',
}),
}),
])
# ---
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/storage/test_filesystem.py | import os
import tempfile
from typing import Generator
import pytest
from langchain_core.stores import InvalidKeyException
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_mset_and_mget(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# Get values for keys
values = file_store.mget(["key1", "key2"])
# Assert that the retrieved values match the original values
assert values == [b"value1", b"value2"]
@pytest.mark.parametrize(
"chmod_dir_s, chmod_file_s", [("777", "666"), ("770", "660"), ("700", "600")]
)
def test_mset_chmod(chmod_dir_s: str, chmod_file_s: str) -> None:
chmod_dir = int(chmod_dir_s, base=8)
chmod_file = int(chmod_file_s, base=8)
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with a directory inside the temporary directory
# as the root path
temp_dir = os.path.join(temp_dir, "store_dir")
file_store = LocalFileStore(
temp_dir, chmod_dir=chmod_dir, chmod_file=chmod_file
)
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# verify the permissions are set correctly
# (test only the standard user/group/other bits)
dir_path = str(file_store.root_path)
file_path = os.path.join(dir_path, "key1")
assert (os.stat(dir_path).st_mode & 0o777) == chmod_dir
assert (os.stat(file_path).st_mode & 0o777) == chmod_file
def test_mget_update_atime() -> None:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with a directory inside the temporary directory
# as the root path
temp_dir = os.path.join(temp_dir, "store_dir")
file_store = LocalFileStore(temp_dir, update_atime=True)
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# Get original access time
dir_path = str(file_store.root_path)
file_path = os.path.join(dir_path, "key1")
atime1 = os.stat(file_path).st_atime
# Get values for keys
_ = file_store.mget(["key1", "key2"])
# Make sure the filesystem access time has been updated
atime2 = os.stat(file_path).st_atime
assert atime2 != atime1
def test_mdelete(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# Delete keys
file_store.mdelete(["key1"])
# Check if the deleted key is present
values = file_store.mget(["key1"])
# Assert that the value is None after deletion
assert values == [None]
def test_set_invalid_key(file_store: LocalFileStore) -> None:
"""Test that an exception is raised when an invalid key is set."""
# Set a key-value pair
key = "crying-cat/😿"
value = b"This is a test value"
with pytest.raises(InvalidKeyException):
file_store.mset([(key, value)])
def test_set_key_and_verify_content(file_store: LocalFileStore) -> None:
"""Test that the content of the file is the same as the value set."""
# Set a key-value pair
key = "test_key"
value = b"This is a test value"
file_store.mset([(key, value)])
# Verify the content of the actual file
full_path = file_store._get_full_path(key)
assert full_path.exists()
assert full_path.read_bytes() == b"This is a test value"
def test_yield_keys(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("subdir/key2", b"value2")]
file_store.mset(key_value_pairs)
# Iterate over keys
keys = list(file_store.yield_keys())
# Assert that the yielded keys match the expected keys
expected_keys = ["key1", os.path.join("subdir", "key2")]
assert keys == expected_keys
def test_catches_forbidden_keys(file_store: LocalFileStore) -> None:
"""Make sure we raise exception on keys that are not allowed; e.g., absolute path"""
with pytest.raises(InvalidKeyException):
file_store.mset([("/etc", b"value1")])
with pytest.raises(InvalidKeyException):
list(file_store.yield_keys("/etc/passwd"))
with pytest.raises(InvalidKeyException):
file_store.mget(["/etc/passwd"])
# check relative paths
with pytest.raises(InvalidKeyException):
list(file_store.yield_keys(".."))
with pytest.raises(InvalidKeyException):
file_store.mget(["../etc/passwd"])
with pytest.raises(InvalidKeyException):
file_store.mset([("../etc", b"value1")])
with pytest.raises(InvalidKeyException):
list(file_store.yield_keys("../etc/passwd"))
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/storage/test_imports.py | from langchain import storage
EXPECTED_ALL = [
"EncoderBackedStore",
"InMemoryStore",
"InMemoryByteStore",
"LocalFileStore",
"RedisStore",
"InvalidKeyException",
"create_lc_store",
"create_kv_docstore",
"UpstashRedisByteStore",
"UpstashRedisStore",
]
def test_all_imports() -> None:
assert set(storage.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/storage/test_lc_store.py | import tempfile
from typing import Generator, cast
import pytest
from langchain_core.documents import Document
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_create_lc_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_lc_store(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = cast(Document, docstore.mget(["key1"])[0])
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
def test_create_kv_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_kv_docstore(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = docstore.mget(["key1"])[0]
assert isinstance(fetched_doc, Document)
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/indexes/test_api.py | from langchain.indexes import __all__
def test_all() -> None:
"""Use to catch obvious breaking changes."""
expected = [
"aindex",
"GraphIndexCreator",
"index",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
]
assert __all__ == sorted(expected, key=lambda x: x.lower())
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/indexes/test_imports.py | from langchain.indexes import __all__
EXPECTED_ALL = [
# Keep sorted
"aindex",
"GraphIndexCreator",
"index",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/indexes/test_hashed_document.py | import pytest
from langchain_core.documents import Document
from langchain.indexes._api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert isinstance(hashed_document.hash_, str)
def test_hashing_with_missing_content() -> None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(TypeError):
_HashedDocument(
metadata={"key": "value"},
) # type: ignore
def test_uid_auto_assigned_to_hash() -> None:
"""Test uid is auto-assigned to the hashed_document hash."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert hashed_document.uid == hashed_document.hash_
def test_to_document() -> None:
"""Test to_document method."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == "Lorem ipsum dolor sit amet"
assert doc.metadata == {"key": "value"}
def test_from_document() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _HashedDocument.from_document(document)
# hash should be deterministic
assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
assert hashed_document.uid == hashed_document.hash_
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/indexes/test_indexing.py | from datetime import datetime
from typing import (
Any,
AsyncIterator,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Type,
)
from unittest.mock import patch
import pytest
import pytest_asyncio
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VST, VectorStore
from langchain.indexes import aindex, index
from langchain.indexes._api import _abatch, _HashedDocument
from langchain.indexes._sql_record_manager import SQLRecordManager
class ToyLoader(BaseLoader):
"""Toy loader that always returns the same documents."""
def __init__(self, documents: Sequence[Document]) -> None:
"""Initialize with the documents to return."""
self.documents = documents
def lazy_load(
self,
) -> Iterator[Document]:
yield from self.documents
async def alazy_load(
self,
) -> AsyncIterator[Document]:
for document in self.documents:
yield document
class InMemoryVectorStore(VectorStore):
"""In-memory implementation of VectorStore using a dictionary."""
def __init__(self, permit_upserts: bool = False) -> None:
"""Vector store interface for testing things in memory."""
self.store: Dict[str, Document] = {}
self.permit_upserts = permit_upserts
def delete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
"""Delete the given documents from the store using their IDs."""
if ids:
for _id in ids:
self.store.pop(_id, None)
async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
"""Delete the given documents from the store using their IDs."""
if ids:
for _id in ids:
self.store.pop(_id, None)
def add_documents( # type: ignore
self,
documents: Sequence[Document],
*,
ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add the given documents to the store (insert behavior)."""
if ids and len(ids) != len(documents):
raise ValueError(
f"Expected {len(ids)} ids, got {len(documents)} documents."
)
if not ids:
raise NotImplementedError("This is not implemented yet.")
for _id, document in zip(ids, documents):
if _id in self.store and not self.permit_upserts:
raise ValueError(
f"Document with uid {_id} already exists in the store."
)
self.store[_id] = document
return list(ids)
async def aadd_documents(
self,
documents: Sequence[Document],
*,
ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[str]:
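        """Async add the given documents to the store (insert behavior)."""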
if ids and len(ids) != len(documents):
raise ValueError(
f"Expected {len(ids)} ids, got {len(documents)} documents."
)
if not ids:
raise NotImplementedError("This is not implemented yet.")
for _id, document in zip(ids, documents):
if _id in self.store and not self.permit_upserts:
raise ValueError(
f"Document with uid {_id} already exists in the store."
)
self.store[_id] = document
return list(ids)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> List[str]:
"""Add the given texts to the store (insert behavior)."""
raise NotImplementedError()
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> VST:
"""Create a vector store from a list of texts."""
raise NotImplementedError()
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Find the most similar documents to the given query."""
raise NotImplementedError()
@pytest.fixture
def record_manager() -> SQLRecordManager:
"""Timestamped set fixture."""
record_manager = SQLRecordManager("kittens", db_url="sqlite:///:memory:")
record_manager.create_schema()
return record_manager
@pytest_asyncio.fixture # type: ignore
@pytest.mark.requires("aiosqlite")
async def arecord_manager() -> SQLRecordManager:
"""Timestamped set fixture."""
record_manager = SQLRecordManager(
"kittens",
db_url="sqlite+aiosqlite:///:memory:",
async_mode=True,
)
await record_manager.acreate_schema()
return record_manager
@pytest.fixture
def vector_store() -> InMemoryVectorStore:
"""Vector store fixture."""
return InMemoryVectorStore()
@pytest.fixture
def upserting_vector_store() -> InMemoryVectorStore:
"""Vector store fixture."""
return InMemoryVectorStore(permit_upserts=True)
def test_indexing_same_content(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
assert index(loader, record_manager, vector_store) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert len(list(vector_store.store)) == 2
for _ in range(2):
# Run the indexing again
assert index(loader, record_manager, vector_store) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_same_content(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
assert await aindex(loader, arecord_manager, vector_store) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert len(list(vector_store.store)) == 2
for _ in range(2):
# Run the indexing again
assert await aindex(loader, arecord_manager, vector_store) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
def test_index_simple_delete_full(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
),
Document(
page_content="This is another document.", # <-- Same as original
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"mutated document 1", "This is another document."}
    # Attempt to index again and verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindex_simple_delete_full(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
),
Document(
page_content="This is another document.", # <-- Same as original
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"mutated document 1", "This is another document."}
    # Attempt to index again and verify that nothing changes
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
def test_incremental_fails_with_bad_source_ids(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is yet another document.",
metadata={"source": None},
),
]
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
index(loader, record_manager, vector_store, cleanup="incremental")
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
)
@pytest.mark.requires("aiosqlite")
async def test_aincremental_fails_with_bad_source_ids(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is yet another document.",
metadata={"source": None},
),
]
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
await aindex(
loader,
arecord_manager,
vector_store,
cleanup="incremental",
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
await aindex(
loader,
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
)
def test_no_delete(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing without a deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
# If we add the same content twice it should be skipped
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated content",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
# Should result in no updates or deletions!
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_ano_delete(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing without a deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
loader,
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
# If we add the same content twice it should be skipped
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
loader,
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated content",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
# Should result in no updates or deletions!
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
loader,
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
def test_incremental_delete(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"This is another document.", "This is a test document."}
    # Attempt to index again and verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
    # Create 2 mutated documents from source 1; the source 2 document is unchanged
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
metadata={"source": "1"},
),
Document(
page_content="mutated document 2",
metadata={"source": "1"},
),
Document(
page_content="This is another document.", # <-- Same as original
metadata={"source": "2"},
),
]
)
    # Index the mutated documents; the original source 1 document should be deleted
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 3).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {
"mutated document 1",
"mutated document 2",
"This is another document.",
}
def test_incremental_indexing_with_batch_size(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental indexing"""
loader = ToyLoader(
documents=[
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "1"},
),
Document(
page_content="3",
metadata={"source": "1"},
),
Document(
page_content="4",
metadata={"source": "1"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=2,
) == {
"num_added": 4,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=2,
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 4,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"1", "2", "3", "4"}
def test_incremental_delete_with_batch_size(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy and batch size."""
loader = ToyLoader(
documents=[
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
Document(
page_content="3",
metadata={"source": "3"},
),
Document(
page_content="4",
metadata={"source": "4"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=3,
) == {
"num_added": 4,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"1", "2", "3", "4"}
    # Attempt to index again and verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=3,
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 4,
"num_updated": 0,
}
    # Attempt to index again and verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2022, 1, 3).timestamp()
):
# Docs with same content
docs = [
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
]
assert index(
docs,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=1,
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
    # Attempt to index again and verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2023, 1, 3).timestamp()
):
# Docs with same content
docs = [
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
]
assert index(
docs,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
batch_size=1,
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
# Try to index with changed docs now
with patch.object(
record_manager, "get_time", return_value=datetime(2024, 1, 3).timestamp()
):
        # Docs with changed content
docs = [
Document(
page_content="changed 1",
metadata={"source": "1"},
),
Document(
page_content="changed 2",
metadata={"source": "2"},
),
]
assert index(
docs,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 2,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aincremental_delete(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
loader.lazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"This is another document.", "This is a test document."}
    # Attempt to index again and verify that nothing changes
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
loader.lazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
    # Create 2 mutated documents from source 1; the source 2 document is unchanged
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
metadata={"source": "1"},
),
Document(
page_content="mutated document 2",
metadata={"source": "1"},
),
Document(
page_content="This is another document.", # <-- Same as original
metadata={"source": "2"},
),
]
)
    # Index the mutated documents; the original source 1 document should be deleted
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 3).timestamp()
):
assert await aindex(
loader.lazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {
"mutated document 1",
"mutated document 2",
"This is another document.",
}
def test_indexing_with_no_docs(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_with_no_docs(
arecord_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert await aindex(loader, arecord_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_adeduplication(
arecord_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
def test_cleanup_with_different_batchsize(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert index(
docs, record_manager, vector_store, cleanup="full", cleanup_batch_size=17
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_async_cleanup_with_different_batchsize(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert await aindex(
docs, arecord_manager, vector_store, cleanup="full", cleanup_batch_size=17
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication_v2(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
Document(
page_content="3",
metadata={"source": "3"},
),
]
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 3,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
    # Using the in-memory implementation here
assert isinstance(vector_store, InMemoryVectorStore)
contents = sorted(
[document.page_content for document in vector_store.store.values()]
)
assert contents == ["1", "2", "3"]
async def _to_async_iter(it: Iterable[Any]) -> AsyncIterator[Any]:
"""Convert an iterable to an async iterator."""
for i in it:
yield i
async def test_abatch() -> None:
"""Test the abatch function."""
batches = _abatch(5, _to_async_iter(range(12)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11],
]
batches = _abatch(1, _to_async_iter(range(3)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0], [1], [2]]
batches = _abatch(2, _to_async_iter(range(5)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0, 1], [2, 3], [4]]
def test_indexing_force_update(
record_manager: SQLRecordManager, upserting_vector_store: VectorStore
) -> None:
"""Test indexing with force update."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
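    # Note (added for clarity): the third document duplicates the first, so only two
    # unique documents are indexed; force_update below re-writes those two entries,
    # which is why this test uses the fixture created with permit_upserts=True.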
assert index(docs, record_manager, upserting_vector_store, cleanup="full") == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert index(docs, record_manager, upserting_vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
assert index(
docs, record_manager, upserting_vector_store, cleanup="full", force_update=True
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 2,
}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_force_update(
arecord_manager: SQLRecordManager, upserting_vector_store: VectorStore
) -> None:
"""Test indexing with force update."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
assert await aindex(
docs, arecord_manager, upserting_vector_store, cleanup="full"
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert await aindex(
docs, arecord_manager, upserting_vector_store, cleanup="full"
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
assert await aindex(
docs,
arecord_manager,
upserting_vector_store,
cleanup="full",
force_update=True,
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 2,
}
def test_indexing_custom_batch_size(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with a custom batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
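    # Note (added for clarity): the indexing API derives a deterministic uid for each
    # document by hashing its content and metadata, so the ids expected to be passed
    # to add_documents can be recomputed here with _HashedDocument.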
ids = [_HashedDocument.from_document(doc).uid for doc in docs]
batch_size = 1
with patch.object(vector_store, "add_documents") as mock_add_documents:
index(docs, record_manager, vector_store, batch_size=batch_size)
args, kwargs = mock_add_documents.call_args
docs_with_id = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
id=ids[0],
)
]
assert args == (docs_with_id,)
assert kwargs == {"ids": ids, "batch_size": batch_size}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_custom_batch_size(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with a custom batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
ids = [_HashedDocument.from_document(doc).uid for doc in docs]
batch_size = 1
with patch.object(vector_store, "aadd_documents") as mock_add_documents:
await aindex(docs, arecord_manager, vector_store, batch_size=batch_size)
args, kwargs = mock_add_documents.call_args
docs_with_id = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
id=ids[0],
)
]
assert args == (docs_with_id,)
assert kwargs == {"ids": ids, "batch_size": batch_size}
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/load/test_imports.py | from langchain.load import __all__
EXPECTED_ALL = [
"dumpd",
"dumps",
"load",
"loads",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/load/test_load.py | """Test for Serializable base class"""
import pytest
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
pytest.importorskip("langchain_openai", reason="langchain_openai not installed")
pytest.importorskip("langchain_community", reason="langchain_community not installed")
from langchain_community.llms.openai import ( # noqa: E402 # ignore: community-import
OpenAI as CommunityOpenAI,
)
class NotSerializable:
pass
@pytest.mark.requires("openai", "langchain_openai")
def test_loads_openai_llm() -> None:
from langchain_openai import OpenAI
llm = CommunityOpenAI(
model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
) # type: ignore[call-arg]
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2 == llm
llm_string_2 = dumps(llm2)
assert llm_string_2 == llm_string
assert isinstance(llm2, OpenAI)
@pytest.mark.requires("openai", "langchain_openai")
def test_loads_llmchain() -> None:
from langchain_openai import OpenAI
llm = CommunityOpenAI(
model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
) # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires("openai", "langchain_openai")
def test_loads_llmchain_env() -> None:
import os
from langchain_openai import OpenAI
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm = OpenAI(model="davinci", temperature=0.5, top_p=0.8) # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string)
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_loads_llmchain_with_non_serializable_arg() -> None:
llm = CommunityOpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
model_kwargs={"a": NotSerializable},
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain, pretty=True)
with pytest.raises(NotImplementedError):
loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
@pytest.mark.requires("openai", "langchain_openai")
def test_load_openai_llm() -> None:
from langchain_openai import OpenAI
llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2 == llm
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, OpenAI)
@pytest.mark.requires("openai", "langchain_openai")
def test_load_llmchain() -> None:
from langchain_openai import OpenAI
llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
chain2 = load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
assert dumpd(chain2) == chain_obj
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires("openai", "langchain_openai")
def test_load_llmchain_env() -> None:
import os
from langchain_openai import OpenAI
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm = CommunityOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
chain2 = load(chain_obj)
assert chain2 == chain
assert dumpd(chain2) == chain_obj
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai", "langchain_openai")
def test_load_llmchain_with_non_serializable_arg() -> None:
import httpx
from langchain_openai import OpenAI
llm = OpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
http_client=httpx.Client(),
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
with pytest.raises(NotImplementedError):
load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/load/test_dump.py | """Test for Serializable base class"""
import json
import os
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> Dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> Dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> Dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def get_from_env(cls, values: Dict) -> Any:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/load | lc_public_repos/langchain/libs/langchain/tests/unit_tests/load/__snapshots__/test_dump.ambr | # serializer version: 1
# name: test_person
'''
{
"lc": 1,
"type": "constructor",
"id": [
"tests",
"unit_tests",
"load",
"test_dump",
"Person"
],
"kwargs": {
"secret": {
"lc": 1,
"type": "secret",
"id": [
"SECRET"
]
},
"you_can_see_me": "hello"
}
}
'''
# ---
# name: test_person.1
'''
{
"lc": 1,
"type": "constructor",
"id": [
"my",
"special",
"namespace",
"SpecialPerson"
],
"kwargs": {
"secret": {
"lc": 1,
"type": "secret",
"id": [
"SECRET"
]
},
"you_can_see_me": "hello",
"another_secret": {
"lc": 1,
"type": "secret",
"id": [
"ANOTHER_SECRET"
]
},
"another_visible": "bye"
}
}
'''
# ---
# name: test_person_with_kwargs
'{"lc":1,"type":"constructor","id":["tests","unit_tests","load","test_dump","Person"],"kwargs":{"secret":{"lc":1,"type":"secret","id":["SECRET"]},"you_can_see_me":"hello"}}'
# ---
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_language_model.py | from langchain.schema.language_model import __all__
EXPECTED_ALL = [
"BaseLanguageModel",
"_get_token_ids_default_method",
"get_tokenizer",
"LanguageModelOutput",
"LanguageModelInput",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_storage.py | from langchain.schema.storage import __all__
EXPECTED_ALL = ["BaseStore", "K", "V"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_output.py | from langchain.schema.output import __all__
EXPECTED_ALL = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_retriever.py | from langchain.schema.retriever import __all__
EXPECTED_ALL = ["BaseRetriever"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_chat.py | from langchain.schema.chat import __all__
EXPECTED_ALL = ["ChatSession"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_cache.py | from langchain.schema.cache import __all__
EXPECTED_ALL = ["BaseCache", "RETURN_VAL_TYPE"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_imports.py | from langchain.schema import __all__
EXPECTED_ALL = [
"BaseCache",
"BaseMemory",
"BaseStore",
"AgentFinish",
"AgentAction",
"Document",
"BaseChatMessageHistory",
"BaseDocumentTransformer",
"BaseMessage",
"ChatMessage",
"FunctionMessage",
"HumanMessage",
"AIMessage",
"SystemMessage",
"messages_from_dict",
"messages_to_dict",
"message_to_dict",
"_message_to_dict",
"_message_from_dict",
"get_buffer_string",
"RunInfo",
"LLMResult",
"ChatResult",
"ChatGeneration",
"Generation",
"PromptValue",
"LangChainException",
"BaseRetriever",
"RUN_KEY",
"Memory",
"OutputParserException",
"StrOutputParser",
"BaseOutputParser",
"BaseLLMOutputParser",
"BasePromptTemplate",
"format_document",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_embeddings.py | from langchain.schema.embeddings import __all__
EXPECTED_ALL = ["Embeddings"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_exceptions.py | from langchain.schema.exceptions import __all__
EXPECTED_ALL = ["LangChainException"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_messages.py | from langchain.schema.messages import __all__
EXPECTED_ALL = [
"AIMessage",
"AIMessageChunk",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"SystemMessage",
"SystemMessageChunk",
"ToolMessage",
"ToolMessageChunk",
"_message_from_dict",
"_message_to_dict",
"message_to_dict",
"get_buffer_string",
"merge_content",
"messages_from_dict",
"messages_to_dict",
"AnyMessage",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_document.py | from langchain.schema.document import __all__
EXPECTED_ALL = ["BaseDocumentTransformer", "Document"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_output_parser.py | from langchain.schema.output_parser import __all__
EXPECTED_ALL = [
"BaseCumulativeTransformOutputParser",
"BaseGenerationOutputParser",
"BaseLLMOutputParser",
"BaseOutputParser",
"BaseTransformOutputParser",
"NoOpOutputParser",
"OutputParserException",
"StrOutputParser",
"T",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_prompt.py | from langchain.schema.prompt import __all__
EXPECTED_ALL = ["PromptValue"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_agent.py | from langchain.schema.agent import __all__
EXPECTED_ALL = ["AgentAction", "AgentActionMessageLog", "AgentFinish"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_chat_history.py | from langchain.schema.chat_history import __all__
EXPECTED_ALL = ["BaseChatMessageHistory"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_prompt_template.py | from langchain.schema.prompt_template import __all__
EXPECTED_ALL = ["BasePromptTemplate", "format_document"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_vectorstore.py | from langchain.schema.vectorstore import __all__
EXPECTED_ALL = ["VectorStore", "VectorStoreRetriever", "VST"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/test_memory.py | from langchain.schema.memory import __all__
EXPECTED_ALL = ["BaseMemory"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_passthrough.py | from langchain.schema.runnable.passthrough import __all__
EXPECTED_ALL = ["RunnableAssign", "RunnablePassthrough", "aidentity", "identity"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_configurable.py | from langchain.schema.runnable.configurable import __all__
EXPECTED_ALL = [
"DynamicRunnable",
"RunnableConfigurableAlternatives",
"RunnableConfigurableFields",
"StrEnum",
"make_options_spec",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_base.py | from langchain.schema.runnable.base import __all__
EXPECTED_ALL = [
"Runnable",
"RunnableBinding",
"RunnableBindingBase",
"RunnableEach",
"RunnableEachBase",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnableSequence",
"RunnableSerializable",
"coerce_to_runnable",
"Input",
"Output",
"Other",
"RunnableLike",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_imports.py | from langchain.schema.runnable import __all__
EXPECTED_ALL = [
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableSequence",
"RunnableWithFallbacks",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_history.py | from langchain.schema.runnable.history import __all__
EXPECTED_ALL = [
"RunnableWithMessageHistory",
"GetSessionHistoryCallable",
"MessagesOrDictWithMessages",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_fallbacks.py | from langchain.schema.runnable.fallbacks import __all__
EXPECTED_ALL = ["RunnableWithFallbacks"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_router.py | from langchain.schema.runnable.router import __all__
EXPECTED_ALL = ["RouterInput", "RouterRunnable"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_branch.py | from langchain.schema.runnable.branch import __all__
EXPECTED_ALL = ["RunnableBranch"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_utils.py | from langchain.schema.runnable.utils import __all__
EXPECTED_ALL = [
"AddableDict",
"ConfigurableField",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSingleOption",
"ConfigurableFieldSpec",
"GetLambdaSource",
"IsFunctionArgDict",
"IsLocalDict",
"SupportsAdd",
"aadd",
"accepts_config",
"accepts_run_manager",
"add",
"gated_coro",
"gather_with_concurrency",
"get_function_first_arg_dict_keys",
"get_lambda_source",
"get_unique_config_specs",
"indent_lines_after_first",
"Input",
"Output",
"Addable",
"AnyConfigurableField",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_retry.py | from langchain.schema.runnable.retry import __all__
EXPECTED_ALL = ["RunnableRetry", "U"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema | lc_public_repos/langchain/libs/langchain/tests/unit_tests/schema/runnable/test_config.py | from langchain.schema.runnable.config import __all__
EXPECTED_ALL = [
"EmptyDict",
"RunnableConfig",
"acall_func_with_variable_args",
"call_func_with_variable_args",
"ensure_config",
"get_async_callback_manager_for_config",
"get_callback_manager_for_config",
"get_config_list",
"get_executor_for_config",
"merge_configs",
"patch_config",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/_api/test_importing.py | from langchain._api.module_import import create_importer
def test_import_from_non_deprecated_path() -> None:
"""Test importing all modules in langchain."""
module_lookup = {
"Document": "langchain_core.documents",
}
lookup = create_importer(__package__, module_lookup=module_lookup)
imported_doc = lookup("Document")
from langchain_core.documents import Document
assert imported_doc is Document
def test_import_from_deprecated_path() -> None:
"""Test importing all modules in langchain."""
module_lookup = {
"Document": "langchain_core.documents",
}
lookup = create_importer(__package__, deprecated_lookups=module_lookup)
imported_doc = lookup("Document")
from langchain_core.documents import Document
assert imported_doc is Document
def test_import_using_fallback_module() -> None:
"""Test import using fallback module."""
lookup = create_importer(__package__, fallback_module="langchain_core.documents")
imported_doc = lookup("Document")
from langchain_core.documents import Document
assert imported_doc is Document
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_few_shot.py | from langchain.prompts.few_shot import __all__
EXPECTED_ALL = [
"FewShotChatMessagePromptTemplate",
"FewShotPromptTemplate",
"_FewShotPromptTemplateMixin",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_chat.py | from langchain.prompts.chat import __all__
EXPECTED_ALL = [
"MessageLike",
"MessageLikeRepresentation",
"MessagePromptTemplateT",
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BaseMessagePromptTemplate",
"BaseStringMessagePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"ChatPromptValue",
"ChatPromptValueConcrete",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"SystemMessagePromptTemplate",
"_convert_to_message",
"_create_template_from_message_type",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_base.py | from langchain.prompts.base import __all__
EXPECTED_ALL = [
"BasePromptTemplate",
"StringPromptTemplate",
"StringPromptValue",
"_get_jinja2_variables_from_template",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_imports.py | from langchain import prompts
EXPECTED_ALL = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"HumanMessagePromptTemplate",
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"MessagesPlaceholder",
"NGramOverlapExampleSelector",
"PipelinePromptTemplate",
"Prompt",
"PromptTemplate",
"SemanticSimilarityExampleSelector",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"FewShotChatMessagePromptTemplate",
]
def test_all_imports() -> None:
assert set(prompts.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_prompt.py | from langchain.prompts.prompt import __all__
EXPECTED_ALL = ["Prompt", "PromptTemplate"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_loading.py | from langchain.prompts.loading import __all__
EXPECTED_ALL = [
"_load_examples",
"_load_few_shot_prompt",
"_load_output_parser",
"_load_prompt",
"_load_prompt_from_file",
"_load_template",
"load_prompt",
"load_prompt_from_config",
"try_load_from_hub",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/__init__.py | """Test prompt functionality."""
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_few_shot_with_templates.py | from langchain.prompts.few_shot_with_templates import __all__
EXPECTED_ALL = ["FewShotPromptWithTemplates"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/prompts/test_pipeline.py | from langchain.prompts.pipeline import __all__
EXPECTED_ALL = ["PipelinePromptTemplate", "_get_inputs"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/llms/test_base.py | """Test base LLM functionality."""
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation, LLMResult
from langchain.globals import get_llm_cache, set_llm_cache
from langchain.llms.base import __all__
from tests.unit_tests.llms.fake_llm import FakeLLM
EXPECTED_ALL = [
"BaseLLM",
"LLM",
"BaseLanguageModel",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
def test_caching() -> None:
"""Test caching behavior."""
set_llm_cache(InMemoryCache())
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo", "bar", "foo"])
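    # Note (added for clarity): the "foo" prompts hit the pre-seeded "fizz" cache
    # entry, while "bar" misses and is generated by FakeLLM, which returns "foo"
    # when no stop sequence is given; that generated value is what the cache
    # lookup below retrieves.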
expected_cache_output = [Generation(text="foo")]
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [
[Generation(text="fizz")],
[Generation(text="foo")],
[Generation(text="fizz")],
]
expected_output = LLMResult(
generations=expected_generations,
llm_output=None,
)
assert output == expected_output
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/llms/test_imports.py | from langchain import llms
EXPECT_ALL = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
"WatsonxLLM",
]
def test_all_imports() -> None:
"""Simple test to make sure all things can be imported."""
assert set(llms.__all__) == set(EXPECT_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/llms/fake_llm.py | """Fake LLM wrapper for testing purposes."""
from typing import Any, Dict, List, Mapping, Optional, cast
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
sequential_responses: Optional[bool] = False
response_index: int = 0
@model_validator(mode="before")
@classmethod
def check_queries_required(cls, values: dict) -> dict:
if values.get("sequential_response") and not values.get("queries"):
raise ValueError(
"queries is required when sequential_response is set to True"
)
return values
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {}
@property
def _get_next_response_in_sequence(self) -> str:
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
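# Illustrative usage (a sketch, not part of the original wrapper):
#   FakeLLM(queries={"hi": "yo"}) returns "yo" for the prompt "hi".
#   FakeLLM(sequential_responses=True, queries={"a": "first", "b": "second"}) returns
#   the mapped values in insertion order, regardless of the prompt.
#   With no queries, it falls back to "foo" (no stop sequence) or "bar" (stop given).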
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/llms/fake_chat_model.py | """Fake Chat Model wrapper for testing purposes."""
import re
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, cast
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import run_in_executor
class FakeChatModel(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return "fake response"
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-chat-model"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {"key": "fake"}
class GenericFakeChatModel(BaseChatModel):
"""A generic fake chat model that can be used to test the chat model interface.
* Chat model should be usable in both sync and async tests
* Invokes on_llm_new_token to allow for testing of callback related code for new
tokens.
* Includes logic to break messages into message chunk to facilitate testing of
streaming.
"""
messages: Iterator[AIMessage]
"""Get an iterator over messages.
This can be expanded to accept other types like Callables / dicts / strings
to make the interface more generic if needed.
Note: if you want to pass a list, you can use `iter` to convert it to an iterator.
    Streaming is supported: `_stream` delegates to `_generate` and breaks the
    resulting message into chunks, splitting the content on whitespace and
    emitting any `additional_kwargs` (such as function calls) piecewise.
"""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
message = next(self.messages)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the output of the model."""
chat_result = self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if not isinstance(chat_result, ChatResult):
raise ValueError(
f"Expected generate to return a ChatResult, "
f"but got {type(chat_result)} instead."
)
message = chat_result.generations[0].message
if not isinstance(message, AIMessage):
raise ValueError(
f"Expected invoke to return an AIMessage, "
f"but got {type(message)} instead."
)
content = message.content
if content:
# Use a regular expression to split on whitespace with a capture group
# so that we can preserve the whitespace in the output.
assert isinstance(content, str)
content_chunks = cast(List[str], re.split(r"(\s)", content))
for token in content_chunks:
chunk = ChatGenerationChunk(
message=AIMessageChunk(id=message.id, content=token)
)
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
if message.additional_kwargs:
for key, value in message.additional_kwargs.items():
# We should further break down the additional kwargs into chunks
# Special case for function call
if key == "function_call":
for fkey, fvalue in value.items():
if isinstance(fvalue, str):
# Break function call by `,`
fvalue_chunks = cast(List[str], re.split(r"(,)", fvalue))
for fvalue_chunk in fvalue_chunks:
chunk = ChatGenerationChunk(
message=AIMessageChunk(
id=message.id,
content="",
additional_kwargs={
"function_call": {fkey: fvalue_chunk}
},
)
)
if run_manager:
run_manager.on_llm_new_token(
"",
chunk=chunk, # No token for function call
)
yield chunk
else:
chunk = ChatGenerationChunk(
message=AIMessageChunk(
id=message.id,
content="",
additional_kwargs={"function_call": {fkey: fvalue}},
)
)
if run_manager:
run_manager.on_llm_new_token(
"",
chunk=chunk, # No token for function call
)
yield chunk
else:
chunk = ChatGenerationChunk(
message=AIMessageChunk(
id=message.id, content="", additional_kwargs={key: value}
)
)
if run_manager:
run_manager.on_llm_new_token(
"",
                            chunk=chunk,  # No token for additional kwargs
)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
"""Stream the output of the model."""
result = await run_in_executor(
None,
self._stream,
messages,
stop=stop,
run_manager=run_manager.get_sync() if run_manager else None,
**kwargs,
)
for chunk in result:
yield chunk
@property
def _llm_type(self) -> str:
return "generic-fake-chat-model"
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/llms/test_fake_chat_model.py | """Tests for verifying that testing utility code works as expected."""
from itertools import cycle
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
from tests.unit_tests.llms.fake_chat_model import GenericFakeChatModel
from tests.unit_tests.stubs import _AnyIdAIMessage, _AnyIdAIMessageChunk
def test_generic_fake_chat_model_invoke() -> None:
# Will alternate between responding with hello and goodbye
infinite_cycle = cycle([AIMessage(content="hello"), AIMessage(content="goodbye")])
model = GenericFakeChatModel(messages=infinite_cycle)
response = model.invoke("meow")
assert response == _AnyIdAIMessage(content="hello")
response = model.invoke("kitty")
assert response == _AnyIdAIMessage(content="goodbye")
response = model.invoke("meow")
assert response == _AnyIdAIMessage(content="hello")
async def test_generic_fake_chat_model_ainvoke() -> None:
# Will alternate between responding with hello and goodbye
infinite_cycle = cycle([AIMessage(content="hello"), AIMessage(content="goodbye")])
model = GenericFakeChatModel(messages=infinite_cycle)
response = await model.ainvoke("meow")
assert response == _AnyIdAIMessage(content="hello")
response = await model.ainvoke("kitty")
assert response == _AnyIdAIMessage(content="goodbye")
response = await model.ainvoke("meow")
assert response == _AnyIdAIMessage(content="hello")
async def test_generic_fake_chat_model_stream() -> None:
"""Test streaming."""
infinite_cycle = cycle(
[
AIMessage(content="hello goodbye"),
]
)
model = GenericFakeChatModel(messages=infinite_cycle)
chunks = [chunk async for chunk in model.astream("meow")]
assert chunks == [
_AnyIdAIMessageChunk(content="hello"),
_AnyIdAIMessageChunk(content=" "),
_AnyIdAIMessageChunk(content="goodbye"),
]
chunks = [chunk for chunk in model.stream("meow")]
assert chunks == [
_AnyIdAIMessageChunk(content="hello"),
_AnyIdAIMessageChunk(content=" "),
_AnyIdAIMessageChunk(content="goodbye"),
]
# Test streaming of additional kwargs.
# Relying on insertion order of the additional kwargs dict
message = AIMessage(content="", additional_kwargs={"foo": 42, "bar": 24})
model = GenericFakeChatModel(messages=cycle([message]))
chunks = [chunk async for chunk in model.astream("meow")]
assert chunks == [
_AnyIdAIMessageChunk(content="", additional_kwargs={"foo": 42}),
_AnyIdAIMessageChunk(content="", additional_kwargs={"bar": 24}),
]
message = AIMessage(
id="a1",
content="",
additional_kwargs={
"function_call": {
"name": "move_file",
"arguments": '{\n "source_path": "foo",\n "'
'destination_path": "bar"\n}',
}
},
)
model = GenericFakeChatModel(messages=cycle([message]))
chunks = [chunk async for chunk in model.astream("meow")]
assert chunks == [
AIMessageChunk(
content="",
additional_kwargs={"function_call": {"name": "move_file"}},
id="a1",
),
AIMessageChunk(
id="a1",
content="",
additional_kwargs={
"function_call": {"arguments": '{\n "source_path": "foo"'}
},
),
AIMessageChunk(
id="a1", content="", additional_kwargs={"function_call": {"arguments": ","}}
),
AIMessageChunk(
id="a1",
content="",
additional_kwargs={
"function_call": {"arguments": '\n "destination_path": "bar"\n}'}
},
),
]
accumulate_chunks = None
for chunk in chunks:
if accumulate_chunks is None:
accumulate_chunks = chunk
else:
accumulate_chunks += chunk
assert accumulate_chunks == AIMessageChunk(
id="a1",
content="",
additional_kwargs={
"function_call": {
"name": "move_file",
"arguments": '{\n "source_path": "foo",\n "'
'destination_path": "bar"\n}',
}
},
)
async def test_generic_fake_chat_model_astream_log() -> None:
"""Test streaming."""
infinite_cycle = cycle([AIMessage(content="hello goodbye")])
model = GenericFakeChatModel(messages=infinite_cycle)
log_patches = [
log_patch async for log_patch in model.astream_log("meow", diff=False)
]
final = log_patches[-1]
assert final.state["streamed_output"] == [
_AnyIdAIMessageChunk(content="hello"),
_AnyIdAIMessageChunk(content=" "),
_AnyIdAIMessageChunk(content="goodbye"),
]
async def test_callback_handlers() -> None:
"""Verify that model is implemented correctly with handlers working."""
class MyCustomAsyncHandler(AsyncCallbackHandler):
def __init__(self, store: List[str]) -> None:
self.store = store
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
self.store.append(token)
infinite_cycle = cycle(
[
AIMessage(content="hello goodbye"),
]
)
model = GenericFakeChatModel(messages=infinite_cycle)
tokens: List[str] = []
# New model
results = list(model.stream("meow", {"callbacks": [MyCustomAsyncHandler(tokens)]}))
assert results == [
_AnyIdAIMessageChunk(content="hello"),
_AnyIdAIMessageChunk(content=" "),
_AnyIdAIMessageChunk(content="goodbye"),
]
assert tokens == ["hello", " ", "goodbye"]
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/tools/test_base.py | from langchain.tools.base import __all__
EXPECTED_ALL = [
"BaseTool",
"SchemaAnnotationError",
"StructuredTool",
"Tool",
"ToolException",
"create_schema_from_function",
"tool",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/tools/test_imports.py | from langchain import tools
EXPECTED_ALL = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MetaphorSearchResults",
"MoveFileTool",
"NasaAction",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SceneXplainTool",
"SearchAPIRun",
"SearchAPIResults",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"SleepTool",
"StackExchangeTool",
"StdInInquireTool",
"SteamWebAPIQueryRun",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"format_tool_to_openai_function",
"tool",
"MerriamWebsterQueryRun",
]
def test_all_imports() -> None:
assert set(tools.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/tools/test_render.py | from typing import List
import pytest
from langchain_core.tools import BaseTool, tool
from langchain.tools.render import (
render_text_description,
render_text_description_and_args,
)
@tool
def search(query: str) -> str:
"""Lookup things online."""
return "foo"
@tool
def calculator(expression: str) -> str:
"""Do math."""
return "bar"
@pytest.fixture
def tools() -> List[BaseTool]:
return [search, calculator] # type: ignore
def test_render_text_description(tools: List[BaseTool]) -> None:
tool_string = render_text_description(tools)
expected_string = """search(query: str) -> str - Lookup things online.
calculator(expression: str) -> str - Do math."""
assert tool_string == expected_string
def test_render_text_description_and_args(tools: List[BaseTool]) -> None:
tool_string = render_text_description_and_args(tools)
expected_string = """search(query: str) -> str - Lookup things online., \
args: {'query': {'title': 'Query', 'type': 'string'}}
calculator(expression: str) -> str - Do math., \
args: {'expression': {'title': 'Expression', 'type': 'string'}}"""
assert tool_string == expected_string
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/utilities/test_imports.py | from langchain import utilities
EXPECTED_ALL = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GoogleFinanceAPIWrapper",
"GoogleJobsAPIWrapper",
"GoogleLensAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GoogleTrendsAPIWrapper",
"GraphQLAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MetaphorSearchAPIWrapper",
"NasaAPIWrapper",
"OpenWeatherMapAPIWrapper",
"OutlineAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"Requests",
"RequestsWrapper",
"SQLDatabase",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"StackExchangeAPIWrapper",
"SteamWebAPIWrapper",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"ZapierNLAWrapper",
"MerriamWebsterAPIWrapper",
]
def test_all_imports() -> None:
assert set(utilities.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_enum_parser.py | from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.enum import EnumOutputParser
class Colors(Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
def test_enum_output_parser_parse() -> None:
parser = EnumOutputParser(enum=Colors)
# Test valid inputs
result = parser.parse("red")
assert result == Colors.RED
result = parser.parse("green")
assert result == Colors.GREEN
result = parser.parse("blue")
assert result == Colors.BLUE
# Test invalid input
try:
parser.parse("INVALID")
assert False, "Should have raised OutputParserException"
except OutputParserException:
pass
def test_enum_output_parser_output_type() -> None:
"""Test the output type of the enum output parser is the expected enum."""
assert EnumOutputParser(enum=Colors).OutputType is Colors
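# Illustrative sketch: EnumOutputParser also exposes format instructions that
# list the allowed values, which is what a prompt would typically include.
def _example_format_instructions() -> None:
    instructions = EnumOutputParser(enum=Colors).get_format_instructions()
    assert all(color.value in instructions for color in Colors)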
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_datetime_parser.py | from datetime import datetime
from langchain.output_parsers.datetime import DatetimeOutputParser
def test_datetime_output_parser_parse() -> None:
parser = DatetimeOutputParser()
# Test valid input
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert result == date
# Test valid input
parser.format = "%Y-%m-%dT%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.year == date.year
and result.month == date.month
and result.day == date.day
and result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
# Test valid input
parser.format = "%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
    # A time-only format cannot round-trip the full datetime: the parsed value
    # falls back to the default date, so it must differ from the original.
    datestr = date.strftime(parser.format)
    result = parser.parse(datestr)
    assert result != date
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_regex.py | from typing import Dict
from langchain.output_parsers.regex import RegexParser
# NOTE: Nearly identical constants are defined in ./test_combining_parser.py
DEF_EXPECTED_RESULT = {
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_regex_parser_parse() -> None:
"""Test regex parser parse."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert DEF_EXPECTED_RESULT == parser.parse(DEF_README)
def test_regex_parser_output_type() -> None:
"""Test regex parser output type is Dict[str, str]."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert parser.OutputType is Dict[str, str]
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_pandas_dataframe_parser.py | """Test PandasDataframeParser"""
from typing import Any, Dict
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{"chicken": [1, 2, 3, 4], "veggies": [5, 4, 3, 2], "steak": [9, 8, 7, 6]}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test row index above the DataFrame's max index
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output.keys():
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
    expected_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
    actual_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType is Dict[str, Any]
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_imports.py | from langchain import output_parsers
EXPECTED_ALL = [
"BooleanOutputParser",
"CombiningOutputParser",
"CommaSeparatedListOutputParser",
"DatetimeOutputParser",
"EnumOutputParser",
"GuardrailsOutputParser",
"ListOutputParser",
"MarkdownListOutputParser",
"NumberedListOutputParser",
"OutputFixingParser",
"PandasDataFrameOutputParser",
"PydanticOutputParser",
"RegexDictParser",
"RegexParser",
"ResponseSchema",
"RetryOutputParser",
"RetryWithErrorOutputParser",
"StructuredOutputParser",
"XMLOutputParser",
"JsonOutputToolsParser",
"PydanticToolsParser",
"JsonOutputKeyToolsParser",
"YamlOutputParser",
]
def test_all_imports() -> None:
assert set(output_parsers.__all__) == set(EXPECTED_ALL)
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_yaml_parser.py | """Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields", default=None
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline: \n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert DEF_EXPECTED_RESULT == model
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_regex_dict.py | """Test in memory docstore."""
from typing import Dict
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
- Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict) # noqa: T201
assert DEF_EXPECTED_RESULT == result_dict
def test_regex_dict_output_type() -> None:
"""Test regex dict output type."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
assert regex_dict_parser.OutputType is Dict[str, str]
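# Illustrative sketch: when no no_update_value is configured, only the keys in
# output_key_to_format are extracted and other fields in the text are ignored.
def _example_without_no_update_value() -> None:
    plain_parser = RegexDictParser(output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT)
    assert plain_parser.parse(DEF_README)["action"] == "Search"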
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_structured_parser.py | from typing import Any, Dict
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
    # Test input that is valid JSON but missing a required key
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
def test_output_type() -> None:
"""Test the output type of the structured output parser is Dict[str, Any]."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
assert parser.OutputType == Dict[str, Any]
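# Illustrative sketch: the parser can also emit format instructions for
# prompting; only the presence of the schema name is checked here, since the
# exact wording of the instructions is an implementation detail.
def _example_format_instructions() -> None:
    schemas = [ResponseSchema(name="name", description="desc")]
    parser = StructuredOutputParser.from_response_schemas(schemas)
    assert "name" in parser.get_format_instructions()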
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_combining_parser.py | """Test in memory docstore."""
from typing import Any, Dict
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert DEF_EXPECTED_RESULT == result_dict
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType is Dict[str, Any]
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_json.py | from typing import Any, AsyncIterator, Iterator
from langchain_core.messages import AIMessageChunk
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
GOOD_JSON = """```json
{
"foo": "bar"
}
```"""
JSON_WITH_NEW_LINES = """
```json
{
"foo": "bar"
}
```
"""
JSON_WITH_NEW_LINES_INSIDE = """```json
{
"foo": "bar"
}
```"""
JSON_WITH_NEW_LINES_EVERYWHERE = """
```json
{
"foo": "bar"
}
```
"""
TICKS_WITH_NEW_LINES_EVERYWHERE = """
```
{
"foo": "bar"
}
```
"""
JSON_WITH_MARKDOWN_CODE_BLOCK = """```json
{
"foo": "```bar```"
}
```"""
JSON_WITH_MARKDOWN_CODE_BLOCK_AND_NEWLINES = """```json
{
"action": "Final Answer",
"action_input": "```bar\n<div id="1" class=\"value\">\n\ttext\n</div>```"
}
```"""
JSON_WITH_UNESCAPED_QUOTES_IN_NESTED_JSON = """```json
{
"action": "Final Answer",
"action_input": "{"foo": "bar", "bar": "foo"}"
}
```"""
JSON_WITH_ESCAPED_QUOTES_IN_NESTED_JSON = """```json
{
"action": "Final Answer",
"action_input": "{\"foo\": \"bar\", \"bar\": \"foo\"}"
}
```"""
JSON_WITH_PYTHON_DICT = """```json
{
"action": "Final Answer",
"action_input": {"foo": "bar", "bar": "foo"}
}
```"""
JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON = """```json
{
"action": "Final Answer",
"action_input": "{\\"foo\\": \\"bar\\", \\"bar\\": \\"foo\\"}"
}
```"""
NO_TICKS = """{
"foo": "bar"
}"""
NO_TICKS_WHITE_SPACE = """
{
"foo": "bar"
}
"""
TEXT_BEFORE = """Thought: I need to use the search tool
Action:
```
{
"foo": "bar"
}
```"""
TEXT_AFTER = """```
{
"foo": "bar"
}
```
This should do the trick"""
TEXT_BEFORE_AND_AFTER = """Action: Testing
```
{
"foo": "bar"
}
```
This should do the trick"""
TEST_CASES = [
GOOD_JSON,
JSON_WITH_NEW_LINES,
JSON_WITH_NEW_LINES_INSIDE,
JSON_WITH_NEW_LINES_EVERYWHERE,
TICKS_WITH_NEW_LINES_EVERYWHERE,
NO_TICKS,
NO_TICKS_WHITE_SPACE,
TEXT_BEFORE,
TEXT_AFTER,
]
TEST_CASES_ESCAPED_QUOTES = [
JSON_WITH_UNESCAPED_QUOTES_IN_NESTED_JSON,
JSON_WITH_ESCAPED_QUOTES_IN_NESTED_JSON,
JSON_WITH_ESCAPED_DOUBLE_QUOTES_IN_NESTED_JSON,
]
TEST_CASES_PARTIAL = [
('{"foo": "bar", "bar": "foo"}', '{"foo": "bar", "bar": "foo"}'),
('{"foo": "bar", "bar": "foo', '{"foo": "bar", "bar": "foo"}'),
('{"foo": "bar", "bar": "foo}', '{"foo": "bar", "bar": "foo}"}'),
('{"foo": "bar", "bar": "foo[', '{"foo": "bar", "bar": "foo["}'),
('{"foo": "bar", "bar": "foo\\"', '{"foo": "bar", "bar": "foo\\""}'),
]
STREAMED_TOKENS = """
{
"
setup
":
"
Why
did
the
bears
start
a
band
called
Bears
Bears
Bears
?
"
,
"
punchline
":
"
Because
they
wanted
to
play
bear
-y
good
music
!
"
,
"
audience
":
[
"
Haha
"
,
"
So
funny
"
]
}
""".splitlines()
EXPECTED_STREAMED_JSON = [
{},
{"setup": ""},
{"setup": "Why"},
{"setup": "Why did"},
{"setup": "Why did the"},
{"setup": "Why did the bears"},
{"setup": "Why did the bears start"},
{"setup": "Why did the bears start a"},
{"setup": "Why did the bears start a band"},
{"setup": "Why did the bears start a band called"},
{"setup": "Why did the bears start a band called Bears"},
{"setup": "Why did the bears start a band called Bears Bears"},
{"setup": "Why did the bears start a band called Bears Bears Bears"},
{"setup": "Why did the bears start a band called Bears Bears Bears ?"},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play bear",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play bear -y",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play bear -y good",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play bear -y good music",
},
{
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"punchline": "Because they wanted to play bear -y good music !",
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": [],
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": [""],
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": ["Haha"],
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": ["Haha", ""],
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": ["Haha", "So"],
},
{
"punchline": "Because they wanted to play bear -y good music !",
"setup": "Why did the bears start a band called Bears Bears Bears ?",
"audience": ["Haha", "So funny"],
},
]
EXPECTED_STREAMED_JSON_DIFF = [
[{"op": "replace", "path": "", "value": {}}],
[{"op": "add", "path": "/setup", "value": ""}],
[{"op": "replace", "path": "/setup", "value": "Why"}],
[{"op": "replace", "path": "/setup", "value": "Why did"}],
[{"op": "replace", "path": "/setup", "value": "Why did the"}],
[{"op": "replace", "path": "/setup", "value": "Why did the bears"}],
[{"op": "replace", "path": "/setup", "value": "Why did the bears start"}],
[{"op": "replace", "path": "/setup", "value": "Why did the bears start a"}],
[{"op": "replace", "path": "/setup", "value": "Why did the bears start a band"}],
[
{
"op": "replace",
"path": "/setup",
"value": "Why did the bears start a band called",
}
],
[
{
"op": "replace",
"path": "/setup",
"value": "Why did the bears start a band called Bears",
}
],
[
{
"op": "replace",
"path": "/setup",
"value": "Why did the bears start a band called Bears Bears",
}
],
[
{
"op": "replace",
"path": "/setup",
"value": "Why did the bears start a band called Bears Bears Bears",
}
],
[
{
"op": "replace",
"path": "/setup",
"value": "Why did the bears start a band called Bears Bears Bears ?",
}
],
[{"op": "add", "path": "/punchline", "value": ""}],
[{"op": "replace", "path": "/punchline", "value": "Because"}],
[{"op": "replace", "path": "/punchline", "value": "Because they"}],
[{"op": "replace", "path": "/punchline", "value": "Because they wanted"}],
[{"op": "replace", "path": "/punchline", "value": "Because they wanted to"}],
[{"op": "replace", "path": "/punchline", "value": "Because they wanted to play"}],
[
{
"op": "replace",
"path": "/punchline",
"value": "Because they wanted to play bear",
}
],
[
{
"op": "replace",
"path": "/punchline",
"value": "Because they wanted to play bear -y",
}
],
[
{
"op": "replace",
"path": "/punchline",
"value": "Because they wanted to play bear -y good",
}
],
[
{
"op": "replace",
"path": "/punchline",
"value": "Because they wanted to play bear -y good music",
}
],
[
{
"op": "replace",
"path": "/punchline",
"value": "Because they wanted to play bear -y good music !",
}
],
[{"op": "add", "path": "/audience", "value": []}],
[{"op": "add", "path": "/audience/0", "value": ""}],
[{"op": "replace", "path": "/audience/0", "value": "Haha"}],
[{"op": "add", "path": "/audience/1", "value": ""}],
[{"op": "replace", "path": "/audience/1", "value": "So"}],
[{"op": "replace", "path": "/audience/1", "value": "So funny"}],
]
def test_partial_functions_json_output_parser() -> None:
def input_iter(_: Any) -> Iterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": token}}
)
chain = input_iter | JsonOutputFunctionsParser()
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON
def test_partial_functions_json_output_parser_diff() -> None:
def input_iter(_: Any) -> Iterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": token}}
)
chain = input_iter | JsonOutputFunctionsParser(diff=True)
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF
async def test_partial_functions_json_output_parser_async() -> None:
async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": token}}
)
chain = input_iter | JsonOutputFunctionsParser()
assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON
async def test_partial_functions_json_output_parser_diff_async() -> None:
async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": token}}
)
chain = input_iter | JsonOutputFunctionsParser(diff=True)
assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON_DIFF
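# Illustrative sketch (assumes `parse_partial_json` is importable from
# langchain_core.utils.json): the (partial, completed) pairs in
# TEST_CASES_PARTIAL describe how truncated JSON fragments are expected to be
# completed before parsing.
def _example_partial_json() -> None:
    import json
    from langchain_core.utils.json import parse_partial_json
    for partial, completed in TEST_CASES_PARTIAL:
        assert parse_partial_json(partial) == json.loads(completed)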
|
0 | lc_public_repos/langchain/libs/langchain/tests/unit_tests | lc_public_repos/langchain/libs/langchain/tests/unit_tests/output_parsers/test_fix.py | from datetime import datetime as dt
from typing import Any, Callable, Dict, Optional, TypeVar
import pytest
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.output_parsers.datetime import DatetimeOutputParser
from langchain.output_parsers.fix import BaseOutputParser, OutputFixingParser
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class SuccessfulParseAfterRetries(BaseOutputParser[str]):
parse_count: int = 0 # Number of times parse has been called
    attempt_count_before_success: int  # Number of times to fail before succeeding
def parse(self, *args: Any, **kwargs: Any) -> str:
self.parse_count += 1
        if self.parse_count <= self.attempt_count_before_success:
raise OutputParserException("error")
return "parsed"
class SuccessfulParseAfterRetriesWithGetFormatInstructions(SuccessfulParseAfterRetries):
def get_format_instructions(self) -> str:
return "instructions"
@pytest.mark.parametrize(
"base_parser",
[
        SuccessfulParseAfterRetries(attempt_count_before_success=5),
SuccessfulParseAfterRetriesWithGetFormatInstructions(
            attempt_count_before_success=5
), # noqa: E501
],
)
def test_output_fixing_parser_parse(
base_parser: SuccessfulParseAfterRetries,
) -> None:
# preparation
n: int = (
        base_parser.attempt_count_before_success
) # Success on the (n+1)-th attempt # noqa
    base_parser = SuccessfulParseAfterRetries(attempt_count_before_success=n)
parser = OutputFixingParser[str](
parser=base_parser,
max_retries=n, # n times to retry, that is, (n+1) times call
retry_chain=RunnablePassthrough(),
legacy=False,
)
# test
assert parser.parse("completion") == "parsed"
assert base_parser.parse_count == n + 1
# TODO: test whether "instructions" is passed to the retry_chain
def test_output_fixing_parser_from_llm() -> None:
def fake_llm(prompt: str) -> AIMessage:
return AIMessage("2024-07-08T00:00:00.000000Z")
llm = RunnableLambda(fake_llm)
n = 1
parser = OutputFixingParser.from_llm(
llm=llm,
parser=DatetimeOutputParser(),
max_retries=n,
)
assert parser.parse("not a date")
@pytest.mark.parametrize(
"base_parser",
[
        SuccessfulParseAfterRetries(attempt_count_before_success=5),
SuccessfulParseAfterRetriesWithGetFormatInstructions(
            attempt_count_before_success=5
), # noqa: E501
],
)
async def test_output_fixing_parser_aparse(
base_parser: SuccessfulParseAfterRetries,
) -> None:
n: int = (
        base_parser.attempt_count_before_success
) # Success on the (n+1)-th attempt # noqa
    base_parser = SuccessfulParseAfterRetries(attempt_count_before_success=n)
parser = OutputFixingParser[str](
parser=base_parser,
max_retries=n, # n times to retry, that is, (n+1) times call
retry_chain=RunnablePassthrough(),
legacy=False,
)
assert (await parser.aparse("completion")) == "parsed"
assert base_parser.parse_count == n + 1
# TODO: test whether "instructions" is passed to the retry_chain
def test_output_fixing_parser_parse_fail() -> None:
n: int = 5 # Success on the (n+1)-th attempt
    base_parser = SuccessfulParseAfterRetries(attempt_count_before_success=n)
parser = OutputFixingParser[str](
parser=base_parser,
max_retries=n - 1, # n-1 times to retry, that is, n times call
retry_chain=RunnablePassthrough(),
legacy=False,
)
with pytest.raises(OutputParserException):
parser.parse("completion")
assert base_parser.parse_count == n
async def test_output_fixing_parser_aparse_fail() -> None:
n: int = 5 # Success on the (n+1)-th attempt
    base_parser = SuccessfulParseAfterRetries(attempt_count_before_success=n)
parser = OutputFixingParser[str](
parser=base_parser,
max_retries=n - 1, # n-1 times to retry, that is, n times call
retry_chain=RunnablePassthrough(),
legacy=False,
)
with pytest.raises(OutputParserException):
await parser.aparse("completion")
assert base_parser.parse_count == n
@pytest.mark.parametrize(
"base_parser",
[
BooleanOutputParser(),
DatetimeOutputParser(),
],
)
def test_output_fixing_parser_output_type(
base_parser: BaseOutputParser,
) -> None:
parser = OutputFixingParser[str](
parser=base_parser, retry_chain=RunnablePassthrough()
)
assert parser.OutputType is base_parser.OutputType
@pytest.mark.parametrize(
"input,base_parser,retry_chain,expected",
[
(
"2024/07/08",
DatetimeOutputParser(),
NAIVE_FIX_PROMPT | RunnableLambda(lambda _: "2024-07-08T00:00:00.000000Z"),
dt(2024, 7, 8),
),
(
# Case: retry_chain.InputType does not have 'instructions' key
"2024/07/08",
DatetimeOutputParser(),
PromptTemplate.from_template("{completion}\n{error}")
| RunnableLambda(lambda _: "2024-07-08T00:00:00.000000Z"),
dt(2024, 7, 8),
),
],
)
def test_output_fixing_parser_parse_with_retry_chain(
input: str,
base_parser: BaseOutputParser[T],
retry_chain: Runnable[Dict[str, Any], str],
expected: T,
) -> None:
# NOTE: get_format_instructions of some parsers behave randomly
instructions = base_parser.get_format_instructions()
object.__setattr__(base_parser, "get_format_instructions", lambda: instructions)
# test
parser = OutputFixingParser[str](
parser=base_parser,
retry_chain=retry_chain,
legacy=False,
)
assert parser.parse(input) == expected
@pytest.mark.parametrize(
"input,base_parser,retry_chain,expected",
[
(
"2024/07/08",
DatetimeOutputParser(),
NAIVE_FIX_PROMPT | RunnableLambda(lambda _: "2024-07-08T00:00:00.000000Z"),
dt(2024, 7, 8),
),
(
# Case: retry_chain.InputType does not have 'instructions' key
"2024/07/08",
DatetimeOutputParser(),
PromptTemplate.from_template("{completion}\n{error}")
| RunnableLambda(lambda _: "2024-07-08T00:00:00.000000Z"),
dt(2024, 7, 8),
),
],
)
async def test_output_fixing_parser_aparse_with_retry_chain(
input: str,
base_parser: BaseOutputParser[T],
retry_chain: Runnable[Dict[str, Any], str],
expected: T,
) -> None:
instructions = base_parser.get_format_instructions()
object.__setattr__(base_parser, "get_format_instructions", lambda: instructions)
# test
parser = OutputFixingParser[str](
parser=base_parser,
retry_chain=retry_chain,
legacy=False,
)
assert (await parser.aparse(input)) == expected
def _extract_exception(
func: Callable[..., Any],
*args: Any,
**kwargs: Any,
) -> Optional[Exception]:
try:
func(*args, **kwargs)
except Exception as e:
return e
return None
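# Illustrative sketch: _extract_exception captures a parser failure for later
# inspection instead of asserting on it directly with pytest.raises.
def _example_extract_exception() -> None:
    error = _extract_exception(DatetimeOutputParser().parse, "not a date")
    assert isinstance(error, OutputParserException)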
|