| sample_id (string, length 21–196) | text (string, length 105–936k) | metadata (dict) | category (6 classes) |
|---|---|---|---|
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/openapi/prompt.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.prompt import (
DESCRIPTION,
OPENAPI_PREFIX,
OPENAPI_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DESCRIPTION": "langchain_community.agent_toolkits.openapi.prompt",
"OPENAPI_PREFIX": "langchain_community.agent_toolkits.openapi.prompt",
"OPENAPI_SUFFIX": "langchain_community.agent_toolkits.openapi.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = ["DESCRIPTION", "OPENAPI_PREFIX", "OPENAPI_SUFFIX"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/openapi/prompt.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/openapi/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.toolkit import (
OpenAPIToolkit,
RequestsToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RequestsToolkit": "langchain_community.agent_toolkits.openapi.toolkit",
"OpenAPIToolkit": "langchain_community.agent_toolkits.openapi.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"OpenAPIToolkit",
"RequestsToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/openapi/toolkit.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/playwright/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"PlayWrightBrowserToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/playwright/toolkit.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"create_pbi_agent",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/base.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/prompt.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
POWERBI_PREFIX,
POWERBI_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"POWERBI_CHAT_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_CHAT_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"POWERBI_CHAT_PREFIX",
"POWERBI_CHAT_SUFFIX",
"POWERBI_PREFIX",
"POWERBI_SUFFIX",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/prompt.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"PowerBIToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/powerbi/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/slack/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"SlackToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/slack/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"create_spark_sql_agent",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/base.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/prompt.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.prompt import (
SQL_PREFIX,
SQL_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQL_PREFIX": "langchain_community.agent_toolkits.spark_sql.prompt",
"SQL_SUFFIX": "langchain_community.agent_toolkits.spark_sql.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = ["SQL_PREFIX", "SQL_SUFFIX"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/prompt.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"SparkSQLToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/sql/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.base import create_sql_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"create_sql_agent": "langchain_community.agent_toolkits.sql.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"create_sql_agent",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/sql/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/sql/prompt.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.prompt import (
SQL_FUNCTIONS_SUFFIX,
SQL_PREFIX,
SQL_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQL_PREFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_FUNCTIONS_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = ["SQL_FUNCTIONS_SUFFIX", "SQL_PREFIX", "SQL_SUFFIX"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/sql/prompt.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/sql/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"SQLDatabaseToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/sql/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/steam/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SteamToolkit": "langchain_community.agent_toolkits.steam.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = [
"SteamToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/steam/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py | """VectorStore agent."""
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_classic.agents.agent import AgentExecutor
from langchain_classic.agents.agent_toolkits.vectorstore.prompt import (
PREFIX,
ROUTER_PREFIX,
)
from langchain_classic.agents.agent_toolkits.vectorstore.toolkit import (
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain_classic.agents.mrkl.base import ZeroShotAgent
from langchain_classic.chains.llm import LLMChain
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This function will continue to be supported, but it is recommended for new "
        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
        "full-featured framework for building agents, including support for "
        "tool-calling, persistence of state, and human-in-the-loop workflows. "
        "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html "  # noqa: E501
        "Read more here on how to create agents that query vector stores: "
        "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
    ),
)
def create_vectorstore_agent(
    llm: BaseLanguageModel,
    toolkit: VectorStoreToolkit,
    callback_manager: BaseCallbackManager | None = None,
    prefix: str = PREFIX,
    verbose: bool = False,  # noqa: FBT001,FBT002
    agent_executor_kwargs: dict[str, Any] | None = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a VectorStore agent from an LLM and tools.

    !!! note
        This class is deprecated. See below for a replacement that uses tool
        calling methods and LangGraph. Install LangGraph with:

        ```bash
        pip install -U langgraph
        ```

        ```python
        from langchain_core.tools import create_retriever_tool
        from langchain_core.vectorstores import InMemoryVectorStore
        from langchain_openai import ChatOpenAI, OpenAIEmbeddings
        from langgraph.prebuilt import create_react_agent

        model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

        vector_store = InMemoryVectorStore.from_texts(
            [
                "Dogs are great companions, known for their loyalty and friendliness.",
                "Cats are independent pets that often enjoy their own space.",
            ],
            OpenAIEmbeddings(),
        )

        tool = create_retriever_tool(
            vector_store.as_retriever(),
            "pet_information_retriever",
            "Fetches information about pets.",
        )

        agent = create_react_agent(model, [tool])

        for step in agent.stream(
            {"messages": [("human", "What are dogs known for?")]},
            stream_mode="values",
        ):
            step["messages"][-1].pretty_print()
        ```

    Args:
        llm: LLM that will be used by the agent
        toolkit: Set of tools for the agent
        callback_manager: Object to handle the callback
        prefix: The prefix prompt for the agent.
        verbose: If you want to see the content of the scratchpad.
        agent_executor_kwargs: If there is any other parameter you want to send to the
            agent.
        kwargs: Additional named parameters to pass to the `ZeroShotAgent`.

    Returns:
        Returns a callable AgentExecutor object.
        Either you can call it or use run method with the query to get the response.
    """
    toolkit_tools = toolkit.get_tools()
    # Build the zero-shot prompt from the toolkit's tools and wrap it in a chain.
    chain = LLMChain(
        llm=llm,
        prompt=ZeroShotAgent.create_prompt(toolkit_tools, prefix=prefix),
        callback_manager=callback_manager,
    )
    allowed = [t.name for t in toolkit_tools]
    zero_shot = ZeroShotAgent(llm_chain=chain, allowed_tools=allowed, **kwargs)
    extra_executor_kwargs = agent_executor_kwargs or {}
    return AgentExecutor.from_agent_and_tools(
        agent=zero_shot,
        tools=toolkit_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **extra_executor_kwargs,
    )
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This function will continue to be supported, but it is recommended for new "
        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
        "full-featured framework for building agents, including support for "
        "tool-calling, persistence of state, and human-in-the-loop workflows. "
        "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html "  # noqa: E501
        "Read more here on how to create agents that query vector stores: "
        "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
    ),
)
def create_vectorstore_router_agent(
    llm: BaseLanguageModel,
    toolkit: VectorStoreRouterToolkit,
    callback_manager: BaseCallbackManager | None = None,
    prefix: str = ROUTER_PREFIX,
    verbose: bool = False,  # noqa: FBT001,FBT002
    agent_executor_kwargs: dict[str, Any] | None = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a VectorStore router agent from an LLM and tools.

    !!! note
        This class is deprecated. See below for a replacement that uses tool calling
        methods and LangGraph. Install LangGraph with:

        ```bash
        pip install -U langgraph
        ```

        ```python
        from langchain_core.tools import create_retriever_tool
        from langchain_core.vectorstores import InMemoryVectorStore
        from langchain_openai import ChatOpenAI, OpenAIEmbeddings
        from langgraph.prebuilt import create_react_agent

        model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

        pet_vector_store = InMemoryVectorStore.from_texts(
            [
                "Dogs are great companions, known for their loyalty and friendliness.",
                "Cats are independent pets that often enjoy their own space.",
            ],
            OpenAIEmbeddings(),
        )

        food_vector_store = InMemoryVectorStore.from_texts(
            [
                "Carrots are orange and delicious.",
                "Apples are red and delicious.",
            ],
            OpenAIEmbeddings(),
        )

        tools = [
            create_retriever_tool(
                pet_vector_store.as_retriever(),
                "pet_information_retriever",
                "Fetches information about pets.",
            ),
            create_retriever_tool(
                food_vector_store.as_retriever(),
                "food_information_retriever",
                "Fetches information about food.",
            ),
        ]

        agent = create_react_agent(model, tools)

        for step in agent.stream(
            {"messages": [("human", "Tell me about carrots.")]},
            stream_mode="values",
        ):
            step["messages"][-1].pretty_print()
        ```

    Args:
        llm: LLM that will be used by the agent
        toolkit: Set of tools for the agent which have routing capability with multiple
            vector stores
        callback_manager: Object to handle the callback
        prefix: The prefix prompt for the router agent.
            If not provided uses default `ROUTER_PREFIX`.
        verbose: If you want to see the content of the scratchpad.
        agent_executor_kwargs: If there is any other parameter you want to send to the
            agent.
        kwargs: Additional named parameters to pass to the `ZeroShotAgent`.

    Returns:
        Returns a callable `AgentExecutor` object.
        Either you can call it or use run method with the query to get the response.
    """
    router_tools = toolkit.get_tools()
    # Wrap the zero-shot routing prompt in an LLM chain.
    chain = LLMChain(
        llm=llm,
        prompt=ZeroShotAgent.create_prompt(router_tools, prefix=prefix),
        callback_manager=callback_manager,
    )
    allowed = [t.name for t in router_tools]
    router_agent = ZeroShotAgent(llm_chain=chain, allowed_tools=allowed, **kwargs)
    extra_executor_kwargs = agent_executor_kwargs or {}
    return AgentExecutor.from_agent_and_tools(
        agent=router_agent,
        tools=router_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **extra_executor_kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/base.py",
"license": "MIT License",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/zapier/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/zapier/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/chat/base.py | from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from typing_extensions import override
from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain_classic.agents.agent import Agent, AgentOutputParser
from langchain_classic.agents.chat.output_parser import ChatOutputParser
from langchain_classic.agents.chat.prompt import (
FORMAT_INSTRUCTIONS,
HUMAN_MESSAGE,
SYSTEM_MESSAGE_PREFIX,
SYSTEM_MESSAGE_SUFFIX,
)
from langchain_classic.agents.utils import validate_tools_single_input
from langchain_classic.chains.llm import LLMChain
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ChatAgent(Agent):
    """Chat Agent."""

    output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser)
    """Output parser for the agent."""

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    def _construct_scratchpad(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> str:
        """Render prior steps, framed so the model treats them as unseen work."""
        scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(scratchpad, str):
            msg = "agent_scratchpad should be of type string."
            raise ValueError(msg)  # noqa: TRY004
        if not scratchpad:
            return scratchpad
        return (
            f"This was your previous work "
            f"(but I haven't seen any of it! I only see what "
            f"you return as final answer):\n{scratchpad}"
        )

    @classmethod
    @override
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ChatOutputParser()

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        super()._validate_tools(tools)
        validate_tools_single_input(class_name=cls.__name__, tools=tools)

    @property
    def _stop(self) -> list[str]:
        return ["Observation:"]

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
    ) -> BasePromptTemplate:
        """Create a prompt from a list of tools.

        Args:
            tools: A list of tools.
            system_message_prefix: The system message prefix.
            system_message_suffix: The system message suffix.
            human_message: The `HumanMessage`.
            format_instructions: The format instructions.
            input_variables: The input variables.

        Returns:
            A prompt template.
        """
        tool_descriptions = "\n".join(
            f"{tool.name}: {tool.description}" for tool in tools
        )
        joined_names = ", ".join(tool.name for tool in tools)
        # The format instructions template expects the tool names to be filled in.
        instructions = format_instructions.format(tool_names=joined_names)
        system_template = "\n\n".join(
            [
                system_message_prefix,
                tool_descriptions,
                instructions,
                system_message_suffix,
            ]
        )
        prompt_messages = [
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template(human_message),
        ]
        variables = (
            ["input", "agent_scratchpad"]
            if input_variables is None
            else input_variables
        )
        return ChatPromptTemplate(input_variables=variables, messages=prompt_messages)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: The language model.
            tools: A list of tools.
            callback_manager: The callback manager.
            output_parser: The output parser.
            system_message_prefix: The system message prefix.
            system_message_suffix: The system message suffix.
            human_message: The `HumanMessage`.
            format_instructions: The format instructions.
            input_variables: The input variables.
            kwargs: Additional keyword arguments.

        Returns:
            An agent.
        """
        cls._validate_tools(tools)
        chain = LLMChain(
            llm=llm,
            prompt=cls.create_prompt(
                tools,
                system_message_prefix=system_message_prefix,
                system_message_suffix=system_message_suffix,
                human_message=human_message,
                format_instructions=format_instructions,
                input_variables=input_variables,
            ),
            callback_manager=callback_manager,
        )
        parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=chain,
            allowed_tools=[tool.name for tool in tools],
            output_parser=parser,
            **kwargs,
        )

    @property
    def _agent_type(self) -> str:
        raise ValueError
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/chat/base.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/chat/output_parser.py | import json
import re
from re import Pattern
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_classic.agents.agent import AgentOutputParser
from langchain_classic.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
    """Output parser for the chat agent."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions"""

    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
    """Regex pattern to parse the output."""

    def get_format_instructions(self) -> str:
        """Returns formatting instructions for the given output parser."""
        return self.format_instructions

    def parse(self, text: str) -> AgentAction | AgentFinish:
        """Parse the output from the agent into an AgentAction or AgentFinish object.

        Args:
            text: The text to parse.

        Returns:
            An AgentAction or AgentFinish object.

        Raises:
            OutputParserException: If the output could not be parsed.
            ValueError: If the action could not be found.
        """
        has_final_answer = FINAL_ANSWER_ACTION in text
        try:
            match = self.pattern.search(text)
            if match is None:
                # Fast fail to parse Final Answer.
                msg = "action not found"
                raise ValueError(msg)
            payload = json.loads(match.group(1).strip())
            # NOTE: when both a final answer and an action are present, this
            # exception is itself caught below and, because has_final_answer is
            # True, the final answer wins (original behavior preserved).
            if has_final_answer and "action" in payload:
                msg = (
                    "Parsing LLM output produced a final answer "
                    f"and a parse-able action: {text}"
                )
                raise OutputParserException(msg)
            return AgentAction(
                payload["action"],
                payload.get("action_input", {}),
                text,
            )
        except Exception as exc:
            if not has_final_answer:
                msg = f"Could not parse LLM output: {text}"
                raise OutputParserException(msg) from exc
            final = text.rsplit(FINAL_ANSWER_ACTION, maxsplit=1)[-1].strip()
            return AgentFinish({"output": final}, text)

    @property
    def _type(self) -> str:
        return "chat"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/chat/output_parser.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/conversational/base.py | """An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import BaseTool
from pydantic import Field
from typing_extensions import override
from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain_classic.agents.agent import Agent, AgentOutputParser
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.conversational.output_parser import ConvoOutputParser
from langchain_classic.agents.conversational.prompt import (
FORMAT_INSTRUCTIONS,
PREFIX,
SUFFIX,
)
from langchain_classic.agents.utils import validate_tools_single_input
from langchain_classic.chains import LLMChain
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ConversationalAgent(Agent):
    """Deprecated agent that converses with the user and may call tools."""

    ai_prefix: str = "AI"
    """Prefix to use before AI output."""

    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
    """Output parser for the agent."""

    @classmethod
    @override
    def _get_default_output_parser(
        cls,
        ai_prefix: str = "AI",
        **kwargs: Any,
    ) -> AgentOutputParser:
        # The parser needs the AI prefix so it can recognize final answers.
        return ConvoOutputParser(ai_prefix=ai_prefix)

    @property
    def _agent_type(self) -> str:
        """Return identifier of agent type."""
        return AgentType.CONVERSATIONAL_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix prepended to every tool observation."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix prepended to every LLM call."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: list[str] | None = None,
    ) -> PromptTemplate:
        """Assemble the zero-shot-style prompt for this agent.

        Args:
            tools: Tools the agent will have access to; their names and
                descriptions are rendered into the prompt.
            prefix: Text placed before the tool listing.
            suffix: Text placed after the format instructions.
            format_instructions: Instructions on how to use the tools.
            ai_prefix: String used before AI output.
            human_prefix: String used before human output.
            input_variables: Variables the final prompt will expect.
                Defaults to `["input", "chat_history", "agent_scratchpad"]`.

        Returns:
            A PromptTemplate assembled from the pieces above.
        """
        rendered_tools = "\n".join(
            f"> {tool.name}: {tool.description}" for tool in tools
        )
        joined_names = ", ".join(tool.name for tool in tools)
        filled_instructions = format_instructions.format(
            tool_names=joined_names,
            ai_prefix=ai_prefix,
            human_prefix=human_prefix,
        )
        # Sections are separated by blank lines, matching the classic layout.
        template = "\n\n".join(
            [prefix, rendered_tools, filled_instructions, suffix],
        )
        variables = (
            ["input", "chat_history", "agent_scratchpad"]
            if input_variables is None
            else input_variables
        )
        return PromptTemplate(template=template, input_variables=variables)

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        super()._validate_tools(tools)
        validate_tools_single_input(cls.__name__, tools)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: list[str] | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Build a ConversationalAgent from an LLM and a tool list.

        Args:
            llm: The language model to use.
            tools: Tools the agent may call.
            callback_manager: Callback manager passed to the LLM chain.
            output_parser: Parser for the model's output. Falls back to the
                default parser configured with `ai_prefix`.
            prefix: Prompt text before the tool listing.
            suffix: Prompt text after the format instructions.
            format_instructions: Instructions on how to use the tools.
            ai_prefix: String used before AI output.
            human_prefix: String used before human output.
            input_variables: Variables the final prompt will expect.
            **kwargs: Extra keyword arguments forwarded to the agent.

        Returns:
            The constructed agent.
        """
        cls._validate_tools(tools)
        chain = LLMChain(
            llm=llm,
            prompt=cls.create_prompt(
                tools,
                ai_prefix=ai_prefix,
                human_prefix=human_prefix,
                prefix=prefix,
                suffix=suffix,
                format_instructions=format_instructions,
                input_variables=input_variables,
            ),
            callback_manager=callback_manager,
        )
        parser = output_parser or cls._get_default_output_parser(
            ai_prefix=ai_prefix,
        )
        return cls(
            llm_chain=chain,
            allowed_tools=[tool.name for tool in tools],
            ai_prefix=ai_prefix,
            output_parser=parser,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/conversational/base.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/conversational/output_parser.py | import re
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_classic.agents.agent import AgentOutputParser
from langchain_classic.agents.conversational.prompt import FORMAT_INSTRUCTIONS
class ConvoOutputParser(AgentOutputParser):
    """Parse LLM output produced by the conversational agent."""

    ai_prefix: str = "AI"
    """Prefix to use before AI output."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions"""

    def get_format_instructions(self) -> str:
        """Return the formatting instructions used by this parser."""
        return self.format_instructions

    def parse(self, text: str) -> AgentAction | AgentFinish:
        """Turn raw model text into an AgentAction or AgentFinish.

        Args:
            text: The text to parse.

        Returns:
            An AgentAction or AgentFinish object.
        """
        # The presence of "<ai_prefix>:" marks a direct reply to the user.
        marker = f"{self.ai_prefix}:"
        if marker in text:
            final = text.rsplit(marker, maxsplit=1)[-1].strip()
            return AgentFinish({"output": final}, text)
        found = re.search(
            r"Action: (.*?)[\n]*Action Input: ([\s\S]*)",
            text,
            re.DOTALL,
        )
        if found is None:
            msg = f"Could not parse LLM output: `{text}`"
            raise OutputParserException(msg)
        tool_name = found.group(1)
        tool_input = found.group(2)
        return AgentAction(tool_name.strip(), tool_input.strip(" ").strip('"'), text)

    @property
    def _type(self) -> str:
        return "conversational"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/conversational/output_parser.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/conversational_chat/base.py | """An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from typing_extensions import override
from langchain_classic.agents.agent import Agent, AgentOutputParser
from langchain_classic.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain_classic.agents.conversational_chat.prompt import (
PREFIX,
SUFFIX,
TEMPLATE_TOOL_RESPONSE,
)
from langchain_classic.agents.utils import validate_tools_single_input
from langchain_classic.chains import LLMChain
@deprecated("0.1.0", alternative="create_json_chat_agent", removal="1.0")
class ConversationalChatAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
    """Output parser for the agent."""

    template_tool_response: str = TEMPLATE_TOOL_RESPONSE
    """Template for the tool response."""

    @classmethod
    @override
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ConvoOutputParser()

    @property
    def _agent_type(self) -> str:
        # No serialization identifier is defined for this agent type.
        raise NotImplementedError

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with.

        Returns:
            "Observation: "
        """
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with.

        Returns:
            "Thought: "
        """
        return "Thought:"

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        # This agent can only drive tools that take a single string input.
        super()._validate_tools(tools)
        validate_tools_single_input(cls.__name__, tools)

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: list[str] | None = None,
        output_parser: BaseOutputParser | None = None,
    ) -> BasePromptTemplate:
        """Create a prompt for the agent.

        Args:
            tools: The tools to use.
            system_message: The `SystemMessage` to use.
            human_message: The `HumanMessage` to use.
            input_variables: The input variables to use.
            output_parser: The output parser to use.

        Returns:
            A `PromptTemplate`.
        """
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools],
        )
        tool_names = ", ".join([tool.name for tool in tools])
        _output_parser = output_parser or cls._get_default_output_parser()
        # Two-stage formatting: first inject the parser's format instructions
        # into the human message, then fill the tool placeholders.
        # NOTE(review): the second .format() also interprets any literal
        # braces left in the parser's format instructions — order matters.
        format_instructions = human_message.format(
            format_instructions=_output_parser.get_format_instructions(),
        )
        final_prompt = format_instructions.format(
            tool_names=tool_names,
            tools=tool_strings,
        )
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        messages = [
            SystemMessagePromptTemplate.from_template(system_message),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template(final_prompt),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

    def _construct_scratchpad(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> list[BaseMessage]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts: list[BaseMessage] = []
        for action, observation in intermediate_steps:
            # Each prior step is replayed as an AI message (the agent's own
            # action) followed by the observation wrapped in the tool-response
            # template and sent back as a human message.
            thoughts.append(AIMessage(content=action.log))
            human_message = HumanMessage(
                content=self.template_tool_response.format(observation=observation),
            )
            thoughts.append(human_message)
        return thoughts

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: list[str] | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: The language model to use.
            tools: A list of tools to use.
            callback_manager: The callback manager to use.
            output_parser: The output parser to use.
            system_message: The `SystemMessage` to use.
            human_message: The `HumanMessage` to use.
            input_variables: The input variables to use.
            **kwargs: Any additional arguments.

        Returns:
            An agent.
        """
        cls._validate_tools(tools)
        _output_parser = output_parser or cls._get_default_output_parser()
        prompt = cls.create_prompt(
            tools,
            system_message=system_message,
            human_message=human_message,
            input_variables=input_variables,
            output_parser=_output_parser,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/conversational_chat/base.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/conversational_chat/output_parser.py | from __future__ import annotations
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain_classic.agents import AgentOutputParser
from langchain_classic.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
# Define a class that parses output for conversational agents
class ConvoOutputParser(AgentOutputParser):
    """Output parser for the conversational agent."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions"""

    def get_format_instructions(self) -> str:
        """Return the formatting instructions for this parser."""
        return self.format_instructions

    def parse(self, text: str) -> AgentAction | AgentFinish:
        """Attempt to parse the given text into an AgentAction or AgentFinish.

        Raises:
            OutputParserException if parsing fails.
        """
        try:
            # The model is expected to reply with a JSON blob (possibly
            # wrapped in a markdown code fence).
            payload = parse_json_markdown(text)
            if "action" not in payload or "action_input" not in payload:
                # Re-raised below, wrapped in the generic parse error.
                msg = f"Missing 'action' or 'action_input' in LLM output: {text}"
                raise OutputParserException(msg)
            action = payload["action"]
            action_input = payload["action_input"]
            # "Final Answer" signals that the agent is replying to the user.
            if action == "Final Answer":
                return AgentFinish({"output": action_input}, text)
            return AgentAction(action, action_input, text)
        except Exception as e:
            # Malformed JSON, missing keys, etc. — surface a uniform error.
            msg = f"Could not parse LLM output: {text}"
            raise OutputParserException(msg) from e

    @property
    def _type(self) -> str:
        return "conversational_chat"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/conversational_chat/output_parser.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/format_scratchpad/tools.py | import json
import logging
from collections.abc import Sequence
from typing import Any
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain_classic.agents.output_parsers.tools import ToolAgentAction
_logger = logging.getLogger(__name__)
def _create_tool_message(
    agent_action: ToolAgentAction,
    observation: Any,
) -> ToolMessage:
    """Build the ToolMessage corresponding to one tool invocation.

    Args:
        agent_action: the tool invocation request from the agent.
        observation: the result of the tool invocation.

    Returns:
        ToolMessage that corresponds to the original tool invocation.
    """
    if isinstance(observation, str):
        content = observation
    else:
        try:
            # Prefer a JSON rendering of structured observations.
            content = json.dumps(observation, ensure_ascii=False)
        except TypeError:
            # Not JSON-serializable: fall back to str().
            content = str(observation)
        except Exception:
            _logger.exception("Unexpected error converting observation to string.")
            content = str(observation)
    return ToolMessage(
        tool_call_id=agent_action.tool_call_id,
        content=content,
        additional_kwargs={"name": agent_action.tool},
    )
def format_to_tool_messages(
    intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into `ToolMessage` objects.

    Args:
        intermediate_steps: Steps the LLM has taken to date, along with observations.

    Returns:
        list of messages to send to the LLM for the next prediction.
    """
    messages: list[BaseMessage] = []
    for step_action, step_output in intermediate_steps:
        if not isinstance(step_action, ToolAgentAction):
            # Plain actions are replayed as a single AI message.
            messages.append(AIMessage(content=step_action.log))
            continue
        candidates = [
            *list(step_action.message_log),
            _create_tool_message(step_action, step_output),
        ]
        # Filter against the messages accumulated *before* this step only
        # (duplicates within `candidates` themselves are kept, matching the
        # original comprehension-then-extend semantics).
        fresh = [candidate for candidate in candidates if candidate not in messages]
        messages.extend(fresh)
    return messages
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/format_scratchpad/tools.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/json_chat/base.py | from collections.abc import Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer, render_text_description
from langchain_classic.agents.format_scratchpad import format_log_to_messages
from langchain_classic.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE
from langchain_classic.agents.output_parsers import JSONAgentOutputParser
def create_json_chat_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    stop_sequence: bool | list[str] = True,  # noqa: FBT001,FBT002
    tools_renderer: ToolsRenderer = render_text_description,
    template_tool_response: str = TEMPLATE_TOOL_RESPONSE,
) -> Runnable:
    r"""Create an agent that uses JSON to format its logic, build for Chat Models.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more.
        stop_sequence: bool or list of str.
            If `True`, adds a stop token of "Observation:" to avoid hallucinates.
            If `False`, does not add a stop token.
            If a list of str, uses the provided list as the stop tokens.

            You may to set this to False if the LLM you are using does not support stop
            sequences.
        tools_renderer: This controls how the tools are converted into a string and
            then passed into the LLM.
        template_tool_response: Template prompt that uses the tool response
            (observation) to make the LLM generate the next action to take.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.

    Raises:
        ValueError: If the prompt is missing required variables.
        ValueError: If the template_tool_response is missing
            the required variable 'observation'.

    Example:
        ```python
        from langchain_classic import hub
        from langchain_openai import ChatOpenAI
        from langchain_classic.agents import AgentExecutor, create_json_chat_agent

        prompt = hub.pull("hwchase17/react-chat-json")
        model = ChatOpenAI()
        tools = ...

        agent = create_json_chat_agent(model, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools)

        agent_executor.invoke({"input": "hi"})

        # Using with chat history
        from langchain_core.messages import AIMessage, HumanMessage

        agent_executor.invoke(
            {
                "input": "what's my name?",
                "chat_history": [
                    HumanMessage(content="hi! my name is bob"),
                    AIMessage(content="Hello Bob! How can I assist you today?"),
                ],
            }
        )
        ```

    Prompt:
        The prompt must have input keys:
            * `tools`: contains descriptions and arguments for each tool.
            * `tool_names`: contains all tool names.
            * `agent_scratchpad`: must be a MessagesPlaceholder. Contains previous
              agent actions and tool outputs as messages.

        Here's an example:

        ```python
        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

        system = '''Assistant is a large language model trained by OpenAI.

        Assistant is designed to be able to assist with a wide range of tasks, from answering
        simple questions to providing in-depth explanations and discussions on a wide range of
        topics. As a language model, Assistant is able to generate human-like text based on
        the input it receives, allowing it to engage in natural-sounding conversations and
        provide responses that are coherent and relevant to the topic at hand.

        Assistant is constantly learning and improving, and its capabilities are constantly
        evolving. It is able to process and understand large amounts of text, and can use this
        knowledge to provide accurate and informative responses to a wide range of questions.
        Additionally, Assistant is able to generate its own text based on the input it
        receives, allowing it to engage in discussions and provide explanations and
        descriptions on a wide range of topics.

        Overall, Assistant is a powerful system that can help with a wide range of tasks
        and provide valuable insights and information on a wide range of topics. Whether
        you need help with a specific question or just want to have a conversation about
        a particular topic, Assistant is here to assist.'''

        human = '''TOOLS
        ------
        Assistant can ask the user to use tools to look up information that may be helpful in
        answering the users original question. The tools the human can use are:

        {tools}

        RESPONSE FORMAT INSTRUCTIONS
        ----------------------------

        When responding to me, please output a response in one of two formats:

        **Option 1:**
        Use this if you want the human to use a tool.
        Markdown code snippet formatted in the following schema:

        ```json
        {{
            "action": string, \\\\ The action to take. Must be one of {tool_names}
            "action_input": string \\\\ The input to the action
        }}
        ```

        **Option #2:**
        Use this if you want to respond directly to the human. Markdown code snippet formatted
        in the following schema:

        ```json
        {{
            "action": "Final Answer",
            "action_input": string \\\\ You should put what you want to return to use here
        }}
        ```

        USER'S INPUT
        --------------------
        Here is the user's input (remember to respond with a markdown code snippet of a json
        blob with a single action, and NOTHING else):

        {input}'''

        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system),
                MessagesPlaceholder("chat_history", optional=True),
                ("human", human),
                MessagesPlaceholder("agent_scratchpad"),
            ]
        )
        ```
    """  # noqa: E501
    # Fail early if the prompt is missing placeholders the agent depends on
    # (a partial variable satisfies the requirement too).
    missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables),
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)
    if "{observation}" not in template_tool_response:
        msg = "Template tool response missing required variable 'observation'"
        raise ValueError(msg)
    # Pre-fill the tool descriptions and names; these are fixed per agent.
    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
        tool_names=", ".join([t.name for t in tools]),
    )
    if stop_sequence:
        # True selects the default stop token that keeps the model from
        # hallucinating its own "Observation" lines; a list is used verbatim.
        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
        llm_to_use = llm.bind(stop=stop)
    else:
        llm_to_use = llm
    # Pipeline: inject the scratchpad (prior steps as messages), render the
    # prompt, call the LLM, and parse the JSON action from its reply.
    return (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_log_to_messages(
                x["intermediate_steps"],
                template_tool_response=template_tool_response,
            ),
        )
        | prompt
        | llm_to_use
        | JSONAgentOutputParser()
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/json_chat/base.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/loading.py | """Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_classic.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain_classic.agents.types import AGENT_TO_CLASS
from langchain_classic.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__name__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
    config: dict,
    llm: BaseLanguageModel,
    tools: list[Tool],
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Instantiate an agent class from a config dict plus an LLM and tools."""
    agent_type = config.pop("_type")
    if agent_type not in AGENT_TO_CLASS:
        msg = f"Loading {agent_type} agent not supported"
        raise ValueError(msg)
    # Explicit kwargs take precedence over values from the config dict.
    merged = {**config, **kwargs}
    return AGENT_TO_CLASS[agent_type].from_llm_and_tools(llm, tools, **merged)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
    config: dict,
    llm: BaseLanguageModel | None = None,
    tools: list[Tool] | None = None,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Build an agent from a configuration dictionary.

    Args:
        config: Config dict to load agent from.
        llm: Language model to use as the agent.
        tools: List of tools this agent has access to.
        kwargs: Additional keyword arguments passed to the agent executor.

    Returns:
        An agent executor.

    Raises:
        ValueError: If agent type is not specified in the config.
    """
    if "_type" not in config:
        msg = "Must specify an agent Type in config"
        raise ValueError(msg)
    # Delegate to the llm-and-tools path when the config requests it.
    if config.pop("load_from_llm_and_tools", False):
        if llm is None:
            msg = (
                "If `load_from_llm_and_tools` is set to True, then LLM must be provided"
            )
            raise ValueError(msg)
        if tools is None:
            msg = (
                "If `load_from_llm_and_tools` is set to True, "
                "then tools must be provided"
            )
            raise ValueError(msg)
        return _load_agent_from_tools(config, llm, tools, **kwargs)
    agent_type = config.pop("_type")
    if agent_type not in AGENT_TO_CLASS:
        msg = f"Loading {agent_type} agent not supported"
        raise ValueError(msg)
    agent_cls = AGENT_TO_CLASS[agent_type]
    # The underlying LLM chain may be given inline or as a path to load.
    if "llm_chain" in config:
        config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
    elif "llm_chain_path" in config:
        config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
    else:
        msg = "One of `llm_chain` and `llm_chain_path` should be specified."
        raise ValueError(msg)
    if "output_parser" in config:
        logger.warning(
            "Currently loading output parsers on agent is not supported, "
            "will just use the default one.",
        )
        del config["output_parser"]
    # Explicit kwargs take precedence over values from the config dict.
    return agent_cls(**{**config, **kwargs})
@deprecated("0.1.0", removal="1.0")
def load_agent(
    path: str | Path,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Load an agent from the local filesystem (LangChainHub paths are rejected).

    Args:
        path: Path to the agent file.
        kwargs: Additional keyword arguments passed to the agent executor.

    Returns:
        An agent executor.

    Raises:
        RuntimeError: If loading from the deprecated github-based
            Hub is attempted.
    """
    # "lc://" URIs referred to the retired github-based hub.
    is_hub_ref = isinstance(path, str) and path.startswith("lc://")
    if is_hub_ref:
        msg = (
            "Loading from the deprecated github-based Hub is no longer supported. "
            "Please use the new LangChain Hub at https://smith.langchain.com/hub "
            "instead."
        )
        raise RuntimeError(msg)
    return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
    file: str | Path,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Load an agent config from a local JSON or YAML file and build the agent.

    Args:
        file: Path to a ``.json``, ``.yaml`` or ``.yml`` file.
        kwargs: Additional keyword arguments passed through to the agent.

    Returns:
        An agent loaded via `load_agent_from_config`.

    Raises:
        ValueError: If the file suffix is not one of the supported types.
    """
    valid_suffixes = {"json", "yaml", "yml"}
    # Convert file to Path object.
    file_path = Path(file) if isinstance(file, str) else file
    # Dispatch on the suffix without the leading dot.
    suffix = file_path.suffix[1:]
    if suffix == "json":
        with file_path.open() as f:
            config = json.load(f)
    elif suffix in ("yaml", "yml"):
        # ".yml" is a common alias for YAML files; accept it too.
        with file_path.open() as f:
            config = yaml.safe_load(f)
    else:
        msg = f"Unsupported file type, must be one of {valid_suffixes}."
        raise ValueError(msg)
    # Load the agent from the config now.
    return load_agent_from_config(config, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/loading.py",
"license": "MIT License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/mrkl/base.py | """Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from collections.abc import Callable, Sequence
from typing import Any, NamedTuple
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import BaseTool, Tool
from langchain_core.tools.render import render_text_description
from pydantic import Field
from typing_extensions import override
from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain_classic.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.mrkl.output_parser import MRKLOutputParser
from langchain_classic.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain_classic.agents.utils import validate_tools_single_input
from langchain_classic.chains import LLMChain
class ChainConfig(NamedTuple):
    """Configuration for a chain to use in MRKL system.

    Args:
        action_name: Name of the action.
        action: Action function to call.
        action_description: Description of the action.
    """

    # Name of the action.
    action_name: str
    # Callable invoked when this action is selected.
    action: Callable
    # Human-readable description of what the action does.
    action_description: str
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ZeroShotAgent(Agent):
    """Agent for the MRKL chain.

    Args:
        output_parser: Output parser for the agent.
    """

    output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)

    @classmethod
    @override
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return MRKLOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return identifier of agent type."""
        return AgentType.ZERO_SHOT_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix that introduces each tool observation."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix that introduces each LLM thought."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
    ) -> PromptTemplate:
        """Assemble the zero-shot prompt for this agent.

        Args:
            tools: Tools the agent will have access to; their names and
                descriptions are rendered into the prompt.
            prefix: Text placed before the tool listing.
            suffix: Text placed after the format instructions.
            format_instructions: Instructions on how to use the tools.
            input_variables: Variables the final prompt will expect.

        Returns:
            A PromptTemplate assembled from the pieces above.
        """
        rendered_tools = render_text_description(list(tools))
        joined_names = ", ".join(tool.name for tool in tools)
        filled_instructions = format_instructions.format(tool_names=joined_names)
        template = "\n\n".join(
            [prefix, rendered_tools, filled_instructions, suffix],
        )
        if not input_variables:
            # No explicit variables: let the template infer them itself.
            return PromptTemplate.from_template(template)
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Build a ZeroShotAgent from an LLM and a tool list.

        Args:
            llm: The LLM to use as the agent LLM.
            tools: The tools to use.
            callback_manager: The callback manager to use.
            output_parser: The output parser to use; falls back to the
                default MRKL parser.
            prefix: The prefix to use.
            suffix: The suffix to use.
            format_instructions: The format instructions to use.
            input_variables: The input variables to use.
            kwargs: Additional parameters to pass to the agent.
        """
        cls._validate_tools(tools)
        chain = LLMChain(
            llm=llm,
            prompt=cls.create_prompt(
                tools,
                prefix=prefix,
                suffix=suffix,
                format_instructions=format_instructions,
                input_variables=input_variables,
            ),
            callback_manager=callback_manager,
        )
        return cls(
            llm_chain=chain,
            allowed_tools=[tool.name for tool in tools],
            output_parser=output_parser or cls._get_default_output_parser(),
            **kwargs,
        )

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        validate_tools_single_input(cls.__name__, tools)
        if not tools:
            msg = (
                f"Got no tools for {cls.__name__}. At least one tool must be provided."
            )
            raise ValueError(msg)
        for tool in tools:
            if tool.description is None:
                msg = (  # type: ignore[unreachable]
                    f"Got a tool {tool.name} without a description. For this agent, "
                    f"a description must always be provided."
                )
                raise ValueError(msg)
        super()._validate_tools(tools)
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class MRKLChain(AgentExecutor):
    """Chain that implements the MRKL system."""

    @classmethod
    def from_chains(
        cls,
        llm: BaseLanguageModel,
        chains: list[ChainConfig],
        **kwargs: Any,
    ) -> AgentExecutor:
        """User-friendly way to initialize the MRKL chain.
        This is intended to be an easy way to get up and running with the
        MRKL chain.
        Args:
            llm: The LLM to use as the agent LLM.
            chains: The chains the MRKL system has access to.
            **kwargs: parameters to be passed to initialization.
        Returns:
            An initialized MRKL chain.
        """
        # Wrap each chain configuration in a Tool the agent can call.
        tools: list[Tool] = []
        for chain_config in chains:
            tool = Tool(
                name=chain_config.action_name,
                func=chain_config.action,
                description=chain_config.action_description,
            )
            tools.append(tool)
        agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
        return cls(agent=agent, tools=tools, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/mrkl/base.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/openai_assistant/base.py | from __future__ import annotations
import asyncio
import json
from collections.abc import Callable, Sequence
from json import JSONDecodeError
from time import sleep
from typing import (
TYPE_CHECKING,
Any,
)
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import CallbackManager
from langchain_core.load import dumpd
from langchain_core.runnables import RunnableConfig, RunnableSerializable, ensure_config
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self, override
if TYPE_CHECKING:
import openai
from openai.types.beta.threads import ThreadMessage
from openai.types.beta.threads.required_action_function_tool_call import (
RequiredActionFunctionToolCall,
)
class OpenAIAssistantFinish(AgentFinish):
    """Terminal agent outcome annotated with its OpenAI run and thread.

    Args:
        run_id: Run id.
        thread_id: Thread id.
    """

    run_id: str
    thread_id: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Whether LangChain can serialize this class.

        Returns:
            False
        """
        # This class is intentionally opted out of LangChain serialization.
        return False
class OpenAIAssistantAction(AgentAction):
    """Agent action carrying what is needed to submit tool output to a run.

    Args:
        tool_call_id: Tool call id.
        run_id: Run id.
        thread_id: Thread id
    """

    tool_call_id: str
    run_id: str
    thread_id: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Whether LangChain can serialize this class.

        Returns:
            False
        """
        # This class is intentionally opted out of LangChain serialization.
        return False
def _get_openai_client() -> openai.OpenAI:
    """Build a default synchronous OpenAI client.

    Raises:
        ImportError: If the `openai` package is not installed.
        AttributeError: If the installed `openai` package predates the v1 API.
    """
    try:
        import openai
    except ImportError as e:
        msg = "Unable to import openai, please install with `pip install openai`."
        raise ImportError(msg) from e
    try:
        return openai.OpenAI()
    except AttributeError as e:
        # Pre-1.x versions of the SDK have no `OpenAI` client class.
        msg = (
            "Please make sure you are using a v1.1-compatible version of openai. You "
            'can install with `pip install "openai>=1.1"`.'
        )
        raise AttributeError(msg) from e
def _get_openai_async_client() -> openai.AsyncOpenAI:
    """Build a default asynchronous OpenAI client.

    Raises:
        ImportError: If the `openai` package is not installed.
        AttributeError: If the installed `openai` package predates the v1 API.
    """
    try:
        import openai
    except ImportError as e:
        msg = "Unable to import openai, please install with `pip install openai`."
        raise ImportError(msg) from e
    try:
        return openai.AsyncOpenAI()
    except AttributeError as e:
        # Pre-1.x versions of the SDK have no `AsyncOpenAI` client class.
        msg = (
            "Please make sure you are using a v1.1-compatible version of openai. You "
            'can install with `pip install "openai>=1.1"`.'
        )
        raise AttributeError(msg) from e
def _is_assistants_builtin_tool(
    tool: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
) -> bool:
    """Determine if tool corresponds to OpenAI Assistants built-in."""
    if not isinstance(tool, dict):
        return False
    # Only dicts of the form {"type": "code_interpreter"|"file_search"} qualify.
    return tool.get("type") in ("code_interpreter", "file_search")
def _get_assistants_tool(
    tool: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
) -> dict[str, Any]:
    """Convert a raw function/class to an OpenAI tool.

    Note that OpenAI assistants supports several built-in tools,
    such as "code_interpreter" and "file_search"; those dicts are passed
    through unchanged.
    """
    # Built-in assistants tools are already in their final form.
    if isinstance(tool, dict) and tool.get("type") in (
        "code_interpreter",
        "file_search",
    ):
        return tool  # type: ignore[return-value]
    return convert_to_openai_tool(tool)
# Everything `OpenAIAssistantRunnable` can return: agent actions / a finish
# when used as an agent, or raw OpenAI thread messages / required tool calls
# when used directly.
OutputType = (
    list[OpenAIAssistantAction]
    | OpenAIAssistantFinish
    | list["ThreadMessage"]
    | list["RequiredActionFunctionToolCall"]
)
class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
    """Run an OpenAI Assistant.
    Example using OpenAI tools:
        ```python
        from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
        interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
            name="langchain assistant",
            instructions="You are a personal math tutor. "
            "Write and run code to answer math questions.",
            tools=[{"type": "code_interpreter"}],
            model="gpt-4-1106-preview",
        )
        output = interpreter_assistant.invoke(
            {"content": "What's 10 - 4 raised to the 2.7"}
        )
        ```
    Example using custom tools and AgentExecutor:
        ```python
        from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
        from langchain_classic.agents import AgentExecutor
        from langchain_classic.tools import E2BDataAnalysisTool
        tools = [E2BDataAnalysisTool(api_key="...")]
        agent = OpenAIAssistantRunnable.create_assistant(
            name="langchain assistant e2b tool",
            instructions="You are a personal math tutor. "
            "Write and run code to answer math questions.",
            tools=tools,
            model="gpt-4-1106-preview",
            as_agent=True,
        )
        agent_executor = AgentExecutor(agent=agent, tools=tools)
        agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"})
        ```
    Example using custom tools and custom execution:
        ```python
        from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
        from langchain_classic.agents import AgentExecutor
        from langchain_core.agents import AgentFinish
        from langchain_classic.tools import E2BDataAnalysisTool
        tools = [E2BDataAnalysisTool(api_key="...")]
        agent = OpenAIAssistantRunnable.create_assistant(
            name="langchain assistant e2b tool",
            instructions="You are a personal math tutor. "
            "Write and run code to answer math questions.",
            tools=tools,
            model="gpt-4-1106-preview",
            as_agent=True,
        )
        def execute_agent(agent, tools, input):
            tool_map = {tool.name: tool for tool in tools}
            response = agent.invoke(input)
            while not isinstance(response, AgentFinish):
                tool_outputs = []
                for action in response:
                    tool_output = tool_map[action.tool].invoke(action.tool_input)
                    tool_outputs.append(
                        {
                            "output": tool_output,
                            "tool_call_id": action.tool_call_id,
                        }
                    )
                response = agent.invoke(
                    {
                        "tool_outputs": tool_outputs,
                        "run_id": action.run_id,
                        "thread_id": action.thread_id,
                    }
                )
            return response
        response = execute_agent(
            agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}
        )
        next_response = execute_agent(
            agent,
            tools,
            {"content": "now add 17.241", "thread_id": response.thread_id},
        )
        ```
    """
    # Pydantic fields. `client` defaults to `_get_openai_client()`; an async
    # twin is derived from it by `_validate_async_client` when not supplied.
    client: Any = Field(default_factory=_get_openai_client)
    """`OpenAI` or `AzureOpenAI` client."""
    async_client: Any = None
    """`OpenAI` or `AzureOpenAI` async client."""
    assistant_id: str
    """OpenAI assistant id."""
    check_every_ms: float = 1_000.0
    """Frequency with which to check run progress in ms."""
    as_agent: bool = False
    """Use as a LangChain agent, compatible with the `AgentExecutor`."""
@model_validator(mode="after")
def _validate_async_client(self) -> Self:
if self.async_client is None:
import openai
api_key = self.client.api_key
self.async_client = openai.AsyncOpenAI(api_key=api_key)
return self
@classmethod
def create_assistant(
cls,
name: str,
instructions: str,
tools: Sequence[BaseTool | dict],
model: str,
*,
client: openai.OpenAI | openai.AzureOpenAI | None = None,
**kwargs: Any,
) -> OpenAIAssistantRunnable:
"""Create an OpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
instructions: Assistant instructions.
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
client: OpenAI or AzureOpenAI client.
Will create a default OpenAI client if not specified.
kwargs: Additional arguments.
Returns:
OpenAIAssistantRunnable configured to run using the created assistant.
"""
client = client or _get_openai_client()
assistant = client.beta.assistants.create(
name=name,
instructions=instructions,
tools=[_get_assistants_tool(tool) for tool in tools],
model=model,
)
return cls(assistant_id=assistant.id, client=client, **kwargs)
    @override
    def invoke(
        self,
        input: dict,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> OutputType:
        """Invoke assistant.
        Args:
            input: Runnable input dict that can have:
                content: User message when starting a new run.
                thread_id: Existing thread to use.
                run_id: Existing run to use. Should only be supplied when providing
                    the tool output for a required action after an initial invocation.
                message_metadata: Metadata to associate with new message.
                thread_metadata: Metadata to associate with new thread. Only relevant
                    when new thread being created.
                instructions: Additional run instructions.
                model: Override Assistant model for this run.
                tools: Override Assistant tools for this run.
                parallel_tool_calls: Allow Assistant to set parallel_tool_calls
                    for this run.
                top_p: Override Assistant top_p for this run.
                temperature: Override Assistant temperature for this run.
                max_completion_tokens: Allow setting max_completion_tokens for this run.
                max_prompt_tokens: Allow setting max_prompt_tokens for this run.
                run_metadata: Metadata to associate with new run.
                attachments: A list of files attached to the message, and the
                    tools they should be added to.
            config: Runnable config.
            **kwargs: Additional arguments.
        Returns:
            If self.as_agent, will return
                Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
            Otherwise, will return OpenAI types
                Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
        """
        config = ensure_config(config)
        callback_manager = CallbackManager.configure(
            inheritable_callbacks=config.get("callbacks"),
            inheritable_tags=config.get("tags"),
            inheritable_metadata=config.get("metadata"),
        )
        # Open a chain run so callbacks see this invocation as one unit.
        run_manager = callback_manager.on_chain_start(
            dumpd(self),
            input,
            name=config.get("run_name") or self.get_name(),
        )
        try:
            # Being run within AgentExecutor and there are tool outputs to submit.
            if self.as_agent and input.get("intermediate_steps"):
                tool_outputs = self._parse_intermediate_steps(
                    input["intermediate_steps"],
                )
                run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs)
            # Starting a new thread and a new run.
            elif "thread_id" not in input:
                thread = {
                    "messages": [
                        {
                            "role": "user",
                            "content": input["content"],
                            "metadata": input.get("message_metadata"),
                            "attachments": input.get("attachments"),
                        },
                    ],
                    "metadata": input.get("thread_metadata"),
                }
                run = self._create_thread_and_run(input, thread)
            # Starting a new run in an existing thread.
            elif "run_id" not in input:
                _ = self.client.beta.threads.messages.create(
                    input["thread_id"],
                    content=input["content"],
                    role="user",
                    metadata=input.get("message_metadata"),
                )
                run = self._create_run(input)
            # Submitting tool outputs to an existing run, outside the AgentExecutor
            # framework.
            else:
                run = self.client.beta.threads.runs.submit_tool_outputs(**input)
            # Block until the run leaves the "queued"/"in_progress" states.
            run = self._wait_for_run(run.id, run.thread_id)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
        try:
            # Use sync response handler in sync invoke
            response = self._get_response(run)
        except BaseException as e:
            run_manager.on_chain_error(e, metadata=run.dict())
            raise
        else:
            run_manager.on_chain_end(response)
            return response
@classmethod
async def acreate_assistant(
cls,
name: str,
instructions: str,
tools: Sequence[BaseTool | dict],
model: str,
*,
async_client: openai.AsyncOpenAI | openai.AsyncAzureOpenAI | None = None,
**kwargs: Any,
) -> OpenAIAssistantRunnable:
"""Async create an AsyncOpenAI Assistant and instantiate the Runnable.
Args:
name: Assistant name.
instructions: Assistant instructions.
tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
model: Assistant model to use.
async_client: AsyncOpenAI client.
Will create default async_client if not specified.
**kwargs: Additional arguments.
Returns:
AsyncOpenAIAssistantRunnable configured to run using the created assistant.
"""
async_client = async_client or _get_openai_async_client()
openai_tools = [_get_assistants_tool(tool) for tool in tools]
assistant = await async_client.beta.assistants.create(
name=name,
instructions=instructions,
tools=openai_tools,
model=model,
)
return cls(assistant_id=assistant.id, async_client=async_client, **kwargs)
@override
async def ainvoke(
self,
input: dict,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> OutputType:
"""Async invoke assistant.
Args:
input: Runnable input dict that can have:
content: User message when starting a new run.
thread_id: Existing thread to use.
run_id: Existing run to use. Should only be supplied when providing
the tool output for a required action after an initial invocation.
message_metadata: Metadata to associate with a new message.
thread_metadata: Metadata to associate with new thread. Only relevant
when a new thread is created.
instructions: Overrides the instructions of the assistant.
additional_instructions: Appends additional instructions.
model: Override Assistant model for this run.
tools: Override Assistant tools for this run.
parallel_tool_calls: Allow Assistant to set parallel_tool_calls
for this run.
top_p: Override Assistant top_p for this run.
temperature: Override Assistant temperature for this run.
max_completion_tokens: Allow setting max_completion_tokens for this run.
max_prompt_tokens: Allow setting max_prompt_tokens for this run.
run_metadata: Metadata to associate with new run.
config: Runnable config.
kwargs: Additional arguments.
Returns:
If self.as_agent, will return
Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
Otherwise, will return OpenAI types
Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
"""
config = config or {}
callback_manager = CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
inheritable_tags=config.get("tags"),
inheritable_metadata=config.get("metadata"),
)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
name=config.get("run_name") or self.get_name(),
)
try:
# Being run within AgentExecutor and there are tool outputs to submit.
if self.as_agent and input.get("intermediate_steps"):
tool_outputs = await self._aparse_intermediate_steps(
input["intermediate_steps"],
)
run = await self.async_client.beta.threads.runs.submit_tool_outputs(
**tool_outputs,
)
# Starting a new thread and a new run.
elif "thread_id" not in input:
thread = {
"messages": [
{
"role": "user",
"content": input["content"],
"metadata": input.get("message_metadata"),
},
],
"metadata": input.get("thread_metadata"),
}
run = await self._acreate_thread_and_run(input, thread)
# Starting a new run in an existing thread.
elif "run_id" not in input:
_ = await self.async_client.beta.threads.messages.create(
input["thread_id"],
content=input["content"],
role="user",
metadata=input.get("message_metadata"),
)
run = await self._acreate_run(input)
# Submitting tool outputs to an existing run, outside the AgentExecutor
# framework.
else:
run = await self.async_client.beta.threads.runs.submit_tool_outputs(
**input,
)
run = await self._await_for_run(run.id, run.thread_id)
except BaseException as e:
run_manager.on_chain_error(e)
raise
try:
# Use async response handler in async ainvoke
response = await self._aget_response(run)
except BaseException as e:
run_manager.on_chain_error(e, metadata=run.dict())
raise
else:
run_manager.on_chain_end(response)
return response
def _parse_intermediate_steps(
self,
intermediate_steps: list[tuple[OpenAIAssistantAction, str]],
) -> dict:
last_action, _ = intermediate_steps[-1]
run = self._wait_for_run(last_action.run_id, last_action.thread_id)
required_tool_call_ids = set()
if run.required_action:
required_tool_call_ids = {
tc.id for tc in run.required_action.submit_tool_outputs.tool_calls
}
tool_outputs = [
{"output": str(output), "tool_call_id": action.tool_call_id}
for action, output in intermediate_steps
if action.tool_call_id in required_tool_call_ids
]
return {
"tool_outputs": tool_outputs,
"run_id": last_action.run_id,
"thread_id": last_action.thread_id,
}
def _create_run(self, input_dict: dict) -> Any:
params = {
k: v
for k, v in input_dict.items()
if k
in (
"instructions",
"model",
"tools",
"additional_instructions",
"parallel_tool_calls",
"top_p",
"temperature",
"max_completion_tokens",
"max_prompt_tokens",
"run_metadata",
)
}
return self.client.beta.threads.runs.create(
input_dict["thread_id"],
assistant_id=self.assistant_id,
**params,
)
def _create_thread_and_run(self, input_dict: dict, thread: dict) -> Any:
params = {
k: v
for k, v in input_dict.items()
if k
in (
"instructions",
"model",
"tools",
"parallel_tool_calls",
"top_p",
"temperature",
"max_completion_tokens",
"max_prompt_tokens",
"run_metadata",
)
}
return self.client.beta.threads.create_and_run(
assistant_id=self.assistant_id,
thread=thread,
**params,
)
    def _get_response(self, run: Any) -> Any:
        """Translate a finished/paused run into this Runnable's output type."""
        # TODO: Pagination
        if run.status == "completed":
            import openai

            # openai >= 1.14 renamed the text content type to TextContentBlock;
            # pick the right class to test against below.
            major_version = int(openai.version.VERSION.split(".")[0])
            minor_version = int(openai.version.VERSION.split(".")[1])
            version_gte_1_14 = (major_version > 1) or (
                major_version == 1 and minor_version >= 14  # noqa: PLR2004
            )
            messages = self.client.beta.threads.messages.list(
                run.thread_id,
                order="asc",
            )
            # Only keep messages produced by this run, not older thread history.
            new_messages = [msg for msg in messages if msg.run_id == run.id]
            if not self.as_agent:
                return new_messages
            answer: Any = [
                msg_content for msg in new_messages for msg_content in msg.content
            ]
            attachments = [
                attachment for msg in new_messages for attachment in msg.attachments
            ]
            # When every content block is plain text, collapse to a single string.
            if all(
                (
                    isinstance(content, openai.types.beta.threads.TextContentBlock)
                    if version_gte_1_14
                    else isinstance(
                        content,
                        openai.types.beta.threads.MessageContentText,
                    )
                )
                for content in answer
            ):
                answer = "\n".join(content.text.value for content in answer)
            return OpenAIAssistantFinish(
                return_values={
                    "output": answer,
                    "thread_id": run.thread_id,
                    "run_id": run.id,
                    "attachments": attachments,
                },
                log="",
                run_id=run.id,
                thread_id=run.thread_id,
            )
        if run.status == "requires_action":
            if not self.as_agent:
                return run.required_action.submit_tool_outputs.tool_calls
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                try:
                    args = json.loads(function.arguments, strict=False)
                except JSONDecodeError as e:
                    msg = (
                        f"Received invalid JSON function arguments: "
                        f"{function.arguments} for function {function.name}"
                    )
                    raise ValueError(msg) from e
                # Unwrap the single-argument convention used for one-input tools.
                if len(args) == 1 and "__arg1" in args:
                    args = args["__arg1"]
                actions.append(
                    OpenAIAssistantAction(
                        tool=function.name,
                        tool_input=args,
                        tool_call_id=tool_call.id,
                        log="",
                        run_id=run.id,
                        thread_id=run.thread_id,
                    ),
                )
            return actions
        run_info = json.dumps(run.dict(), indent=2)
        msg = f"Unexpected run status: {run.status}. Full run info:\n\n{run_info}"
        raise ValueError(msg)
def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
in_progress = True
while in_progress:
run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
in_progress = run.status in ("in_progress", "queued")
if in_progress:
sleep(self.check_every_ms / 1000)
return run
async def _aparse_intermediate_steps(
self,
intermediate_steps: list[tuple[OpenAIAssistantAction, str]],
) -> dict:
last_action, _ = intermediate_steps[-1]
run = self._wait_for_run(last_action.run_id, last_action.thread_id)
required_tool_call_ids = set()
if run.required_action:
required_tool_call_ids = {
tc.id for tc in run.required_action.submit_tool_outputs.tool_calls
}
tool_outputs = [
{"output": str(output), "tool_call_id": action.tool_call_id}
for action, output in intermediate_steps
if action.tool_call_id in required_tool_call_ids
]
return {
"tool_outputs": tool_outputs,
"run_id": last_action.run_id,
"thread_id": last_action.thread_id,
}
async def _acreate_run(self, input_dict: dict) -> Any:
params = {
k: v
for k, v in input_dict.items()
if k
in (
"instructions",
"model",
"tools",
"additional_instructions",
"parallel_tool_calls",
"top_p",
"temperature",
"max_completion_tokens",
"max_prompt_tokens",
"run_metadata",
)
}
return await self.async_client.beta.threads.runs.create(
input_dict["thread_id"],
assistant_id=self.assistant_id,
**params,
)
async def _acreate_thread_and_run(self, input_dict: dict, thread: dict) -> Any:
params = {
k: v
for k, v in input_dict.items()
if k
in (
"instructions",
"model",
"tools",
"parallel_tool_calls",
"top_p",
"temperature",
"max_completion_tokens",
"max_prompt_tokens",
"run_metadata",
)
}
return await self.async_client.beta.threads.create_and_run(
assistant_id=self.assistant_id,
thread=thread,
**params,
)
async def _aget_response(self, run: Any) -> Any:
# TODO: Pagination
if run.status == "completed":
import openai
major_version = int(openai.version.VERSION.split(".")[0])
minor_version = int(openai.version.VERSION.split(".")[1])
version_gte_1_14 = (major_version > 1) or (
major_version == 1 and minor_version >= 14 # noqa: PLR2004
)
messages = await self.async_client.beta.threads.messages.list(
run.thread_id,
order="asc",
)
new_messages = [msg for msg in messages if msg.run_id == run.id]
if not self.as_agent:
return new_messages
answer: Any = [
msg_content for msg in new_messages for msg_content in msg.content
]
if all(
(
isinstance(content, openai.types.beta.threads.TextContentBlock)
if version_gte_1_14
else isinstance(
content,
openai.types.beta.threads.MessageContentText,
)
)
for content in answer
):
answer = "\n".join(content.text.value for content in answer)
return OpenAIAssistantFinish(
return_values={
"output": answer,
"thread_id": run.thread_id,
"run_id": run.id,
},
log="",
run_id=run.id,
thread_id=run.thread_id,
)
if run.status == "requires_action":
if not self.as_agent:
return run.required_action.submit_tool_outputs.tool_calls
actions = []
for tool_call in run.required_action.submit_tool_outputs.tool_calls:
function = tool_call.function
try:
args = json.loads(function.arguments, strict=False)
except JSONDecodeError as e:
msg = (
f"Received invalid JSON function arguments: "
f"{function.arguments} for function {function.name}"
)
raise ValueError(msg) from e
if len(args) == 1 and "__arg1" in args:
args = args["__arg1"]
actions.append(
OpenAIAssistantAction(
tool=function.name,
tool_input=args,
tool_call_id=tool_call.id,
log="",
run_id=run.id,
thread_id=run.thread_id,
),
)
return actions
run_info = json.dumps(run.dict(), indent=2)
msg = f"Unexpected run status: {run.status}. Full run info:\n\n{run_info}"
raise ValueError(msg)
async def _await_for_run(self, run_id: str, thread_id: str) -> Any:
in_progress = True
while in_progress:
run = await self.async_client.beta.threads.runs.retrieve(
run_id,
thread_id=thread_id,
)
in_progress = run.status in ("in_progress", "queued")
if in_progress:
await asyncio.sleep(self.check_every_ms / 1000)
return run
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/openai_assistant/base.py",
"license": "MIT License",
"lines": 756,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/openai_functions_agent/base.py | """Module implements an agent that uses OpenAI's APIs function enabled API."""
from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import (
BaseMessage,
SystemMessage,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.prompts.message import BaseMessagePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from pydantic import model_validator
from typing_extensions import Self
from langchain_classic.agents import BaseSingleActionAgent
from langchain_classic.agents.format_scratchpad.openai_functions import (
format_to_openai_function_messages,
)
from langchain_classic.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
# Sentinel distinguishing "argument omitted" from an explicit `None`
# system message in `create_prompt` / `from_llm_and_tools`.
_NOT_SET = object()
@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="1.0")
class OpenAIFunctionsAgent(BaseSingleActionAgent):
    """An Agent driven by OpenAIs function powered API.
    Args:
        llm: This should be an instance of `ChatOpenAI`, specifically a model
            that supports using `functions`.
        tools: The tools this agent has access to.
        prompt: The prompt for this agent, should support agent_scratchpad as one
            of the variables. For an easy way to construct this prompt, use
            `OpenAIFunctionsAgent.create_prompt(...)`
        output_parser: The output parser for this agent. Should be an instance of
            `OpenAIFunctionsAgentOutputParser`.
    """
    llm: BaseLanguageModel
    tools: Sequence[BaseTool]
    prompt: BasePromptTemplate
    # Stored as the class itself; `parse_ai_message` is called as a classmethod.
    output_parser: type[OpenAIFunctionsAgentOutputParser] = (
        OpenAIFunctionsAgentOutputParser
    )
    def get_allowed_tools(self) -> list[str]:
        """Get allowed tools."""
        return [t.name for t in self.tools]
    @model_validator(mode="after")
    def validate_prompt(self) -> Self:
        """Validate that the prompt declares an `agent_scratchpad` variable.
        Returns:
            The validated model instance.
        Raises:
            ValueError: If `agent_scratchpad` is not in the prompt.
        """
        prompt: BasePromptTemplate = self.prompt
        if "agent_scratchpad" not in prompt.input_variables:
            msg = (
                "`agent_scratchpad` should be one of the variables in the prompt, "
                f"got {prompt.input_variables}"
            )
            raise ValueError(msg)
        return self
    @property
    def input_keys(self) -> list[str]:
        """Get input keys. Input refers to user input here."""
        return ["input"]
    @property
    def functions(self) -> list[dict]:
        """Get functions."""
        # Tools rendered in the OpenAI function-calling schema.
        return [dict(convert_to_openai_function(t)) for t in self.tools]
    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        with_functions: bool = True,  # noqa: FBT001,FBT002
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Given input, decided what to do.
        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use.
            with_functions: Whether to use functions.
            **kwargs: User inputs.
        Returns:
            Action specifying what tool to use.
            If the agent is finished, returns an `AgentFinish`.
            If the agent is not finished, returns an `AgentAction`.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        # agent_scratchpad is supplied from intermediate steps, not user kwargs.
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        if with_functions:
            predicted_message = self.llm.invoke(
                messages,
                functions=self.functions,
                callbacks=callbacks,
            )
        else:
            # Plain completion (used by `return_stopped_response` with "generate").
            predicted_message = self.llm.invoke(
                messages,
                callbacks=callbacks,
            )
        return self.output_parser.parse_ai_message(predicted_message)
    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Async given input, decided what to do.
        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use.
            **kwargs: User inputs.
        Returns:
            Action specifying what tool to use.
            If the agent is finished, returns an AgentFinish.
            If the agent is not finished, returns an AgentAction.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        predicted_message = await self.llm.ainvoke(
            messages,
            functions=self.functions,
            callbacks=callbacks,
        )
        return self.output_parser.parse_ai_message(predicted_message)
    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: list[tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations.
        Args:
            early_stopping_method: The early stopping method to use.
            intermediate_steps: Intermediate steps.
            **kwargs: User inputs.
        Returns:
            AgentFinish.
        Raises:
            ValueError: If `early_stopping_method` is not `force` or `generate`.
            ValueError: If `agent_decision` is not an AgentAction.
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."},
                "",
            )
        if early_stopping_method == "generate":
            # Generate does one final forward pass
            agent_decision = self.plan(
                intermediate_steps,
                with_functions=False,
                **kwargs,
            )
            if isinstance(agent_decision, AgentFinish):
                return agent_decision
            msg = f"got AgentAction with no functions provided: {agent_decision}"
            raise ValueError(msg)
        msg = (
            "early_stopping_method should be one of `force` or `generate`, "
            f"got {early_stopping_method}"
        )
        raise ValueError(msg)
    @classmethod
    def create_prompt(
        cls,
        system_message: SystemMessage | None = _NOT_SET,  # type: ignore[assignment]
        extra_prompt_messages: list[BaseMessagePromptTemplate] | None = None,
    ) -> ChatPromptTemplate:
        """Create prompt for this agent.
        Args:
            system_message: Message to use as the system message that will be the
                first in the prompt. Defaults to a helpful-assistant message;
                pass `None` explicitly to omit the system message.
            extra_prompt_messages: Prompt messages that will be placed between the
                system message and the new human input.
        Returns:
            A prompt template to pass into this agent.
        """
        _prompts = extra_prompt_messages or []
        # `_NOT_SET` sentinel distinguishes "omitted" from an explicit None.
        system_message_ = (
            system_message
            if system_message is not _NOT_SET
            else SystemMessage(content="You are a helpful AI assistant.")
        )
        messages: list[BaseMessagePromptTemplate | BaseMessage]
        messages = [system_message_] if system_message_ else []
        messages.extend(
            [
                *_prompts,
                HumanMessagePromptTemplate.from_template("{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ],
        )
        return ChatPromptTemplate(messages=messages)
    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        extra_prompt_messages: list[BaseMessagePromptTemplate] | None = None,
        system_message: SystemMessage | None = _NOT_SET,  # type: ignore[assignment]
        **kwargs: Any,
    ) -> BaseSingleActionAgent:
        """Construct an agent from an LLM and tools.
        Args:
            llm: The LLM to use as the agent.
            tools: The tools to use.
            callback_manager: The callback manager to use.
            extra_prompt_messages: Extra prompt messages to use.
            system_message: The system message to use.
                Defaults to a default system message.
            kwargs: Additional parameters to pass to the agent.
        """
        system_message_ = (
            system_message
            if system_message is not _NOT_SET
            else SystemMessage(content="You are a helpful AI assistant.")
        )
        prompt = cls.create_prompt(
            extra_prompt_messages=extra_prompt_messages,
            system_message=system_message_,
        )
        return cls(
            llm=llm,
            prompt=prompt,
            tools=tools,
            callback_manager=callback_manager,
            **kwargs,
        )
def create_openai_functions_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
) -> Runnable:
    """Create an agent that uses OpenAI function calling.
    Args:
        llm: LLM to use as the agent. Should work with OpenAI function calling,
            so either be an OpenAI model that supports that or a wrapper of
            a different model that adds in equivalent support.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must declare an ``agent_scratchpad``
            variable (typically a ``MessagesPlaceholder``); intermediate agent
            actions and tool output messages are injected there.
    Returns:
        A Runnable sequence representing an agent. It takes as input all the same
        input variables as the prompt passed in does. It returns as output either
        an AgentAction or AgentFinish.
    Raises:
        ValueError: If `agent_scratchpad` is not in the prompt.
    """
    known_vars = prompt.input_variables + list(prompt.partial_variables)
    if "agent_scratchpad" not in known_vars:
        msg = (
            "Prompt must have input variable `agent_scratchpad`, but wasn't found. "
            f"Found {prompt.input_variables} instead."
        )
        raise ValueError(msg)
    # Advertise every tool to the model through the legacy `functions` kwarg.
    bound_llm = llm.bind(functions=[convert_to_openai_function(t) for t in tools])
    def _build_scratchpad(inputs: dict) -> list:
        # Render prior (action, observation) pairs as chat messages.
        return format_to_openai_function_messages(inputs["intermediate_steps"])
    return (
        RunnablePassthrough.assign(agent_scratchpad=_build_scratchpad)
        | prompt
        | bound_llm
        | OpenAIFunctionsAgentOutputParser()
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/openai_functions_agent/base.py",
"license": "MIT License",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/openai_functions_multi_agent/base.py | """Module implementing an agent that uses OpenAI's function-calling API."""
import json
from collections.abc import Sequence
from json import JSONDecodeError
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
SystemMessage,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.prompts.message import BaseMessagePromptTemplate
from langchain_core.tools import BaseTool
from pydantic import model_validator
from typing_extensions import Self
from langchain_classic.agents import BaseMultiActionAgent
from langchain_classic.agents.format_scratchpad.openai_functions import (
format_to_openai_function_messages,
)
# For backwards compatibility: older code imported `_FunctionsAgentAction`
# from this module; it is now simply an alias for `AgentActionMessageLog`.
_FunctionsAgentAction = AgentActionMessageLog
def _parse_ai_message(message: BaseMessage) -> list[AgentAction] | AgentFinish:
    """Parse an AI message into a list of tool actions or a final answer."""
    if not isinstance(message, AIMessage):
        msg = f"Expected an AI message got {type(message)}"
        raise TypeError(msg)
    function_call = message.additional_kwargs.get("function_call", {})
    if not function_call:
        # No function call: the message content is the final answer.
        return AgentFinish(
            return_values={"output": message.content},
            log=str(message.content),
        )
    try:
        arguments = json.loads(function_call["arguments"], strict=False)
    except JSONDecodeError as e:
        msg = (
            f"Could not parse tool input: {function_call} because "
            f"the `arguments` is not valid JSON."
        )
        raise OutputParserException(msg) from e
    try:
        tool_schemas = arguments["actions"]
    except (TypeError, KeyError) as e:
        msg = (
            f"Could not parse tool input: {function_call} because "
            f"the `arguments` JSON does not contain `actions` key."
        )
        raise OutputParserException(msg) from e
    content_msg = f"responded: {message.content}\n" if message.content else "\n"
    parsed_actions: list[AgentAction] = []
    for tool_schema in tool_schemas:
        function_name = tool_schema["action_name"]
        if "action" in tool_schema:
            raw_input = tool_schema["action"]
        else:
            # Older schema: arguments live alongside `action_name`, so strip
            # the name and treat the remainder as the tool input.
            raw_input = {k: v for k, v in tool_schema.items() if k != "action_name"}
        # A hack here:
        # The code that encodes tool input into Open AI uses a special variable
        # name called `__arg1` to handle old style tools that do not expose a
        # schema and expect a single string argument as an input.
        # We unpack the argument here if it exists.
        # Open AI does not support passing in a JSON array as an argument.
        if "__arg1" in raw_input:
            tool_input = raw_input["__arg1"]
        else:
            tool_input = raw_input
        log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
        parsed_actions.append(
            _FunctionsAgentAction(
                tool=function_name,
                tool_input=tool_input,
                log=log,
                message_log=[message],
            ),
        )
    return parsed_actions
# Sentinel used for the `system_message` parameters below to distinguish
# "argument omitted" (use the default system message) from an explicit `None`.
_NOT_SET = object()
@deprecated("0.1.0", alternative="create_openai_tools_agent", removal="1.0")
class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
    """Agent driven by OpenAIs function powered API.
    Unlike the single-action variant, this agent wraps all tools in one
    synthetic `tool_selection` function whose argument is a *list* of actions,
    so the model can request multiple tool invocations per turn.
    Args:
        llm: This should be an instance of ChatOpenAI, specifically a model
            that supports using `functions`.
        tools: The tools this agent has access to.
        prompt: The prompt for this agent, should support agent_scratchpad as one
            of the variables. For an easy way to construct this prompt, use
            `OpenAIMultiFunctionsAgent.create_prompt(...)`
    """
    # Language model driving the agent; invoked with `functions=` kwargs.
    llm: BaseLanguageModel
    # Tools the agent may select; their names become the schema's enum values.
    tools: Sequence[BaseTool]
    # Prompt template; must expose an `agent_scratchpad` input variable.
    prompt: BasePromptTemplate
    def get_allowed_tools(self) -> list[str]:
        """Get allowed tools."""
        return [t.name for t in self.tools]
    @model_validator(mode="after")
    def _validate_prompt(self) -> Self:
        # Reject prompts that cannot receive the scratchpad of prior steps.
        prompt: BasePromptTemplate = self.prompt
        if "agent_scratchpad" not in prompt.input_variables:
            msg = (
                "`agent_scratchpad` should be one of the variables in the prompt, "
                f"got {prompt.input_variables}"
            )
            raise ValueError(msg)
        return self
    @property
    def input_keys(self) -> list[str]:
        """Get input keys. Input refers to user input here."""
        return ["input"]
    @property
    def functions(self) -> list[dict]:
        """Get the functions for the agent."""
        enum_vals = [t.name for t in self.tools]
        tool_selection = {
            # OpenAI functions returns a single tool invocation
            # Here we force the single tool invocation it returns to
            # itself be a list of tool invocations. We do this by constructing
            # a new tool that has one argument which is a list of tools
            # to use.
            "name": "tool_selection",
            "description": "A list of actions to take.",
            "parameters": {
                "title": "tool_selection",
                "description": "A list of actions to take.",
                "type": "object",
                "properties": {
                    "actions": {
                        "title": "actions",
                        "type": "array",
                        "items": {
                            # This is a custom item which bundles the action_name
                            # and the action. We do this because some actions
                            # could have the same schema, and without this there
                            # is no way to differentiate them.
                            "title": "tool_call",
                            "type": "object",
                            "properties": {
                                # This is the name of the action to take
                                "action_name": {
                                    "title": "action_name",
                                    "enum": enum_vals,
                                    "type": "string",
                                    "description": (
                                        "Name of the action to take. The name "
                                        "provided here should match up with the "
                                        "parameters for the action below."
                                    ),
                                },
                                # This is the action to take.
                                "action": {
                                    "title": "Action",
                                    "anyOf": [
                                        {
                                            "title": t.name,
                                            "type": "object",
                                            "properties": t.args,
                                        }
                                        for t in self.tools
                                    ],
                                },
                            },
                            "required": ["action_name", "action"],
                        },
                    },
                },
                "required": ["actions"],
            },
        }
        return [tool_selection]
    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Given input, decided what to do.
        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use.
            **kwargs: User inputs.
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        # The scratchpad is rebuilt from intermediate_steps, so any
        # caller-supplied `agent_scratchpad` kwarg is deliberately dropped.
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        predicted_message = self.llm.invoke(
            messages,
            functions=self.functions,
            callbacks=callbacks,
        )
        return _parse_ai_message(predicted_message)
    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Async given input, decided what to do.
        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to use.
            **kwargs: User inputs.
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        # Same input selection as `plan`; see the comment there.
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        predicted_message = await self.llm.ainvoke(
            messages,
            functions=self.functions,
            callbacks=callbacks,
        )
        return _parse_ai_message(predicted_message)
    @classmethod
    def create_prompt(
        cls,
        system_message: SystemMessage | None = _NOT_SET,  # type: ignore[assignment]
        extra_prompt_messages: list[BaseMessagePromptTemplate] | None = None,
    ) -> BasePromptTemplate:
        """Create prompt for this agent.
        Args:
            system_message: Message to use as the system message that will be the
                first in the prompt.
            extra_prompt_messages: Prompt messages that will be placed between the
                system message and the new human input.
        Returns:
            A prompt template to pass into this agent.
        """
        _prompts = extra_prompt_messages or []
        # `_NOT_SET` means "omitted" -> default system message; explicit None
        # means "no system message at all".
        system_message_ = (
            system_message
            if system_message is not _NOT_SET
            else SystemMessage(content="You are a helpful AI assistant.")
        )
        messages: list[BaseMessagePromptTemplate | BaseMessage]
        messages = [system_message_] if system_message_ else []
        messages.extend(
            [
                *_prompts,
                HumanMessagePromptTemplate.from_template("{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ],
        )
        return ChatPromptTemplate(messages=messages)
    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        extra_prompt_messages: list[BaseMessagePromptTemplate] | None = None,
        system_message: SystemMessage | None = _NOT_SET,  # type: ignore[assignment]
        **kwargs: Any,
    ) -> BaseMultiActionAgent:
        """Construct an agent from an LLM and tools.
        Args:
            llm: The language model to use.
            tools: A list of tools to use.
            callback_manager: The callback manager to use.
            extra_prompt_messages: Extra prompt messages to use.
            system_message: The system message to use. Default is a default system
                message.
            kwargs: Additional arguments.
        """
        system_message_ = (
            system_message
            if system_message is not _NOT_SET
            else SystemMessage(content="You are a helpful AI assistant.")
        )
        prompt = cls.create_prompt(
            extra_prompt_messages=extra_prompt_messages,
            system_message=system_message_,
        )
        return cls(
            llm=llm,
            prompt=prompt,
            tools=tools,
            callback_manager=callback_manager,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/openai_functions_multi_agent/base.py",
"license": "MIT License",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/openai_tools/base.py | from collections.abc import Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_classic.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain_classic.agents.output_parsers.openai_tools import (
OpenAIToolsAgentOutputParser,
)
def create_openai_tools_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    strict: bool | None = None,  # noqa: FBT001
) -> Runnable:
    """Create an agent that uses OpenAI tools.
    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must declare an ``agent_scratchpad``
            variable (typically a ``MessagesPlaceholder``); intermediate agent
            actions and tool output messages are injected there.
        strict: Whether strict mode should be used for OpenAI tools.
    Returns:
        A Runnable sequence representing an agent. It takes as input all the same
        input variables as the prompt passed in does. It returns as output either
        an AgentAction or AgentFinish.
    Raises:
        ValueError: If the prompt is missing required variables.
    """
    available_vars = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = {"agent_scratchpad"}.difference(available_vars)
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)
    # Advertise every tool to the model through the `tools` kwarg.
    openai_tools = [convert_to_openai_tool(tool, strict=strict) for tool in tools]
    llm_with_tools = llm.bind(tools=openai_tools)
    def _build_scratchpad(inputs: dict) -> list:
        # Render prior (action, observation) pairs as chat messages.
        return format_to_openai_tool_messages(inputs["intermediate_steps"])
    return (
        RunnablePassthrough.assign(agent_scratchpad=_build_scratchpad)
        | prompt
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/openai_tools/base.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/output_parsers/openai_functions.py | import json
from json import JSONDecodeError
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from typing_extensions import override
from langchain_classic.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
    """Parse a message into an agent action or finish.
    Intended for OpenAI models: the model signals tool use via the specific
    `function_call` parameter. When a `function_call` is present it supplies
    the tool name and tool input; when it is absent, the AIMessage is assumed
    to be the final output.
    """
    @property
    def _type(self) -> str:
        return "openai-functions-agent"
    @staticmethod
    def parse_ai_message(message: BaseMessage) -> AgentAction | AgentFinish:
        """Parse an AI message."""
        if not isinstance(message, AIMessage):
            msg = f"Expected an AI message got {type(message)}"
            raise TypeError(msg)
        function_call = message.additional_kwargs.get("function_call", {})
        if not function_call:
            # No function call: the content is the final answer.
            return AgentFinish(
                return_values={"output": message.content},
                log=str(message.content),
            )
        function_name = function_call["name"]
        try:
            raw_args = function_call["arguments"]
            # OpenAI returns an empty string for functions containing no args;
            # otherwise the arguments are a JSON object.
            parsed_input = json.loads(raw_args, strict=False) if raw_args.strip() else {}
        except JSONDecodeError as e:
            msg = (
                f"Could not parse tool input: {function_call} because "
                f"the `arguments` is not valid JSON."
            )
            raise OutputParserException(msg) from e
        # A hack here:
        # The code that encodes tool input into Open AI uses a special variable
        # name called `__arg1` to handle old style tools that do not expose a
        # schema and expect a single string argument as an input.
        # We unpack the argument here if it exists.
        # Open AI does not support passing in a JSON array as an argument.
        if "__arg1" in parsed_input:
            tool_input = parsed_input["__arg1"]
        else:
            tool_input = parsed_input
        content_msg = f"responded: {message.content}\n" if message.content else "\n"
        log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
        return AgentActionMessageLog(
            tool=function_name,
            tool_input=tool_input,
            log=log,
            message_log=[message],
        )
    @override
    def parse_result(
        self,
        result: list[Generation],
        *,
        partial: bool = False,
    ) -> AgentAction | AgentFinish:
        first = result[0]
        if not isinstance(first, ChatGeneration):
            msg = "This output parser only works on ChatGeneration output"
            raise ValueError(msg)  # noqa: TRY004
        return self.parse_ai_message(first.message)
    @override
    def parse(self, text: str) -> AgentAction | AgentFinish:
        msg = "Can only parse messages"
        raise ValueError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/output_parsers/openai_functions.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/output_parsers/openai_tools.py | from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, Generation
from typing_extensions import override
from langchain_classic.agents.agent import MultiActionAgentOutputParser
from langchain_classic.agents.output_parsers.tools import (
ToolAgentAction,
parse_ai_message_to_tool_action,
)
# Backwards-compatible alias: the OpenAI-specific action type is now just the
# generic `ToolAgentAction`.
OpenAIToolAgentAction = ToolAgentAction
def parse_ai_message_to_openai_tool_action(
    message: BaseMessage,
) -> list[AgentAction] | AgentFinish:
    """Parse an AI message potentially containing tool_calls."""
    parsed = parse_ai_message_to_tool_action(message)
    if isinstance(parsed, AgentFinish):
        return parsed
    # Re-wrap generic tool actions in the OpenAI-specific (alias) type;
    # anything else passes through unchanged.
    return [
        OpenAIToolAgentAction(
            tool=action.tool,
            tool_input=action.tool_input,
            log=action.log,
            message_log=action.message_log,
            tool_call_id=action.tool_call_id,
        )
        if isinstance(action, ToolAgentAction)
        else action
        for action in parsed
    ]
class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
    """Parse a message into agent actions or a finish.
    Intended for OpenAI models: the model signals tool use via the specific
    `tool_calls` parameter. When `tool_calls` is present it supplies the tool
    names and tool inputs; when it is absent, the AIMessage is assumed to be
    the final output.
    """
    @property
    def _type(self) -> str:
        return "openai-tools-agent-output-parser"
    @override
    def parse_result(
        self,
        result: list[Generation],
        *,
        partial: bool = False,
    ) -> list[AgentAction] | AgentFinish:
        first = result[0]
        if not isinstance(first, ChatGeneration):
            msg = "This output parser only works on ChatGeneration output"
            raise ValueError(msg)  # noqa: TRY004
        return parse_ai_message_to_openai_tool_action(first.message)
    @override
    def parse(self, text: str) -> list[AgentAction] | AgentFinish:
        msg = "Can only parse messages"
        raise ValueError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/output_parsers/openai_tools.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/output_parsers/tools.py | import json
from json import JSONDecodeError
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from typing_extensions import override
from langchain_classic.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
    """Agent action built from a single tool call in an AI message."""
    tool_call_id: str | None
    """Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
    message: BaseMessage,
) -> list[AgentAction] | AgentFinish:
    """Parse an AI message potentially containing tool_calls."""
    if not isinstance(message, AIMessage):
        msg = f"Expected an AI message got {type(message)}"
        raise TypeError(msg)
    if message.tool_calls:
        tool_calls = message.tool_calls
    elif not message.additional_kwargs.get("tool_calls"):
        # Neither structured nor raw tool calls: content is the final answer.
        return AgentFinish(
            return_values={"output": message.content},
            log=str(message.content),
        )
    else:
        # Best-effort parsing of raw OpenAI-style tool calls from
        # additional_kwargs into structured ToolCall objects.
        tool_calls = []
        for raw_call in message.additional_kwargs["tool_calls"]:
            function = raw_call["function"]
            try:
                parsed_args = json.loads(function["arguments"] or "{}")
            except JSONDecodeError as e:
                msg = (
                    f"Could not parse tool input: {function} because "
                    f"the `arguments` is not valid JSON."
                )
                raise OutputParserException(msg) from e
            tool_calls.append(
                ToolCall(
                    type="tool_call",
                    name=function["name"],
                    args=parsed_args,
                    id=raw_call["id"],
                ),
            )
    actions: list = []
    for tool_call in tool_calls:
        # A hack here:
        # The code that encodes tool input into Open AI uses a special variable
        # name called `__arg1` to handle old style tools that do not expose a
        # schema and expect a single string argument as an input.
        # We unpack the argument here if it exists.
        # Open AI does not support passing in a JSON array as an argument.
        name = tool_call["name"]
        raw_input = tool_call["args"]
        tool_input = raw_input.get("__arg1", raw_input)
        content_msg = f"responded: {message.content}\n" if message.content else "\n"
        log = f"\nInvoking: `{name}` with `{tool_input}`\n{content_msg}\n"
        actions.append(
            ToolAgentAction(
                tool=name,
                tool_input=tool_input,
                log=log,
                message_log=[message],
                tool_call_id=tool_call["id"],
            ),
        )
    return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
    """Parse a message into agent actions or a finish.
    When a `tool_calls` parameter is present it supplies the tool names and
    tool inputs; when it is absent, the AIMessage is assumed to be the final
    output.
    """
    @property
    def _type(self) -> str:
        return "tools-agent-output-parser"
    @override
    def parse_result(
        self,
        result: list[Generation],
        *,
        partial: bool = False,
    ) -> list[AgentAction] | AgentFinish:
        first = result[0]
        if not isinstance(first, ChatGeneration):
            msg = "This output parser only works on ChatGeneration output"
            raise ValueError(msg)  # noqa: TRY004
        return parse_ai_message_to_tool_action(first.message)
    @override
    def parse(self, text: str) -> list[AgentAction] | AgentFinish:
        msg = "Can only parse messages"
        raise ValueError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/output_parsers/tools.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/output_parsers/xml.py | import re
from typing import Literal
from langchain_core.agents import AgentAction, AgentFinish
from pydantic import Field
from typing_extensions import override
from langchain_classic.agents import AgentOutputParser
def _unescape(text: str) -> str:
"""Convert custom tag delimiters back into XML tags."""
replacements = {
"[[tool]]": "<tool>",
"[[/tool]]": "</tool>",
"[[tool_input]]": "<tool_input>",
"[[/tool_input]]": "</tool_input>",
"[[observation]]": "<observation>",
"[[/observation]]": "</observation>",
}
for repl, orig in replacements.items():
text = text.replace(repl, orig)
return text
class XMLAgentOutputParser(AgentOutputParser):
    """Parses tool invocations and final answers from XML-formatted agent output.
    This parser extracts structured information from XML tags to determine whether
    an agent should perform a tool action or provide a final answer. It includes
    built-in escaping support to safely handle tool names and inputs
    containing XML special characters.
    Args:
        escape_format: The escaping format to use when parsing XML content.
            Supports 'minimal' which uses custom delimiters like [[tool]] to replace
            XML tags within content, preventing parsing conflicts.
            Use 'minimal' if using a corresponding encoding format that uses
            the _escape function when formatting the output (e.g., with format_xml).
    Expected formats:
        Tool invocation (returns AgentAction):
            <tool>search</tool>
            <tool_input>what is 2 + 2</tool_input>
        Final answer (returns AgentFinish):
            <final_answer>The answer is 4</final_answer>
    !!! note
        Minimal escaping allows tool names containing XML tags to be safely represented.
        For example, a tool named `search<tool>nested</tool>` would be escaped as
        `search[[tool]]nested[[/tool]]` in the XML and automatically unescaped during
        parsing.
    Raises:
        ValueError: If the input doesn't match either expected XML format or
            contains malformed XML structure.
    """
    escape_format: Literal["minimal"] | None = Field(default="minimal")
    """The format to use for escaping XML characters.
    minimal - uses custom delimiters to replace XML tags within content,
    preventing parsing conflicts. This is the only supported format currently.
    None - no escaping is applied, which may lead to parsing conflicts.
    """
    @override
    def parse(self, text: str) -> AgentAction | AgentFinish:
        # Check for tool invocation first
        # re.DOTALL lets tool names and inputs span multiple lines.
        tool_matches = re.findall(r"<tool>(.*?)</tool>", text, re.DOTALL)
        if tool_matches:
            if len(tool_matches) != 1:
                msg = (
                    f"Malformed tool invocation: expected exactly one <tool> block, "
                    f"but found {len(tool_matches)}."
                )
                raise ValueError(msg)
            _tool = tool_matches[0]
            # Match optional tool input
            input_matches = re.findall(
                r"<tool_input>(.*?)</tool_input>", text, re.DOTALL
            )
            if len(input_matches) > 1:
                msg = (
                    f"Malformed tool invocation: expected at most one <tool_input> "
                    f"block, but found {len(input_matches)}."
                )
                raise ValueError(msg)
            # A missing <tool_input> block is allowed and means empty input.
            _tool_input = input_matches[0] if input_matches else ""
            # Unescape if minimal escape format is used
            if self.escape_format == "minimal":
                _tool = _unescape(_tool)
                _tool_input = _unescape(_tool_input)
            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
        # Check for final answer
        if "<final_answer>" in text and "</final_answer>" in text:
            matches = re.findall(r"<final_answer>(.*?)</final_answer>", text, re.DOTALL)
            if len(matches) != 1:
                msg = (
                    "Malformed output: expected exactly one "
                    "<final_answer>...</final_answer> block."
                )
                raise ValueError(msg)
            answer = matches[0]
            # Unescape custom delimiters in final answer
            if self.escape_format == "minimal":
                answer = _unescape(answer)
            return AgentFinish(return_values={"output": answer}, log=text)
        # Neither a tool invocation nor a final answer was found.
        msg = (
            "Malformed output: expected either a tool invocation "
            "or a final answer in XML format."
        )
        raise ValueError(msg)
    @override
    def get_format_instructions(self) -> str:
        # Format instructions are supplied by the agent's prompt, not the parser.
        raise NotImplementedError
    @property
    def _type(self) -> str:
        return "xml-agent"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/output_parsers/xml.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/react/agent.py | from __future__ import annotations
from collections.abc import Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer, render_text_description
from langchain_classic.agents import AgentOutputParser
from langchain_classic.agents.format_scratchpad import format_log_to_str
from langchain_classic.agents.output_parsers import ReActSingleInputOutputParser
def create_react_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: BasePromptTemplate,
    output_parser: AgentOutputParser | None = None,
    tools_renderer: ToolsRenderer = render_text_description,
    *,
    stop_sequence: bool | list[str] = True,
) -> Runnable:
    """Create an agent that uses ReAct prompting.

    Based on the paper "ReAct: Synergizing Reasoning and Acting in Language
    Models" (https://arxiv.org/abs/2210.03629).

    !!! warning
        This implementation follows the foundational ReAct paper but is older
        and not well-suited for production applications. Prefer the
        `create_agent` function from the `langchain` library; see the
        [reference doc](https://reference.langchain.com/python/langchain/agents/)
        for more information.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must declare (or carry partials for) the
            input variables `tools`, `tool_names`, and `agent_scratchpad`.
        output_parser: AgentOutputParser used to parse the LLM output.
            Defaults to `ReActSingleInputOutputParser`.
        tools_renderer: Controls how the tools are rendered into the string
            substituted into the prompt.
        stop_sequence: If `True`, binds a stop token of "\\nObservation" to
            keep the model from hallucinating tool results; if `False`, binds
            no stop token (for LLMs without stop-sequence support); if a list
            of strings, those are used verbatim as the stop tokens.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs either an AgentAction or an
        AgentFinish.

    Raises:
        ValueError: If the prompt is missing any of the required variables.
    """
    # Variables satisfied neither by the prompt's inputs nor by its partials.
    required = {"tools", "tool_names", "agent_scratchpad"}
    provided = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = required.difference(provided)
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
        tool_names=", ".join(t.name for t in tools),
    )
    if not stop_sequence:
        llm_with_stop = llm
    else:
        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
        llm_with_stop = llm.bind(stop=stop)
    parser = output_parser or ReActSingleInputOutputParser()
    # Render intermediate steps into the scratchpad, then prompt -> LLM -> parse.
    scratchpad = RunnablePassthrough.assign(
        agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
    )
    return scratchpad | prompt | llm_with_stop | parser
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/react/agent.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/react/base.py | """Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.tools import BaseTool, Tool
from pydantic import Field
from typing_extensions import override
from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain_classic.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.react.output_parser import ReActOutputParser
from langchain_classic.agents.react.textworld_prompt import TEXTWORLD_PROMPT
from langchain_classic.agents.react.wiki_prompt import WIKI_PROMPT
from langchain_classic.agents.utils import validate_tools_single_input
if TYPE_CHECKING:
from langchain_community.docstore.base import Docstore
# The exact tool names a ReAct docstore agent requires (compared as a set).
_LOOKUP_AND_SEARCH_TOOLS = {"Lookup", "Search"}
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ReActDocstoreAgent(Agent):
    """Agent for the ReAct chain."""

    # Parser that turns raw LLM text into an AgentAction / AgentFinish.
    output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser)

    @classmethod
    @override
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ReActOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of an agent type."""
        return AgentType.REACT_DOCSTORE

    @classmethod
    @override
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return WIKI_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Require exactly the Lookup and Search tools, each single-input."""
        validate_tools_single_input(cls.__name__, tools)
        super()._validate_tools(tools)
        expected = _LOOKUP_AND_SEARCH_TOOLS
        if len(tools) != len(expected):
            msg = f"Exactly two tools must be specified, but got {tools}"
            raise ValueError(msg)
        names = {tool.name for tool in tools}
        if names != expected:
            msg = f"Tool names should be Lookup and Search, got {names}"
            raise ValueError(msg)

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def _stop(self) -> list[str]:
        # Cut generation before the model fabricates its own observation.
        return ["\nObservation:"]

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return "Thought:"
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class DocstoreExplorer:
    """Class to assist with exploration of a document store."""

    def __init__(self, docstore: Docstore):
        """Initialize with a docstore, and set initial document to None."""
        self.docstore = docstore
        self.document: Document | None = None
        self.lookup_str = ""
        self.lookup_index = 0

    def search(self, term: str) -> str:
        """Search for a term in the docstore, and if found save."""
        result = self.docstore.search(term)
        if not isinstance(result, Document):
            # Not a hit; result is the docstore's textual "not found" message.
            self.document = None
            return result
        self.document = result
        return self._summary

    def lookup(self, term: str) -> str:
        """Lookup a term in document (if saved)."""
        if self.document is None:
            msg = "Cannot lookup without a successful search first"
            raise ValueError(msg)
        normalized = term.lower()
        if normalized == self.lookup_str:
            # Same term as the previous call: advance to the next match.
            self.lookup_index += 1
        else:
            # New term: restart from the first matching paragraph.
            self.lookup_str = normalized
            self.lookup_index = 0
        matches = [p for p in self._paragraphs if self.lookup_str in p.lower()]
        if not matches:
            return "No Results"
        if self.lookup_index >= len(matches):
            return "No More Results"
        prefix = f"(Result {self.lookup_index + 1}/{len(matches)})"
        return f"{prefix} {matches[self.lookup_index]}"

    @property
    def _summary(self) -> str:
        # The first paragraph doubles as the document summary.
        return self._paragraphs[0]

    @property
    def _paragraphs(self) -> list[str]:
        if self.document is None:
            msg = "Cannot get paragraphs without a document"
            raise ValueError(msg)
        return self.document.page_content.split("\n\n")
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ReActTextWorldAgent(ReActDocstoreAgent):
    """Agent for the ReAct TextWorld chain.

    Uses a single tool named "Play" instead of the docstore agent's
    Lookup/Search pair.
    """

    @classmethod
    @override
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return TEXTWORLD_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Check that exactly one single-input tool named "Play" was given.

        Raises:
            ValueError: If the tool count or the tool name is wrong.
        """
        validate_tools_single_input(cls.__name__, tools)
        # Bug fix: a plain `super()._validate_tools(tools)` dispatched to
        # ReActDocstoreAgent._validate_tools, which demands exactly the
        # {"Lookup", "Search"} tool pair and therefore rejected this agent's
        # single "Play" tool unconditionally. Skip that override and run the
        # generic Agent-level checks instead.
        super(ReActDocstoreAgent, cls)._validate_tools(tools)
        if len(tools) != 1:
            msg = f"Exactly one tool must be specified, but got {tools}"
            raise ValueError(msg)
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Play"}:
            msg = f"Tool name should be Play, got {tool_names}"
            raise ValueError(msg)
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ReActChain(AgentExecutor):
    """[Deprecated] Chain that implements the ReAct paper."""

    def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any):
        """Initialize with the LLM and a docstore."""
        explorer = DocstoreExplorer(docstore)
        # Both tools close over the same explorer so lookups see the last search.
        search_tool = Tool(
            name="Search",
            func=explorer.search,
            description="Search for a term in the docstore.",
        )
        lookup_tool = Tool(
            name="Lookup",
            func=explorer.lookup,
            description="Lookup a term in the docstore.",
        )
        tools = [search_tool, lookup_tool]
        agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
        super().__init__(agent=agent, tools=tools, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/react/base.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/react/output_parser.py | import re
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from typing_extensions import override
from langchain_classic.agents.agent import AgentOutputParser
class ReActOutputParser(AgentOutputParser):
    """Output parser for the ReAct agent."""

    @override
    def parse(self, text: str) -> AgentAction | AgentFinish:
        action_prefix = "Action: "
        # The action must appear on the final line of the stripped output.
        last_line = text.strip().split("\n")[-1]
        if not last_line.startswith(action_prefix):
            msg = f"Could not parse LLM Output: {text}"
            raise OutputParserException(msg)
        action_str = last_line[len(action_prefix) :]
        # A directive looks like "ToolName[tool input]".
        found = re.search(r"(.*?)\[(.*?)\]", action_str)
        if found is None:
            msg = f"Could not parse action directive: {action_str}"
            raise OutputParserException(msg)
        action, action_input = found.group(1), found.group(2)
        if action == "Finish":
            return AgentFinish({"output": action_input}, text)
        return AgentAction(action, action_input, text)

    @property
    def _type(self) -> str:
        return "react"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/react/output_parser.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/self_ask_with_search/base.py | """Chain that does self-ask with search."""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool, Tool
from pydantic import Field
from typing_extensions import override
from langchain_classic.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.format_scratchpad import format_log_to_str
from langchain_classic.agents.self_ask_with_search.output_parser import (
SelfAskOutputParser,
)
from langchain_classic.agents.self_ask_with_search.prompt import PROMPT
from langchain_classic.agents.utils import validate_tools_single_input
if TYPE_CHECKING:
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langchain_community.utilities.serpapi import SerpAPIWrapper
@deprecated("0.1.0", alternative="create_self_ask_with_search", removal="1.0")
class SelfAskWithSearchAgent(Agent):
    """Agent for the self-ask-with-search paper."""

    # Parser that splits follow-up questions from the final answer.
    output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser)

    @classmethod
    @override
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return SelfAskOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of an agent type."""
        return AgentType.SELF_ASK_WITH_SEARCH

    @classmethod
    @override
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Prompt does not depend on tools."""
        return PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Require a single, single-input tool named "Intermediate Answer"."""
        validate_tools_single_input(cls.__name__, tools)
        super()._validate_tools(tools)
        if len(tools) != 1:
            msg = f"Exactly one tool must be specified, but got {tools}"
            raise ValueError(msg)
        names = {tool.name for tool in tools}
        if names != {"Intermediate Answer"}:
            msg = f"Tool name should be Intermediate Answer, got {names}"
            raise ValueError(msg)

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Intermediate answer: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return ""
@deprecated("0.1.0", removal="1.0")
class SelfAskWithSearchChain(AgentExecutor):
    """[Deprecated] Chain that does self-ask with search."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        search_chain: GoogleSerperAPIWrapper | SearchApiAPIWrapper | SerpAPIWrapper,
        **kwargs: Any,
    ):
        """Initialize only with an LLM and a search chain."""
        # The self-ask agent requires exactly this tool name.
        intermediate_tool = Tool(
            name="Intermediate Answer",
            func=search_chain.run,
            coroutine=search_chain.arun,
            description="Search",
        )
        tools = [intermediate_tool]
        agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, tools)
        super().__init__(agent=agent, tools=tools, **kwargs)
def create_self_ask_with_search_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: BasePromptTemplate,
) -> Runnable:
    """Create an agent that uses self-ask with search prompting.

    Args:
        llm: LLM to use as the agent.
        tools: List of tools. Must contain exactly one tool, and that tool
            must be named `Intermediate Answer`.
        prompt: The prompt to use. It must have an `agent_scratchpad` input
            key, which receives the agent's previous actions and tool outputs
            rendered as a string.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs either an AgentAction or an
        AgentFinish.

    Raises:
        ValueError: If the prompt is missing `agent_scratchpad`, if more than
            one tool is supplied, or if the tool is misnamed.

    Example:
        ```python
        from langchain_classic import hub
        from langchain_anthropic import ChatAnthropic
        from langchain_classic.agents import (
            AgentExecutor,
            create_self_ask_with_search_agent,
        )

        prompt = hub.pull("hwchase17/self-ask-with-search")
        model = ChatAnthropic(model="claude-3-haiku-20240307")
        tools = [...]  # Should just be one tool named `Intermediate Answer`

        agent = create_self_ask_with_search_agent(model, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools)
        agent_executor.invoke({"input": "hi"})
        ```
    """
    provided = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = {"agent_scratchpad"}.difference(provided)
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    if len(tools) != 1:
        msg = "This agent expects exactly one tool"
        raise ValueError(msg)
    tool = next(iter(tools))
    if tool.name != "Intermediate Answer":
        msg = "This agent expects the tool to be named `Intermediate Answer`"
        raise ValueError(msg)

    # Stop before the model fabricates its own intermediate answers.
    llm_with_stop = llm.bind(stop=["\nIntermediate answer:"])
    scratchpad = RunnablePassthrough.assign(
        agent_scratchpad=lambda x: format_log_to_str(
            x["intermediate_steps"],
            observation_prefix="\nIntermediate answer: ",
            llm_prefix="",
        ),
        # Give it a default
        chat_history=lambda x: x.get("chat_history", ""),
    )
    return scratchpad | prompt | llm_with_stop | SelfAskOutputParser()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/self_ask_with_search/base.py",
"license": "MIT License",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/structured_chat/base.py | import re
from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer
from pydantic import Field
from typing_extensions import override
from langchain_classic.agents.agent import Agent, AgentOutputParser
from langchain_classic.agents.format_scratchpad import format_log_to_str
from langchain_classic.agents.output_parsers import JSONAgentOutputParser
from langchain_classic.agents.structured_chat.output_parser import (
StructuredChatOutputParserWithRetries,
)
from langchain_classic.agents.structured_chat.prompt import (
FORMAT_INSTRUCTIONS,
PREFIX,
SUFFIX,
)
from langchain_classic.chains.llm import LLMChain
from langchain_classic.tools.render import render_text_description_and_args
# Human turn template: user input followed by the agent's scratchpad.
HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"
@deprecated("0.1.0", alternative="create_structured_chat_agent", removal="1.0")
class StructuredChatAgent(Agent):
    """Structured Chat Agent.

    Legacy agent that asks the model to emit a JSON blob per action so tools
    with multiple inputs can be called.
    """

    output_parser: AgentOutputParser = Field(
        default_factory=StructuredChatOutputParserWithRetries,
    )
    """Output parser for the agent."""

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    def _construct_scratchpad(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> str:
        """Render prior steps, framed as work the model has not yet reported.

        Args:
            intermediate_steps: (AgentAction, observation) pairs so far.

        Returns:
            The scratchpad string, wrapped in a reminder that only the final
            answer is visible, or the empty string when there are no steps.

        Raises:
            ValueError: If the base class produced a non-string scratchpad.
        """
        agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(agent_scratchpad, str):
            msg = "agent_scratchpad should be of type string."
            raise ValueError(msg)  # noqa: TRY004
        if agent_scratchpad:
            return (
                f"This was your previous work "
                f"(but I haven't seen any of it! I only see what "
                f"you return as final answer):\n{agent_scratchpad}"
            )
        return agent_scratchpad

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        # Intentionally no validation: multi-input tools are allowed here.
        pass

    @classmethod
    @override
    def _get_default_output_parser(
        cls,
        llm: BaseLanguageModel | None = None,
        **kwargs: Any,
    ) -> AgentOutputParser:
        return StructuredChatOutputParserWithRetries.from_llm(llm=llm)

    @property
    @override
    def _stop(self) -> list[str]:
        return ["Observation:"]

    @classmethod
    @override
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
        memory_prompts: list[BasePromptTemplate] | None = None,
    ) -> BasePromptTemplate:
        """Assemble the system + human chat prompt from the given sections.

        The system message is prefix, rendered tool list, format
        instructions, and suffix, joined by blank lines; memory prompts (if
        any) sit between the system and human messages.
        """
        tool_strings = []
        for tool in tools:
            # Double up literal braces in the args schema so they survive the
            # template's format-string substitution.
            args_schema = re.sub("}", "}}", re.sub("{", "{{", str(tool.args)))
            tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}")
        formatted_tools = "\n".join(tool_strings)
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = f"{prefix}\n\n{formatted_tools}\n\n{format_instructions}\n\n{suffix}"
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        _memory_prompts = memory_prompts or []
        messages = [
            SystemMessagePromptTemplate.from_template(template),
            *_memory_prompts,
            HumanMessagePromptTemplate.from_template(human_message_template),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: list[str] | None = None,
        memory_prompts: list[BasePromptTemplate] | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Prompt-section keyword arguments are forwarded to `create_prompt`;
        remaining kwargs go to the agent constructor.
        """
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            human_message_template=human_message_template,
            format_instructions=format_instructions,
            input_variables=input_variables,
            memory_prompts=memory_prompts,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser(llm=llm)
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    @property
    def _agent_type(self) -> str:
        # This legacy agent has no serialization identifier.
        raise ValueError
def create_structured_chat_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    tools_renderer: ToolsRenderer = render_text_description_and_args,
    *,
    stop_sequence: bool | list[str] = True,
) -> Runnable:
    """Create an agent aimed at supporting tools with multiple inputs.

    The agent is prompted to respond with a single JSON blob naming an
    `action` (tool) and its `action_input`, which `JSONAgentOutputParser`
    converts into an AgentAction or AgentFinish.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must declare (or carry partials for) the
            input variables `tools`, `tool_names`, and `agent_scratchpad`.
        tools_renderer: Controls how the tools are rendered into the string
            substituted into the prompt.
        stop_sequence: If `True`, binds a stop token of "\\nObservation" to
            keep the model from hallucinating tool results; if `False`, binds
            no stop token (for LLMs without stop-sequence support); if a list
            of strings, those are used verbatim as the stop tokens.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs either an AgentAction or an
        AgentFinish.

    Raises:
        ValueError: If the prompt is missing any of the required variables.
    """
    required = {"tools", "tool_names", "agent_scratchpad"}
    provided = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = required.difference(provided)
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
        tool_names=", ".join(t.name for t in tools),
    )
    if not stop_sequence:
        llm_with_stop = llm
    else:
        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
        llm_with_stop = llm.bind(stop=stop)

    # Render intermediate steps into the scratchpad, then prompt -> LLM -> parse.
    scratchpad = RunnablePassthrough.assign(
        agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
    )
    return scratchpad | prompt | llm_with_stop | JSONAgentOutputParser()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/structured_chat/base.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/structured_chat/output_parser.py | from __future__ import annotations
import json
import logging
import re
from re import Pattern
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from typing_extensions import override
from langchain_classic.agents.agent import AgentOutputParser
from langchain_classic.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain_classic.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
    """Output parser for the structured chat agent."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions"""

    pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
    """Regex pattern to parse the output."""

    @override
    def get_format_instructions(self) -> str:
        """Returns formatting instructions for the given output parser."""
        return self.format_instructions

    @override
    def parse(self, text: str) -> AgentAction | AgentFinish:
        try:
            found = self.pattern.search(text)
            if found is None:
                # No fenced JSON blob: treat the whole text as a final answer.
                return AgentFinish({"output": text}, text)
            response = json.loads(found.group(1).strip(), strict=False)
            if isinstance(response, list):
                # gpt turbo frequently ignores the directive to emit a single action
                logger.warning("Got multiple action responses: %s", response)
                response = response[0]
            if response["action"] == "Final Answer":
                return AgentFinish({"output": response["action_input"]}, text)
            return AgentAction(
                response["action"],
                response.get("action_input", {}),
                text,
            )
        except Exception as e:
            msg = f"Could not parse LLM output: {text}"
            raise OutputParserException(msg) from e

    @property
    def _type(self) -> str:
        return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
    """Output parser with retries for the structured chat agent."""

    base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
    """The base parser to use."""

    output_fixing_parser: OutputFixingParser | None = None
    """The output fixing parser to use."""

    @override
    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    @override
    def parse(self, text: str) -> AgentAction | AgentFinish:
        try:
            # Prefer the LLM-backed fixing parser when one was configured.
            if self.output_fixing_parser is None:
                return self.base_parser.parse(text)
            return self.output_fixing_parser.parse(text)
        except Exception as e:
            msg = f"Could not parse LLM output: {text}"
            raise OutputParserException(msg) from e

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel | None = None,
        base_parser: StructuredChatOutputParser | None = None,
    ) -> StructuredChatOutputParserWithRetries:
        """Create a StructuredChatOutputParserWithRetries from a language model.

        Args:
            llm: The language model used to repair malformed output. When
                given, the base parser is wrapped in an `OutputFixingParser`.
            base_parser: An optional StructuredChatOutputParser to use.

        Returns:
            An instance of StructuredChatOutputParserWithRetries.
        """
        if llm is not None:
            fixing_parser = OutputFixingParser.from_llm(
                llm=llm,
                parser=base_parser or StructuredChatOutputParser(),
            )
            return cls(output_fixing_parser=fixing_parser)
        if base_parser is not None:
            return cls(base_parser=base_parser)
        return cls()

    @property
    def _type(self) -> str:
        return "structured_chat_with_retries"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/structured_chat/output_parser.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/tool_calling_agent/base.py | from collections.abc import Callable, Sequence
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_classic.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain_classic.agents.output_parsers.tools import ToolsAgentOutputParser
# Formats (AgentAction, tool output) pairs into scratchpad messages.
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    *,
    message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
    """Create an agent that uses tools.

    Args:
        llm: LLM to use as the agent. Must implement `bind_tools`.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must have an `agent_scratchpad` key that
            is a `MessagesPlaceholder`, which receives intermediate agent
            actions and tool output messages.
        message_formatter: Formatter function to convert (AgentAction, tool
            output) tuples into FunctionMessages.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs either an AgentAction or an
        AgentFinish.

    Raises:
        ValueError: If the prompt is missing `agent_scratchpad`, or if the LLM
            does not implement `bind_tools`.
    """
    provided = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = {"agent_scratchpad"}.difference(provided)
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    if not hasattr(llm, "bind_tools"):
        msg = "This function requires a bind_tools() method be implemented on the LLM."
        raise ValueError(
            msg,
        )
    llm_with_tools = llm.bind_tools(tools)

    # Render intermediate steps as messages, then prompt -> LLM -> parse.
    scratchpad = RunnablePassthrough.assign(
        agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"]),
    )
    return scratchpad | prompt | llm_with_tools | ToolsAgentOutputParser()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/tool_calling_agent/base.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/types.py | from langchain_classic.agents.agent import BaseSingleActionAgent
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.chat.base import ChatAgent
from langchain_classic.agents.conversational.base import ConversationalAgent
from langchain_classic.agents.conversational_chat.base import ConversationalChatAgent
from langchain_classic.agents.mrkl.base import ZeroShotAgent
from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_classic.agents.openai_functions_multi_agent.base import (
OpenAIMultiFunctionsAgent,
)
from langchain_classic.agents.react.base import ReActDocstoreAgent
from langchain_classic.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain_classic.agents.structured_chat.base import StructuredChatAgent
# Union of the concrete agent class types that may appear in `AGENT_TO_CLASS`.
AGENT_TYPE = type[BaseSingleActionAgent] | type[OpenAIMultiFunctionsAgent]

# Registry mapping each `AgentType` enum member to its implementing class.
AGENT_TO_CLASS: dict[AgentType, AGENT_TYPE] = {
    AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
    AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
    AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,
    AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent,
    AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent,
    AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent,
    AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION: StructuredChatAgent,
    AgentType.OPENAI_FUNCTIONS: OpenAIFunctionsAgent,
    AgentType.OPENAI_MULTI_FUNCTIONS: OpenAIMultiFunctionsAgent,
}
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/types.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/xml/base.py | from collections.abc import Sequence
from typing import Any
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer, render_text_description
from typing_extensions import override
from langchain_classic.agents.agent import BaseSingleActionAgent
from langchain_classic.agents.format_scratchpad import format_xml
from langchain_classic.agents.output_parsers import XMLAgentOutputParser
from langchain_classic.agents.xml.prompt import agent_instructions
from langchain_classic.chains.llm import LLMChain
@deprecated("0.1.0", alternative="create_xml_agent", removal="1.0")
class XMLAgent(BaseSingleActionAgent):
    """Agent that uses XML tags.

    Args:
        tools: list of tools the agent can choose from
        llm_chain: The LLMChain to call to predict the next action

    Examples:
        ```python
        from langchain_classic.agents import XMLAgent
        from langchain_classic.chains.llm import LLMChain

        tools = ...
        llm_chain = LLMChain(llm=..., prompt=XMLAgent.get_default_prompt())
        agent = XMLAgent(tools=tools, llm_chain=llm_chain)
        ```
    """

    tools: list[BaseTool]
    """List of tools this agent has access to."""

    llm_chain: LLMChain
    """Chain to use to predict action."""

    @property
    @override
    def input_keys(self) -> list[str]:
        return ["input"]

    @staticmethod
    def get_default_prompt() -> ChatPromptTemplate:
        """Return the default prompt for the XML agent."""
        base_prompt = ChatPromptTemplate.from_template(agent_instructions)
        return base_prompt + AIMessagePromptTemplate.from_template(
            "{intermediate_steps}",
        )

    @staticmethod
    def get_default_output_parser() -> XMLAgentOutputParser:
        """Return an XMLAgentOutputParser."""
        return XMLAgentOutputParser()

    def _chain_inputs(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Build the LLMChain inputs shared by `plan` and `aplan`."""
        # Serialize prior steps into the XML transcript the prompt expects.
        log = "".join(
            f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
            f"</tool_input><observation>{observation}</observation>"
            for action, observation in intermediate_steps
        )
        tools = "".join(f"{tool.name}: {tool.description}\n" for tool in self.tools)
        return {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            # Halt generation at these closing tags.
            "stop": ["</tool_input>", "</final_answer>"],
        }

    @override
    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        inputs = self._chain_inputs(intermediate_steps, **kwargs)
        response = self.llm_chain(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]

    @override
    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        inputs = self._chain_inputs(intermediate_steps, **kwargs)
        response = await self.llm_chain.acall(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]
def create_xml_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: BasePromptTemplate,
    tools_renderer: ToolsRenderer = render_text_description,
    *,
    stop_sequence: bool | list[str] = True,
) -> Runnable:
    """Create an agent that formats its logic with XML tags.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: Prompt to use. Must declare the input variables `tools`
            (descriptions for each tool) and `agent_scratchpad` (previous
            agent actions and tool outputs rendered as an XML string).
        tools_renderer: Controls how the tools are converted into a string
            before being substituted into the prompt.
        stop_sequence: `True` to add a "</tool_input>" stop token, `False`
            to add none, or an explicit list of stop strings. Set to `False`
            for LLMs that do not support stop sequences.

    Returns:
        A Runnable sequence taking the same input variables as `prompt`
        (plus `intermediate_steps`) and returning an AgentAction or
        AgentFinish.

    Raises:
        ValueError: If the prompt is missing `tools` or `agent_scratchpad`.
    """
    declared = set(prompt.input_variables) | set(prompt.partial_variables)
    missing_vars = {"tools", "agent_scratchpad"} - declared
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    # Bake the rendered tool descriptions into the prompt up front.
    prompt = prompt.partial(tools=tools_renderer(list(tools)))

    if stop_sequence is True:
        model = llm.bind(stop=["</tool_input>"])
    elif stop_sequence:
        model = llm.bind(stop=stop_sequence)
    else:
        model = llm

    def _scratchpad(inputs: dict) -> str:
        # Render prior (action, observation) pairs as the XML transcript.
        return format_xml(inputs["intermediate_steps"])

    return (
        RunnablePassthrough.assign(agent_scratchpad=_scratchpad)
        | prompt
        | model
        | XMLAgentOutputParser()
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/xml/base.py",
"license": "MIT License",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/cache.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.cache import (
AstraDBCache,
AstraDBSemanticCache,
AzureCosmosDBSemanticCache,
CassandraCache,
CassandraSemanticCache,
FullLLMCache,
FullMd5LLMCache,
GPTCache,
InMemoryCache,
MomentoCache,
RedisCache,
RedisSemanticCache,
SQLAlchemyCache,
SQLAlchemyMd5Cache,
SQLiteCache,
UpstashRedisCache,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.

# Every cache implementation listed here moved to the same community module,
# so the lookup table is generated from one shared target.
_TARGET_MODULE = "langchain_community.cache"
DEPRECATED_LOOKUP = {
    name: _TARGET_MODULE
    for name in (
        "FullLLMCache",
        "SQLAlchemyCache",
        "SQLiteCache",
        "UpstashRedisCache",
        "RedisCache",
        "RedisSemanticCache",
        "GPTCache",
        "MomentoCache",
        "InMemoryCache",
        "CassandraCache",
        "CassandraSemanticCache",
        "FullMd5LLMCache",
        "SQLAlchemyMd5Cache",
        "AstraDBCache",
        "AstraDBSemanticCache",
        "AzureCosmosDBSemanticCache",
    )
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attribute access via the shared importer (PEP 562)."""
    return _import_attribute(name)


__all__ = [
    "AstraDBCache",
    "AstraDBSemanticCache",
    "AzureCosmosDBSemanticCache",
    "CassandraCache",
    "CassandraSemanticCache",
    "FullLLMCache",
    "FullMd5LLMCache",
    "GPTCache",
    "InMemoryCache",
    "MomentoCache",
    "RedisCache",
    "RedisSemanticCache",
    "SQLAlchemyCache",
    "SQLAlchemyMd5Cache",
    "SQLiteCache",
    "UpstashRedisCache",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/cache.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/callbacks/human.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "HumanRejectedException": "langchain_community.callbacks.human",
    "HumanApprovalCallbackHandler": "langchain_community.callbacks.human",
    "AsyncHumanApprovalCallbackHandler": "langchain_community.callbacks.human",
}

# Pass the package name (not __file__) so deprecation warnings reference the
# importable module path, consistent with the sibling deprecation shims.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "AsyncHumanApprovalCallbackHandler",
    "HumanApprovalCallbackHandler",
    "HumanRejectedException",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/callbacks/human.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/callbacks/utils.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
_flatten_dict,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "import_spacy": "langchain_community.callbacks.utils",
    "import_pandas": "langchain_community.callbacks.utils",
    "import_textstat": "langchain_community.callbacks.utils",
    "_flatten_dict": "langchain_community.callbacks.utils",
    "flatten_dict": "langchain_community.callbacks.utils",
    "hash_string": "langchain_community.callbacks.utils",
    "load_json": "langchain_community.callbacks.utils",
    "BaseMetadataCallbackHandler": "langchain_community.callbacks.utils",
}

# Pass the package name (not __file__) so deprecation warnings reference the
# importable module path, consistent with the sibling deprecation shims.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "BaseMetadataCallbackHandler",
    "_flatten_dict",
    "flatten_dict",
    "hash_string",
    "import_pandas",
    "import_spacy",
    "import_textstat",
    "load_json",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/callbacks/utils.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/api/base.py | """Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any
from urllib.parse import urlparse
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import Field, model_validator
from typing_extensions import Self
from langchain_classic.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
def _extract_scheme_and_domain(url: str) -> tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url: The input URL.
Returns:
A 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
"""Check if a URL is in the allowed domains.
Args:
url: The input URL.
limit_to_domains: The allowed domains.
Returns:
`True` if the URL is in the allowed domains, `False` otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain_ = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain_:
return True
return False
try:
    # Optional dependency: the real APIChain is only defined when
    # langchain_community (which supplies the HTTP wrapper) is installed.
    from langchain_community.utilities.requests import TextRequestsWrapper

    @deprecated(
        since="0.2.13",
        message=(
            "This class is deprecated and will be removed in langchain 1.0. "
            "See API reference for replacement: "
            "https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html"
        ),
        removal="1.0",
    )
    class APIChain(Chain):
        """Chain that makes API calls and summarizes the responses to answer a question.

        **Security Note**: This API chain uses the requests toolkit
        to make `GET`, `POST`, `PATCH`, `PUT`, and `DELETE` requests to an API.

        Exercise care in who is allowed to use this chain. If exposing
        to end users, consider that users will be able to make arbitrary
        requests on behalf of the server hosting the code. For example,
        users could ask the server to make a request to a private API
        that is only accessible from the server.

        Control access to who can submit issue requests using this toolkit and
        what network access it has.

        See https://docs.langchain.com/oss/python/security-policy for more
        information.

        !!! note
            This class is deprecated. See below for a replacement implementation using
            LangGraph. The benefits of this implementation are:

            - Uses LLM tool calling features to encourage properly-formatted API requests;
            - Support for both token-by-token and step-by-step streaming;
            - Support for checkpointing and memory of chat history;
            - Easier to modify or extend
              (e.g., with additional tools, structured responses, etc.)

            Install LangGraph with:

            ```bash
            pip install -U langgraph
            ```

            ```python
            from typing import Annotated, Sequence
            from typing_extensions import TypedDict

            from langchain_classic.chains.api.prompt import API_URL_PROMPT
            from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
            from langchain_community.utilities.requests import TextRequestsWrapper
            from langchain_core.messages import BaseMessage
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_openai import ChatOpenAI
            from langchain_core.runnables import RunnableConfig
            from langgraph.graph import END, StateGraph
            from langgraph.graph.message import add_messages
            from langgraph.prebuilt.tool_node import ToolNode

            # NOTE: There are inherent risks in giving models discretion
            # to execute real-world actions. We must "opt-in" to these
            # risks by setting allow_dangerous_request=True to use these tools.
            # This can be dangerous for calling unwanted requests. Please make
            # sure your custom OpenAPI spec (yaml) is safe and that permissions
            # associated with the tools are narrowly-scoped.
            ALLOW_DANGEROUS_REQUESTS = True

            # Subset of spec for https://jsonplaceholder.typicode.com
            api_spec = \"\"\"
            openapi: 3.0.0
            info:
              title: JSONPlaceholder API
              version: 1.0.0
            servers:
              - url: https://jsonplaceholder.typicode.com
            paths:
              /posts:
                get:
                  summary: Get posts
                  parameters: &id001
                    - name: _limit
                      in: query
                      required: false
                      schema:
                        type: integer
                      example: 2
                      description: Limit the number of results
            \"\"\"

            model = ChatOpenAI(model="gpt-4o-mini", temperature=0)

            toolkit = RequestsToolkit(
                requests_wrapper=TextRequestsWrapper(headers={}),  # no auth required
                allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,
            )
            tools = toolkit.get_tools()

            api_request_chain = (
                API_URL_PROMPT.partial(api_docs=api_spec)
                | model.bind_tools(tools, tool_choice="any")
            )

            class ChainState(TypedDict):
                \"\"\"LangGraph state.\"\"\"

                messages: Annotated[Sequence[BaseMessage], add_messages]

            async def acall_request_chain(state: ChainState, config: RunnableConfig):
                last_message = state["messages"][-1]
                response = await api_request_chain.ainvoke(
                    {"question": last_message.content}, config
                )
                return {"messages": [response]}

            async def acall_model(state: ChainState, config: RunnableConfig):
                response = await model.ainvoke(state["messages"], config)
                return {"messages": [response]}

            graph_builder = StateGraph(ChainState)
            graph_builder.add_node("call_tool", acall_request_chain)
            graph_builder.add_node("execute_tool", ToolNode(tools))
            graph_builder.add_node("call_model", acall_model)
            graph_builder.set_entry_point("call_tool")
            graph_builder.add_edge("call_tool", "execute_tool")
            graph_builder.add_edge("execute_tool", "call_model")
            graph_builder.add_edge("call_model", END)
            chain = graph_builder.compile()
            ```

            ```python
            example_query = "Fetch the top two posts. What are their titles?"

            events = chain.astream(
                {"messages": [("user", example_query)]},
                stream_mode="values",
            )
            async for event in events:
                event["messages"][-1].pretty_print()
            ```
        """

        # LLM chain that predicts the API URL from the question and api_docs.
        api_request_chain: LLMChain
        # LLM chain that turns the raw API response into a final answer.
        api_answer_chain: LLMChain
        # HTTP client used to perform the GET request (excluded from serialization).
        requests_wrapper: TextRequestsWrapper = Field(exclude=True)
        # Free-form documentation of the target API, supplied to both LLM chains.
        api_docs: str
        # Keys used for the chain's input/output dictionaries.
        question_key: str = "question"
        output_key: str = "output"
        limit_to_domains: Sequence[str] | None = Field(default_factory=list)
        """Use to limit the domains that can be accessed by the API chain.

        * For example, to limit to just the domain `https://www.example.com`, set
            `limit_to_domains=["https://www.example.com"]`.
        * The default value is an empty tuple, which means that no domains are
            allowed by default. By design this will raise an error on instantiation.
        * Use a None if you want to allow all domains by default -- this is not
            recommended for security reasons, as it would allow malicious users to
            make requests to arbitrary URLS including internal APIs accessible from
            the server.
        """

        @property
        def input_keys(self) -> list[str]:
            """Expect input key."""
            return [self.question_key]

        @property
        def output_keys(self) -> list[str]:
            """Expect output key."""
            return [self.output_key]

        @model_validator(mode="after")
        def validate_api_request_prompt(self) -> Self:
            """Check that api request prompt expects the right variables."""
            input_vars = self.api_request_chain.prompt.input_variables
            expected_vars = {"question", "api_docs"}
            if set(input_vars) != expected_vars:
                msg = f"Input variables should be {expected_vars}, got {input_vars}"
                raise ValueError(msg)
            return self

        @model_validator(mode="before")
        @classmethod
        def validate_limit_to_domains(cls, values: dict) -> Any:
            """Check that allowed domains are valid."""
            # This check must be a pre=True check, so that a default of None
            # won't be set to limit_to_domains if it's not provided.
            if "limit_to_domains" not in values:
                msg = (
                    "You must specify a list of domains to limit access using "
                    "`limit_to_domains`"
                )
                raise ValueError(msg)
            # Falsy-but-not-None (e.g. an empty list) is rejected so the chain
            # fails closed: the user must deliberately choose an allowlist
            # (or explicitly pass None to allow everything).
            if (
                not values["limit_to_domains"]
                and values["limit_to_domains"] is not None
            ):
                msg = (
                    "Please provide a list of domains to limit access using "
                    "`limit_to_domains`."
                )
                raise ValueError(msg)
            return values

        @model_validator(mode="after")
        def validate_api_answer_prompt(self) -> Self:
            """Check that api answer prompt expects the right variables."""
            input_vars = self.api_answer_chain.prompt.input_variables
            expected_vars = {"question", "api_docs", "api_url", "api_response"}
            if set(input_vars) != expected_vars:
                msg = f"Input variables should be {expected_vars}, got {input_vars}"
                raise ValueError(msg)
            return self

        def _call(
            self,
            inputs: dict[str, Any],
            run_manager: CallbackManagerForChainRun | None = None,
        ) -> dict[str, str]:
            _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
            question = inputs[self.question_key]
            # Step 1: have the LLM construct the request URL from the question.
            api_url = self.api_request_chain.predict(
                question=question,
                api_docs=self.api_docs,
                callbacks=_run_manager.get_child(),
            )
            _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
            api_url = api_url.strip()
            # Step 2: enforce the domain allowlist before any network request.
            if self.limit_to_domains and not _check_in_allowed_domain(
                api_url,
                self.limit_to_domains,
            ):
                msg = (
                    f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
                )
                raise ValueError(msg)
            # Step 3: perform the GET request and summarize the response.
            api_response = self.requests_wrapper.get(api_url)
            _run_manager.on_text(
                str(api_response),
                color="yellow",
                end="\n",
                verbose=self.verbose,
            )
            answer = self.api_answer_chain.predict(
                question=question,
                api_docs=self.api_docs,
                api_url=api_url,
                api_response=api_response,
                callbacks=_run_manager.get_child(),
            )
            return {self.output_key: answer}

        async def _acall(
            self,
            inputs: dict[str, Any],
            run_manager: AsyncCallbackManagerForChainRun | None = None,
        ) -> dict[str, str]:
            # Async mirror of `_call`; keep the two in sync.
            _run_manager = (
                run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
            )
            question = inputs[self.question_key]
            api_url = await self.api_request_chain.apredict(
                question=question,
                api_docs=self.api_docs,
                callbacks=_run_manager.get_child(),
            )
            await _run_manager.on_text(
                api_url,
                color="green",
                end="\n",
                verbose=self.verbose,
            )
            api_url = api_url.strip()
            if self.limit_to_domains and not _check_in_allowed_domain(
                api_url,
                self.limit_to_domains,
            ):
                msg = (
                    f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
                )
                raise ValueError(msg)
            api_response = await self.requests_wrapper.aget(api_url)
            await _run_manager.on_text(
                str(api_response),
                color="yellow",
                end="\n",
                verbose=self.verbose,
            )
            answer = await self.api_answer_chain.apredict(
                question=question,
                api_docs=self.api_docs,
                api_url=api_url,
                api_response=api_response,
                callbacks=_run_manager.get_child(),
            )
            return {self.output_key: answer}

        @classmethod
        def from_llm_and_api_docs(
            cls,
            llm: BaseLanguageModel,
            api_docs: str,
            headers: dict | None = None,
            api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
            api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
            limit_to_domains: Sequence[str] | None = (),
            **kwargs: Any,
        ) -> APIChain:
            """Load chain from just an LLM and the api docs."""
            get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
            requests_wrapper = TextRequestsWrapper(headers=headers)
            get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
            return cls(
                api_request_chain=get_request_chain,
                api_answer_chain=get_answer_chain,
                requests_wrapper=requests_wrapper,
                api_docs=api_docs,
                limit_to_domains=limit_to_domains,
                **kwargs,
            )

        @property
        def _chain_type(self) -> str:
            return "api_chain"

except ImportError:

    class APIChain:  # type: ignore[no-redef]
        """Raise an ImportError if APIChain is used without langchain_community."""

        def __init__(self, *_: Any, **__: Any) -> None:
            """Raise an ImportError if APIChain is used without langchain_community."""
            msg = (
                "To use the APIChain, you must install the langchain_community package."
                "pip install langchain_community"
            )
            raise ImportError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/api/base.py",
"license": "MIT License",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/api/openapi/chain.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "OpenAPIEndpointChain": "langchain_community.chains.openapi.chain",
}

# Resolver that warns about the deprecated location and re-imports the symbol
# from its new home in langchain_community.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Invoked by Python for names not found in the module namespace (PEP 562);
    delegates to the deprecation-aware importer.
    """
    return _import_attribute(name)


__all__ = ["OpenAPIEndpointChain"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/api/openapi/chain.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/api/openapi/prompts.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.prompts import (
REQUEST_TEMPLATE,
RESPONSE_TEMPLATE,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "REQUEST_TEMPLATE": "langchain_community.chains.openapi.prompts",
    "RESPONSE_TEMPLATE": "langchain_community.chains.openapi.prompts",
}

# Resolver that warns about the deprecated location and re-imports the symbol
# from its new home in langchain_community.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Invoked by Python for names not found in the module namespace (PEP 562);
    delegates to the deprecation-aware importer.
    """
    return _import_attribute(name)


__all__ = ["REQUEST_TEMPLATE", "RESPONSE_TEMPLATE"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/api/openapi/prompts.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/base.py | """Base interface that all chains should implement."""
import builtins
import contextlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, cast
import yaml
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
BaseCallbackManager,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.outputs import RunInfo
from langchain_core.runnables import (
RunnableConfig,
RunnableSerializable,
ensure_config,
run_in_executor,
)
from langchain_core.utils.pydantic import create_model
from pydantic import (
BaseModel,
ConfigDict,
Field,
field_validator,
model_validator,
)
from typing_extensions import override
from langchain_classic.base_memory import BaseMemory
from langchain_classic.schema import RUN_KEY
logger = logging.getLogger(__name__)
def _get_verbosity() -> bool:
    """Return the current global `verbose` flag.

    The import is deferred to call time — presumably to avoid a circular
    import with `langchain_classic.globals`; confirm before hoisting it.
    """
    from langchain_classic.globals import get_verbose

    return get_verbose()
class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
    """Abstract base class for creating structured sequences of calls to components.

    Chains should be used to encode a sequence of calls to components like
    models, document retrievers, other chains, etc., and provide a simple interface
    to this sequence.

    The Chain interface makes it easy to create apps that are:

    - Stateful: add Memory to any Chain to give it state,
    - Observable: pass Callbacks to a Chain to execute additional functionality,
      like logging, outside the main sequence of component calls,
    - Composable: the Chain API is flexible enough that it is easy to combine
      Chains with other components, including other Chains.

    The main methods exposed by chains are:

    - `__call__`: Chains are callable. The `__call__` method is the primary way to
      execute a Chain. This takes inputs as a dictionary and returns a
      dictionary output.
    - `run`: A convenience method that takes inputs as args/kwargs and returns the
      output as a string or object. This method can only be used for a subset of
      chains and cannot return as rich of an output as `__call__`.
    """

    memory: BaseMemory | None = None
    """Optional memory object.

    Memory is a class that gets called at the start
    and at the end of every chain. At the start, memory loads variables and passes
    them along in the chain. At the end, it saves any returned variables.
    There are many different types of memory - please see memory docs
    for the full catalog."""
    callbacks: Callbacks = Field(default=None, exclude=True)
    """Optional list of callback handlers (or callback manager).

    Callback handlers are called throughout the lifecycle of a call to a chain,
    starting with on_chain_start, ending with on_chain_end or on_chain_error.
    Each custom chain can optionally call additional callback methods, see Callback docs
    for full details."""
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether or not run in verbose mode. In verbose mode, some intermediate logs
    will be printed to the console. Defaults to the global `verbose` value,
    accessible via `langchain.globals.get_verbose()`."""
    tags: list[str] | None = None
    """Optional list of tags associated with the chain.

    These tags will be associated with each call to this chain,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a chain with its use case.
    """
    metadata: builtins.dict[str, Any] | None = None
    """Optional metadata associated with the chain.

    This metadata will be associated with each call to this chain,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a chain with its use case.
    """
    callback_manager: BaseCallbackManager | None = Field(default=None, exclude=True)
    """[DEPRECATED] Use `callbacks` instead."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @override
    def get_input_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Build a pydantic input schema from `input_keys` (all fields typed Any)."""
        # This is correct, but pydantic typings/mypy don't think so.
        return create_model("ChainInput", **dict.fromkeys(self.input_keys, (Any, None)))

    @override
    def get_output_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Build a pydantic output schema from `output_keys` (all fields typed Any)."""
        # This is correct, but pydantic typings/mypy don't think so.
        return create_model(
            "ChainOutput",
            **dict.fromkeys(self.output_keys, (Any, None)),
        )

    @override
    def invoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Execute the chain synchronously with callback/memory bookkeeping."""
        config = ensure_config(config)
        callbacks = config.get("callbacks")
        tags = config.get("tags")
        metadata = config.get("metadata")
        run_name = config.get("run_name") or self.get_name()
        run_id = config.get("run_id")
        include_run_info = kwargs.get("include_run_info", False)
        return_only_outputs = kwargs.get("return_only_outputs", False)

        inputs = self.prep_inputs(input)
        callback_manager = CallbackManager.configure(
            callbacks,
            self.callbacks,
            self.verbose,
            tags,
            self.tags,
            metadata,
            self.metadata,
        )
        # Older `_call` implementations may not accept `run_manager`; only pass
        # it when the signature declares the parameter.
        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
        run_manager = callback_manager.on_chain_start(
            None,
            inputs,
            run_id,
            name=run_name,
        )
        try:
            self._validate_inputs(inputs)
            outputs = (
                self._call(inputs, run_manager=run_manager)
                if new_arg_supported
                else self._call(inputs)
            )

            final_outputs: dict[str, Any] = self.prep_outputs(
                inputs,
                outputs,
                return_only_outputs,
            )
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
        run_manager.on_chain_end(outputs)

        if include_run_info:
            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
        return final_outputs

    @override
    async def ainvoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Execute the chain asynchronously with callback/memory bookkeeping."""
        config = ensure_config(config)
        callbacks = config.get("callbacks")
        tags = config.get("tags")
        metadata = config.get("metadata")
        run_name = config.get("run_name") or self.get_name()
        run_id = config.get("run_id")
        include_run_info = kwargs.get("include_run_info", False)
        return_only_outputs = kwargs.get("return_only_outputs", False)

        inputs = await self.aprep_inputs(input)
        callback_manager = AsyncCallbackManager.configure(
            callbacks,
            self.callbacks,
            self.verbose,
            tags,
            self.tags,
            metadata,
            self.metadata,
        )
        # Older `_acall` implementations may not accept `run_manager`; only
        # pass it when the signature declares the parameter.
        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
        run_manager = await callback_manager.on_chain_start(
            None,
            inputs,
            run_id,
            name=run_name,
        )
        try:
            self._validate_inputs(inputs)
            outputs = (
                await self._acall(inputs, run_manager=run_manager)
                if new_arg_supported
                else await self._acall(inputs)
            )
            final_outputs: dict[str, Any] = await self.aprep_outputs(
                inputs,
                outputs,
                return_only_outputs,
            )
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
        await run_manager.on_chain_end(outputs)

        if include_run_info:
            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
        return final_outputs

    @property
    def _chain_type(self) -> str:
        msg = "Saving not supported for this chain type."
        raise NotImplementedError(msg)

    @model_validator(mode="before")
    @classmethod
    def raise_callback_manager_deprecation(cls, values: dict) -> Any:
        """Raise deprecation warning if callback_manager is used."""
        if values.get("callback_manager") is not None:
            if values.get("callbacks") is not None:
                msg = (
                    "Cannot specify both callback_manager and callbacks. "
                    "callback_manager is deprecated, callbacks is the preferred "
                    "parameter to pass in."
                )
                raise ValueError(msg)
            warnings.warn(
                "callback_manager is deprecated. Please use callbacks instead.",
                DeprecationWarning,
                stacklevel=4,
            )
            values["callbacks"] = values.pop("callback_manager", None)
        return values

    @field_validator("verbose", mode="before")
    @classmethod
    def set_verbose(
        cls,
        verbose: bool | None,  # noqa: FBT001
    ) -> bool:
        """Set the chain verbosity.

        Defaults to the global setting if not specified by the user.
        """
        if verbose is None:
            return _get_verbosity()
        return verbose

    @property
    @abstractmethod
    def input_keys(self) -> list[str]:
        """Keys expected to be in the chain input."""

    @property
    @abstractmethod
    def output_keys(self) -> list[str]:
        """Keys expected to be in the chain output."""

    def _validate_inputs(self, inputs: Any) -> None:
        """Check that all inputs are present."""
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            if len(_input_keys) != 1:
                msg = (
                    f"A single string input was passed in, but this chain expects "
                    f"multiple inputs ({_input_keys}). When a chain expects "
                    f"multiple inputs, please call it by passing in a dictionary, "
                    "eg `chain({'foo': 1, 'bar': 2})`"
                )
                raise ValueError(msg)
        missing_keys = set(self.input_keys).difference(inputs)
        if missing_keys:
            msg = f"Missing some input keys: {missing_keys}"
            raise ValueError(msg)

    def _validate_outputs(self, outputs: dict[str, Any]) -> None:
        # Raise if the chain implementation failed to produce a declared key.
        missing_keys = set(self.output_keys).difference(outputs)
        if missing_keys:
            msg = f"Missing some output keys: {missing_keys}"
            raise ValueError(msg)

    @abstractmethod
    def _call(
        self,
        inputs: builtins.dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> builtins.dict[str, Any]:
        """Execute the chain.

        This is a private method that is not user-facing. It is only called within
        `Chain.__call__`, which is the user-facing wrapper method that handles
        callbacks configuration and some input/output processing.

        Args:
            inputs: A dict of named inputs to the chain. Assumed to contain all inputs
                specified in `Chain.input_keys`, including any inputs added by memory.
            run_manager: The callbacks manager that contains the callback handlers for
                this run of the chain.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
        """

    async def _acall(
        self,
        inputs: builtins.dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> builtins.dict[str, Any]:
        """Asynchronously execute the chain.

        This is a private method that is not user-facing. It is only called within
        `Chain.acall`, which is the user-facing wrapper method that handles
        callbacks configuration and some input/output processing.

        Args:
            inputs: A dict of named inputs to the chain. Assumed to contain all inputs
                specified in `Chain.input_keys`, including any inputs added by memory.
            run_manager: The callbacks manager that contains the callback handlers for
                this run of the chain.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
        """
        # Default async implementation delegates to the sync `_call` on an
        # executor so subclasses only have to implement one of the two.
        return await run_in_executor(
            None,
            self._call,
            inputs,
            run_manager.get_sync() if run_manager else None,
        )

    @deprecated("0.1.0", alternative="invoke", removal="1.0")
    def __call__(
        self,
        inputs: dict[str, Any] | Any,
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
        callbacks: Callbacks = None,
        *,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        run_name: str | None = None,
        include_run_info: bool = False,
    ) -> dict[str, Any]:
        """Execute the chain.

        Args:
            inputs: Dictionary of inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.
            return_only_outputs: Whether to return only outputs in the
                response. If `True`, only new keys generated by this chain will be
                returned. If `False`, both input keys and new keys generated by this
                chain will be returned.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            run_name: Optional name for this run of the chain.
            include_run_info: Whether to include run info in the response. Defaults
                to False.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
        """
        config = {
            "callbacks": callbacks,
            "tags": tags,
            "metadata": metadata,
            "run_name": run_name,
        }

        return self.invoke(
            inputs,
            cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
            return_only_outputs=return_only_outputs,
            include_run_info=include_run_info,
        )

    @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
    async def acall(
        self,
        inputs: dict[str, Any] | Any,
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
        callbacks: Callbacks = None,
        *,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        run_name: str | None = None,
        include_run_info: bool = False,
    ) -> dict[str, Any]:
        """Asynchronously execute the chain.

        Args:
            inputs: Dictionary of inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.
            return_only_outputs: Whether to return only outputs in the
                response. If `True`, only new keys generated by this chain will be
                returned. If `False`, both input keys and new keys generated by this
                chain will be returned.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            run_name: Optional name for this run of the chain.
            include_run_info: Whether to include run info in the response. Defaults
                to False.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
        """
        config = {
            "callbacks": callbacks,
            "tags": tags,
            "metadata": metadata,
            "run_name": run_name,
        }
        # Filter on the *value*, mirroring `__call__`: keys are string literals
        # and never None, so filtering on `k` was a no-op that let explicit
        # None entries override values already present in the config.
        return await self.ainvoke(
            inputs,
            cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
            return_only_outputs=return_only_outputs,
            include_run_info=include_run_info,
        )

    def prep_outputs(
        self,
        inputs: dict[str, str],
        outputs: dict[str, str],
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    ) -> dict[str, str]:
        """Validate and prepare chain outputs, and save info about this run to memory.

        Args:
            inputs: Dictionary of chain inputs, including any inputs added by chain
                memory.
            outputs: Dictionary of initial chain outputs.
            return_only_outputs: Whether to only return the chain outputs. If `False`,
                inputs are also added to the final outputs.

        Returns:
            A dict of the final chain outputs.
        """
        self._validate_outputs(outputs)
        if self.memory is not None:
            self.memory.save_context(inputs, outputs)
        if return_only_outputs:
            return outputs
        return {**inputs, **outputs}

    async def aprep_outputs(
        self,
        inputs: dict[str, str],
        outputs: dict[str, str],
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    ) -> dict[str, str]:
        """Validate and prepare chain outputs, and save info about this run to memory.

        Args:
            inputs: Dictionary of chain inputs, including any inputs added by chain
                memory.
            outputs: Dictionary of initial chain outputs.
            return_only_outputs: Whether to only return the chain outputs. If `False`,
                inputs are also added to the final outputs.

        Returns:
            A dict of the final chain outputs.
        """
        self._validate_outputs(outputs)
        if self.memory is not None:
            await self.memory.asave_context(inputs, outputs)
        if return_only_outputs:
            return outputs
        return {**inputs, **outputs}

    def prep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
        """Prepare chain inputs, including adding inputs from memory.

        Args:
            inputs: Dictionary of raw inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.

        Returns:
            A dictionary of all inputs, including those added by the chain's memory.
        """
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            inputs = {next(iter(_input_keys)): inputs}
        if self.memory is not None:
            external_context = self.memory.load_memory_variables(inputs)
            inputs = dict(inputs, **external_context)
        return inputs

    async def aprep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
        """Prepare chain inputs, including adding inputs from memory.

        Args:
            inputs: Dictionary of raw inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.

        Returns:
            A dictionary of all inputs, including those added by the chain's memory.
        """
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            inputs = {next(iter(_input_keys)): inputs}
        if self.memory is not None:
            external_context = await self.memory.aload_memory_variables(inputs)
            inputs = dict(inputs, **external_context)
        return inputs

    @property
    def _run_output_key(self) -> str:
        # `run`/`arun` can only return a bare value when there is a single
        # output key to pick it from.
        if len(self.output_keys) != 1:
            msg = (
                f"`run` not supported when there is not exactly "
                f"one output key. Got {self.output_keys}."
            )
            raise ValueError(msg)
        return self.output_keys[0]

    @deprecated("0.1.0", alternative="invoke", removal="1.0")
    def run(
        self,
        *args: Any,
        callbacks: Callbacks = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Convenience method for executing chain.

        The main difference between this method and `Chain.__call__` is that this
        method expects inputs to be passed directly in as positional arguments or
        keyword arguments, whereas `Chain.__call__` expects a single input dictionary
        with all the inputs

        Args:
            *args: If the chain expects a single input, it can be passed in as the
                sole positional argument.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            **kwargs: If the chain expects multiple inputs, they can be passed in
                directly as keyword arguments.

        Returns:
            The chain output.

        Example:
            ```python
            # Suppose we have a single-input chain that takes a 'question' string:
            chain.run("What's the temperature in Boise, Idaho?")
            # -> "The temperature in Boise is..."

            # Suppose we have a multi-input chain that takes a 'question' string
            # and 'context' string:
            question = "What's the temperature in Boise, Idaho?"
            context = "Weather report for Boise, Idaho on 07/03/23..."
            chain.run(question=question, context=context)
            # -> "The temperature in Boise is..."
            ```
        """
        # Run at start to make sure this is possible/defined
        _output_key = self._run_output_key

        if args and not kwargs:
            if len(args) != 1:
                msg = "`run` supports only one positional argument."
                raise ValueError(msg)
            return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
                _output_key
            ]

        if kwargs and not args:
            return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
                _output_key
            ]

        if not kwargs and not args:
            msg = (
                "`run` supported with either positional arguments or keyword arguments,"
                " but none were provided."
            )
            raise ValueError(msg)
        msg = (
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )
        raise ValueError(msg)

    @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
    async def arun(
        self,
        *args: Any,
        callbacks: Callbacks = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Convenience method for executing chain.

        The main difference between this method and `Chain.__call__` is that this
        method expects inputs to be passed directly in as positional arguments or
        keyword arguments, whereas `Chain.__call__` expects a single input dictionary
        with all the inputs

        Args:
            *args: If the chain expects a single input, it can be passed in as the
                sole positional argument.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            **kwargs: If the chain expects multiple inputs, they can be passed in
                directly as keyword arguments.

        Returns:
            The chain output.

        Example:
            ```python
            # Suppose we have a single-input chain that takes a 'question' string:
            await chain.arun("What's the temperature in Boise, Idaho?")
            # -> "The temperature in Boise is..."

            # Suppose we have a multi-input chain that takes a 'question' string
            # and 'context' string:
            question = "What's the temperature in Boise, Idaho?"
            context = "Weather report for Boise, Idaho on 07/03/23..."
            await chain.arun(question=question, context=context)
            # -> "The temperature in Boise is..."
            ```
        """
        if len(self.output_keys) != 1:
            msg = (
                f"`run` not supported when there is not exactly "
                f"one output key. Got {self.output_keys}."
            )
            raise ValueError(msg)

        if args and not kwargs:
            if len(args) != 1:
                msg = "`run` supports only one positional argument."
                raise ValueError(msg)
            return (
                await self.acall(
                    args[0],
                    callbacks=callbacks,
                    tags=tags,
                    metadata=metadata,
                )
            )[self.output_keys[0]]

        if kwargs and not args:
            return (
                await self.acall(
                    kwargs,
                    callbacks=callbacks,
                    tags=tags,
                    metadata=metadata,
                )
            )[self.output_keys[0]]

        # Mirror `run`: give a dedicated error when no inputs were provided at
        # all, instead of the misleading "but not both" message below.
        if not kwargs and not args:
            msg = (
                "`run` supported with either positional arguments or keyword arguments,"
                " but none were provided."
            )
            raise ValueError(msg)
        msg = (
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )
        raise ValueError(msg)

    def dict(self, **kwargs: Any) -> dict:
        """Dictionary representation of chain.

        Expects `Chain._chain_type` property to be implemented and for memory to be
        null.

        Args:
            **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
                method.

        Returns:
            A dictionary representation of the chain.

        Example:
            ```python
            chain.model_dump(exclude_unset=True)
            # -> {"_type": "foo", "verbose": False, ...}
            ```
        """
        _dict = super().model_dump(**kwargs)
        # `_chain_type` raises NotImplementedError on chains that don't
        # support saving; omit the key in that case rather than failing.
        with contextlib.suppress(NotImplementedError):
            _dict["_type"] = self._chain_type
        return _dict

    def save(self, file_path: Path | str) -> None:
        """Save the chain.

        Expects `Chain._chain_type` property to be implemented and for memory to be
        null.

        Args:
            file_path: Path to file to save the chain to.

        Example:
            ```python
            chain.save(file_path="path/chain.yaml")
            ```
        """
        if self.memory is not None:
            msg = "Saving of memory is not yet supported."
            raise ValueError(msg)

        # Fetch dictionary to save
        chain_dict = self.model_dump()
        if "_type" not in chain_dict:
            msg = f"Chain {self} does not support saving."
            raise NotImplementedError(msg)

        # Convert file to Path object.
        save_path = Path(file_path) if isinstance(file_path, str) else file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        if save_path.suffix == ".json":
            with save_path.open("w") as f:
                json.dump(chain_dict, f, indent=4)
        elif save_path.suffix.endswith((".yaml", ".yml")):
            with save_path.open("w") as f:
                yaml.dump(chain_dict, f, default_flow_style=False)
        else:
            msg = f"{save_path} must be json or yaml"
            raise ValueError(msg)

    @deprecated("0.1.0", alternative="batch", removal="1.0")
    def apply(
        self,
        input_list: list[builtins.dict[str, Any]],
        callbacks: Callbacks = None,
    ) -> list[builtins.dict[str, str]]:
        """Call the chain on all inputs in the list."""
        return [self(inputs, callbacks=callbacks) for inputs in input_list]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/base.py",
"license": "MIT License",
"lines": 713,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/combine_documents/base.py | """Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables.config import RunnableConfig
from langchain_core.utils.pydantic import create_model
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import BaseModel, Field
from typing_extensions import override
from langchain_classic.chains.base import Chain
# Separator placed between formatted documents when they are joined into one
# prompt string.
DEFAULT_DOCUMENT_SEPARATOR = "\n\n"
# Default prompt input variable that the combined document text is bound to.
DOCUMENTS_KEY = "context"
# By default each document is rendered as just its raw page_content.
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template("{page_content}")
def _validate_prompt(prompt: BasePromptTemplate, document_variable_name: str) -> None:
    """Raise `ValueError` unless `prompt` accepts `document_variable_name`."""
    accepted = prompt.input_variables
    if document_variable_name in accepted:
        return
    msg = (
        f"Prompt must accept {document_variable_name} as an input variable. "
        f"Received prompt with input variables: {accepted}"
    )
    raise ValueError(msg)
class BaseCombineDocumentsChain(Chain, ABC):
    """Base interface for chains combining documents.

    Subclasses combine a list of documents in a variety of ways. This base
    class adds uniformity to the interface such chains expose: they read the
    documents from a single input key (default `input_documents`) and write a
    single string to an output key (default `output_text`). They also expose
    `prompt_length`, which lets outside callers check whether passing a list
    of documents into the chain would exceed a context limit before doing so.
    """

    # Key under which the list of input documents is expected.
    input_key: str = "input_documents"
    # Key under which the combined text is returned.
    output_key: str = "output_text"

    @override
    def get_input_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Pydantic schema with a single list-of-documents field."""
        field_spec = {self.input_key: (list[Document], None)}
        return create_model("CombineDocumentsInput", **field_spec)

    @override
    def get_output_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Pydantic schema with a single string output field."""
        field_spec = {self.output_key: (str, None)}
        return create_model("CombineDocumentsOutput", **field_spec)

    @property
    def input_keys(self) -> list[str]:
        """Expect input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Return output key."""
        return [self.output_key]

    def prompt_length(self, docs: list[Document], **kwargs: Any) -> int | None:  # noqa: ARG002
        """Return the prompt length given the documents passed in.

        Callers can use this to decide whether a list of documents fits within
        a prompt/context limit before invoking the chain.

        Args:
            docs: a list of documents to use to calculate the total prompt length.
            **kwargs: additional parameters that may be needed to calculate the
                prompt length.

        Returns:
            None if the method does not depend on the prompt length,
            otherwise the length of the prompt in tokens.
        """
        return None

    @abstractmethod
    def combine_docs(self, docs: list[Document], **kwargs: Any) -> tuple[str, dict]:
        """Combine documents into a single string.

        Args:
            docs: List[Document], the documents to combine
            **kwargs: Other parameters to use in combining documents, often
                other inputs to the prompt.

        Returns:
            A pair of (combined output string, dict of extra keys to return).
        """

    @abstractmethod
    async def acombine_docs(
        self,
        docs: list[Document],
        **kwargs: Any,
    ) -> tuple[str, dict]:
        """Asynchronously combine documents into a single string.

        Args:
            docs: List[Document], the documents to combine
            **kwargs: Other parameters to use in combining documents, often
                other inputs to the prompt.

        Returns:
            A pair of (combined output string, dict of extra keys to return).
        """

    def _call(
        self,
        inputs: dict[str, list[Document]],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Prepare inputs, call combine docs, prepare outputs."""
        manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        docs = inputs[self.input_key]
        # Everything except the document list is forwarded to the LLM prompt.
        extra_inputs = {
            key: value for key, value in inputs.items() if key != self.input_key
        }
        combined, extra_outputs = self.combine_docs(
            docs,
            callbacks=manager.get_child(),
            **extra_inputs,
        )
        extra_outputs[self.output_key] = combined
        return extra_outputs

    async def _acall(
        self,
        inputs: dict[str, list[Document]],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Prepare inputs, call combine docs, prepare outputs."""
        manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        docs = inputs[self.input_key]
        # Everything except the document list is forwarded to the LLM prompt.
        extra_inputs = {
            key: value for key, value in inputs.items() if key != self.input_key
        }
        combined, extra_outputs = await self.acombine_docs(
            docs,
            callbacks=manager.get_child(),
            **extra_inputs,
        )
        extra_outputs[self.output_key] = combined
        return extra_outputs
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html"
),
removal="1.0",
)
class AnalyzeDocumentChain(Chain):
"""Chain that splits documents, then analyzes it in pieces.
This chain is parameterized by a TextSplitter and a CombineDocumentsChain.
This chain takes a single document as input, and then splits it up into chunks
and then passes those chucks to the CombineDocumentsChain.
This class is deprecated. See below for alternative implementations which
supports async and streaming modes of operation.
If the underlying combine documents chain takes one `input_documents` argument
(e.g., chains generated by `load_summarize_chain`):
```python
split_text = lambda x: text_splitter.create_documents([x])
summarize_document_chain = split_text | chain
```
If the underlying chain takes additional arguments (e.g., `load_qa_chain`, which
takes an additional `question` argument), we can use the following:
```python
from operator import itemgetter
from langchain_core.runnables import RunnableLambda, RunnableParallel
split_text = RunnableLambda(lambda x: text_splitter.create_documents([x]))
summarize_document_chain = RunnableParallel(
question=itemgetter("question"),
input_documents=itemgetter("input_document") | split_text,
) | chain.pick("output_text")
```
To additionally return the input parameters, as `AnalyzeDocumentChain` does,
we can wrap this construction with `RunnablePassthrough`:
```python
from operator import itemgetter
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
split_text = RunnableLambda(lambda x: text_splitter.create_documents([x]))
summarize_document_chain = RunnablePassthrough.assign(
output_text=RunnableParallel(
question=itemgetter("question"),
input_documents=itemgetter("input_document") | split_text,
)
| chain.pick("output_text")
)
```
"""
input_key: str = "input_document"
text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter)
combine_docs_chain: BaseCombineDocumentsChain
@property
def input_keys(self) -> list[str]:
"""Expect input key."""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key."""
return self.combine_docs_chain.output_keys
@override
def get_input_schema(
self,
config: RunnableConfig | None = None,
) -> type[BaseModel]:
return create_model(
"AnalyzeDocumentChain",
**{self.input_key: (str, None)},
)
@override
def get_output_schema(
self,
config: RunnableConfig | None = None,
) -> type[BaseModel]:
return self.combine_docs_chain.get_output_schema(config)
def _call(
    self,
    inputs: dict[str, str],
    run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
    """Split the input document into chunks and delegate to the combine chain."""
    manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    raw_text = inputs[self.input_key]
    chunks = self.text_splitter.create_documents([raw_text])
    # Forward every other input untouched; they may be needed by the LLM.
    combine_inputs = {
        key: value for key, value in inputs.items() if key != self.input_key
    }
    combine_inputs[self.combine_docs_chain.input_key] = chunks
    return self.combine_docs_chain(
        combine_inputs,
        return_only_outputs=True,
        callbacks=manager.get_child(),
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/combine_documents/base.py",
"license": "MIT License",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/constitutional_ai/base.py | """Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_classic.chains.base import Chain
from langchain_classic.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain_classic.chains.constitutional_ai.principles import PRINCIPLES
from langchain_classic.chains.constitutional_ai.prompts import (
CRITIQUE_PROMPT,
REVISION_PROMPT,
)
from langchain_classic.chains.llm import LLMChain
@deprecated(
since="0.2.13",
message=(
"This class is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
),
removal="1.0",
)
class ConstitutionalChain(Chain):
r'''Chain for applying constitutional principles.
!!! note
This class is deprecated. See below for a replacement implementation using
LangGraph. The benefits of this implementation are:
- Uses LLM tool calling features instead of parsing string responses;
- Support for both token-by-token and step-by-step streaming;
- Support for checkpointing and memory of chat history;
- Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
Install LangGraph with:
```bash
pip install -U langgraph
```
```python
from typing import List, Optional, Tuple
from langchain_classic.chains.constitutional_ai.prompts import (
CRITIQUE_PROMPT,
REVISION_PROMPT,
)
from langchain_classic.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from typing_extensions import Annotated, TypedDict
model = ChatOpenAI(model="gpt-4o-mini")
class Critique(TypedDict):
"""Generate a critique, if needed."""
critique_needed: Annotated[bool, ..., "Whether or not a critique is needed."]
critique: Annotated[str, ..., "If needed, the critique."]
critique_prompt = ChatPromptTemplate.from_template(
"Critique this response according to the critique request. "
"If no critique is needed, specify that.\n\n"
"Query: {query}\n\n"
"Response: {response}\n\n"
"Critique request: {critique_request}"
)
revision_prompt = ChatPromptTemplate.from_template(
"Revise this response according to the critique and revision request.\n\n"
"Query: {query}\n\n"
"Response: {response}\n\n"
"Critique request: {critique_request}\n\n"
"Critique: {critique}\n\n"
"If the critique does not identify anything worth changing, ignore the "
"revision request and return 'No revisions needed'. If the critique "
"does identify something worth changing, revise the response based on "
"the revision request.\n\n"
"Revision Request: {revision_request}"
)
chain = model | StrOutputParser()
critique_chain = critique_prompt | model.with_structured_output(Critique)
revision_chain = revision_prompt | model | StrOutputParser()
class State(TypedDict):
query: str
constitutional_principles: List[ConstitutionalPrinciple]
initial_response: str
critiques_and_revisions: List[Tuple[str, str]]
response: str
async def generate_response(state: State):
"""Generate initial response."""
response = await chain.ainvoke(state["query"])
return {"response": response, "initial_response": response}
async def critique_and_revise(state: State):
"""Critique and revise response according to principles."""
critiques_and_revisions = []
response = state["initial_response"]
for principle in state["constitutional_principles"]:
critique = await critique_chain.ainvoke(
{
"query": state["query"],
"response": response,
"critique_request": principle.critique_request,
}
)
if critique["critique_needed"]:
revision = await revision_chain.ainvoke(
{
"query": state["query"],
"response": response,
"critique_request": principle.critique_request,
"critique": critique["critique"],
"revision_request": principle.revision_request,
}
)
response = revision
critiques_and_revisions.append((critique["critique"], revision))
else:
critiques_and_revisions.append((critique["critique"], ""))
return {
"critiques_and_revisions": critiques_and_revisions,
"response": response,
}
graph = StateGraph(State)
graph.add_node("generate_response", generate_response)
graph.add_node("critique_and_revise", critique_and_revise)
graph.add_edge(START, "generate_response")
graph.add_edge("generate_response", "critique_and_revise")
graph.add_edge("critique_and_revise", END)
app = graph.compile()
```
```python
constitutional_principles=[
ConstitutionalPrinciple(
critique_request="Tell if this answer is good.",
revision_request="Give a better answer.",
)
]
query = "What is the meaning of life? Answer in 10 words or fewer."
async for step in app.astream(
{"query": query, "constitutional_principles": constitutional_principles},
stream_mode="values",
):
subset = ["initial_response", "critiques_and_revisions", "response"]
print({k: v for k, v in step.items() if k in subset})
```
Example:
```python
from langchain_openai import OpenAI
from langchain_classic.chains import LLMChain, ConstitutionalChain
from langchain_classic.chains.constitutional_ai.models \
import ConstitutionalPrinciple
model = OpenAI()
qa_prompt = PromptTemplate(
template="Q: {question} A:",
input_variables=["question"],
)
qa_chain = LLMChain(llm=model, prompt=qa_prompt)
constitutional_chain = ConstitutionalChain.from_llm(
llm=model,
chain=qa_chain,
constitutional_principles=[
ConstitutionalPrinciple(
critique_request="Tell if this answer is good.",
revision_request="Give a better answer.",
)
],
)
constitutional_chain.run(question="What is the meaning of life?")
```
''' # noqa: E501
# Chain whose output will be critiqued and revised.
chain: LLMChain
# Principles to apply, in order.
constitutional_principles: list[ConstitutionalPrinciple]
# Chain used to generate critiques of the current response.
critique_chain: LLMChain
# Chain used to revise the response based on a critique.
revision_chain: LLMChain
# Whether to include the initial output and critiques in the result.
return_intermediate_steps: bool = False
@classmethod
def get_principles(
    cls,
    names: list[str] | None = None,
) -> list[ConstitutionalPrinciple]:
    """Get constitutional principles by name.

    Args:
        names: List of names of constitutional principles to retrieve.
            If `None` (Default), all principles are returned.
    """
    # Either all registered principles, or just the ones explicitly requested.
    selected = (
        PRINCIPLES.values()
        if names is None
        else (PRINCIPLES[name] for name in names)
    )
    return list(selected)
@classmethod
def from_llm(
    cls,
    llm: BaseLanguageModel,
    chain: LLMChain,
    critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
    revision_prompt: BasePromptTemplate = REVISION_PROMPT,
    **kwargs: Any,
) -> "ConstitutionalChain":
    """Create a chain from an LLM.

    Args:
        llm: Language model used for both the critique and revision chains.
        chain: The chain whose output will be critiqued and revised.
        critique_prompt: Prompt used to generate critiques.
        revision_prompt: Prompt used to generate revisions.
        kwargs: Additional arguments passed to the constructor.
    """
    critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
    revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
    return cls(
        chain=chain,
        critique_chain=critique_chain,
        revision_chain=revision_chain,
        **kwargs,
    )
@property
def input_keys(self) -> list[str]:
    """Input keys, delegated to the wrapped chain."""
    return self.chain.input_keys
@property
def output_keys(self) -> list[str]:
    """Output keys; intermediate-step keys are added when requested."""
    keys = ["output"]
    if self.return_intermediate_steps:
        keys.extend(["critiques_and_revisions", "initial_output"])
    return keys
def _call(
    self,
    inputs: dict[str, Any],
    run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
    """Run the base chain, then iteratively critique and revise its output."""
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    response = self.chain.run(
        **inputs,
        callbacks=_run_manager.get_child("original"),
    )
    initial_response = response
    input_prompt = self.chain.prompt.format(**inputs)
    _run_manager.on_text(
        text="Initial response: " + response + "\n\n",
        verbose=self.verbose,
        color="yellow",
    )
    critiques_and_revisions = []
    for constitutional_principle in self.constitutional_principles:
        # Do critique
        raw_critique = self.critique_chain.run(
            input_prompt=input_prompt,
            output_from_model=response,
            critique_request=constitutional_principle.critique_request,
            callbacks=_run_manager.get_child("critique"),
        )
        critique = self._parse_critique(
            output_string=raw_critique,
        ).strip()
        # if the critique contains "No critique needed", then we're done
        # in this case, initial_output is the same as output,
        # but we'll keep it for consistency
        if "no critique needed" in critique.lower():
            critiques_and_revisions.append((critique, ""))
            continue
        # Do revision
        revision = self.revision_chain.run(
            input_prompt=input_prompt,
            output_from_model=response,
            critique_request=constitutional_principle.critique_request,
            critique=critique,
            revision_request=constitutional_principle.revision_request,
            callbacks=_run_manager.get_child("revision"),
        ).strip()
        # The revised text becomes the response fed to the next principle.
        response = revision
        critiques_and_revisions.append((critique, revision))
        _run_manager.on_text(
            text=f"Applying {constitutional_principle.name}..." + "\n\n",
            verbose=self.verbose,
            color="green",
        )
        _run_manager.on_text(
            text="Critique: " + critique + "\n\n",
            verbose=self.verbose,
            color="blue",
        )
        _run_manager.on_text(
            text="Updated response: " + revision + "\n\n",
            verbose=self.verbose,
            color="yellow",
        )
    final_output: dict[str, Any] = {"output": response}
    if self.return_intermediate_steps:
        final_output["initial_output"] = initial_response
        final_output["critiques_and_revisions"] = critiques_and_revisions
    return final_output
@staticmethod
def _parse_critique(output_string: str) -> str:
    """Strip any trailing revision-request text from a raw critique string."""
    marker = "Revision request:"
    if marker not in output_string:
        return output_string
    critique = output_string.split(marker, maxsplit=1)[0]
    # Drop anything after the first paragraph break as well.
    return critique.split("\n\n", 1)[0]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/constitutional_ai/base.py",
"license": "MIT License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/conversation/base.py | """Chain that carries on a conversation and calls an LLM."""
from langchain_core._api import deprecated
from langchain_core.prompts import BasePromptTemplate
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import Self, override
from langchain_classic.base_memory import BaseMemory
from langchain_classic.chains.conversation.prompt import PROMPT
from langchain_classic.chains.llm import LLMChain
from langchain_classic.memory.buffer import ConversationBufferMemory
@deprecated(
since="0.2.7",
alternative="langchain_core.runnables.history.RunnableWithMessageHistory",
removal="1.0",
)
class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.
This class is deprecated in favor of `RunnableWithMessageHistory`. Please refer
to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/
`RunnableWithMessageHistory` offers several benefits, including:
- Stream, batch, and async support;
- More flexible memory handling, including the ability to manage memory
outside the chain;
- Support for multiple threads.
Below is a minimal implementation, analogous to using `ConversationChain` with
the default `ConversationBufferMemory`:
```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(model, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
```
Memory objects can also be incorporated into the `get_session_history` callable:
```python
from langchain_classic.memory import ConversationBufferWindowMemory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
memory = ConversationBufferWindowMemory(
chat_memory=store[session_id],
k=3,
return_messages=True,
)
assert len(memory.memory_variables) == 1
key = memory.memory_variables[0]
messages = memory.load_memory_variables({})[key]
store[session_id] = InMemoryChatMessageHistory(messages=messages)
return store[session_id]
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(model, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
```
Example:
```python
from langchain_classic.chains import ConversationChain
from langchain_openai import OpenAI
conversation = ConversationChain(llm=OpenAI())
```
"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
"""Default memory store."""
prompt: BasePromptTemplate = PROMPT
"""Default conversation prompt to use."""
input_key: str = "input"  # key carrying the user's new utterance
output_key: str = "response"  # key carrying the model's reply
model_config = ConfigDict(
    arbitrary_types_allowed=True,
    extra="forbid",
)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
    # Deprecated chain; explicitly opts out of LangChain serialization.
    return False
@property
def input_keys(self) -> list[str]:
    """Use only the input key, because other prompt vars come from history."""
    return [self.input_key]
@model_validator(mode="after")
def validate_prompt_input_variables(self) -> Self:
    """Validate that prompt input variables are consistent."""
    memory_keys = self.memory.memory_variables
    input_key = self.input_key
    # The single chain input must not collide with memory-provided variables.
    if input_key in memory_keys:
        msg = (
            f"The input key {input_key} was also found in the memory keys "
            f"({memory_keys}) - please provide keys that don't overlap."
        )
        raise ValueError(msg)
    # Prompt variables must be exactly memory variables plus the input key.
    prompt_variables = self.prompt.input_variables
    if set(prompt_variables) != {*memory_keys, input_key}:
        msg = (
            "Got unexpected prompt input variables. The prompt expects "
            f"{prompt_variables}, but got {memory_keys} as inputs from "
            f"memory, and {input_key} as the normal input key."
        )
        raise ValueError(msg)
    return self
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/conversation/base.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/conversation/memory.py | """Memory modules for conversation prompts."""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
from langchain_classic.memory.buffer import (
ConversationBufferMemory,
ConversationStringBufferMemory,
)
from langchain_classic.memory.buffer_window import ConversationBufferWindowMemory
from langchain_classic.memory.combined import CombinedMemory
from langchain_classic.memory.entity import ConversationEntityMemory
from langchain_classic.memory.summary import ConversationSummaryMemory
from langchain_classic.memory.summary_buffer import ConversationSummaryBufferMemory
# This import exists only for static type checkers; at runtime the deprecated
# name is resolved lazily through `__getattr__`.
if TYPE_CHECKING:
    from langchain_community.memory.kg import ConversationKGMemory

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ConversationKGMemory": "langchain_community.memory.kg",
}

_importer = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Delegates to the deprecation-aware importer so that deprecated names
    (see `DEPRECATED_LOOKUP`) are still importable from this module.
    """
    return _importer(name)
# This is only for backwards compatibility: these names historically lived in
# this module and are re-exported (or lazily resolved) from their new homes.
__all__ = [
    "CombinedMemory",
    "ConversationBufferMemory",
    "ConversationBufferWindowMemory",
    "ConversationEntityMemory",
    "ConversationKGMemory",
    "ConversationStringBufferMemory",
    "ConversationSummaryBufferMemory",
    "ConversationSummaryMemory",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/conversation/memory.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/conversation/prompt.py | from langchain_core.prompts.prompt import PromptTemplate
from langchain_classic.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
ENTITY_SUMMARIZATION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:""" # noqa: E501
PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
# Only for backwards compatibility
__all__ = [
"ENTITY_EXTRACTION_PROMPT",
"ENTITY_MEMORY_CONVERSATION_TEMPLATE",
"ENTITY_SUMMARIZATION_PROMPT",
"KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
"PROMPT",
"SUMMARY_PROMPT",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/conversation/prompt.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/conversational_retrieval/base.py | """Chain for chatting with a vector database."""
from __future__ import annotations
import inspect
import warnings
from abc import abstractmethod
from collections.abc import Callable
from pathlib import Path
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import RunnableConfig
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field, model_validator
from typing_extensions import override
from langchain_classic.chains.base import Chain
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
)
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.question_answering import load_qa_chain
# Depending on the memory type and configuration, the chat history format may differ.
# This needs to be consolidated.
# A single turn is either a (human, ai) string pair or a typed message object.
CHAT_TURN_TYPE = tuple[str, str] | BaseMessage

# Prefixes used when rendering typed messages into a plain-text transcript.
_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}
def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
    """Render chat history as a newline-separated plain-text transcript."""
    pieces: list[str] = []
    for turn in chat_history:
        if isinstance(turn, BaseMessage):
            # Skip empty messages entirely.
            if len(turn.content) > 0:
                prefix = _ROLE_MAP.get(turn.type, f"{turn.type}: ")
                pieces.append(f"\n{prefix}{turn.content}")
        elif isinstance(turn, tuple):
            human = "Human: " + turn[0]
            ai = "Assistant: " + turn[1]
            pieces.append(f"\n{human}\n{ai}")
        else:
            msg = (
                f"Unsupported chat history format: {type(turn)}."
                f" Full chat history: {chat_history} "
            )
            raise ValueError(msg)
    return "".join(pieces)
class InputType(BaseModel):
    """Input type for ConversationalRetrievalChain.

    Used as the chain's input schema (see `get_input_schema`).
    """

    question: str
    """The question to answer."""
    chat_history: list[CHAT_TURN_TYPE] = Field(default_factory=list)
    """The chat history to use for retrieval."""
class BaseConversationalRetrievalChain(Chain):
"""Chain for chatting with an index."""
combine_docs_chain: BaseCombineDocumentsChain
"""The chain used to combine any retrieved documents."""
question_generator: LLMChain
"""The chain used to generate a new question for the sake of retrieval.
This chain will take in the current question (with variable `question`)
and any chat history (with variable `chat_history`) and will produce
a new standalone question to be used later on."""
output_key: str = "answer"
"""The output key to return the final answer of this chain in."""
rephrase_question: bool = True
"""Whether or not to pass the new generated question to the combine_docs_chain.
If `True`, will pass the new generated question along.
If `False`, will only use the new generated question for retrieval and pass the
original question along to the combine_docs_chain."""
return_source_documents: bool = False
"""Return the retrieved source documents as part of the final result."""
return_generated_question: bool = False
"""Return the generated question as part of the final result."""
get_chat_history: Callable[[list[CHAT_TURN_TYPE]], str] | None = None
"""An optional function to get a string of the chat history.
If `None` is provided, will use a default."""
response_if_no_docs_found: str | None = None
"""If specified, the chain will return a fixed response if no docs
are found for the question. """
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
    """Input keys: the question plus the chat history."""
    return ["question", "chat_history"]
@override
def get_input_schema(
    self,
    config: RunnableConfig | None = None,
) -> type[BaseModel]:
    # The input schema is fixed: a question plus optional chat history.
    return InputType
@property
def output_keys(self) -> list[str]:
    """Return the output keys, including any optional extras enabled."""
    keys = [self.output_key]
    if self.return_source_documents:
        keys.append("source_documents")
    if self.return_generated_question:
        keys.append("generated_question")
    return keys
@abstractmethod
def _get_docs(
    self,
    question: str,
    inputs: dict[str, Any],
    *,
    run_manager: CallbackManagerForChainRun,
) -> list[Document]:
    """Get docs.

    Subclasses implement retrieval of documents relevant to `question`.
    """
def _call(
    self,
    inputs: dict[str, Any],
    run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
    """Condense the question, retrieve docs, and combine them into an answer."""
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    question = inputs["question"]
    get_chat_history = self.get_chat_history or _get_chat_history
    chat_history_str = get_chat_history(inputs["chat_history"])
    if chat_history_str:
        callbacks = _run_manager.get_child()
        # Rewrite the question into a standalone form using the chat history.
        new_question = self.question_generator.run(
            question=question,
            chat_history=chat_history_str,
            callbacks=callbacks,
        )
    else:
        new_question = question
    # Older subclass implementations may not accept a run_manager kwarg.
    accepts_run_manager = (
        "run_manager" in inspect.signature(self._get_docs).parameters
    )
    if accepts_run_manager:
        docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
    else:
        docs = self._get_docs(new_question, inputs)  # type: ignore[call-arg]
    output: dict[str, Any] = {}
    if self.response_if_no_docs_found is not None and len(docs) == 0:
        output[self.output_key] = self.response_if_no_docs_found
    else:
        new_inputs = inputs.copy()
        if self.rephrase_question:
            new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = self.combine_docs_chain.run(
            input_documents=docs,
            callbacks=_run_manager.get_child(),
            **new_inputs,
        )
        output[self.output_key] = answer
    if self.return_source_documents:
        output["source_documents"] = docs
    if self.return_generated_question:
        output["generated_question"] = new_question
    return output
@abstractmethod
async def _aget_docs(
    self,
    question: str,
    inputs: dict[str, Any],
    *,
    run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
    """Get docs.

    Async counterpart of `_get_docs`; subclasses implement retrieval.
    """
async def _acall(
    self,
    inputs: dict[str, Any],
    run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
    """Async variant of `_call`: condense, retrieve, and combine documents."""
    _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
    question = inputs["question"]
    get_chat_history = self.get_chat_history or _get_chat_history
    chat_history_str = get_chat_history(inputs["chat_history"])
    if chat_history_str:
        callbacks = _run_manager.get_child()
        # Rewrite the question into a standalone form using the chat history.
        new_question = await self.question_generator.arun(
            question=question,
            chat_history=chat_history_str,
            callbacks=callbacks,
        )
    else:
        new_question = question
    # Older subclass implementations may not accept a run_manager kwarg.
    accepts_run_manager = (
        "run_manager" in inspect.signature(self._aget_docs).parameters
    )
    if accepts_run_manager:
        docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager)
    else:
        docs = await self._aget_docs(new_question, inputs)  # type: ignore[call-arg]
    output: dict[str, Any] = {}
    if self.response_if_no_docs_found is not None and len(docs) == 0:
        output[self.output_key] = self.response_if_no_docs_found
    else:
        new_inputs = inputs.copy()
        if self.rephrase_question:
            new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = await self.combine_docs_chain.arun(
            input_documents=docs,
            callbacks=_run_manager.get_child(),
            **new_inputs,
        )
        output[self.output_key] = answer
    if self.return_source_documents:
        output["source_documents"] = docs
    if self.return_generated_question:
        output["generated_question"] = new_question
    return output
@override
def save(self, file_path: Path | str) -> None:
    # A custom get_chat_history callable cannot be serialized to disk.
    if self.get_chat_history:
        msg = "Chain not saveable when `get_chat_history` is not None."
        raise ValueError(msg)
    super().save(file_path)
@deprecated(
since="0.1.17",
alternative=(
"create_history_aware_retriever together with create_retrieval_chain "
"(see example in docstring)"
),
removal="1.0",
)
class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
r"""Chain for having a conversation based on retrieved documents.
This class is deprecated. See below for an example implementation using
`create_retrieval_chain`. Additional walkthroughs can be found at
https://python.langchain.com/docs/use_cases/question_answering/chat_history
```python
from langchain_classic.chains import (
create_history_aware_retriever,
create_retrieval_chain,
)
from langchain_classic.chains.combine_documents import (
create_stuff_documents_chain,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
retriever = ... # Your retriever
model = ChatOpenAI()
# Contextualize question
contextualize_q_system_prompt = (
"Given a chat history and the latest user question "
"which might reference context in the chat history, "
"formulate a standalone question which can be understood "
"without the chat history. Do NOT answer the question, just "
"reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
history_aware_retriever = create_history_aware_retriever(
model, retriever, contextualize_q_prompt
)
# Answer question
qa_system_prompt = (
"You are an assistant for question-answering tasks. Use "
"the following pieces of retrieved context to answer the "
"question. If you don't know the answer, just say that you "
"don't know. Use three sentences maximum and keep the answer "
"concise."
"\n\n"
"{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", qa_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
# Below we use create_stuff_documents_chain to feed all retrieved context
# into the LLM. Note that we can also use StuffDocumentsChain and other
# instances of BaseCombineDocumentsChain.
question_answer_chain = create_stuff_documents_chain(model, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
# Usage:
chat_history = [] # Collect chat history here (a sequence of messages)
rag_chain.invoke({"input": query, "chat_history": chat_history})
```
This chain takes in chat history (a list of messages) and new questions,
and then returns an answer to that question.
The algorithm for this chain consists of three parts:
1. Use the chat history and the new question to create a "standalone question".
This is done so that this question can be passed into the retrieval step to
fetch relevant documents. If only the new question was passed in, then relevant
context may be lacking. If the whole conversation was passed into retrieval,
there may be unnecessary information there that would distract from retrieval.
2. This new question is passed to the retriever and relevant documents are
returned.
3. The retrieved documents are passed to an LLM along with either the new question
(default behavior) or the original question and chat history to generate a final
response.
Example:
```python
from langchain_classic.chains import (
StuffDocumentsChain,
LLMChain,
ConversationalRetrievalChain,
)
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
combine_docs_chain = StuffDocumentsChain(...)
vectorstore = ...
retriever = vectorstore.as_retriever()
# This controls how the standalone question is generated.
# Should take `chat_history` and `question` as input variables.
template = (
"Combine the chat history and follow up question into "
"a standalone question. Chat History: {chat_history}"
"Follow up question: {question}"
)
prompt = PromptTemplate.from_template(template)
model = OpenAI()
question_generator_chain = LLMChain(llm=model, prompt=prompt)
chain = ConversationalRetrievalChain(
combine_docs_chain=combine_docs_chain,
retriever=retriever,
question_generator=question_generator_chain,
)
```
"""
retriever: BaseRetriever
"""Retriever to use to fetch documents."""
max_tokens_limit: int | None = None
"""If set, enforces that the documents returned are less than this limit.
This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain.
"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
    """Drop documents from the end until the token budget is satisfied."""
    keep = len(docs)
    limit = self.max_tokens_limit
    # The limit is only enforceable when the combine chain exposes an LLM
    # whose tokenizer we can use (i.e. it is a StuffDocumentsChain).
    if limit and isinstance(self.combine_docs_chain, StuffDocumentsChain):
        # Private accessor on the inner chain; mirrors upstream usage.
        count_tokens = self.combine_docs_chain.llm_chain._get_num_tokens  # noqa: SLF001
        tokens = [count_tokens(doc.page_content) for doc in docs]
        total = sum(tokens)
        while total > limit:
            keep -= 1
            total -= tokens[keep]
    return docs[:keep]
@override
def _get_docs(
    self,
    question: str,
    inputs: dict[str, Any],
    *,
    run_manager: CallbackManagerForChainRun,
) -> list[Document]:
    """Get docs from the retriever, then trim to the token limit."""
    docs = self.retriever.invoke(
        question,
        config={"callbacks": run_manager.get_child()},
    )
    return self._reduce_tokens_below_limit(docs)
@override
async def _aget_docs(
    self,
    question: str,
    inputs: dict[str, Any],
    *,
    run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
    """Get docs from the retriever (async), then trim to the token limit."""
    docs = await self.retriever.ainvoke(
        question,
        config={"callbacks": run_manager.get_child()},
    )
    return self._reduce_tokens_below_limit(docs)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
chain_type: str = "stuff",
verbose: bool = False, # noqa: FBT001,FBT002
condense_question_llm: BaseLanguageModel | None = None,
combine_docs_chain_kwargs: dict | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
(eg in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
"""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
verbose=verbose,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(
llm=_llm,
prompt=condense_question_prompt,
verbose=verbose,
callbacks=callbacks,
)
return cls(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
callbacks=callbacks,
**kwargs,
)
class ChatVectorDBChain(BaseConversationalRetrievalChain):
    """Chain for chatting with a vector database.

    Deprecated in favor of `ConversationalRetrievalChain`, which accepts any
    retriever rather than a concrete vector store (see `_raise_deprecation`).
    """

    # Vector store queried directly via similarity search (no retriever layer).
    vectorstore: VectorStore = Field(alias="vectorstore")
    # Number of documents fetched as context for each question.
    top_k_docs_for_context: int = 4
    # Default keyword arguments forwarded to `similarity_search`; per-call
    # overrides may be supplied in inputs under "vectordbkwargs".
    search_kwargs: dict = Field(default_factory=dict)

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"

    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        # Warn on every construction; stacklevel=4 points the warning at the
        # code instantiating the chain rather than at pydantic internals.
        warnings.warn(
            "`ChatVectorDBChain` is deprecated - "
            "please use `from langchain_classic.chains import "
            "ConversationalRetrievalChain`",
            stacklevel=4,
        )
        return values

    @override
    def _get_docs(
        self,
        question: str,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs via similarity search against the vector store."""
        # Per-call kwargs take precedence over the chain-level defaults.
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        full_kwargs = {**self.search_kwargs, **vectordbkwargs}
        return self.vectorstore.similarity_search(
            question,
            k=self.top_k_docs_for_context,
            **full_kwargs,
        )

    # Consistency fix: decorated with @override like the other overrides of
    # BaseConversationalRetrievalChain's abstract doc getters.
    @override
    async def _aget_docs(
        self,
        question: str,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs. Always raises: async is unsupported for this chain."""
        msg = "ChatVectorDBChain does not support async"
        raise NotImplementedError(msg)

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        chain_type: str = "stuff",
        combine_docs_chain_kwargs: dict | None = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM.

        Args:
            llm: Language model used for both question condensing and QA.
            vectorstore: Vector store searched for context documents.
            condense_question_prompt: Prompt that condenses chat history plus
                the follow-up question into a standalone question.
            chain_type: Chain type forwarded to `load_qa_chain`.
            combine_docs_chain_kwargs: Extra kwargs for `load_qa_chain`.
            callbacks: Callbacks propagated to all subchains.
            kwargs: Additional constructor arguments.
        """
        combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            callbacks=callbacks,
            **combine_docs_chain_kwargs,
        )
        condense_question_chain = LLMChain(
            llm=llm,
            prompt=condense_question_prompt,
            callbacks=callbacks,
        )
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            callbacks=callbacks,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/conversational_retrieval/base.py",
"license": "MIT License",
"lines": 515,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/elasticsearch_database/base.py | """Chain for interacting with Elasticsearch Database."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.output_parsers.json import SimpleJsonOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict, model_validator
from typing_extensions import Self
from langchain_classic.chains.base import Chain
from langchain_classic.chains.elasticsearch_database.prompts import (
ANSWER_PROMPT,
DSL_PROMPT,
)
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
class ElasticsearchDatabaseChain(Chain):
    """Chain for interacting with Elasticsearch Database.

    Pipeline: question -> LLM generates an ES DSL query -> query is executed
    against the database -> LLM answers the question from the raw results.

    Example:
        ```python
        from langchain_classic.chains import ElasticsearchDatabaseChain
        from langchain_openai import OpenAI
        from elasticsearch import Elasticsearch

        database = Elasticsearch("http://localhost:9200")
        db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
        ```
    """

    query_chain: Runnable
    """Chain for creating the ES query."""
    answer_chain: Runnable
    """Chain for answering the user question."""
    database: Any = None
    """Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
    top_k: int = 10
    """Number of results to return from the query"""
    # Index filters: restrict to `include_indices` or exclude `ignore_indices`.
    # Mutually exclusive; enforced by `_validate_indices`.
    ignore_indices: list[str] | None = None
    include_indices: list[str] | None = None
    # Keys used to read the question from inputs / write the answer to outputs.
    input_key: str = "question"
    output_key: str = "result"
    # Number of sample documents per index embedded in the index info shown to
    # the LLM; 0 disables sampling.
    sample_documents_in_index_info: int = 3
    return_intermediate_steps: bool = False
    """Whether or not to return the intermediate steps along with the final answer."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="after")
    def _validate_indices(self) -> Self:
        # The two filters contradict each other, so fail fast at construction.
        if self.include_indices and self.ignore_indices:
            msg = "Cannot specify both 'include_indices' and 'ignore_indices'."
            raise ValueError(msg)
        return self

    @property
    def input_keys(self) -> list[str]:
        """Return the singular input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Return the singular output key."""
        if not self.return_intermediate_steps:
            return [self.output_key]
        return [self.output_key, INTERMEDIATE_STEPS_KEY]

    def _list_indices(self) -> list[str]:
        """Return searchable index names, applying the include/ignore filters."""
        # cat.indices(format="json") yields one dict per index; "index" holds
        # the index name.
        all_indices = [
            index["index"] for index in self.database.cat.indices(format="json")
        ]
        if self.include_indices:
            all_indices = [i for i in all_indices if i in self.include_indices]
        if self.ignore_indices:
            all_indices = [i for i in all_indices if i not in self.ignore_indices]
        return all_indices

    def _get_indices_infos(self, indices: list[str]) -> str:
        """Render mappings (plus optional sample docs) for the given indices."""
        mappings = self.database.indices.get_mapping(index=",".join(indices))
        if self.sample_documents_in_index_info > 0:
            for k, v in mappings.items():
                hits = self.database.search(
                    index=k,
                    query={"match_all": {}},
                    size=self.sample_documents_in_index_info,
                )["hits"]["hits"]
                hits = [str(hit["_source"]) for hit in hits]
                # Append sample documents in a /* ... */ block after the raw
                # mapping so the LLM sees concrete field values.
                mappings[k]["mappings"] = str(v) + "\n\n/*\n" + "\n".join(hits) + "\n*/"
        return "\n\n".join(
            [
                "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"])
                for index in mappings
            ],
        )

    def _search(self, indices: list[str], query: str) -> str:
        """Run the generated query against the indices; return str(result)."""
        result = self.database.search(index=",".join(indices), body=query)
        return str(result)

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        input_text = f"{inputs[self.input_key]}\nESQuery:"
        _run_manager.on_text(input_text, verbose=self.verbose)
        indices = self._list_indices()
        indices_info = self._get_indices_infos(indices)
        query_inputs: dict = {
            "input": input_text,
            "top_k": str(self.top_k),
            "indices_info": indices_info,
            "stop": ["\nESResult:"],
        }
        # intermediate_steps alternates input/output entries for each stage:
        # query generation, ES search, final answer.
        intermediate_steps: list = []
        try:
            intermediate_steps.append(query_inputs)  # input: es generation
            es_cmd = self.query_chain.invoke(
                query_inputs,
                config={"callbacks": _run_manager.get_child()},
            )
            _run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
            intermediate_steps.append(
                es_cmd,
            )  # output: elasticsearch dsl generation (no checker)
            intermediate_steps.append({"es_cmd": es_cmd})  # input: ES search
            result = self._search(indices=indices, query=es_cmd)
            intermediate_steps.append(str(result))  # output: ES search
            _run_manager.on_text("\nESResult: ", verbose=self.verbose)
            _run_manager.on_text(result, color="yellow", verbose=self.verbose)
            _run_manager.on_text("\nAnswer:", verbose=self.verbose)
            answer_inputs: dict = {"data": result, "input": input_text}
            intermediate_steps.append(answer_inputs)  # input: final answer
            final_result = self.answer_chain.invoke(
                answer_inputs,
                config={"callbacks": _run_manager.get_child()},
            )
            intermediate_steps.append(final_result)  # output: final answer
            _run_manager.on_text(final_result, color="green", verbose=self.verbose)
            chain_result: dict[str, Any] = {self.output_key: final_result}
            if self.return_intermediate_steps:
                chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
        except Exception as exc:
            # Append intermediate steps to exception, to aid in logging and later
            # improvement of few shot prompt seeds
            exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
            raise
        return chain_result

    @property
    def _chain_type(self) -> str:
        return "elasticsearch_database_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        database: Elasticsearch,
        *,
        query_prompt: BasePromptTemplate | None = None,
        answer_prompt: BasePromptTemplate | None = None,
        query_output_parser: BaseOutputParser | None = None,
        **kwargs: Any,
    ) -> ElasticsearchDatabaseChain:
        """Convenience method to construct ElasticsearchDatabaseChain from an LLM.

        Args:
            llm: The language model to use.
            database: The Elasticsearch db.
            query_prompt: The prompt to use for query construction.
            answer_prompt: The prompt to use for answering user question given data.
            query_output_parser: The output parser to use for parsing model-generated
                ES query. Defaults to `SimpleJsonOutputParser`.
            kwargs: Additional arguments to pass to the constructor.
        """
        query_prompt = query_prompt or DSL_PROMPT
        query_output_parser = query_output_parser or SimpleJsonOutputParser()
        query_chain = query_prompt | llm | query_output_parser
        answer_prompt = answer_prompt or ANSWER_PROMPT
        answer_chain = answer_prompt | llm | StrOutputParser()
        return cls(
            query_chain=query_chain,
            answer_chain=answer_chain,
            database=database,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/elasticsearch_database/base.py",
"license": "MIT License",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/ernie_functions/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.ernie_functions.base import (
convert_python_function_to_ernie_function,
convert_to_ernie_function,
create_ernie_fn_chain,
create_ernie_fn_runnable,
create_structured_output_chain,
create_structured_output_runnable,
get_ernie_output_parser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated attribute to the langchain_community module that now
# hosts it; `create_importer` uses this table to emit a deprecation warning
# and lazily import the real object on first attribute access.
DEPRECATED_LOOKUP = {
    "convert_python_function_to_ernie_function": (
        "langchain_community.chains.ernie_functions.base"
    ),
    "convert_to_ernie_function": "langchain_community.chains.ernie_functions.base",
    "create_ernie_fn_chain": "langchain_community.chains.ernie_functions.base",
    "create_ernie_fn_runnable": "langchain_community.chains.ernie_functions.base",
    "create_structured_output_chain": "langchain_community.chains.ernie_functions.base",
    "create_structured_output_runnable": (
        "langchain_community.chains.ernie_functions.base"
    ),
    "get_ernie_output_parser": "langchain_community.chains.ernie_functions.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # PEP 562 module-level __getattr__: only invoked for names not already in
    # the module namespace, so normal imports pay no overhead.
    return _import_attribute(name)


__all__ = [
    "convert_python_function_to_ernie_function",
    "convert_to_ernie_function",
    "create_ernie_fn_chain",
    "create_ernie_fn_runnable",
    "create_structured_output_chain",
    "create_structured_output_runnable",
    "get_ernie_output_parser",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/ernie_functions/base.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/flare/base.py | from __future__ import annotations
import logging
import re
from collections.abc import Sequence
from typing import Any
from langchain_core.callbacks import (
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
from pydantic import Field
from typing_extensions import override
from langchain_classic.chains.base import Chain
from langchain_classic.chains.flare.prompts import (
PROMPT,
QUESTION_GENERATOR_PROMPT,
FinishedOutputParser,
)
from langchain_classic.chains.llm import LLMChain
logger = logging.getLogger(__name__)
def _extract_tokens_and_log_probs(response: AIMessage) -> tuple[list[str], list[float]]:
"""Extract tokens and log probabilities from chat model response."""
tokens = []
log_probs = []
for token in response.response_metadata["logprobs"]["content"]:
tokens.append(token["token"])
log_probs.append(token["logprob"])
return tokens, log_probs
class QuestionGeneratorChain(LLMChain):
    """Chain that generates questions from uncertain spans."""

    # Default prompt asks the model for a question whose answer would fill in
    # the given uncertain span of the current response.
    prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT
    """Prompt template for the chain."""

    @classmethod
    @override
    def is_lc_serializable(cls) -> bool:
        # This subclass exists only to pin a default prompt; it is excluded
        # from the serializable surface.
        return False

    @property
    def input_keys(self) -> list[str]:
        """Input keys for the chain."""
        # Must match the variables referenced by QUESTION_GENERATOR_PROMPT.
        return ["user_input", "context", "response"]
def _low_confidence_spans(
tokens: Sequence[str],
log_probs: Sequence[float],
min_prob: float,
min_token_gap: int,
num_pad_tokens: int,
) -> list[str]:
try:
import numpy as np
_low_idx = np.where(np.exp(log_probs) < min_prob)[0]
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. FlareChain will use a "
"pure Python implementation for internal calculations, which may "
"significantly impact performance, especially for large datasets. For "
"optimal speed and efficiency, consider installing NumPy: pip install "
"numpy",
)
import math
_low_idx = [ # type: ignore[assignment]
idx
for idx, log_prob in enumerate(log_probs)
if math.exp(log_prob) < min_prob
]
low_idx = [i for i in _low_idx if re.search(r"\w", tokens[i])]
if len(low_idx) == 0:
return []
spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]]
for i, idx in enumerate(low_idx[1:]):
end = idx + num_pad_tokens + 1
if idx - low_idx[i] < min_token_gap:
spans[-1][1] = end
else:
spans.append([idx, end])
return ["".join(tokens[start:end]) for start, end in spans]
class FlareChain(Chain):
    """Flare chain.

    Chain that combines a retriever, a question generator,
    and a response generator.

    See [Active Retrieval Augmented Generation](https://arxiv.org/abs/2305.06983) paper.
    """

    question_generator_chain: Runnable
    """Chain that generates questions from uncertain spans."""
    response_chain: Runnable
    """Chain that generates responses from user input and context."""
    output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser)
    """Parser that determines whether the chain is finished."""
    retriever: BaseRetriever
    """Retriever that retrieves relevant documents from a user input."""
    min_prob: float = 0.2
    """Minimum probability for a token to be considered low confidence."""
    min_token_gap: int = 5
    """Minimum number of tokens between two low confidence spans."""
    num_pad_tokens: int = 2
    """Number of tokens to pad around a low confidence span."""
    max_iter: int = 10
    """Maximum number of iterations."""
    start_with_retrieval: bool = True
    """Whether to start with retrieval."""

    @property
    def input_keys(self) -> list[str]:
        """Input keys for the chain."""
        return ["user_input"]

    @property
    def output_keys(self) -> list[str]:
        """Output keys for the chain."""
        return ["response"]

    def _do_generation(
        self,
        questions: list[str],
        user_input: str,
        response: str,
        _run_manager: CallbackManagerForChainRun,
    ) -> tuple[str, bool]:
        """Retrieve docs for the questions and regenerate the marginal response.

        Returns the newly generated text and whether the parser judged the
        overall answer finished.
        """
        callbacks = _run_manager.get_child()
        docs = []
        for question in questions:
            docs.extend(self.retriever.invoke(question))
        context = "\n\n".join(d.page_content for d in docs)
        result = self.response_chain.invoke(
            {
                "user_input": user_input,
                "context": context,
                "response": response,
            },
            {"callbacks": callbacks},
        )
        # response_chain may be a chat model pipeline (AIMessage) or already a
        # plain string; normalize to text before parsing.
        if isinstance(result, AIMessage):
            result = result.content
        marginal, finished = self.output_parser.parse(result)
        return marginal, finished

    def _do_retrieval(
        self,
        low_confidence_spans: list[str],
        _run_manager: CallbackManagerForChainRun,
        user_input: str,
        response: str,
        initial_response: str,
    ) -> tuple[str, bool]:
        """Generate one question per uncertain span, then redo generation."""
        question_gen_inputs = [
            {
                "user_input": user_input,
                "current_response": initial_response,
                "uncertain_span": span,
            }
            for span in low_confidence_spans
        ]
        callbacks = _run_manager.get_child()
        # Legacy LLMChain exposes `apply` and returns dicts keyed by its output
        # key; LCEL runnables use `batch` and return the values directly.
        if isinstance(self.question_generator_chain, LLMChain):
            question_gen_outputs = self.question_generator_chain.apply(
                question_gen_inputs,
                callbacks=callbacks,
            )
            questions = [
                output[self.question_generator_chain.output_keys[0]]
                for output in question_gen_outputs
            ]
        else:
            questions = self.question_generator_chain.batch(
                question_gen_inputs,
                config={"callbacks": callbacks},
            )
        _run_manager.on_text(
            f"Generated Questions: {questions}",
            color="yellow",
            end="\n",
        )
        return self._do_generation(questions, user_input, response, _run_manager)

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        # FLARE loop: generate with token logprobs, find low-confidence spans,
        # and when any exist, retrieve targeted context and regenerate.
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        user_input = inputs[self.input_keys[0]]
        response = ""
        for _i in range(self.max_iter):
            _run_manager.on_text(
                f"Current Response: {response}",
                color="blue",
                end="\n",
            )
            _input = {"user_input": user_input, "context": "", "response": response}
            tokens, log_probs = _extract_tokens_and_log_probs(
                self.response_chain.invoke(
                    _input,
                    {"callbacks": _run_manager.get_child()},
                ),
            )
            low_confidence_spans = _low_confidence_spans(
                tokens,
                log_probs,
                self.min_prob,
                self.min_token_gap,
                self.num_pad_tokens,
            )
            initial_response = response.strip() + " " + "".join(tokens)
            if not low_confidence_spans:
                # Confident continuation: accept it as-is and only stop once
                # the parser says the answer is finished.
                response = initial_response
                final_response, finished = self.output_parser.parse(response)
                if finished:
                    return {self.output_keys[0]: final_response}
                continue
            marginal, finished = self._do_retrieval(
                low_confidence_spans,
                _run_manager,
                user_input,
                response,
                initial_response,
            )
            response = response.strip() + " " + marginal
            if finished:
                break
        return {self.output_keys[0]: response}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel | None,
        max_generation_len: int = 32,
        **kwargs: Any,
    ) -> FlareChain:
        """Creates a FlareChain from a language model.

        Args:
            llm: Language model to use; must be a `ChatOpenAI` built with
                `logprobs=True`, or `None` to create one.
            max_generation_len: Maximum length of the generated response.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            FlareChain class with the given language model.

        Raises:
            ImportError: If langchain-openai is not installed.
            TypeError: If `llm` is not a `ChatOpenAI` instance.
            ValueError: If `llm` was not constructed with `logprobs=True`.
        """
        try:
            from langchain_openai import ChatOpenAI
        except ImportError as e:
            msg = (
                "OpenAI is required for FlareChain. "
                "Please install langchain-openai."
                "pip install langchain-openai"
            )
            raise ImportError(msg) from e
        # Preserve supplied llm instead of always creating a new ChatOpenAI.
        # Enforce ChatOpenAI requirement (token logprobs needed for FLARE).
        if llm is None:
            llm = ChatOpenAI(
                max_completion_tokens=max_generation_len,
                logprobs=True,
                temperature=0,
            )
        else:
            if not isinstance(llm, ChatOpenAI):
                msg = (
                    f"FlareChain.from_llm requires ChatOpenAI; got "
                    f"{type(llm).__name__}."
                )
                raise TypeError(msg)
            if not getattr(llm, "logprobs", False):  # attribute presence may vary
                msg = (
                    "Provided ChatOpenAI instance must be constructed with "
                    "logprobs=True for FlareChain."
                )
                raise ValueError(msg)
            current_max = getattr(llm, "max_completion_tokens", None)
            if current_max is not None and current_max != max_generation_len:
                logger.debug(
                    "FlareChain.from_llm: supplied llm max_completion_tokens=%s "
                    "differs from requested max_generation_len=%s; "
                    "leaving model unchanged.",
                    current_max,
                    max_generation_len,
                )
        response_chain = PROMPT | llm
        question_gen_chain = QUESTION_GENERATOR_PROMPT | llm | StrOutputParser()
        return cls(
            question_generator_chain=question_gen_chain,
            response_chain=response_chain,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/flare/base.py",
"license": "MIT License",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/graph_qa/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.base import GraphQAChain
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps the deprecated attribute to the langchain_community module that now
# hosts it; `create_importer` emits a deprecation warning and lazily imports
# the real object on first attribute access.
DEPRECATED_LOOKUP = {
    "GraphQAChain": "langchain_community.chains.graph_qa.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # PEP 562 module-level __getattr__: only called for names not found in the
    # module namespace.
    return _import_attribute(name)


__all__ = ["GraphQAChain"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/graph_qa/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/graph_qa/hugegraph.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.hugegraph import HugeGraphQAChain
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps the deprecated attribute to the langchain_community module that now
# hosts it; `create_importer` emits a deprecation warning and lazily imports
# the real object on first attribute access.
DEPRECATED_LOOKUP = {
    "HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # PEP 562 module-level __getattr__: only called for names not found in the
    # module namespace.
    return _import_attribute(name)


__all__ = ["HugeGraphQAChain"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/graph_qa/hugegraph.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/graph_qa/prompts.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_TEMPLATE,
AQL_GENERATION_TEMPLATE,
AQL_QA_TEMPLATE,
CYPHER_GENERATION_PROMPT,
CYPHER_GENERATION_TEMPLATE,
CYPHER_QA_PROMPT,
CYPHER_QA_TEMPLATE,
GRAPHDB_QA_TEMPLATE,
GRAPHDB_SPARQL_FIX_TEMPLATE,
GRAPHDB_SPARQL_GENERATION_TEMPLATE,
GREMLIN_GENERATION_TEMPLATE,
KUZU_EXTRA_INSTRUCTIONS,
KUZU_GENERATION_TEMPLATE,
NEBULAGRAPH_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
NGQL_GENERATION_TEMPLATE,
SPARQL_GENERATION_SELECT_TEMPLATE,
SPARQL_GENERATION_UPDATE_TEMPLATE,
SPARQL_INTENT_TEMPLATE,
SPARQL_QA_TEMPLATE,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated prompt attribute to the langchain_community module that
# now hosts it; `create_importer` emits a deprecation warning and lazily
# imports the real object on first attribute access.
DEPRECATED_LOOKUP = {
    "AQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "AQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "AQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "CYPHER_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "CYPHER_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "CYPHER_QA_PROMPT": "langchain_community.chains.graph_qa.prompts",
    "CYPHER_GENERATION_PROMPT": "langchain_community.chains.graph_qa.prompts",
    "GRAPHDB_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "GRAPHDB_SPARQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "GRAPHDB_SPARQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "GREMLIN_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "KUZU_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
    "KUZU_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "NEBULAGRAPH_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts",
    "NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS": (
        "langchain_community.chains.graph_qa.prompts"
    ),
    "NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE": (
        "langchain_community.chains.graph_qa.prompts"
    ),
    "NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE": (
        "langchain_community.chains.graph_qa.prompts"
    ),
    "NGQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "SPARQL_GENERATION_SELECT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "SPARQL_GENERATION_UPDATE_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "SPARQL_INTENT_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
    "SPARQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # PEP 562 module-level __getattr__: only called for names not found in the
    # module namespace.
    return _import_attribute(name)


__all__ = [
    "AQL_FIX_TEMPLATE",
    "AQL_GENERATION_TEMPLATE",
    "AQL_QA_TEMPLATE",
    "CYPHER_GENERATION_PROMPT",
    "CYPHER_GENERATION_TEMPLATE",
    "CYPHER_QA_PROMPT",
    "CYPHER_QA_TEMPLATE",
    "GRAPHDB_QA_TEMPLATE",
    "GRAPHDB_SPARQL_FIX_TEMPLATE",
    "GRAPHDB_SPARQL_GENERATION_TEMPLATE",
    "GREMLIN_GENERATION_TEMPLATE",
    "KUZU_EXTRA_INSTRUCTIONS",
    "KUZU_GENERATION_TEMPLATE",
    "NEBULAGRAPH_EXTRA_INSTRUCTIONS",
    "NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS",
    "NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE",
    "NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE",
    "NGQL_GENERATION_TEMPLATE",
    "SPARQL_GENERATION_SELECT_TEMPLATE",
    "SPARQL_GENERATION_UPDATE_TEMPLATE",
    "SPARQL_INTENT_TEMPLATE",
    "SPARQL_QA_TEMPLATE",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/graph_qa/prompts.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/hyde/base.py | """Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain_classic.chains.base import Chain
from langchain_classic.chains.hyde.prompts import PROMPT_MAP
from langchain_classic.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
    """Generate hypothetical document for query, and then embed that.

    Based on https://arxiv.org/abs/2212.10496
    """

    # Embedding model applied to the generated hypothetical documents.
    base_embeddings: Embeddings
    # Runnable that turns a query into a hypothetical document.
    llm_chain: Runnable

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> list[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_schema.model_json_schema()["required"]

    @property
    def output_keys(self) -> list[str]:
        """Output keys for Hyde's LLM chain."""
        # Legacy LLMChain declares its own output keys; LCEL runnables yield
        # plain text.
        if isinstance(self.llm_chain, LLMChain):
            return self.llm_chain.output_keys
        return ["text"]

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)

    def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
        """Combine embeddings into final embeddings via dimension-wise mean.

        Returns an empty list for empty input.
        """
        # Fix: guard before dispatching. Previously only the pure-Python
        # fallback handled [], while np.array([]).mean(axis=0) produced a 0-d
        # NaN that crashed the list() conversion.
        if not embeddings:
            return []
        try:
            import numpy as np

            return list(np.array(embeddings).mean(axis=0))
        except ImportError:
            logger.warning(
                "NumPy not found in the current Python environment. "
                "HypotheticalDocumentEmbedder will use a pure Python implementation "
                "for internal calculations, which may significantly impact "
                "performance, especially for large datasets. For optimal speed and "
                "efficiency, consider installing NumPy: pip install numpy",
            )
            num_vectors = len(embeddings)
            return [
                sum(dim_values) / num_vectors
                for dim_values in zip(*embeddings, strict=False)
            ]

    def embed_query(self, text: str) -> list[float]:
        """Generate a hypothetical document for the query and embed it."""
        var_name = self.input_keys[0]
        result = self.llm_chain.invoke({var_name: text})
        # Legacy LLMChain returns a dict keyed by its output key; LCEL
        # runnables return the generated text directly.
        if isinstance(self.llm_chain, LLMChain):
            documents = [result[self.output_keys[0]]]
        else:
            documents = [result]
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Call the internal llm chain."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        return self.llm_chain.invoke(
            inputs,
            config={"callbacks": _run_manager.get_child()},
        )

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        base_embeddings: Embeddings,
        prompt_key: str | None = None,
        custom_prompt: BasePromptTemplate | None = None,
        **kwargs: Any,
    ) -> HypotheticalDocumentEmbedder:
        """Load and use LLMChain with either a specific prompt key or custom prompt.

        Args:
            llm: Language model that writes the hypothetical document.
            base_embeddings: Embedding model applied to the generated document.
            prompt_key: Key into PROMPT_MAP selecting a canned prompt; ignored
                when `custom_prompt` is given.
            custom_prompt: Prompt template overriding `prompt_key`.
            kwargs: Additional constructor arguments.

        Raises:
            ValueError: If neither `custom_prompt` nor a valid `prompt_key`
                is provided.
        """
        if custom_prompt is not None:
            prompt = custom_prompt
        elif prompt_key is not None and prompt_key in PROMPT_MAP:
            prompt = PROMPT_MAP[prompt_key]
        else:
            msg = (
                f"Must specify prompt_key if custom_prompt not provided. Should be one "
                f"of {list(PROMPT_MAP.keys())}."
            )
            raise ValueError(msg)
        llm_chain = prompt | llm | StrOutputParser()
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        return "hyde_chain"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/hyde/base.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/llm_checker/base.py | """Chain for question-answering with self-verification."""
from __future__ import annotations
import warnings
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from pydantic import ConfigDict, model_validator
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.llm_checker.prompt import (
CHECK_ASSERTIONS_PROMPT,
CREATE_DRAFT_ANSWER_PROMPT,
LIST_ASSERTIONS_PROMPT,
REVISED_ANSWER_PROMPT,
)
from langchain_classic.chains.sequential import SequentialChain
def _load_question_to_checked_assertions_chain(
    llm: BaseLanguageModel,
    create_draft_answer_prompt: PromptTemplate,
    list_assertions_prompt: PromptTemplate,
    check_assertions_prompt: PromptTemplate,
    revised_answer_prompt: PromptTemplate,
) -> SequentialChain:
    """Wire the draft/list/check/revise prompts into one sequential chain.

    Each stage is an ``LLMChain`` over ``llm`` writing to its own output key;
    the assembled chain consumes ``question`` and emits ``revised_statement``.
    """
    stages = (
        (create_draft_answer_prompt, "statement"),
        (list_assertions_prompt, "assertions"),
        (check_assertions_prompt, "checked_assertions"),
        (revised_answer_prompt, "revised_statement"),
    )
    return SequentialChain(
        chains=[
            LLMChain(llm=llm, prompt=stage_prompt, output_key=stage_key)
            for stage_prompt, stage_key in stages
        ],
        input_variables=["question"],
        output_variables=["revised_statement"],
        verbose=True,
    )
@deprecated(
    since="0.2.13",
    message=(
        "See LangGraph guides for a variety of self-reflection and corrective "
        "strategies for question-answering and other tasks: "
        "https://docs.langchain.com/oss/python/langchain/overview"
    ),
    removal="1.0",
)
class LLMCheckerChain(Chain):
    """Chain for question-answering with self-verification.

    The chain drafts an answer to the question, lists the assertions the
    draft relies on, checks each assertion, and then produces a revised
    answer consistent with the check results.

    Example:
    ```python
    from langchain_openai import OpenAI
    from langchain_classic.chains import LLMCheckerChain

    model = OpenAI(temperature=0.7)
    checker_chain = LLMCheckerChain.from_llm(model)
    ```
    """

    # Draft -> list assertions -> check assertions -> revise; assembled by
    # _load_question_to_checked_assertions_chain.
    question_to_checked_assertions_chain: SequentialChain

    llm: BaseLanguageModel | None = None
    """[Deprecated] LLM wrapper to use."""
    create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
    """[Deprecated]"""
    list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
    """[Deprecated]"""
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    """[Deprecated]"""
    revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
    """[Deprecated] Prompt to use when questioning the documents."""
    # Names of the single input / output keys exposed by this chain.
    input_key: str = "query"
    output_key: str = "result"

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        """Warn on legacy ``llm`` usage and build the internal chain from it."""
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an LLMCheckerChain with an llm is deprecated. "
                "Please instantiate with question_to_checked_assertions_chain "
                "or using the from_llm class method.",
                stacklevel=5,
            )
            if (
                "question_to_checked_assertions_chain" not in values
                and values["llm"] is not None
            ):
                # Assemble the four-step sequential chain from the legacy
                # llm + prompt fields, falling back to the module defaults.
                question_to_checked_assertions_chain = (
                    _load_question_to_checked_assertions_chain(
                        values["llm"],
                        values.get(
                            "create_draft_answer_prompt",
                            CREATE_DRAFT_ANSWER_PROMPT,
                        ),
                        values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT),
                        values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
                        values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT),
                    )
                )
                values["question_to_checked_assertions_chain"] = (
                    question_to_checked_assertions_chain
                )
        return values

    @property
    def input_keys(self) -> list[str]:
        """Return the singular input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Return the singular output key."""
        return [self.output_key]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Run the draft/check/revise pipeline and return the revised answer."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]

        output = self.question_to_checked_assertions_chain(
            {"question": question},
            callbacks=_run_manager.get_child(),
        )
        return {self.output_key: output["revised_statement"]}

    @property
    def _chain_type(self) -> str:
        # Identifier used for serialization.
        return "llm_checker_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT,
        list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT,
        check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
        revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT,
        **kwargs: Any,
    ) -> LLMCheckerChain:
        """Create an LLMCheckerChain from a language model.

        Args:
            llm: a language model
            create_draft_answer_prompt: prompt to create a draft answer
            list_assertions_prompt: prompt to list assertions
            check_assertions_prompt: prompt to check assertions
            revised_answer_prompt: prompt to revise the answer
            **kwargs: additional arguments

        Returns:
            The constructed chain.
        """
        question_to_checked_assertions_chain = (
            _load_question_to_checked_assertions_chain(
                llm,
                create_draft_answer_prompt,
                list_assertions_prompt,
                check_assertions_prompt,
                revised_answer_prompt,
            )
        )
        return cls(
            question_to_checked_assertions_chain=question_to_checked_assertions_chain,
            **kwargs,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/llm_checker/base.py",
"license": "MIT License",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/llm_math/base.py | """Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations
import math
import re
import warnings
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import ConfigDict, model_validator
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.llm_math.prompt import PROMPT
@deprecated(
    since="0.2.13",
    message=(
        "This class is deprecated and will be removed in langchain 1.0. "
        "See API reference for replacement: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.llm_math.base.LLMMathChain.html"
    ),
    removal="1.0",
)
class LLMMathChain(Chain):
    """Chain that interprets a prompt and executes python code to do math.

    !!! note
        This class is deprecated. Prefer a LangGraph agent equipped with a
        calculator tool that evaluates expressions via ``numexpr`` — that
        approach supports LLM tool calling, token streaming, and
        checkpointing. See the API reference link in the deprecation
        message for a worked migration example.

    How it works: the LLM is prompted to translate the question into a
    single-line math expression inside a fenced ``text`` code block. That
    expression is evaluated with ``numexpr`` using an emptied globals dict
    (only ``pi`` and ``e`` are made available), and the result is returned
    under the ``answer`` key as a string prefixed with ``"Answer: "``.

    Example:
    ```python
    from langchain_classic.chains import LLMMathChain
    from langchain_openai import OpenAI

    llm_math = LLMMathChain.from_llm(OpenAI())
    ```
    """

    llm_chain: LLMChain
    llm: BaseLanguageModel | None = None
    """[Deprecated] LLM wrapper to use."""
    prompt: BasePromptTemplate = PROMPT
    """[Deprecated] Prompt to use to translate to python if necessary."""
    # Names of the single input / output keys exposed by this chain.
    input_key: str = "question"
    output_key: str = "answer"

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        """Check numexpr is importable and migrate legacy ``llm`` usage."""
        # Fail fast at construction time if the optional dependency is absent.
        try:
            import numexpr  # noqa: F401
        except ImportError as e:
            msg = (
                "LLMMathChain requires the numexpr package. "
                "Please install it with `pip install numexpr`."
            )
            raise ImportError(msg) from e
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an LLMMathChain with an llm is deprecated. "
                "Please instantiate with llm_chain argument or using the from_llm "
                "class method.",
                stacklevel=5,
            )
            # Build llm_chain from the legacy llm (+ optional prompt) fields.
            if "llm_chain" not in values and values["llm"] is not None:
                prompt = values.get("prompt", PROMPT)
                values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
        return values

    @property
    def input_keys(self) -> list[str]:
        """Expect input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Expect output key."""
        return [self.output_key]

    def _evaluate_expression(self, expression: str) -> str:
        """Evaluate ``expression`` with numexpr and return the result string.

        Raises:
            ValueError: if numexpr cannot evaluate the expression.
        """
        import numexpr

        try:
            local_dict = {"pi": math.pi, "e": math.e}
            output = str(
                numexpr.evaluate(
                    expression.strip(),
                    global_dict={},  # restrict access to globals
                    local_dict=local_dict,  # add common mathematical functions
                ),
            )
        except Exception as e:
            msg = (
                f'LLMMathChain._evaluate("{expression}") raised error: {e}.'
                " Please try again with a valid numerical expression"
            )
            raise ValueError(msg) from e

        # Remove any leading and trailing brackets from the output
        return re.sub(r"^\[|\]$", "", output)

    def _process_llm_result(
        self,
        llm_output: str,
        run_manager: CallbackManagerForChainRun,
    ) -> dict[str, str]:
        """Extract the expression from the LLM output and compute the answer.

        Raises:
            ValueError: if the LLM output matches none of the known formats.
        """
        run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        # Preferred format: a fenced ``text`` block holding the expression.
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            run_manager.on_text("\nAnswer: ", verbose=self.verbose)
            run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            # The model answered directly without emitting an expression.
            answer = llm_output
        elif "Answer:" in llm_output:
            answer = "Answer: " + llm_output.split("Answer:")[-1]
        else:
            msg = f"unknown format from LLM: {llm_output}"
            raise ValueError(msg)
        return {self.output_key: answer}

    async def _aprocess_llm_result(
        self,
        llm_output: str,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> dict[str, str]:
        """Async counterpart of ``_process_llm_result``."""
        await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
            await run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            answer = llm_output
        elif "Answer:" in llm_output:
            answer = "Answer: " + llm_output.split("Answer:")[-1]
        else:
            msg = f"unknown format from LLM: {llm_output}"
            raise ValueError(msg)
        return {self.output_key: answer}

    def _call(
        self,
        inputs: dict[str, str],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Translate the question to an expression, evaluate it, and answer."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        _run_manager.on_text(inputs[self.input_key])
        # Stop generation at "```output" so the model does not fabricate a result.
        llm_output = self.llm_chain.predict(
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=_run_manager.get_child(),
        )
        return self._process_llm_result(llm_output, _run_manager)

    async def _acall(
        self,
        inputs: dict[str, str],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Async counterpart of ``_call``."""
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        await _run_manager.on_text(inputs[self.input_key])
        llm_output = await self.llm_chain.apredict(
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=_run_manager.get_child(),
        )
        return await self._aprocess_llm_result(llm_output, _run_manager)

    @property
    def _chain_type(self) -> str:
        # Identifier used for serialization.
        return "llm_math_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate = PROMPT,
        **kwargs: Any,
    ) -> LLMMathChain:
        """Create a LLMMathChain from a language model.

        Args:
            llm: a language model
            prompt: a prompt template
            **kwargs: additional arguments

        Returns:
            The constructed chain.
        """
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/llm_math/base.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py | """Chain for summarization with self-verification."""
from __future__ import annotations
import logging
import warnings
from pathlib import Path
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.prompt import PromptTemplate
from pydantic import ConfigDict, model_validator
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.sequential import SequentialChain
# Directory holding the prompt template text files that ship with this module.
PROMPTS_DIR = Path(__file__).parent / "prompts"

logger = logging.getLogger(__name__)

# Default prompt for each verification step, loaded from disk at import time.
CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "create_facts.txt")
CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "check_facts.txt")
REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "revise_summary.txt")
ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "are_all_true_prompt.txt")
def _load_sequential_chain(
    llm: BaseLanguageModel,
    create_assertions_prompt: PromptTemplate,
    check_assertions_prompt: PromptTemplate,
    revised_summary_prompt: PromptTemplate,
    are_all_true_prompt: PromptTemplate,
    *,
    verbose: bool = False,
) -> SequentialChain:
    """Assemble the create/check/revise/verify steps into one sequential chain.

    Each step is an ``LLMChain`` over ``llm`` writing to its own output key;
    the overall chain consumes ``summary`` and exposes ``all_true`` and
    ``revised_summary``.
    """
    steps = (
        (create_assertions_prompt, "assertions"),
        (check_assertions_prompt, "checked_assertions"),
        (revised_summary_prompt, "revised_summary"),
        (are_all_true_prompt, "all_true"),
    )
    return SequentialChain(
        chains=[
            LLMChain(llm=llm, prompt=step_prompt, output_key=step_key, verbose=verbose)
            for step_prompt, step_key in steps
        ],
        input_variables=["summary"],
        output_variables=["all_true", "revised_summary"],
        verbose=verbose,
    )
@deprecated(
    since="0.2.13",
    message=(
        "See LangGraph guides for a variety of self-reflection and corrective "
        "strategies for question-answering and other tasks: "
        "https://docs.langchain.com/oss/python/langgraph/agentic-rag"
    ),
    removal="1.0",
)
class LLMSummarizationCheckerChain(Chain):
    """Chain for question-answering with self-verification.

    The chain extracts factual assertions from a summary, checks them,
    revises the summary accordingly, and repeats until every assertion is
    reported true or ``max_checks`` iterations have run.

    Example:
    ```python
    from langchain_openai import OpenAI
    from langchain_classic.chains import LLMSummarizationCheckerChain

    model = OpenAI(temperature=0.0)
    checker_chain = LLMSummarizationCheckerChain.from_llm(model)
    ```
    """

    # create assertions -> check -> revise summary -> are-all-true verdict;
    # assembled by _load_sequential_chain.
    sequential_chain: SequentialChain
    llm: BaseLanguageModel | None = None
    """[Deprecated] LLM wrapper to use."""
    create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT
    """[Deprecated]"""
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    """[Deprecated]"""
    revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT
    """[Deprecated]"""
    are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT
    """[Deprecated]"""
    # Names of the single input / output keys exposed by this chain.
    input_key: str = "query"
    output_key: str = "result"
    max_checks: int = 2
    """Maximum number of times to check the assertions. Default to double-checking."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        """Warn on legacy ``llm`` usage and build ``sequential_chain`` from it."""
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an LLMSummarizationCheckerChain with an llm is "
                "deprecated. Please instantiate with"
                " sequential_chain argument or using the from_llm class method.",
                stacklevel=5,
            )
            if "sequential_chain" not in values and values["llm"] is not None:
                # Assemble the four-step chain from the legacy llm + prompt
                # fields, falling back to the module defaults.
                values["sequential_chain"] = _load_sequential_chain(
                    values["llm"],
                    values.get("create_assertions_prompt", CREATE_ASSERTIONS_PROMPT),
                    values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
                    values.get("revised_summary_prompt", REVISED_SUMMARY_PROMPT),
                    values.get("are_all_true_prompt", ARE_ALL_TRUE_PROMPT),
                    verbose=values.get("verbose", False),
                )
        return values

    @property
    def input_keys(self) -> list[str]:
        """Return the singular input key."""
        return [self.input_key]

    @property
    def output_keys(self) -> list[str]:
        """Return the singular output key."""
        return [self.output_key]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Iteratively check and revise the summary until all assertions hold.

        Raises:
            ValueError: if the loop produced no output (e.g. ``max_checks`` <= 0).
        """
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        all_true = False
        count = 0
        output = None
        original_input = inputs[self.input_key]
        chain_input = original_input
        # The loop only exits via the break below or the max_checks bound;
        # `all_true` itself is never reassigned.
        while not all_true and count < self.max_checks:
            output = self.sequential_chain(
                {"summary": chain_input},
                callbacks=_run_manager.get_child(),
            )
            count += 1

            if output["all_true"].strip() == "True":
                break

            if self.verbose:
                logger.info(output["revised_summary"])

            # Feed the revised summary back in for another verification pass.
            chain_input = output["revised_summary"]

        if not output:
            msg = "No output from chain"
            raise ValueError(msg)

        return {self.output_key: output["revised_summary"].strip()}

    @property
    def _chain_type(self) -> str:
        # Identifier used for serialization.
        return "llm_summarization_checker_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT,
        check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
        revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT,
        are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT,
        verbose: bool = False,  # noqa: FBT001,FBT002
        **kwargs: Any,
    ) -> LLMSummarizationCheckerChain:
        """Create a LLMSummarizationCheckerChain from a language model.

        Args:
            llm: a language model
            create_assertions_prompt: prompt to create assertions
            check_assertions_prompt: prompt to check assertions
            revised_summary_prompt: prompt to revise summary
            are_all_true_prompt: prompt to check if all assertions are true
            verbose: whether to print verbose output
            **kwargs: additional arguments

        Returns:
            The constructed chain.
        """
        chain = _load_sequential_chain(
            llm,
            create_assertions_prompt,
            check_assertions_prompt,
            revised_summary_prompt,
            are_all_true_prompt,
            verbose=verbose,
        )
        return cls(sequential_chain=chain, verbose=verbose, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/llm_summarization_checker/base.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/loading.py | """Functionality for loading chains."""
from __future__ import annotations
import json
from pathlib import Path
from typing import TYPE_CHECKING, Any
import yaml
from langchain_core._api import deprecated
from langchain_core.prompts.loading import (
_load_output_parser,
load_prompt,
load_prompt_from_config,
)
from langchain_classic.chains import ReduceDocumentsChain
from langchain_classic.chains.api.base import APIChain
from langchain_classic.chains.base import Chain
from langchain_classic.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain_classic.chains.combine_documents.map_rerank import (
MapRerankDocumentsChain,
)
from langchain_classic.chains.combine_documents.refine import RefineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.llm_checker.base import LLMCheckerChain
from langchain_classic.chains.llm_math.base import LLMMathChain
from langchain_classic.chains.qa_with_sources.base import QAWithSourcesChain
from langchain_classic.chains.qa_with_sources.retrieval import (
RetrievalQAWithSourcesChain,
)
from langchain_classic.chains.qa_with_sources.vector_db import (
VectorDBQAWithSourcesChain,
)
from langchain_classic.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain
from langchain_classic.chains.llm_requests import LLMRequestsChain
# `load_llm` / `load_llm_from_config` live in the optional langchain_community
# package; when it is absent, install stubs that raise a helpful ImportError
# only at call time, so this module still imports cleanly.
try:
    from langchain_community.llms.loading import load_llm, load_llm_from_config
except ImportError:

    def load_llm(*_: Any, **__: Any) -> None:
        """Import error for load_llm."""
        msg = (
            "To use this load_llm functionality you must install the "
            "langchain_community package. "
            "You can install it with `pip install langchain_community`"
        )
        raise ImportError(msg)

    def load_llm_from_config(*_: Any, **__: Any) -> None:
        """Import error for load_llm_from_config."""
        msg = (
            "To use this load_llm_from_config functionality you must install the "
            "langchain_community package. "
            "You can install it with `pip install langchain_community`"
        )
        raise ImportError(msg)


# Base URL of the legacy langchain-hub repository of hosted chain configs.
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
    """Load LLM chain from config dict.

    Both the LLM and the prompt are required, either inline (``llm`` /
    ``prompt``) or as a path to a saved config (``llm_path`` /
    ``prompt_path``).

    Raises:
        ValueError: if the LLM or the prompt specification is missing.
    """
    if "llm" in config:
        llm = load_llm_from_config(config.pop("llm"), **kwargs)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"), **kwargs)
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")

    if "prompt" in config:
        prompt = load_prompt_from_config(config.pop("prompt"))
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")

    # Attach the output parser (if any) described by the remaining config.
    _load_output_parser(config)

    return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
    """Load hypothetical document embedder chain from config dict.

    Requires an ``llm_chain`` (inline or via ``llm_chain_path``) and an
    ``embeddings`` instance supplied through ``kwargs``.

    Raises:
        ValueError: if the llm chain spec or the embeddings are missing.
    """
    if "llm_chain" in config:
        llm_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if "embeddings" not in kwargs:
        raise ValueError("`embeddings` must be present.")

    return HypotheticalDocumentEmbedder(
        llm_chain=llm_chain,
        base_embeddings=kwargs.pop("embeddings"),
        **config,
    )
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    """Load a stuff-documents chain from a config dict.

    Raises:
        ValueError: if the llm chain or document prompt spec is missing, or
            the loaded chain is not an ``LLMChain``.
    """
    if "llm_chain" in config:
        llm_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")  # noqa: TRY004

    if "document_prompt" in config:
        document_prompt = load_prompt_from_config(config.pop("document_prompt"))
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        raise ValueError(
            "One of `document_prompt` or `document_prompt_path` must be present."
        )

    return StuffDocumentsChain(
        llm_chain=llm_chain,
        document_prompt=document_prompt,
        **config,
    )
def _load_map_reduce_documents_chain(
    config: dict,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    """Load a map-reduce documents chain from a config dict.

    Raises:
        ValueError: if the llm chain spec is missing or the loaded chain is
            not an ``LLMChain``.
    """
    if "llm_chain" in config:
        llm_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")  # noqa: TRY004

    if "reduce_documents_chain" in config:
        reduce_documents_chain = load_chain_from_config(
            config.pop("reduce_documents_chain"),
            **kwargs,
        )
    elif "reduce_documents_chain_path" in config:
        reduce_documents_chain = load_chain(
            config.pop("reduce_documents_chain_path"),
            **kwargs,
        )
    else:
        # Legacy configs inline the reduce-chain fields at this level.
        reduce_documents_chain = _load_reduce_documents_chain(config, **kwargs)

    return MapReduceDocumentsChain(
        llm_chain=llm_chain,
        reduce_documents_chain=reduce_documents_chain,
        **config,
    )
def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
    """Load a reduce-documents chain from a config dict.

    The combine chain is required; the collapse chain is optional. Both the
    current plural key names (``combine_documents_chain`` /
    ``collapse_documents_chain``) and the legacy singular spellings
    (``combine_document_chain`` / ``collapse_document_chain``) are accepted,
    as are the ``*_path`` variants that load a nested config from disk.

    Raises:
        ValueError: if no combine-chain key is present.
    """
    combine_documents_chain = None
    collapse_documents_chain = None

    # Combine chain (required): current key, then legacy alias, then paths.
    if "combine_documents_chain" in config:
        combine_document_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(
            combine_document_chain_config,
            **kwargs,
        )
    elif "combine_document_chain" in config:
        combine_document_chain_config = config.pop("combine_document_chain")
        combine_documents_chain = load_chain_from_config(
            combine_document_chain_config,
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    elif "combine_document_chain_path" in config:
        combine_documents_chain = load_chain(
            config.pop("combine_document_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)

    # Collapse chain (optional): an explicit null in the config means "none".
    if "collapse_documents_chain" in config:
        collapse_document_chain_config = config.pop("collapse_documents_chain")
        if collapse_document_chain_config is None:
            collapse_documents_chain = None
        else:
            collapse_documents_chain = load_chain_from_config(
                collapse_document_chain_config,
                **kwargs,
            )
    elif "collapse_documents_chain_path" in config:
        collapse_documents_chain = load_chain(
            config.pop("collapse_documents_chain_path"),
            **kwargs,
        )
    elif "collapse_document_chain" in config:
        collapse_document_chain_config = config.pop("collapse_document_chain")
        if collapse_document_chain_config is None:
            collapse_documents_chain = None
        else:
            collapse_documents_chain = load_chain_from_config(
                collapse_document_chain_config,
                **kwargs,
            )
    elif "collapse_document_chain_path" in config:
        collapse_documents_chain = load_chain(
            config.pop("collapse_document_chain_path"),
            **kwargs,
        )

    return ReduceDocumentsChain(
        combine_documents_chain=combine_documents_chain,
        collapse_documents_chain=collapse_documents_chain,
        **config,
    )
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
    """Load LLM Bash chain from config dict.

    Always raises: the chain was removed from LangChain for security reasons.
    """
    raise NotImplementedError(
        "LLMBash Chain is not available through LangChain anymore. "
        "The relevant code can be found in langchain_experimental, "
        "but it is not appropriate for production usage due to security "
        "concerns. Please refer to langchain-experimental repository for more details."
    )
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
    """Load an ``LLMCheckerChain`` from a config dict.

    The LLM is required, either inline (``llm``) or as a path to a saved
    config (``llm_path``). Each of the four prompts is optional and may be
    given inline or via its ``*_path`` variant; prompts that are absent fall
    back to the ``LLMCheckerChain`` class defaults.

    Args:
        config: Chain config; consumed (keys are popped) while loading.
        **kwargs: Extra arguments forwarded to the nested loaders.

    Returns:
        The constructed ``LLMCheckerChain``.

    Raises:
        ValueError: If neither ``llm`` nor ``llm_path`` is present.
    """
    if "llm" in config:
        llm = load_llm_from_config(config.pop("llm"), **kwargs)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"), **kwargs)
    else:
        msg = "One of `llm` or `llm_path` must be present."
        raise ValueError(msg)
    # Bug fix: the original referenced every prompt variable unconditionally
    # in the constructor call, so omitting any prompt from the config raised
    # NameError. Only pass the prompts that were actually provided, letting
    # the class defaults apply for the rest.
    prompt_kwargs: dict[str, Any] = {}
    for prompt_key in (
        "create_draft_answer_prompt",
        "list_assertions_prompt",
        "check_assertions_prompt",
        "revised_answer_prompt",
    ):
        if prompt_key in config:
            prompt_kwargs[prompt_key] = load_prompt_from_config(config.pop(prompt_key))
        elif f"{prompt_key}_path" in config:
            prompt_kwargs[prompt_key] = load_prompt(config.pop(f"{prompt_key}_path"))
    return LLMCheckerChain(llm=llm, **prompt_kwargs, **config)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
    """Load an ``LLMMathChain`` from a config dict.

    Accepts the current ``llm_chain`` / ``llm_chain_path`` keys as well as the
    deprecated ``llm`` / ``llm_path`` keys from older configs. The ``prompt``
    (or ``prompt_path``) is optional; when absent, the class default applies.

    Args:
        config: Chain config; consumed (keys are popped) while loading.
        **kwargs: Extra arguments forwarded to the nested loaders.

    Returns:
        The constructed ``LLMMathChain``.

    Raises:
        ValueError: If no llm-chain (or legacy llm) key is present.
    """
    llm_chain = None
    llm = None
    if "llm_chain" in config:
        llm_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    # llm attribute is deprecated in favor of llm_chain, here to support old configs
    elif "llm" in config:
        llm = load_llm_from_config(config.pop("llm"), **kwargs)
    # llm_path attribute is deprecated in favor of llm_chain_path,
    # its to support old configs
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"), **kwargs)
    else:
        msg = "One of `llm_chain` or `llm_chain_path` must be present."
        raise ValueError(msg)
    # Bug fix: the original passed `prompt` unconditionally, raising NameError
    # when neither `prompt` nor `prompt_path` was in the config. Only forward
    # the prompt when it was actually provided so the class default applies.
    prompt_kwargs: dict[str, Any] = {}
    if "prompt" in config:
        prompt_kwargs["prompt"] = load_prompt_from_config(config.pop("prompt"))
    elif "prompt_path" in config:
        prompt_kwargs["prompt"] = load_prompt(config.pop("prompt_path"))
    if llm_chain:
        return LLMMathChain(llm_chain=llm_chain, **prompt_kwargs, **config)
    return LLMMathChain(llm=llm, **prompt_kwargs, **config)
def _load_map_rerank_documents_chain(
    config: dict,
    **kwargs: Any,
) -> MapRerankDocumentsChain:
    """Build a ``MapRerankDocumentsChain`` from a config dict.

    The inner LLM chain comes either inline (``llm_chain``) or from a file
    path (``llm_chain_path``); any remaining config keys are forwarded to
    the chain constructor.
    """
    if "llm_chain" in config:
        inner_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        inner_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    else:
        msg = "One of `llm_chain` or `llm_chain_path` must be present."
        raise ValueError(msg)
    return MapRerankDocumentsChain(llm_chain=inner_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
msg = (
"PALChain is not available through LangChain anymore. "
"The relevant code can be found in langchain_experimental, "
"but it is not appropriate for production usage due to security "
"concerns. Please refer to langchain-experimental repository for more details."
)
raise NotImplementedError(msg)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    """Load a ``RefineDocumentsChain`` from a config dict.

    Requires serialized ``initial_llm_chain`` and ``refine_llm_chain``
    sub-configs (inline or as file paths); ``document_prompt`` is optional.

    Args:
        config: Serialized chain config; consumed (keys are popped).
        **kwargs: Forwarded to the nested loaders.

    Returns:
        The reconstructed ``RefineDocumentsChain``.

    Raises:
        ValueError: If either required sub-chain is missing.
    """
    if "initial_llm_chain" in config:
        initial_llm_chain = load_chain_from_config(
            config.pop("initial_llm_chain"),
            **kwargs,
        )
    elif "initial_llm_chain_path" in config:
        initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"), **kwargs)
    else:
        msg = "One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
        raise ValueError(msg)
    if "refine_llm_chain" in config:
        refine_llm_chain = load_chain_from_config(
            config.pop("refine_llm_chain"),
            **kwargs,
        )
    elif "refine_llm_chain_path" in config:
        refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"), **kwargs)
    else:
        msg = "One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
        raise ValueError(msg)
    # Only forward document_prompt when one was serialized; the previous code
    # raised UnboundLocalError for configs without it, and passing None would
    # clobber the chain's default prompt.
    prompt_kwargs: dict[str, Any] = {}
    if "document_prompt" in config:
        prompt_kwargs["document_prompt"] = load_prompt_from_config(
            config.pop("document_prompt"),
        )
    elif "document_prompt_path" in config:
        prompt_kwargs["document_prompt"] = load_prompt(
            config.pop("document_prompt_path"),
        )
    return RefineDocumentsChain(
        initial_llm_chain=initial_llm_chain,
        refine_llm_chain=refine_llm_chain,
        **prompt_kwargs,
        **config,
    )
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
    """Build a ``QAWithSourcesChain`` from a config dict.

    The combine-documents sub-chain is read either inline
    (``combine_documents_chain``) or from a file
    (``combine_documents_chain_path``).
    """
    if "combine_documents_chain" in config:
        docs_chain = load_chain_from_config(
            config.pop("combine_documents_chain"),
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        docs_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)
    return QAWithSourcesChain(combine_documents_chain=docs_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
"""Load SQL Database chain from config dict."""
msg = (
"SQLDatabaseChain is not available through LangChain anymore. "
"The relevant code can be found in langchain_experimental, "
"but it is not appropriate for production usage due to security "
"concerns. Please refer to langchain-experimental repository for more details, "
"or refer to this tutorial for best practices: "
"https://python.langchain.com/docs/tutorials/sql_qa/"
)
raise NotImplementedError(msg)
def _load_vector_db_qa_with_sources_chain(
    config: dict,
    **kwargs: Any,
) -> VectorDBQAWithSourcesChain:
    """Build a ``VectorDBQAWithSourcesChain`` from a config dict.

    Requires a ``vectorstore`` keyword argument and a serialized
    combine-documents sub-chain (inline or as a file path).
    """
    if "vectorstore" not in kwargs:
        msg = "`vectorstore` must be present."
        raise ValueError(msg)
    vectorstore = kwargs.pop("vectorstore")
    if "combine_documents_chain" in config:
        docs_chain = load_chain_from_config(
            config.pop("combine_documents_chain"),
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        docs_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)
    return VectorDBQAWithSourcesChain(
        combine_documents_chain=docs_chain,
        vectorstore=vectorstore,
        **config,
    )
def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA:
    """Build a ``RetrievalQA`` chain from a config dict.

    Requires a ``retriever`` keyword argument and a serialized
    combine-documents sub-chain (inline or as a file path).
    """
    if "retriever" not in kwargs:
        msg = "`retriever` must be present."
        raise ValueError(msg)
    retriever = kwargs.pop("retriever")
    if "combine_documents_chain" in config:
        docs_chain = load_chain_from_config(
            config.pop("combine_documents_chain"),
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        docs_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)
    return RetrievalQA(
        combine_documents_chain=docs_chain,
        retriever=retriever,
        **config,
    )
def _load_retrieval_qa_with_sources_chain(
    config: dict,
    **kwargs: Any,
) -> RetrievalQAWithSourcesChain:
    """Build a ``RetrievalQAWithSourcesChain`` from a config dict.

    Requires a ``retriever`` keyword argument and a serialized
    combine-documents sub-chain (inline or as a file path).
    """
    if "retriever" not in kwargs:
        msg = "`retriever` must be present."
        raise ValueError(msg)
    retriever = kwargs.pop("retriever")
    if "combine_documents_chain" in config:
        docs_chain = load_chain_from_config(
            config.pop("combine_documents_chain"),
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        docs_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)
    return RetrievalQAWithSourcesChain(
        combine_documents_chain=docs_chain,
        retriever=retriever,
        **config,
    )
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
    """Build a ``VectorDBQA`` chain from a config dict.

    Requires a ``vectorstore`` keyword argument and a serialized
    combine-documents sub-chain (inline or as a file path).
    """
    if "vectorstore" not in kwargs:
        msg = "`vectorstore` must be present."
        raise ValueError(msg)
    vectorstore = kwargs.pop("vectorstore")
    if "combine_documents_chain" in config:
        docs_chain = load_chain_from_config(
            config.pop("combine_documents_chain"),
            **kwargs,
        )
    elif "combine_documents_chain_path" in config:
        docs_chain = load_chain(
            config.pop("combine_documents_chain_path"),
            **kwargs,
        )
    else:
        msg = (
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
        raise ValueError(msg)
    return VectorDBQA(
        combine_documents_chain=docs_chain,
        vectorstore=vectorstore,
        **config,
    )
def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
    """Build a ``GraphCypherQAChain`` from a config dict.

    Requires a ``graph`` keyword argument plus serialized
    ``cypher_generation_chain`` and ``qa_chain`` sub-configs. The chain class
    itself is imported lazily from langchain_community.
    """
    if "graph" not in kwargs:
        msg = "`graph` must be present."
        raise ValueError(msg)
    graph = kwargs.pop("graph")
    if "cypher_generation_chain" not in config:
        msg = "`cypher_generation_chain` must be present."
        raise ValueError(msg)
    cypher_generation_chain = load_chain_from_config(
        config.pop("cypher_generation_chain"),
        **kwargs,
    )
    if "qa_chain" not in config:
        msg = "`qa_chain` must be present."
        raise ValueError(msg)
    qa_chain = load_chain_from_config(config.pop("qa_chain"), **kwargs)
    try:
        from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain
    except ImportError as e:
        msg = (
            "To use this GraphCypherQAChain functionality you must install the "
            "langchain_community package. "
            "You can install it with `pip install langchain_community`"
        )
        raise ImportError(msg) from e
    return GraphCypherQAChain(
        graph=graph,
        cypher_generation_chain=cypher_generation_chain,
        qa_chain=qa_chain,
        **config,
    )
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
    """Load an ``APIChain`` from a config dict.

    Requires serialized ``api_request_chain`` and ``api_answer_chain``
    sub-configs (inline or as file paths) plus a ``requests_wrapper`` keyword
    argument.

    Args:
        config: Serialized chain config; consumed (keys are popped).
        **kwargs: Forwarded to the nested loaders; must contain
            ``requests_wrapper``.

    Returns:
        The reconstructed ``APIChain``.

    Raises:
        ValueError: If a required sub-chain or the requests wrapper is missing.
    """
    if "api_request_chain" in config:
        api_request_chain = load_chain_from_config(
            config.pop("api_request_chain"),
            **kwargs,
        )
    elif "api_request_chain_path" in config:
        # Forward **kwargs here too, consistent with the answer-chain branch
        # below (they were previously dropped, so nested chains missed
        # overrides such as verbose/memory).
        api_request_chain = load_chain(config.pop("api_request_chain_path"), **kwargs)
    else:
        msg = "One of `api_request_chain` or `api_request_chain_path` must be present."
        raise ValueError(msg)
    if "api_answer_chain" in config:
        api_answer_chain = load_chain_from_config(
            config.pop("api_answer_chain"),
            **kwargs,
        )
    elif "api_answer_chain_path" in config:
        api_answer_chain = load_chain(config.pop("api_answer_chain_path"), **kwargs)
    else:
        msg = "One of `api_answer_chain` or `api_answer_chain_path` must be present."
        raise ValueError(msg)
    if "requests_wrapper" not in kwargs:
        msg = "`requests_wrapper` must be present."
        raise ValueError(msg)
    requests_wrapper = kwargs.pop("requests_wrapper")
    return APIChain(
        api_request_chain=api_request_chain,
        api_answer_chain=api_answer_chain,
        requests_wrapper=requests_wrapper,
        **config,
    )
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
    """Build an ``LLMRequestsChain`` from a config dict.

    The chain class is imported lazily; an optional ``requests_wrapper``
    keyword argument is forwarded to the constructor when supplied.
    """
    try:
        from langchain_classic.chains.llm_requests import LLMRequestsChain
    except ImportError as e:
        msg = (
            "To use this LLMRequestsChain functionality you must install the "
            "langchain package. "
            "You can install it with `pip install langchain`"
        )
        raise ImportError(msg) from e
    if "llm_chain" in config:
        inner_chain = load_chain_from_config(config.pop("llm_chain"), **kwargs)
    elif "llm_chain_path" in config:
        inner_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
    else:
        msg = "One of `llm_chain` or `llm_chain_path` must be present."
        raise ValueError(msg)
    if "requests_wrapper" in kwargs:
        return LLMRequestsChain(
            llm_chain=inner_chain,
            requests_wrapper=kwargs.pop("requests_wrapper"),
            **config,
        )
    return LLMRequestsChain(llm_chain=inner_chain, **config)
# Registry mapping the serialized `_type` tag of a chain config to the loader
# function that reconstructs that chain from its config dict. Used by
# load_chain_from_config for dispatch; entries whose loaders always raise
# (pal_chain, sql_database_chain) are kept so old configs fail with a clear
# message instead of "not supported".
type_to_loader_dict = {
    "api_chain": _load_api_chain,
    "hyde_chain": _load_hyde_chain,
    "llm_chain": _load_llm_chain,
    "llm_bash_chain": _load_llm_bash_chain,
    "llm_checker_chain": _load_llm_checker_chain,
    "llm_math_chain": _load_llm_math_chain,
    "llm_requests_chain": _load_llm_requests_chain,
    "pal_chain": _load_pal_chain,
    "qa_with_sources_chain": _load_qa_with_sources_chain,
    "stuff_documents_chain": _load_stuff_documents_chain,
    "map_reduce_documents_chain": _load_map_reduce_documents_chain,
    "reduce_documents_chain": _load_reduce_documents_chain,
    "map_rerank_documents_chain": _load_map_rerank_documents_chain,
    "refine_documents_chain": _load_refine_documents_chain,
    "sql_database_chain": _load_sql_database_chain,
    "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
    "vector_db_qa": _load_vector_db_qa,
    "retrieval_qa": _load_retrieval_qa,
    "retrieval_qa_with_sources_chain": _load_retrieval_qa_with_sources_chain,
    "graph_cypher_chain": _load_graph_cypher_chain,
}
@deprecated(
    since="0.2.13",
    message=(
        "This function is deprecated and will be removed in langchain 1.0. "
        "At that point chains must be imported from their respective modules."
    ),
    removal="1.0",
)
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
    """Load chain from Config Dict.

    Dispatches on the config's ``_type`` tag via ``type_to_loader_dict``.
    """
    if "_type" not in config:
        msg = "Must specify a chain Type in config"
        raise ValueError(msg)
    chain_type = config.pop("_type")
    if chain_type not in type_to_loader_dict:
        msg = f"Loading {chain_type} chain not supported"
        raise ValueError(msg)
    return type_to_loader_dict[chain_type](config, **kwargs)
@deprecated(
    since="0.2.13",
    message=(
        "This function is deprecated and will be removed in langchain 1.0. "
        "At that point chains must be imported from their respective modules."
    ),
    removal="1.0",
)
def load_chain(path: str | Path, **kwargs: Any) -> Chain:
    """Unified method for loading a chain from LangChainHub or local fs.

    Legacy hub references (``lc://...``) are no longer supported and raise;
    everything else is treated as a local file path.
    """
    points_at_legacy_hub = isinstance(path, str) and path.startswith("lc://")
    if points_at_legacy_hub:
        msg = (
            "Loading from the deprecated github-based Hub is no longer supported. "
            "Please use the new LangChain Hub at https://smith.langchain.com/hub "
            "instead."
        )
        raise RuntimeError(msg)
    return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: str | Path, **kwargs: Any) -> Chain:
    """Load chain from file.

    Supports ``.json`` and ``.yaml``/``.yml`` serializations; ``verbose`` and
    ``memory`` kwargs override the values stored in the file.
    """
    file_path = Path(file) if isinstance(file, str) else file
    # Pick the parser from the file extension before touching the file.
    suffix = file_path.suffix
    if suffix == ".json":
        parse = json.load
    elif suffix.endswith((".yaml", ".yml")):
        parse = yaml.safe_load
    else:
        msg = "File type must be json or yaml"
        raise ValueError(msg)
    with file_path.open() as f:
        config = parse(f)
    # Override default 'verbose' and 'memory' for the chain
    for override in ("verbose", "memory"):
        if override in kwargs:
            config[override] = kwargs.pop(override)
    return load_chain_from_config(config, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/loading.py",
"license": "MIT License",
"lines": 666,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/natbot/base.py | """Implement an LLM driven browser."""
from __future__ import annotations
import warnings
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import Runnable
from pydantic import ConfigDict, model_validator
from langchain_classic.chains.base import Chain
from langchain_classic.chains.natbot.prompt import PROMPT
@deprecated(
    since="0.2.13",
    message=(
        "Importing NatBotChain from langchain is deprecated and will be removed in "
        "langchain 1.0. Please import from langchain_community instead: "
        "from langchain_community.chains.natbot import NatBotChain. "
        "You may need to pip install -U langchain-community."
    ),
    removal="1.0",
)
class NatBotChain(Chain):
    """Implement an LLM driven browser.
    **Security Note**: This toolkit provides code to control a web-browser.
    The web-browser can be used to navigate to:
    - Any URL (including any internal network URLs)
    - And local files
    Exercise care if exposing this chain to end-users. Control who is able to
    access and use this chain, and isolate the network access of the server
    that hosts this chain.
    See https://docs.langchain.com/oss/python/security-policy for more information.
    Example:
    ```python
    from langchain_classic.chains import NatBotChain
    natbot = NatBotChain.from_default("Buy me a new hat.")
    ```
    """
    # Runnable mapping {objective, url, previous_command, browser_content}
    # to the next browser command text (see PROMPT and _call below).
    llm_chain: Runnable
    objective: str
    """Objective that NatBot is tasked with completing."""
    llm: BaseLanguageModel | None = None
    """[Deprecated] LLM wrapper to use."""
    # Input/output key names used by Chain's dict-based calling convention.
    input_url_key: str = "url"
    input_browser_content_key: str = "browser_content"
    # Last command emitted; mutated by _call, so instances carry state
    # between invocations.
    previous_command: str = ""
    output_key: str = "command"
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )
    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        """Warn on the deprecated `llm` arg and derive `llm_chain` from it."""
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an NatBotChain with an llm is deprecated. "
                "Please instantiate with llm_chain argument or using the from_llm "
                "class method.",
                stacklevel=5,
            )
            # Only synthesize llm_chain when the caller did not pass one.
            if "llm_chain" not in values and values["llm"] is not None:
                values["llm_chain"] = PROMPT | values["llm"] | StrOutputParser()
        return values
    @classmethod
    def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain:
        """Load with default LLMChain."""
        # Kept only so old call sites fail with guidance toward from_llm.
        msg = (
            "This method is no longer implemented. Please use from_llm."
            "model = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)"
            "For example, NatBotChain.from_llm(model, objective)"
        )
        raise NotImplementedError(msg)
    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        objective: str,
        **kwargs: Any,
    ) -> NatBotChain:
        """Load from LLM."""
        llm_chain = PROMPT | llm | StrOutputParser()
        return cls(llm_chain=llm_chain, objective=objective, **kwargs)
    @property
    def input_keys(self) -> list[str]:
        """Expect url and browser content."""
        return [self.input_url_key, self.input_browser_content_key]
    @property
    def output_keys(self) -> list[str]:
        """Return command."""
        return [self.output_key]
    def _call(
        self,
        inputs: dict[str, str],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        """Ask the LLM for the next browser command and remember it."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        url = inputs[self.input_url_key]
        browser_content = inputs[self.input_browser_content_key]
        llm_cmd = self.llm_chain.invoke(
            {
                "objective": self.objective,
                # URL and page content are truncated, presumably to bound
                # prompt size — TODO confirm limits match the prompt budget.
                "url": url[:100],
                "previous_command": self.previous_command,
                "browser_content": browser_content[:4500],
            },
            config={"callbacks": _run_manager.get_child()},
        )
        llm_cmd = llm_cmd.strip()
        # Stored so the next invocation can reference the prior command.
        self.previous_command = llm_cmd
        return {self.output_key: llm_cmd}
    def execute(self, url: str, browser_content: str) -> str:
        """Figure out next browser command to run.
        Args:
            url: URL of the site currently on.
            browser_content: Content of the page as currently displayed by the browser.
        Returns:
            Next browser command to run.
        Example:
            ```python
            browser_content = "...."
            llm_command = natbot.run("www.google.com", browser_content)
            ```
        """
        _inputs = {
            self.input_url_key: url,
            self.input_browser_content_key: browser_content,
        }
        return self(_inputs)[self.output_key]
    @property
    def _chain_type(self) -> str:
        return "nat_bot_chain"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/natbot/base.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/openai_functions/base.py | """Methods for creating chains that use OpenAI function-calling APIs."""
from collections.abc import Callable, Sequence
from typing import (
Any,
)
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import (
BaseLLMOutputParser,
)
from langchain_core.output_parsers.openai_functions import (
PydanticAttrOutputFunctionsParser,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.utils.function_calling import (
PYTHON_TO_JSON_TYPES,
convert_to_openai_function,
)
from pydantic import BaseModel
from langchain_classic.chains import LLMChain
from langchain_classic.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
get_openai_output_parser,
)
# Public surface of this module: a mix of re-exports kept for backwards
# compatibility and the two deprecated legacy chain constructors below.
__all__ = [
    "PYTHON_TO_JSON_TYPES",  # backwards compatibility
    "convert_to_openai_function",  # backwards compatibility
    "create_openai_fn_chain",  # deprecated
    "create_openai_fn_runnable",
    "create_structured_output_chain",  # deprecated
    "create_structured_output_runnable",  # deprecated
    "get_openai_output_parser",
]
@deprecated(since="0.1.1", removal="1.0", alternative="create_openai_fn_runnable")
def create_openai_fn_chain(
    functions: Sequence[dict[str, Any] | type[BaseModel] | Callable],
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate,
    *,
    enforce_single_function_usage: bool = True,
    output_key: str = "function",
    output_parser: BaseLLMOutputParser | None = None,
    **kwargs: Any,
) -> LLMChain:
    """[Legacy] Create an LLM chain that uses OpenAI functions.

    Args:
        functions: Function specs the model may call: raw OpenAI function
            dicts, pydantic ``BaseModel`` subclasses, or plain Python
            callables (the latter two are converted automatically, using their
            docstrings and field descriptions).
        llm: Language model assumed to support the OpenAI function-calling
            API.
        prompt: Prompt template passed to the model.
        enforce_single_function_usage: When exactly one function is given and
            this is True, the model is forced to call that function.
        output_key: Key under which the chain returns its output.
        output_parser: Parser for model outputs; inferred from the function
            types when omitted (pydantic models parse into instances,
            otherwise plain JSON).
        **kwargs: Extra keyword arguments forwarded to ``LLMChain``.

    Returns:
        An ``LLMChain`` configured to expose the given functions to the model.

    Raises:
        ValueError: If ``functions`` is empty.
    """
    if not functions:
        msg = "Need to pass in at least one function. Received zero."
        raise ValueError(msg)
    fn_specs = [convert_to_openai_function(fn) for fn in functions]
    parser = output_parser or get_openai_output_parser(functions)
    llm_kwargs: dict[str, Any] = {"functions": fn_specs}
    # With a single function, the model can be pinned to it via function_call.
    if enforce_single_function_usage and len(fn_specs) == 1:
        llm_kwargs["function_call"] = {"name": fn_specs[0]["name"]}
    return LLMChain(
        llm=llm,
        prompt=prompt,
        output_parser=parser,
        llm_kwargs=llm_kwargs,
        output_key=output_key,
        **kwargs,
    )
@deprecated(
    since="0.1.1",
    removal="1.0",
    alternative="ChatOpenAI.with_structured_output",
)
def create_structured_output_chain(
    output_schema: dict[str, Any] | type[BaseModel],
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate,
    *,
    output_key: str = "function",
    output_parser: BaseLLMOutputParser | None = None,
    **kwargs: Any,
) -> LLMChain:
    """[Legacy] Create an LLMChain that extracts a structured output.

    A single OpenAI function is synthesized from ``output_schema`` and the
    model is forced to call it, so responses always follow the schema.

    Args:
        output_schema: Either a valid JsonSchema dict or a pydantic
            ``BaseModel`` subclass describing the desired output.
        llm: Language model assumed to support the OpenAI function-calling
            API.
        prompt: Prompt template passed to the model.
        output_key: Key under which the chain returns its output.
        output_parser: Parser for model outputs; when the schema is a pydantic
            model and no parser is given, outputs are parsed back into that
            model.
        **kwargs: Extra keyword arguments forwarded to ``LLMChain``.

    Returns:
        An ``LLMChain`` that returns output matching the given schema.
    """
    if isinstance(output_schema, dict):
        # Raw JsonSchema: wrap it directly as the single callable function.
        fn: Any = {
            "name": "output_formatter",
            "description": (
                "Output formatter. Should always be used to format your response to the"
                " user."
            ),
            "parameters": output_schema,
        }
    else:
        # Pydantic schema: nest it under an `output` field so the parser can
        # pull the validated model back out of the function-call arguments.
        # NOTE: the class name and docstring become the OpenAI function name
        # and description, so they must stay as-is.
        class _OutputFormatter(BaseModel):
            """Output formatter.
            Should always be used to format your response to the user.
            """

            output: output_schema  # type: ignore[valid-type]

        fn = _OutputFormatter
        output_parser = output_parser or PydanticAttrOutputFunctionsParser(
            pydantic_schema=_OutputFormatter,
            attr_name="output",
        )
    return create_openai_fn_chain(
        [fn],
        llm,
        prompt,
        output_key=output_key,
        output_parser=output_parser,
        **kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/openai_functions/base.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/openai_functions/openapi.py | from __future__ import annotations
import json
import logging
import re
from collections import defaultdict
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import requests
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
from langchain_core.utils.input import get_colored_text
from requests import JSONDecodeError, Response
from typing_extensions import override
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.sequential import SequentialChain
if TYPE_CHECKING:
from langchain_community.utilities.openapi import OpenAPISpec
from openapi_pydantic import Parameter
_logger = logging.getLogger(__name__)
def _format_url(url: str, path_params: dict) -> str:
expected_path_param = re.findall(r"{(.*?)}", url)
new_params = {}
for param in expected_path_param:
clean_param = param.lstrip(".;").rstrip("*")
val = path_params[clean_param]
if isinstance(val, list):
if param[0] == ".":
sep = "." if param[-1] == "*" else ","
new_val = "." + sep.join(val)
elif param[0] == ";":
sep = f"{clean_param}=" if param[-1] == "*" else ","
new_val = f"{clean_param}=" + sep.join(val)
else:
new_val = ",".join(val)
elif isinstance(val, dict):
kv_sep = "=" if param[-1] == "*" else ","
kv_strs = [kv_sep.join((k, v)) for k, v in val.items()]
if param[0] == ".":
sep = "."
new_val = "."
elif param[0] == ";":
sep = ";"
new_val = ";"
else:
sep = ","
new_val = ""
new_val += sep.join(kv_strs)
elif param[0] == ".":
new_val = f".{val}"
elif param[0] == ";":
new_val = f";{clean_param}={val}"
else:
new_val = val
new_params[param] = new_val
return url.format(**new_params)
def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -> dict:
    """Collapse a list of OpenAPI parameters into one JSON Schema object.

    Each parameter becomes a property; required parameters are collected
    into the schema's ``required`` list.
    """
    properties = {}
    required = []
    for param in params:
        if param.param_schema:
            schema = spec.get_schema(param.param_schema)
        else:
            # No direct schema: fall back to the first declared media type.
            first_media = next(iter(param.content.values()))
            schema = spec.get_schema(first_media.media_type_schema)
        # Prefer the schema's own description; fill in from the parameter.
        if param.description and not schema.description:
            schema.description = param.description
        properties[param.name] = json.loads(schema.json(exclude_none=True))
        if param.required:
            required.append(param.name)
    return {"type": "object", "properties": properties, "required": required}
def openapi_spec_to_openai_fn(
    spec: OpenAPISpec,
) -> tuple[list[dict[str, Any]], Callable]:
    """OpenAPI spec to OpenAI function JSON Schema.
    Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
    functions.
    Args:
        spec: OpenAPI spec to convert.
    Returns:
        Tuple of the OpenAI functions JSON schema and a default function for executing
        a request based on the OpenAI function schema.
    """
    try:
        from langchain_community.tools import APIOperation
    except ImportError as e:
        msg = (
            "Could not import langchain_community.tools. "
            "Please install it with `pip install langchain-community`."
        )
        raise ImportError(msg) from e
    if not spec.paths:
        return [], lambda: None
    functions = []
    # Maps each generated function name (operation_id) to the HTTP method and
    # full URL needed to execute it; captured by default_call_api below.
    _name_to_call_map = {}
    for path in spec.paths:
        # Parameters declared at path level apply to every operation under
        # the path; keyed by (name, location) so operations can override them.
        path_params = {
            (p.name, p.param_in): p for p in spec.get_parameters_for_path(path)
        }
        for method in spec.get_methods_for_path(path):
            request_args = {}
            op = spec.get_operation(path, method)
            # Operation-level parameters override path-level ones with the
            # same (name, location) key.
            op_params = path_params.copy()
            for param in spec.get_parameters_for_operation(op):
                op_params[(param.name, param.param_in)] = param
            # Group parameters by their OpenAPI location (query/header/...).
            params_by_type = defaultdict(list)
            for name_loc, p in op_params.items():
                params_by_type[name_loc[1]].append(p)
            # OpenAPI parameter location -> keyword argument name understood
            # by requests.request (path_params is consumed locally instead).
            param_loc_to_arg_name = {
                "query": "params",
                "header": "headers",
                "cookie": "cookies",
                "path": "path_params",
            }
            for param_loc, arg_name in param_loc_to_arg_name.items():
                if params_by_type[param_loc]:
                    request_args[arg_name] = _openapi_params_to_json_schema(
                        params_by_type[param_loc],
                        spec,
                    )
            request_body = spec.get_request_body_for_operation(op)
            # TODO: Support more MIME types.
            if request_body and request_body.content:
                media_types = {}
                for media_type, media_type_object in request_body.content.items():
                    if media_type_object.media_type_schema:
                        schema = spec.get_schema(media_type_object.media_type_schema)
                        media_types[media_type] = json.loads(
                            schema.json(exclude_none=True),
                        )
                if len(media_types) == 1:
                    # Single media type: JSON bodies go under "json", any
                    # other content type under "data" (requests convention).
                    media_type, schema_dict = next(iter(media_types.items()))
                    key = "json" if media_type == "application/json" else "data"
                    request_args[key] = schema_dict
                elif len(media_types) > 1:
                    request_args["data"] = {"anyOf": list(media_types.values())}
            api_op = APIOperation.from_openapi_spec(spec, path, method)
            fn = {
                "name": api_op.operation_id,
                "description": api_op.description,
                "parameters": {
                    "type": "object",
                    "properties": request_args,
                },
            }
            functions.append(fn)
            _name_to_call_map[fn["name"]] = {
                "method": method,
                "url": api_op.base_url + api_op.path,
            }
    def default_call_api(
        name: str,
        fn_args: dict,
        headers: dict | None = None,
        params: dict | None = None,
        timeout: int | None = 30,
        **kwargs: Any,
    ) -> Any:
        """Execute the named operation via requests; mutates fn_args."""
        method = _name_to_call_map[name]["method"]
        url = _name_to_call_map[name]["url"]
        # Path params are substituted into the URL, not sent as kwargs.
        path_params = fn_args.pop("path_params", {})
        url = _format_url(url, path_params)
        # Dict bodies are serialized so requests sends them raw, matching
        # the non-JSON "data" convention used when building the schema.
        if "data" in fn_args and isinstance(fn_args["data"], dict):
            fn_args["data"] = json.dumps(fn_args["data"])
        _kwargs = {**fn_args, **kwargs}
        # Caller-supplied headers/params are merged on top of model output.
        if headers is not None:
            if "headers" in _kwargs:
                _kwargs["headers"].update(headers)
            else:
                _kwargs["headers"] = headers
        if params is not None:
            if "params" in _kwargs:
                _kwargs["params"].update(params)
            else:
                _kwargs["params"] = params
        return requests.request(method, url, **_kwargs, timeout=timeout)
    return functions, default_call_api
class SimpleRequestChain(Chain):
    """Chain that executes a single HTTP request against an API endpoint."""

    request_method: Callable
    """Method to use for making the request."""
    output_key: str = "response"
    """Key to use for the output of the request."""
    input_key: str = "function"
    """Key to use for the input of the request."""

    @property
    @override
    def input_keys(self) -> list[str]:
        return [self.input_key]

    @property
    @override
    def output_keys(self) -> list[str]:
        return [self.output_key]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        """Run the logic of this chain and return the output."""
        manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        # The input is a function-call dict produced by the upstream LLM chain.
        endpoint_name = inputs[self.input_key].pop("name")
        endpoint_args = inputs[self.input_key].pop("arguments")
        colored_name = get_colored_text(endpoint_name, "green")
        colored_args = get_colored_text(json.dumps(endpoint_args, indent=2), "green")
        manager.on_text(
            f"Calling endpoint {colored_name} with arguments:\n{colored_args}"
        )
        api_response: Response = self.request_method(endpoint_name, endpoint_args)
        if api_response.status_code == requests.codes.ok:
            # Prefer parsed JSON; fall back to raw text when the payload
            # is not valid JSON (or parsing fails unexpectedly).
            try:
                payload = api_response.json()
            except JSONDecodeError:
                payload = api_response.text
            except Exception:
                _logger.exception("Unexpected error parsing response as JSON")
                payload = api_response.text
        else:
            payload = (
                f"{api_response.status_code}: {api_response.reason}"
                f"\nFor {endpoint_name} "
                f"Called with args: {endpoint_args.get('params', '')}"
            )
        return {self.output_key: payload}
@deprecated(
    since="0.2.13",
    message=(
        "This function is deprecated and will be removed in langchain 1.0. "
        "See API reference for replacement: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
    ),
    removal="1.0",
)
def get_openapi_chain(
    spec: OpenAPISpec | str,
    llm: BaseLanguageModel | None = None,
    prompt: BasePromptTemplate | None = None,
    request_chain: Chain | None = None,
    llm_chain_kwargs: dict | None = None,
    verbose: bool = False,  # noqa: FBT001,FBT002
    headers: dict | None = None,
    params: dict | None = None,
    **kwargs: Any,
) -> SequentialChain:
    r"""Create a chain for querying an API from a OpenAPI spec.

    Note: this class is deprecated. See below for a replacement implementation.
    The benefits of this implementation are:

    - Uses LLM tool calling features to encourage properly-formatted API requests;
    - Includes async support.

    ```python
    from typing import Any

    from langchain_classic.chains.openai_functions.openapi import openapi_spec_to_openai_fn
    from langchain_community.utilities.openapi import OpenAPISpec
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_openai import ChatOpenAI

    # Define API spec. Can be JSON or YAML
    api_spec = \"\"\"
    {
        "openapi": "3.1.0",
        "info": {
            "title": "JSONPlaceholder API",
            "version": "1.0.0"
        },
        "servers": [
            {
                "url": "https://jsonplaceholder.typicode.com"
            }
        ],
        "paths": {
            "/posts": {
                "get": {
                    "summary": "Get posts",
                    "parameters": [
                        {
                            "name": "_limit",
                            "in": "query",
                            "required": false,
                            "schema": {
                                "type": "integer",
                                "example": 2
                            },
                            "description": "Limit the number of results"
                        }
                    ]
                }
            }
        }
    }
    \"\"\"

    parsed_spec = OpenAPISpec.from_text(api_spec)

    openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec)
    tools = [
        {"type": "function", "function": fn}
        for fn in openai_fns
    ]

    prompt = ChatPromptTemplate.from_template(
        "Use the provided APIs to respond to this user query:\\n\\n{query}"
    )

    model = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools)


    def _execute_tool(message) -> Any:
        if tool_calls := message.tool_calls:
            tool_call = message.tool_calls[0]
            response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"])
            response.raise_for_status()
            return response.json()
        else:
            return message.content


    chain = prompt | model | _execute_tool
    ```

    ```python
    response = chain.invoke({"query": "Get me top two posts."})
    ```

    Args:
        spec: OpenAPISpec or url/file/text string corresponding to one.
        llm: language model, should be an OpenAI function-calling model, e.g.
            `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
        prompt: Main prompt template to use.
        request_chain: Chain for taking the functions output and executing the request.
        params: Request parameters.
        headers: Request headers.
        verbose: Whether to run the chain in verbose mode.
        llm_chain_kwargs: LLM chain additional keyword arguments.
        **kwargs: Additional keyword arguments to pass to the chain.

    Raises:
        ImportError: if ``langchain-community`` is not installed.
        ValueError: if ``spec`` cannot be parsed or no ``llm`` is provided.
    """  # noqa: E501
    try:
        from langchain_community.utilities.openapi import OpenAPISpec
    except ImportError as e:
        msg = (
            "Could not import langchain_community.utilities.openapi. "
            "Please install it with `pip install langchain-community`."
        )
        raise ImportError(msg) from e
    if isinstance(spec, str):
        # Try progressively more permissive parsers; keep the first success.
        for conversion in (
            OpenAPISpec.from_url,
            OpenAPISpec.from_file,
            OpenAPISpec.from_text,
        ):
            try:
                spec = conversion(spec)
                break
            except ImportError:
                raise
            except Exception:  # noqa: BLE001
                _logger.debug(
                    "Parse spec failed for OpenAPISpec.%s",
                    conversion.__name__,
                    exc_info=True,
                )
        if isinstance(spec, str):
            # Still a string means every parser failed.
            msg = f"Unable to parse spec from source {spec}"
            raise ValueError(msg)  # noqa: TRY004
    openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
    if not llm:
        # Fixed typo: missing space after the period ("chain.For example").
        msg = (
            "Must provide an LLM for this chain. For example,\n"
            "from langchain_openai import ChatOpenAI\n"
            "model = ChatOpenAI()\n"
        )
        raise ValueError(msg)
    prompt = prompt or ChatPromptTemplate.from_template(
        "Use the provided API's to respond to this user query:\n\n{query}",
    )
    # First step: the LLM selects an endpoint and emits a function call.
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs={"functions": openai_fns},
        output_parser=JsonOutputFunctionsParser(args_only=False),
        output_key="function",
        verbose=verbose,
        **(llm_chain_kwargs or {}),
    )
    # Second step: the selected function call is executed as an HTTP request.
    request_chain = request_chain or SimpleRequestChain(
        request_method=lambda name, args: call_api_fn(
            name,
            args,
            headers=headers,
            params=params,
        ),
        verbose=verbose,
    )
    return SequentialChain(
        chains=[llm_chain, request_chain],
        input_variables=llm_chain.input_keys,
        output_variables=["response"],
        verbose=verbose,
        **kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/openai_functions/openapi.py",
"license": "MIT License",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/qa_generation/base.py | from __future__ import annotations
import json
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from typing_extensions import override
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
    since="0.2.7",
    alternative=(
        "example in API reference with more detail: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
    ),
    removal="1.0",
)
class QAGenerationChain(Chain):
    """Base class for question-answer generation chains.

    This class is deprecated. See below for an alternative implementation.

    Advantages of this implementation include:

    - Supports async and streaming;
    - Surfaces prompt and text splitter for easier customization;
    - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
      as well as robustness to markdown.

    ```python
    from langchain_classic.chains.qa_generation.prompt import (
        CHAT_PROMPT as prompt,
    )

    # Note: import PROMPT if using a legacy non-chat model.
    from langchain_core.output_parsers import JsonOutputParser
    from langchain_core.runnables import (
        RunnableLambda,
        RunnableParallel,
        RunnablePassthrough,
    )
    from langchain_core.runnables.base import RunnableEach
    from langchain_openai import ChatOpenAI
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    model = ChatOpenAI()
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
    split_text = RunnableLambda(lambda x: text_splitter.create_documents([x]))

    chain = RunnableParallel(
        text=RunnablePassthrough(),
        questions=(
            split_text | RunnableEach(bound=prompt | model | JsonOutputParser())
        ),
    )
    ```
    """

    llm_chain: LLMChain
    """LLM Chain that generates responses from user input and context."""
    # default_factory so each chain instance gets its own splitter rather than
    # every instance silently sharing one mutable default object.
    text_splitter: TextSplitter = Field(
        default_factory=lambda: RecursiveCharacterTextSplitter(chunk_overlap=500),
    )
    """Text splitter that splits the input into chunks."""
    input_key: str = "text"
    """Key of the input to the chain."""
    output_key: str = "questions"
    """Key of the output of the chain."""
    # NOTE(review): `k` is not consulted anywhere in this chain's visible
    # logic — confirm whether anything reads it before relying on it.
    k: int | None = None
    """Number of questions to generate."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate | None = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        """Create a QAGenerationChain from a language model.

        Args:
            llm: a language model
            prompt: a prompt template
            **kwargs: additional arguments

        Returns:
            a QAGenerationChain class
        """
        # Pick a prompt suited to the model type (chat vs. completion) when
        # the caller does not supply one.
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError

    @property
    @override
    def input_keys(self) -> list[str]:
        return [self.input_key]

    @property
    @override
    def output_keys(self) -> list[str]:
        return [self.output_key]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, list]:
        """Split the input text and generate one QA pair per chunk.

        Each generation is expected to be a JSON object (see the prompt),
        which is parsed into a dict before being returned.
        """
        docs = self.text_splitter.create_documents([inputs[self.input_key]])
        results = self.llm_chain.generate(
            [{"text": d.page_content} for d in docs],
            run_manager=run_manager,
        )
        qa = [json.loads(res[0].text) for res in results.generations]
        return {self.output_key: qa}
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/qa_generation/base.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/qa_generation/prompt.py | from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain_classic.chains.prompt_selector import (
ConditionalPromptSelector,
is_chat_model,
)
templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
""" # noqa: E501
templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}""" # noqa: E501
CHAT_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(templ1),
HumanMessagePromptTemplate.from_template(templ2),
]
)
templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}""" # noqa: E501
PROMPT = PromptTemplate.from_template(templ)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/qa_generation/prompt.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/qa_with_sources/base.py | """Question answering with sources over documents."""
from __future__ import annotations
import inspect
import re
from abc import ABC, abstractmethod
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import ConfigDict, model_validator
from typing_extensions import override
from langchain_classic.chains import ReduceDocumentsChain
from langchain_classic.chains.base import Chain
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain_classic.chains.qa_with_sources.map_reduce_prompt import (
COMBINE_PROMPT,
EXAMPLE_PROMPT,
QUESTION_PROMPT,
)
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This class is deprecated. Refer to this guide on retrieval and question "
        "answering with sources: "
        "https://python.langchain.com/docs/how_to/qa_sources/"
    ),
)
class BaseQAWithSourcesChain(Chain, ABC):
    """Question answering chain with sources over documents."""

    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine documents."""
    # Input key carrying the user's question.
    question_key: str = "question"
    # Input key carrying the documents (used by subclasses that accept docs).
    input_docs_key: str = "docs"
    # Output keys for the answer text and the extracted "SOURCES" trailer.
    answer_key: str = "answer"
    sources_answer_key: str = "sources"
    return_source_documents: bool = False
    """Return the source documents."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
        question_prompt: BasePromptTemplate = QUESTION_PROMPT,
        combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Construct the chain from an LLM."""
        # Map step: answer the question against each document individually.
        llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
        # Reduce step: stuff the per-document answers ("summaries") into one
        # prompt and combine them into a final answer.
        llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
        combine_results_chain = StuffDocumentsChain(
            llm_chain=llm_combine_chain,
            document_prompt=document_prompt,
            document_variable_name="summaries",
        )
        reduce_documents_chain = ReduceDocumentsChain(
            combine_documents_chain=combine_results_chain,
        )
        combine_documents_chain = MapReduceDocumentsChain(
            llm_chain=llm_question_chain,
            reduce_documents_chain=reduce_documents_chain,
            document_variable_name="context",
        )
        return cls(
            combine_documents_chain=combine_documents_chain,
            **kwargs,
        )

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: dict | None = None,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Load chain from chain type ("stuff", "map_reduce", "refine", "map_rerank")."""
        _chain_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_with_sources_chain(
            llm,
            chain_type=chain_type,
            **_chain_kwargs,
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> list[str]:
        """Expect input key."""
        return [self.question_key]

    @property
    def output_keys(self) -> list[str]:
        """Return output key."""
        _output_keys = [self.answer_key, self.sources_answer_key]
        if self.return_source_documents:
            _output_keys = [*_output_keys, "source_documents"]
        return _output_keys

    @model_validator(mode="before")
    @classmethod
    def validate_naming(cls, values: dict) -> Any:
        """Fix backwards compatibility in naming."""
        # "combine_document_chain" (singular) is the legacy field name; remap
        # it onto the current field so old configs keep working.
        if "combine_document_chain" in values:
            values["combine_documents_chain"] = values.pop("combine_document_chain")
        return values

    def _split_sources(self, answer: str) -> tuple[str, str]:
        """Split sources from answer.

        Peels a trailing "SOURCE(S):"/"QUESTION:" section off the answer
        (case-insensitive); only the first line of that section is kept as
        the sources string. Returns ``(answer, sources)``.
        """
        if re.search(r"SOURCES?:", answer, re.IGNORECASE):
            answer, sources = re.split(
                r"SOURCES?:|QUESTION:\s",
                answer,
                flags=re.IGNORECASE,
            )[:2]
            sources = re.split(r"\n", sources)[0].strip()
        else:
            sources = ""
        return answer, sources

    @abstractmethod
    def _get_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs to run questioning over."""

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        # Backwards compatibility: only pass run_manager to subclasses whose
        # _get_docs override actually declares that parameter.
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._get_docs).parameters
        )
        if accepts_run_manager:
            docs = self._get_docs(inputs, run_manager=_run_manager)
        else:
            docs = self._get_docs(inputs)  # type: ignore[call-arg]
        answer = self.combine_documents_chain.run(
            input_documents=docs,
            callbacks=_run_manager.get_child(),
            **inputs,
        )
        answer, sources = self._split_sources(answer)
        result: dict[str, Any] = {
            self.answer_key: answer,
            self.sources_answer_key: sources,
        }
        if self.return_source_documents:
            result["source_documents"] = docs
        return result

    @abstractmethod
    async def _aget_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs to run questioning over."""

    async def _acall(
        self,
        inputs: dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        # Async mirror of _call.
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._aget_docs).parameters
        )
        if accepts_run_manager:
            docs = await self._aget_docs(inputs, run_manager=_run_manager)
        else:
            docs = await self._aget_docs(inputs)  # type: ignore[call-arg]
        answer = await self.combine_documents_chain.arun(
            input_documents=docs,
            callbacks=_run_manager.get_child(),
            **inputs,
        )
        answer, sources = self._split_sources(answer)
        result: dict[str, Any] = {
            self.answer_key: answer,
            self.sources_answer_key: sources,
        }
        if self.return_source_documents:
            result["source_documents"] = docs
        return result
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This class is deprecated. Refer to this guide on retrieval and question "
        "answering with sources: "
        "https://python.langchain.com/docs/how_to/qa_sources/"
    ),
)
class QAWithSourcesChain(BaseQAWithSourcesChain):
    """Question answering with sources over documents."""

    # Key under which the caller supplies the documents to answer over.
    input_docs_key: str = "docs"

    @property
    def input_keys(self) -> list[str]:
        """Expect input key."""
        return [self.input_docs_key, self.question_key]

    @override
    def _get_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs to run questioning over."""
        # pop() so the docs are removed from `inputs` and are not also
        # forwarded to the combine chain as an extra prompt variable.
        return inputs.pop(self.input_docs_key)

    @override
    async def _aget_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs to run questioning over."""
        return inputs.pop(self.input_docs_key)

    @property
    def _chain_type(self) -> str:
        return "qa_with_sources_chain"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/qa_with_sources/base.py",
"license": "MIT License",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/qa_with_sources/loading.py | """Load question answering with sources chains."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Protocol
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain_classic.chains.combine_documents.map_rerank import (
MapRerankDocumentsChain,
)
from langchain_classic.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain_classic.chains.combine_documents.refine import RefineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.qa_with_sources import (
map_reduce_prompt,
refine_prompts,
stuff_prompt,
)
from langchain_classic.chains.question_answering.map_rerank_prompt import (
PROMPT as MAP_RERANK_PROMPT,
)
class LoadingCallable(Protocol):
    """Interface for loading the combine documents chain."""

    def __call__(
        self,
        llm: BaseLanguageModel,
        **kwargs: Any,
    ) -> BaseCombineDocumentsChain:
        """Callable to load the combine documents chain."""
        # Structural type only: each _load_* function below matches this
        # signature and is dispatched by chain_type.
def _load_map_rerank_chain(
    llm: BaseLanguageModel,
    *,
    prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
    verbose: bool = False,
    document_variable_name: str = "context",
    rank_key: str = "score",
    answer_key: str = "answer",
    **kwargs: Any,
) -> MapRerankDocumentsChain:
    # Map-rerank strategy: score an answer per document, then surface the
    # highest-ranked one.
    scoring_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
    return MapRerankDocumentsChain(
        llm_chain=scoring_chain,
        document_variable_name=document_variable_name,
        rank_key=rank_key,
        answer_key=answer_key,
        **kwargs,
    )
def _load_stuff_chain(
    llm: BaseLanguageModel,
    *,
    prompt: BasePromptTemplate = stuff_prompt.PROMPT,
    document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT,
    document_variable_name: str = "summaries",
    verbose: bool | None = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    # "Stuff" strategy: concatenate all documents into a single prompt and
    # answer in one LLM call.
    answer_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
    return StuffDocumentsChain(
        llm_chain=answer_chain,
        document_prompt=document_prompt,
        document_variable_name=document_variable_name,
        verbose=verbose,
        **kwargs,
    )
def _load_map_reduce_chain(
    llm: BaseLanguageModel,
    *,
    question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
    combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
    document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
    combine_document_variable_name: str = "summaries",
    map_reduce_document_variable_name: str = "context",
    collapse_prompt: BasePromptTemplate | None = None,
    reduce_llm: BaseLanguageModel | None = None,
    collapse_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    token_max: int = 3000,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    # Map step: ask the question of each document separately.
    mapper = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
    # Reduce step: merge the per-document answers, optionally with its own LLM.
    reducer = LLMChain(llm=reduce_llm or llm, prompt=combine_prompt, verbose=verbose)
    merge_chain = StuffDocumentsChain(
        llm_chain=reducer,
        document_variable_name=combine_document_variable_name,
        document_prompt=document_prompt,
        verbose=verbose,
    )
    collapse_chain = None
    if collapse_prompt is not None:
        # Optional intermediate collapse pass, applied when the mapped results
        # exceed token_max.
        collapse_chain = StuffDocumentsChain(
            llm_chain=LLMChain(
                llm=collapse_llm or llm,
                prompt=collapse_prompt,
                verbose=verbose,
            ),
            document_variable_name=combine_document_variable_name,
            document_prompt=document_prompt,
        )
    elif collapse_llm is not None:
        msg = (
            "collapse_llm provided, but collapse_prompt was not: please "
            "provide one or stop providing collapse_llm."
        )
        raise ValueError(msg)
    reduce_documents_chain = ReduceDocumentsChain(
        combine_documents_chain=merge_chain,
        collapse_documents_chain=collapse_chain,
        token_max=token_max,
        verbose=verbose,
    )
    return MapReduceDocumentsChain(
        llm_chain=mapper,
        reduce_documents_chain=reduce_documents_chain,
        document_variable_name=map_reduce_document_variable_name,
        verbose=verbose,
        **kwargs,
    )
def _load_refine_chain(
    llm: BaseLanguageModel,
    *,
    question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
    refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
    document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
    document_variable_name: str = "context_str",
    initial_response_name: str = "existing_answer",
    refine_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    **kwargs: Any,
) -> RefineDocumentsChain:
    # Refine strategy: draft an answer from the first document, then revise it
    # once per remaining document (optionally with a dedicated refine LLM).
    first_pass = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
    revision = LLMChain(llm=refine_llm or llm, prompt=refine_prompt, verbose=verbose)
    return RefineDocumentsChain(
        initial_llm_chain=first_pass,
        refine_llm_chain=revision,
        document_variable_name=document_variable_name,
        initial_response_name=initial_response_name,
        document_prompt=document_prompt,
        verbose=verbose,
        **kwargs,
    )
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This function is deprecated. Refer to this guide on retrieval and question "
        "answering with sources: "
        "https://python.langchain.com/docs/how_to/qa_sources/"
        "\nSee also the following migration guides for replacements "
        "based on `chain_type`:\n"
        "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
        "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
        "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
        "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
    ),
)
def load_qa_with_sources_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    verbose: bool | None = None,  # noqa: FBT001
    **kwargs: Any,
) -> BaseCombineDocumentsChain:
    """Load a question answering with sources chain.

    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", "refine" and "map_rerank".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        **kwargs: Additional keyword arguments.

    Returns:
        A chain to use for question answering with sources.

    Raises:
        ValueError: If ``chain_type`` is not one of the supported types.
    """
    loader_mapping: Mapping[str, LoadingCallable] = {
        "stuff": _load_stuff_chain,
        "map_reduce": _load_map_reduce_chain,
        "refine": _load_refine_chain,
        "map_rerank": _load_map_rerank_chain,
    }
    if chain_type not in loader_mapping:
        # list(...) so the error shows the plain chain-type names instead of
        # the noisy dict_keys([...]) repr.
        msg = (
            f"Got unsupported chain type: {chain_type}. "
            f"Should be one of {list(loader_mapping)}"
        )
        raise ValueError(msg)
    _func: LoadingCallable = loader_mapping[chain_type]
    return _func(llm, verbose=verbose, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/qa_with_sources/loading.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/qa_with_sources/retrieval.py | """Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
    """Question-answering with sources over an index."""

    retriever: BaseRetriever = Field(exclude=True)
    """Index to connect to."""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit"""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""

    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
        # Drop documents from the tail of the retrieved list until the total
        # token count fits within max_tokens_limit.  Only applies when
        # reduce_k_below_max_tokens is set AND the combine chain is a
        # StuffDocumentsChain (the strategy that places every document into a
        # single prompt); otherwise docs are returned unchanged.
        num_docs = len(docs)
        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain,
            StuffDocumentsChain,
        ):
            tokens = [
                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]
        return docs[:num_docs]

    def _get_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        # Retrieve documents for the question, propagating callbacks so the
        # retriever run is traced as a child of this chain.
        question = inputs[self.question_key]
        docs = self.retriever.invoke(
            question,
            config={"callbacks": run_manager.get_child()},
        )
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        # Async counterpart of _get_docs.
        question = inputs[self.question_key]
        docs = await self.retriever.ainvoke(
            question,
            config={"callbacks": run_manager.get_child()},
        )
        return self._reduce_tokens_below_limit(docs)

    @property
    def _chain_type(self) -> str:
        """Return the chain type."""
        return "retrieval_qa_with_sources_chain"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/qa_with_sources/retrieval.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/query_constructor/base.py | """LLM Chain for turning a user text query into a structured query."""
from __future__ import annotations
import json
from collections.abc import Callable, Sequence
from typing import Any, cast
from langchain_core._api import deprecated
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.output_parsers.json import parse_and_check_json_markdown
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.structured_query import (
Comparator,
Comparison,
FilterDirective,
Operation,
Operator,
StructuredQuery,
)
from typing_extensions import override
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.query_constructor.parser import get_parser
from langchain_classic.chains.query_constructor.prompt import (
DEFAULT_EXAMPLES,
DEFAULT_PREFIX,
DEFAULT_SCHEMA_PROMPT,
DEFAULT_SUFFIX,
EXAMPLE_PROMPT,
EXAMPLES_WITH_LIMIT,
PREFIX_WITH_DATA_SOURCE,
SCHEMA_WITH_LIMIT_PROMPT,
SUFFIX_WITHOUT_DATA_SOURCE,
USER_SPECIFIED_EXAMPLE_PROMPT,
)
from langchain_classic.chains.query_constructor.schema import AttributeInfo
class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
    """Output parser that parses a structured query."""

    ast_parse: Callable
    """Callable that parses dict into internal representation of query language."""

    @override
    def parse(self, text: str) -> StructuredQuery:
        """Parse LLM output (JSON in a markdown block) into a StructuredQuery.

        Raises:
            OutputParserException: if the text cannot be parsed.
        """
        try:
            expected_keys = ["query", "filter"]
            allowed_keys = ["query", "filter", "limit"]
            parsed = parse_and_check_json_markdown(text, expected_keys)
            # Normalize an empty/None query to a single space placeholder.
            if parsed["query"] is None or len(parsed["query"]) == 0:
                parsed["query"] = " "
            if parsed["filter"] == "NO_FILTER" or not parsed["filter"]:
                parsed["filter"] = None
            else:
                # Parse the filter expression string into FilterDirective AST.
                parsed["filter"] = self.ast_parse(parsed["filter"])
            # Drop falsy limits (None/0) so StructuredQuery uses its default.
            if not parsed.get("limit"):
                parsed.pop("limit", None)
            return StructuredQuery(
                **{k: v for k, v in parsed.items() if k in allowed_keys},
            )
        except Exception as e:
            # Wrap any failure so callers only need to handle one type.
            msg = f"Parsing text\n{text}\n raised following error:\n{e}"
            raise OutputParserException(msg) from e

    @classmethod
    def from_components(
        cls,
        allowed_comparators: Sequence[Comparator] | None = None,
        allowed_operators: Sequence[Operator] | None = None,
        allowed_attributes: Sequence[str] | None = None,
        fix_invalid: bool = False,  # noqa: FBT001,FBT002
    ) -> StructuredQueryOutputParser:
        """Create a structured query output parser from components.

        Args:
            allowed_comparators: allowed comparators
            allowed_operators: allowed operators
            allowed_attributes: allowed attributes
            fix_invalid: whether to fix invalid filter directives

        Returns:
            a structured query output parser
        """
        ast_parse: Callable
        if fix_invalid:
            # Permissive mode: parse with an unrestricted grammar, then strip
            # out any directives using disallowed comparators/operators/
            # attributes instead of erroring.
            def ast_parse(raw_filter: str) -> FilterDirective | None:
                filter_directive = cast(
                    "FilterDirective | None",
                    get_parser().parse(raw_filter),
                )
                return fix_filter_directive(
                    filter_directive,
                    allowed_comparators=allowed_comparators,
                    allowed_operators=allowed_operators,
                    allowed_attributes=allowed_attributes,
                )

        else:
            # Strict mode: the parser itself rejects disallowed constructs.
            ast_parse = get_parser(
                allowed_comparators=allowed_comparators,
                allowed_operators=allowed_operators,
                allowed_attributes=allowed_attributes,
            ).parse
        return cls(ast_parse=ast_parse)
def fix_filter_directive(
    filter: FilterDirective | None,  # noqa: A002
    *,
    allowed_comparators: Sequence[Comparator] | None = None,
    allowed_operators: Sequence[Operator] | None = None,
    allowed_attributes: Sequence[str] | None = None,
) -> FilterDirective | None:
    """Fix invalid filter directive.

    Prunes comparisons and operations that use disallowed comparators,
    operators, or attributes, collapsing degenerate nodes along the way.

    Args:
        filter: Filter directive to fix.
        allowed_comparators: allowed comparators. Defaults to all comparators.
        allowed_operators: allowed operators. Defaults to all operators.
        allowed_attributes: allowed attributes. Defaults to all attributes.

    Returns:
        Fixed filter directive.
    """
    has_constraints = bool(
        allowed_comparators or allowed_operators or allowed_attributes
    )
    # Nothing to enforce, or nothing to check: pass through untouched.
    if not has_constraints or not filter:
        return filter
    if isinstance(filter, Comparison):
        comparator_ok = (
            not allowed_comparators or filter.comparator in allowed_comparators
        )
        attribute_ok = (
            not allowed_attributes or filter.attribute in allowed_attributes
        )
        return filter if comparator_ok and attribute_ok else None
    if isinstance(filter, Operation):
        if allowed_operators and filter.operator not in allowed_operators:
            return None
        fixed_args = []
        for arg in filter.arguments:
            if arg is None:
                continue
            fixed_args.append(
                cast(
                    "FilterDirective",
                    fix_filter_directive(
                        arg,
                        allowed_comparators=allowed_comparators,
                        allowed_operators=allowed_operators,
                        allowed_attributes=allowed_attributes,
                    ),
                ),
            )
        if not fixed_args:
            return None
        # A one-armed AND/OR is equivalent to its single argument.
        if len(fixed_args) == 1 and filter.operator in (Operator.AND, Operator.OR):
            return fixed_args[0]
        return Operation(
            operator=filter.operator,
            arguments=fixed_args,
        )
    return filter
def _format_attribute_info(info: Sequence[AttributeInfo | dict]) -> str:
info_dicts = {}
for i in info:
i_dict = dict(i)
info_dicts[i_dict.pop("name")] = i_dict
return json.dumps(info_dicts, indent=4).replace("{", "{{").replace("}", "}}")
def construct_examples(input_output_pairs: Sequence[tuple[str, dict]]) -> list[dict]:
    """Construct examples from input-output pairs.

    Args:
        input_output_pairs: Sequence of input-output pairs.

    Returns:
        List of examples.
    """
    formatted: list[dict] = []
    for index, (user_query, output) in enumerate(input_output_pairs, start=1):
        # Escape braces so the JSON survives prompt-template formatting.
        request_json = json.dumps(output, indent=4)
        request_json = request_json.replace("{", "{{").replace("}", "}}")
        formatted.append(
            {
                "i": index,
                "user_query": user_query,
                "structured_request": request_json,
            },
        )
    return formatted
def get_query_constructor_prompt(
    document_contents: str,
    attribute_info: Sequence[AttributeInfo | dict],
    *,
    examples: Sequence | None = None,
    allowed_comparators: Sequence[Comparator] = tuple(Comparator),
    allowed_operators: Sequence[Operator] = tuple(Operator),
    enable_limit: bool = False,
    schema_prompt: BasePromptTemplate | None = None,
    **kwargs: Any,
) -> BasePromptTemplate:
    """Create query construction prompt.

    Args:
        document_contents: The contents of the document to be queried.
        attribute_info: A list of AttributeInfo objects describing
            the attributes of the document.
        examples: Optional list of examples to use for the chain. May be either
            pre-formatted example dicts or raw ``(input, output)`` tuples.
        allowed_comparators: Sequence of allowed comparators.
        allowed_operators: Sequence of allowed operators.
        enable_limit: Whether to enable the limit operator.
        schema_prompt: Prompt for describing query schema. Should have string input
            variables allowed_comparators and allowed_operators.
        kwargs: Additional named params to pass to FewShotPromptTemplate init.

    Returns:
        A prompt template that can be used to construct queries.
    """
    # Pick the schema description matching whether `limit` is permitted.
    default_schema_prompt = (
        SCHEMA_WITH_LIMIT_PROMPT if enable_limit else DEFAULT_SCHEMA_PROMPT
    )
    schema_prompt = schema_prompt or default_schema_prompt
    attribute_str = _format_attribute_info(attribute_info)
    schema = schema_prompt.format(
        allowed_comparators=" | ".join(allowed_comparators),
        allowed_operators=" | ".join(allowed_operators),
    )
    # Raw (input, output) tuples are formatted here and use the prompt variant
    # for user-specified examples (data source described in prefix, not suffix).
    if examples and isinstance(examples[0], tuple):
        examples = construct_examples(examples)
        example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT
        prefix = PREFIX_WITH_DATA_SOURCE.format(
            schema=schema,
            content=document_contents,
            attributes=attribute_str,
        )
        suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1)
    else:
        # Fall back to the built-in examples when none are supplied.
        examples = examples or (
            EXAMPLES_WITH_LIMIT if enable_limit else DEFAULT_EXAMPLES
        )
        example_prompt = EXAMPLE_PROMPT
        prefix = DEFAULT_PREFIX.format(schema=schema)
        suffix = DEFAULT_SUFFIX.format(
            i=len(examples) + 1,
            content=document_contents,
            attributes=attribute_str,
        )
    return FewShotPromptTemplate(
        examples=list(examples),
        example_prompt=example_prompt,
        input_variables=["query"],
        suffix=suffix,
        prefix=prefix,
        **kwargs,
    )
@deprecated(
    since="0.2.13",
    alternative="load_query_constructor_runnable",
    removal="1.0",
)
def load_query_constructor_chain(
    llm: BaseLanguageModel,
    document_contents: str,
    attribute_info: Sequence[AttributeInfo | dict],
    examples: list | None = None,
    allowed_comparators: Sequence[Comparator] = tuple(Comparator),
    allowed_operators: Sequence[Operator] = tuple(Operator),
    enable_limit: bool = False,  # noqa: FBT001,FBT002
    schema_prompt: BasePromptTemplate | None = None,
    **kwargs: Any,
) -> LLMChain:
    """Load a query constructor chain.

    Args:
        llm: BaseLanguageModel to use for the chain.
        document_contents: The contents of the document to be queried.
        attribute_info: Sequence of attributes in the document.
        examples: Optional list of examples to use for the chain.
        allowed_comparators: Sequence of allowed comparators. Defaults to all
            `Comparator` objects.
        allowed_operators: Sequence of allowed operators. Defaults to all
            `Operator` objects.
        enable_limit: Whether to enable the limit operator.
        schema_prompt: Prompt for describing query schema. Should have string input
            variables allowed_comparators and allowed_operators.
        **kwargs: Arbitrary named params to pass to LLMChain.

    Returns:
        A LLMChain that can be used to construct queries.
    """
    constructor_prompt = get_query_constructor_prompt(
        document_contents,
        attribute_info,
        examples=examples,
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        enable_limit=enable_limit,
        schema_prompt=schema_prompt,
    )
    attribute_names = []
    for info in attribute_info:
        if isinstance(info, AttributeInfo):
            attribute_names.append(info.name)
        else:
            attribute_names.append(info["name"])
    parser = StructuredQueryOutputParser.from_components(
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        allowed_attributes=attribute_names,
    )
    # Also attach the parser to the prompt, for backwards compatibility.
    constructor_prompt.output_parser = parser
    return LLMChain(
        llm=llm,
        prompt=constructor_prompt,
        output_parser=parser,
        **kwargs,
    )
def load_query_constructor_runnable(
    llm: BaseLanguageModel,
    document_contents: str,
    attribute_info: Sequence[AttributeInfo | dict],
    *,
    examples: Sequence | None = None,
    allowed_comparators: Sequence[Comparator] = tuple(Comparator),
    allowed_operators: Sequence[Operator] = tuple(Operator),
    enable_limit: bool = False,
    schema_prompt: BasePromptTemplate | None = None,
    fix_invalid: bool = False,
    **kwargs: Any,
) -> Runnable:
    """Load a query constructor runnable chain.

    Args:
        llm: BaseLanguageModel to use for the chain.
        document_contents: Description of the page contents of the document to be
            queried.
        attribute_info: Sequence of attributes in the document.
        examples: Optional list of examples to use for the chain.
        allowed_comparators: Sequence of allowed comparators. Defaults to all
            `Comparator` objects.
        allowed_operators: Sequence of allowed operators. Defaults to all
            `Operator` objects.
        enable_limit: Whether to enable the limit operator.
        schema_prompt: Prompt for describing query schema. Should have string input
            variables allowed_comparators and allowed_operators.
        fix_invalid: Whether to fix invalid filter directives by ignoring invalid
            operators, comparators and attributes.
        kwargs: Additional named params to pass to FewShotPromptTemplate init.

    Returns:
        A Runnable that can be used to construct queries.
    """
    constructor_prompt = get_query_constructor_prompt(
        document_contents,
        attribute_info,
        examples=examples,
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        enable_limit=enable_limit,
        schema_prompt=schema_prompt,
        **kwargs,
    )
    attribute_names = []
    for info in attribute_info:
        if isinstance(info, AttributeInfo):
            attribute_names.append(info.name)
        else:
            attribute_names.append(info["name"])
    parser = StructuredQueryOutputParser.from_components(
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        allowed_attributes=attribute_names,
        fix_invalid=fix_invalid,
    )
    # Compose prompt -> model -> parser into a single runnable.
    return constructor_prompt | llm | parser
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/query_constructor/base.py",
"license": "MIT License",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/question_answering/chain.py | """Load question answering chains."""
from collections.abc import Mapping
from typing import Any, Protocol
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_classic.chains import ReduceDocumentsChain
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain_classic.chains.combine_documents.map_rerank import (
MapRerankDocumentsChain,
)
from langchain_classic.chains.combine_documents.refine import RefineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.question_answering import (
map_reduce_prompt,
refine_prompts,
stuff_prompt,
)
from langchain_classic.chains.question_answering.map_rerank_prompt import (
PROMPT as MAP_RERANK_PROMPT,
)
# Structural (duck) type implemented by the _load_*_chain factories in this file.
class LoadingCallable(Protocol):
    """Interface for loading the combine documents chain."""
    def __call__(
        self,
        llm: BaseLanguageModel,
        **kwargs: Any,
    ) -> BaseCombineDocumentsChain:
        """Callable to load the combine documents chain."""
def _load_map_rerank_chain(
    llm: BaseLanguageModel,
    *,
    prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
    verbose: bool = False,
    document_variable_name: str = "context",
    rank_key: str = "score",
    answer_key: str = "answer",
    callback_manager: BaseCallbackManager | None = None,
    callbacks: Callbacks = None,
    **kwargs: Any,
) -> MapRerankDocumentsChain:
    """Build a map-rerank QA chain: one scored answer per document, best kept."""
    scoring_chain = LLMChain(
        prompt=prompt,
        llm=llm,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
    )
    return MapRerankDocumentsChain(
        llm_chain=scoring_chain,
        document_variable_name=document_variable_name,
        rank_key=rank_key,
        answer_key=answer_key,
        callback_manager=callback_manager,
        verbose=verbose,
        **kwargs,
    )
def _load_stuff_chain(
    llm: BaseLanguageModel,
    *,
    prompt: BasePromptTemplate | None = None,
    document_variable_name: str = "context",
    verbose: bool | None = None,
    callback_manager: BaseCallbackManager | None = None,
    callbacks: Callbacks = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    """Build a "stuff" QA chain: all documents go into a single prompt."""
    resolved_prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
    answer_chain = LLMChain(
        prompt=resolved_prompt,
        llm=llm,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
    )
    # TODO: expose a document_prompt parameter.
    return StuffDocumentsChain(
        llm_chain=answer_chain,
        document_variable_name=document_variable_name,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
        **kwargs,
    )
def _load_map_reduce_chain(
    llm: BaseLanguageModel,
    *,
    question_prompt: BasePromptTemplate | None = None,
    combine_prompt: BasePromptTemplate | None = None,
    combine_document_variable_name: str = "summaries",
    map_reduce_document_variable_name: str = "context",
    collapse_prompt: BasePromptTemplate | None = None,
    reduce_llm: BaseLanguageModel | None = None,
    collapse_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    callback_manager: BaseCallbackManager | None = None,
    callbacks: Callbacks = None,
    token_max: int = 3000,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    """Build a map-reduce QA chain.

    Each document is answered individually (map step), then the per-document
    answers are combined into a final answer (reduce step), optionally
    collapsing intermediate results that exceed ``token_max``.
    """
    _question_prompt = (
        question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
    )
    _combine_prompt = (
        combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm)
    )
    # Map step: run the question prompt against each document.
    map_chain = LLMChain(
        llm=llm,
        prompt=_question_prompt,
        verbose=verbose,
        callback_manager=callback_manager,
        callbacks=callbacks,
    )
    # Reduce step may use a dedicated LLM; fall back to the map LLM otherwise.
    _reduce_llm = reduce_llm or llm
    reduce_chain = LLMChain(
        llm=_reduce_llm,
        prompt=_combine_prompt,
        verbose=verbose,
        callback_manager=callback_manager,
        callbacks=callbacks,
    )
    # TODO: document prompt
    combine_documents_chain = StuffDocumentsChain(
        llm_chain=reduce_chain,
        document_variable_name=combine_document_variable_name,
        verbose=verbose,
        callback_manager=callback_manager,
        callbacks=callbacks,
    )
    if collapse_prompt is None:
        collapse_chain = None
        # A collapse LLM without a collapse prompt is a misconfiguration, so
        # fail loudly instead of silently ignoring the provided LLM.
        if collapse_llm is not None:
            msg = (
                "collapse_llm provided, but collapse_prompt was not: please "
                "provide one or stop providing collapse_llm."
            )
            raise ValueError(msg)
    else:
        _collapse_llm = collapse_llm or llm
        collapse_chain = StuffDocumentsChain(
            llm_chain=LLMChain(
                llm=_collapse_llm,
                prompt=collapse_prompt,
                verbose=verbose,
                callback_manager=callback_manager,
                callbacks=callbacks,
            ),
            document_variable_name=combine_document_variable_name,
            verbose=verbose,
            callback_manager=callback_manager,
        )
    reduce_documents_chain = ReduceDocumentsChain(
        combine_documents_chain=combine_documents_chain,
        collapse_documents_chain=collapse_chain,
        token_max=token_max,
        verbose=verbose,
    )
    return MapReduceDocumentsChain(
        llm_chain=map_chain,
        document_variable_name=map_reduce_document_variable_name,
        reduce_documents_chain=reduce_documents_chain,
        verbose=verbose,
        callback_manager=callback_manager,
        callbacks=callbacks,
        **kwargs,
    )
def _load_refine_chain(
    llm: BaseLanguageModel,
    *,
    question_prompt: BasePromptTemplate | None = None,
    refine_prompt: BasePromptTemplate | None = None,
    document_variable_name: str = "context_str",
    initial_response_name: str = "existing_answer",
    refine_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    callback_manager: BaseCallbackManager | None = None,
    callbacks: Callbacks = None,
    **kwargs: Any,
) -> RefineDocumentsChain:
    """Build a refine QA chain: answer on the first doc, then iteratively refine."""
    resolved_question_prompt = (
        question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
    )
    resolved_refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt(
        llm,
    )
    first_pass_chain = LLMChain(
        prompt=resolved_question_prompt,
        llm=llm,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
    )
    # Refinement may use a dedicated LLM; fall back to the main one otherwise.
    refinement_llm = refine_llm or llm
    refinement_chain = LLMChain(
        prompt=resolved_refine_prompt,
        llm=refinement_llm,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
    )
    return RefineDocumentsChain(
        initial_llm_chain=first_pass_chain,
        refine_llm_chain=refinement_chain,
        document_variable_name=document_variable_name,
        initial_response_name=initial_response_name,
        callbacks=callbacks,
        callback_manager=callback_manager,
        verbose=verbose,
        **kwargs,
    )
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This class is deprecated. See the following migration guides for replacements "
        "based on `chain_type`:\n"
        "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
        "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
        "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
        "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
        "\nSee also guides on retrieval and question-answering here: "
        "https://python.langchain.com/docs/how_to/#qa-with-rag"
    ),
)
def load_qa_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    verbose: bool | None = None,  # noqa: FBT001
    callback_manager: BaseCallbackManager | None = None,
    **kwargs: Any,
) -> BaseCombineDocumentsChain:
    """Load question answering chain.

    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", "map_rerank", and "refine".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        callback_manager: Callback manager to use for the chain.
        **kwargs: Additional keyword arguments.

    Returns:
        A chain to use for question answering.

    Raises:
        ValueError: If `chain_type` is not one of the supported types.
    """
    # Dispatch table from chain_type name to its factory function.
    chain_loaders: Mapping[str, LoadingCallable] = {
        "stuff": _load_stuff_chain,
        "map_reduce": _load_map_reduce_chain,
        "refine": _load_refine_chain,
        "map_rerank": _load_map_rerank_chain,
    }
    loader = chain_loaders.get(chain_type)
    if loader is None:
        msg = (
            f"Got unsupported chain type: {chain_type}. "
            f"Should be one of {chain_loaders.keys()}"
        )
        raise ValueError(msg)
    return loader(
        llm,
        verbose=verbose,
        callback_manager=callback_manager,
        **kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/question_answering/chain.py",
"license": "MIT License",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/retrieval.py | from __future__ import annotations
from typing import Any
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
    retriever: BaseRetriever | Runnable[dict, RetrieverOutput],
    combine_docs_chain: Runnable[dict[str, Any], str],
) -> Runnable:
    """Create retrieval chain that retrieves documents and then passes them on.

    Args:
        retriever: Retriever-like object that returns list of documents. Should
            either be a subclass of BaseRetriever or a Runnable that returns
            a list of documents. If a subclass of BaseRetriever, then it
            is expected that an `input` key be passed in - this is what
            is will be used to pass into the retriever. If this is NOT a
            subclass of BaseRetriever, then all the inputs will be passed
            into this runnable, meaning that runnable should take a dictionary
            as input.
        combine_docs_chain: Runnable that takes inputs and produces a string output.
            The inputs to this will be any original inputs to this chain, a new
            context key with the retrieved documents, and chat_history (if not present
            in the inputs) with a value of `[]` (to easily enable conversational
            retrieval.

    Returns:
        An LCEL Runnable. The Runnable return is a dictionary containing at the very
        least a `context` and `answer` key.

    Example:
        ```python
        # pip install -U langchain langchain-openai
        from langchain_openai import ChatOpenAI
        from langchain_classic.chains.combine_documents import (
            create_stuff_documents_chain,
        )
        from langchain_classic.chains import create_retrieval_chain
        from langchain_classic import hub

        retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
        model = ChatOpenAI()
        retriever = ...
        combine_docs_chain = create_stuff_documents_chain(
            model, retrieval_qa_chat_prompt
        )
        retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
        retrieval_chain.invoke({"input": "..."})
        ```
    """
    if isinstance(retriever, BaseRetriever):
        # A BaseRetriever takes a single query string, read from the "input" key.
        retrieval_docs: Runnable[dict, RetrieverOutput] = (
            lambda x: x["input"]
        ) | retriever
    else:
        # Arbitrary runnables receive the whole input dict unchanged.
        retrieval_docs = retriever
    with_context = RunnablePassthrough.assign(
        context=retrieval_docs.with_config(run_name="retrieve_documents"),
    )
    return with_context.assign(answer=combine_docs_chain).with_config(
        run_name="retrieval_chain",
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/retrieval.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/retrieval_qa/base.py | """Chain for question-answering against a vector database."""
from __future__ import annotations
import inspect
from abc import abstractmethod
from typing import Any
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import override
from langchain_classic.chains.base import Chain
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.question_answering import load_qa_chain
from langchain_classic.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This class is deprecated. Use the `create_retrieval_chain` constructor "
        "instead. See migration guide here: "
        "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
    ),
)
class BaseRetrievalQA(Chain):
    """Base class for question-answering chains."""
    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine the documents."""
    # Key under which the question is read from the chain inputs.
    input_key: str = "query"
    # Key under which the answer is written to the chain outputs.
    output_key: str = "result"
    return_source_documents: bool = False
    """Return the source documents or not."""
    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
        extra="forbid",
    )
    @property
    def input_keys(self) -> list[str]:
        """Input keys."""
        return [self.input_key]
    @property
    def output_keys(self) -> list[str]:
        """Output keys."""
        _output_keys = [self.output_key]
        # Expose the retrieved docs as an extra output when requested.
        if self.return_source_documents:
            _output_keys = [*_output_keys, "source_documents"]
        return _output_keys
    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: PromptTemplate | None = None,
        callbacks: Callbacks = None,
        llm_chain_kwargs: dict | None = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Initialize from LLM.

        Builds a default "stuff" combine-documents chain around the given LLM,
        selecting a model-appropriate prompt when none is provided.
        """
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        llm_chain = LLMChain(
            llm=llm,
            prompt=_prompt,
            callbacks=callbacks,
            **(llm_chain_kwargs or {}),
        )
        document_prompt = PromptTemplate(
            input_variables=["page_content"],
            template="Context:\n{page_content}",
        )
        combine_documents_chain = StuffDocumentsChain(
            llm_chain=llm_chain,
            document_variable_name="context",
            document_prompt=document_prompt,
            callbacks=callbacks,
        )
        return cls(
            combine_documents_chain=combine_documents_chain,
            callbacks=callbacks,
            **kwargs,
        )
    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: dict | None = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Load chain from chain type (e.g. "stuff", "map_reduce", ...)."""
        _chain_type_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            **_chain_type_kwargs,
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)
    @abstractmethod
    def _get_docs(
        self,
        question: str,
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get documents to do question answering over."""
    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.

        Example:
        ```python
        res = indexqa({"query": "This is my query"})
        answer, docs = res["result"], res["source_documents"]
        ```
        """
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]
        # Backwards compatibility: older _get_docs overrides may not accept
        # a run_manager keyword, so only pass it when the signature allows.
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._get_docs).parameters
        )
        if accepts_run_manager:
            docs = self._get_docs(question, run_manager=_run_manager)
        else:
            docs = self._get_docs(question)  # type: ignore[call-arg]
        answer = self.combine_documents_chain.run(
            input_documents=docs,
            question=question,
            callbacks=_run_manager.get_child(),
        )
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        return {self.output_key: answer}
    @abstractmethod
    async def _aget_docs(
        self,
        question: str,
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get documents to do question answering over."""
    async def _acall(
        self,
        inputs: dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.

        Example:
        ```python
        res = indexqa({"query": "This is my query"})
        answer, docs = res["result"], res["source_documents"]
        ```
        """
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]
        # Same backwards-compatibility shim as the sync path.
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._aget_docs).parameters
        )
        if accepts_run_manager:
            docs = await self._aget_docs(question, run_manager=_run_manager)
        else:
            docs = await self._aget_docs(question)  # type: ignore[call-arg]
        answer = await self.combine_documents_chain.arun(
            input_documents=docs,
            question=question,
            callbacks=_run_manager.get_child(),
        )
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        return {self.output_key: answer}
@deprecated(
    since="0.1.17",
    removal="1.0",
    message=(
        "This class is deprecated. Use the `create_retrieval_chain` constructor "
        "instead. See migration guide here: "
        "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
    ),
)
class RetrievalQA(BaseRetrievalQA):
    """Chain for question-answering against an index.

    This class is deprecated. See below for an example implementation using
    `create_retrieval_chain`:

    ```python
    from langchain_classic.chains import create_retrieval_chain
    from langchain_classic.chains.combine_documents import (
        create_stuff_documents_chain,
    )
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_openai import ChatOpenAI

    retriever = ...  # Your retriever
    model = ChatOpenAI()
    system_prompt = (
        "Use the given context to answer the question. "
        "If you don't know the answer, say you don't know. "
        "Use three sentence maximum and keep the answer concise. "
        "Context: {context}"
    )
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            ("human", "{input}"),
        ]
    )
    question_answer_chain = create_stuff_documents_chain(model, prompt)
    chain = create_retrieval_chain(retriever, question_answer_chain)
    chain.invoke({"input": query})
    ```

    Example:
    ```python
    from langchain_openai import OpenAI
    from langchain_classic.chains import RetrievalQA
    from langchain_community.vectorstores import FAISS
    from langchain_core.vectorstores import VectorStoreRetriever

    retriever = VectorStoreRetriever(vectorstore=FAISS(...))
    retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)
    ```
    """
    # Excluded from serialization: retrievers are generally not serializable.
    retriever: BaseRetriever = Field(exclude=True)
    def _get_docs(
        self,
        question: str,
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs by delegating to the retriever with child callbacks."""
        return self.retriever.invoke(
            question,
            config={"callbacks": run_manager.get_child()},
        )
    async def _aget_docs(
        self,
        question: str,
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs asynchronously by delegating to the retriever."""
        return await self.retriever.ainvoke(
            question,
            config={"callbacks": run_manager.get_child()},
        )
    @property
    def _chain_type(self) -> str:
        """Return the chain type."""
        return "retrieval_qa"
@deprecated(
    since="0.2.13",
    removal="1.0",
    message=(
        "This class is deprecated. Use the `create_retrieval_chain` constructor "
        "instead. See migration guide here: "
        "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
    ),
)
class VectorDBQA(BaseRetrievalQA):
    """Chain for question-answering against a vector database."""
    vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
    """Vector Database to connect to."""
    k: int = 4
    """Number of documents to query for."""
    search_type: str = "similarity"
    """Search type to use over vectorstore. `similarity` or `mmr`."""
    search_kwargs: dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""
    @model_validator(mode="before")
    @classmethod
    def validate_search_type(cls, values: dict) -> Any:
        """Validate search type.

        Only validates when a `search_type` key is actually present in the
        incoming values; otherwise the field default applies unchecked.
        """
        if "search_type" in values:
            search_type = values["search_type"]
            if search_type not in ("similarity", "mmr"):
                msg = f"search_type of {search_type} not allowed."
                raise ValueError(msg)
        return values
    @override
    def _get_docs(
        self,
        question: str,
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs via the configured search strategy ("mmr" = maximal marginal relevance)."""
        if self.search_type == "similarity":
            docs = self.vectorstore.similarity_search(
                question,
                k=self.k,
                **self.search_kwargs,
            )
        elif self.search_type == "mmr":
            docs = self.vectorstore.max_marginal_relevance_search(
                question,
                k=self.k,
                **self.search_kwargs,
            )
        else:
            # Defensive: normally unreachable because of validate_search_type.
            msg = f"search_type of {self.search_type} not allowed."
            raise ValueError(msg)
        return docs
    async def _aget_docs(
        self,
        question: str,
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        """Get docs (async unsupported for this chain)."""
        msg = "VectorDBQA does not support async"
        raise NotImplementedError(msg)
    @property
    def _chain_type(self) -> str:
        """Return the chain type."""
        return "vector_db_qa"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/retrieval_qa/base.py",
"license": "MIT License",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/router/base.py | """Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from typing_extensions import override
from langchain_classic.chains.base import Chain
class Route(NamedTuple):
    """A route to a destination chain."""
    # Name of the destination chain to run next; falsy/None routes to the
    # default chain (see MultiRouteChain._call).
    destination: str | None
    # Inputs to pass to the selected destination chain.
    next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
    """Chain that outputs the name of a destination chain and the inputs to it."""
    @property
    @override
    def output_keys(self) -> list[str]:
        return ["destination", "next_inputs"]
    def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
        """Route inputs to a destination chain.

        Args:
            inputs: inputs to the chain
            callbacks: callbacks to use for the chain

        Returns:
            a Route object
        """
        # self(...) invokes the chain itself, producing the two output keys above.
        result = self(inputs, callbacks=callbacks)
        return Route(result["destination"], result["next_inputs"])
    async def aroute(
        self,
        inputs: dict[str, Any],
        callbacks: Callbacks = None,
    ) -> Route:
        """Route inputs to a destination chain.

        Args:
            inputs: inputs to the chain
            callbacks: callbacks to use for the chain

        Returns:
            a Route object
        """
        result = await self.acall(inputs, callbacks=callbacks)
        return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
    """Use a single chain to route an input to one of multiple candidate chains."""

    router_chain: RouterChain
    """Chain that routes inputs to destination chains."""
    destination_chains: Mapping[str, Chain]
    """Chains that return final answer to inputs."""
    default_chain: Chain
    """Default chain to use when none of the destination chains are suitable."""
    silent_errors: bool = False
    """If `True`, use default_chain when an invalid destination name is provided."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> list[str]:
        """Will be whatever keys the router chain prompt expects."""
        return self.router_chain.input_keys

    @property
    def output_keys(self) -> list[str]:
        """Will always return text key."""
        return []

    def _pick_destination(self, route: Route) -> Chain:
        """Resolve a route to the chain that should handle it.

        An empty destination falls back to ``default_chain``. An unknown
        destination name also falls back to ``default_chain`` when
        ``silent_errors`` is set; otherwise it raises ``ValueError``.
        """
        if not route.destination:
            return self.default_chain
        matched = self.destination_chains.get(route.destination)
        if matched is not None:
            return matched
        if self.silent_errors:
            return self.default_chain
        msg = f"Received invalid destination chain name '{route.destination}'"
        raise ValueError(msg)

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        child_callbacks = manager.get_child()
        route = self.router_chain.route(inputs, callbacks=child_callbacks)
        # Surface the routing decision for verbose/tracing output.
        manager.on_text(
            str(route.destination) + ": " + str(route.next_inputs),
            verbose=self.verbose,
        )
        handler = self._pick_destination(route)
        return handler(route.next_inputs, callbacks=child_callbacks)

    async def _acall(
        self,
        inputs: dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> dict[str, Any]:
        manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        child_callbacks = manager.get_child()
        route = await self.router_chain.aroute(inputs, callbacks=child_callbacks)
        # Surface the routing decision for verbose/tracing output.
        await manager.on_text(
            str(route.destination) + ": " + str(route.next_inputs),
            verbose=self.verbose,
        )
        handler = self._pick_destination(route)
        return await handler.acall(route.next_inputs, callbacks=child_callbacks)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/router/base.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/structured_output/base.py | import json
from collections.abc import Callable, Sequence
from typing import Any, Literal
from langchain_core._api import deprecated
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseOutputParser,
JsonOutputParser,
PydanticOutputParser,
)
from langchain_core.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
PydanticAttrOutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import (
convert_to_openai_function,
convert_to_openai_tool,
)
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel
@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        "<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "<https://python.langchain.com/docs/use_cases/extraction/>. "
        "If you notice other issues, please provide "
        "feedback here: "
        "<https://github.com/langchain-ai/langchain/discussions/18154>"
    ),
    removal="1.0",
    alternative=(
        """
        from pydantic import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please reference to the documentation of structured_output
        # to see an up to date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
        structured_model = model.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats.
            Make sure to call the Joke function.")
        """
    ),
)
def create_openai_fn_runnable(
    functions: Sequence[dict[str, Any] | type[BaseModel] | Callable],
    llm: Runnable,
    prompt: BasePromptTemplate | None = None,
    *,
    enforce_single_function_usage: bool = True,
    output_parser: BaseOutputParser | BaseGenerationOutputParser | None = None,
    **llm_kwargs: Any,
) -> Runnable:
    """Create a runnable sequence that uses OpenAI functions.

    Args:
        functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
            Python functions. If dictionaries are passed in, they are assumed to
            already be a valid OpenAI functions. If only a single
            function is passed in, then it will be enforced that the model use that
            function. pydantic.BaseModels and Python functions should have docstrings
            describing what the function does. For best results, pydantic.BaseModels
            should have descriptions of the parameters and Python functions should have
            Google Python style args descriptions in the docstring. Additionally,
            Python functions should only use primitive types (str, int, float, bool) or
            pydantic.BaseModels for arguments.
        llm: Language model to use, assumed to support the OpenAI function-calling API.
        prompt: BasePromptTemplate to pass to the model.
        enforce_single_function_usage: only used if a single function is passed in. If
            True, then the model will be forced to use the given function. If `False`,
            then the model will be given the option to use the given function or not.
        output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
            will be inferred from the function types. If pydantic.BaseModels are passed
            in, then the OutputParser will try to parse outputs using those. Otherwise
            model outputs will simply be parsed as JSON. If multiple functions are
            passed in and they are not pydantic.BaseModels, the chain output will
            include both the name of the function that was returned and the arguments
            to pass to the function.
        **llm_kwargs: Additional named arguments to pass to the language model.

    Returns:
        A runnable sequence that will pass in the given functions to the model when run.

    Raises:
        ValueError: If `functions` is empty.

    Example:
        ```python
        from typing import Optional

        from langchain_classic.chains.structured_output import create_openai_fn_runnable
        from langchain_openai import ChatOpenAI
        from pydantic import BaseModel, Field


        class RecordPerson(BaseModel):
            '''Record some identifying information about a person.'''

            name: str = Field(..., description="The person's name")
            age: int = Field(..., description="The person's age")
            fav_food: str | None = Field(None, description="The person's favorite food")


        class RecordDog(BaseModel):
            '''Record some identifying information about a dog.'''

            name: str = Field(..., description="The dog's name")
            color: str = Field(..., description="The dog's color")
            fav_food: str | None = Field(None, description="The dog's favorite food")


        model = ChatOpenAI(model="gpt-4", temperature=0)
        structured_model = create_openai_fn_runnable([RecordPerson, RecordDog], model)
        structured_model.invoke("Harry was a chubby brown beagle who loved chicken")
        # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
        ```
    """
    if not functions:
        msg = "Need to pass in at least one function. Received zero."
        raise ValueError(msg)
    openai_functions = [convert_to_openai_function(f) for f in functions]
    llm_kwargs_: dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
    # With exactly one function, optionally force the model to call it.
    if len(openai_functions) == 1 and enforce_single_function_usage:
        llm_kwargs_["function_call"] = {"name": openai_functions[0]["name"]}
    output_parser = output_parser or get_openai_output_parser(functions)
    if prompt:
        return prompt | llm.bind(**llm_kwargs_) | output_parser
    return llm.bind(**llm_kwargs_) | output_parser
@deprecated(
    since="0.1.17",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        # NOTE: trailing space restored here — the previous literal ran the URL
        # directly into the next sentence ("...outputs>.Please follow...").
        "<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "<https://python.langchain.com/docs/use_cases/extraction/>. "
        "If you notice other issues, please provide "
        "feedback here: "
        "<https://github.com/langchain-ai/langchain/discussions/18154>"
    ),
    removal="1.0",
    alternative=(
        """
        from pydantic import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please reference to the documentation of structured_output
        # to see an up to date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
        structured_model = model.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats.
            Make sure to call the Joke function.")
        """
    ),
)
def create_structured_output_runnable(
    output_schema: dict[str, Any] | type[BaseModel],
    llm: Runnable,
    prompt: BasePromptTemplate | None = None,
    *,
    output_parser: BaseOutputParser | BaseGenerationOutputParser | None = None,
    enforce_function_usage: bool = True,
    return_single: bool = True,
    mode: Literal[
        "openai-functions",
        "openai-tools",
        "openai-json",
    ] = "openai-functions",
    **kwargs: Any,
) -> Runnable:
    """Create a runnable for extracting structured outputs.

    Args:
        output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
            is passed in, it's assumed to already be a valid JsonSchema.
            For best results, pydantic.BaseModels should have docstrings describing what
            the schema represents and descriptions for the parameters.
        llm: Language model to use. Assumed to support the OpenAI function-calling API
            if mode is 'openai-function'. Assumed to support OpenAI response_format
            parameter if mode is 'openai-json'.
        prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
            prompt has input variable 'output_schema' then the given output_schema
            will be converted to a JsonSchema and inserted in the prompt.
        output_parser: Output parser to use for parsing model outputs. By default
            will be inferred from the function types. If pydantic.BaseModel is passed
            in, then the OutputParser will try to parse outputs using the pydantic
            class. Otherwise model outputs will be parsed as JSON.
        mode: How structured outputs are extracted from the model. If 'openai-functions'
            then OpenAI function calling is used with the deprecated 'functions',
            'function_call' schema. If 'openai-tools' then OpenAI function
            calling with the latest 'tools', 'tool_choice' schema is used. This is
            recommended over 'openai-functions'. If 'openai-json' then OpenAI model
            with response_format set to JSON is used.
        enforce_function_usage: Only applies when mode is 'openai-tools' or
            'openai-functions'. If `True`, then the model will be forced to use the given
            output schema. If `False`, then the model can elect whether to use the output
            schema.
        return_single: Only applies when mode is 'openai-tools'. Whether to return a
            single structured output instead of a list of them. If `True` and model does
            not return any structured outputs then chain output is None. If `False` and
            model does not return any structured outputs then chain output is an empty
            list.
        kwargs: Additional named arguments.

    Returns:
        A runnable sequence that will return a structured output(s) matching the given
        output_schema.

    Raises:
        TypeError: If mode is 'openai-tools' and unexpected keyword arguments are given.
        ValueError: If mode is 'openai-json' and function usage is enforced, or if mode
            is not one of the supported values.

    OpenAI tools example with Pydantic schema (mode='openai-tools'):
        ```python
        from typing import Optional

        from langchain_classic.chains import create_structured_output_runnable
        from langchain_openai import ChatOpenAI
        from pydantic import BaseModel, Field


        class RecordDog(BaseModel):
            '''Record some identifying information about a dog.'''

            name: str = Field(..., description="The dog's name")
            color: str = Field(..., description="The dog's color")
            fav_food: str | None = Field(None, description="The dog's favorite food")


        model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are an extraction algorithm. Please extract every possible instance"),
                ('human', '{input}')
            ]
        )
        structured_model = create_structured_output_runnable(
            RecordDog,
            model,
            mode="openai-tools",
            enforce_function_usage=True,
            return_single=True
        )
        structured_model.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
        # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
        ```

    OpenAI tools example with dict schema (mode="openai-tools"):
        ```python
        from typing import Optional

        from langchain_classic.chains import create_structured_output_runnable
        from langchain_openai import ChatOpenAI


        dog_schema = {
            "type": "function",
            "function": {
                "name": "record_dog",
                "description": "Record some identifying information about a dog.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "description": "The dog's name",
                            "type": "string"
                        },
                        "color": {
                            "description": "The dog's color",
                            "type": "string"
                        },
                        "fav_food": {
                            "description": "The dog's favorite food",
                            "type": "string"
                        }
                    },
                    "required": ["name", "color"]
                }
            }
        }


        model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
        structured_model = create_structured_output_runnable(
            dog_schema,
            model,
            mode="openai-tools",
            enforce_function_usage=True,
            return_single=True
        )
        structured_model.invoke("Harry was a chubby brown beagle who loved chicken")
        # -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'}
        ```

    OpenAI functions example (mode="openai-functions"):
        ```python
        from typing import Optional

        from langchain_classic.chains import create_structured_output_runnable
        from langchain_openai import ChatOpenAI
        from pydantic import BaseModel, Field


        class Dog(BaseModel):
            '''Identifying information about a dog.'''

            name: str = Field(..., description="The dog's name")
            color: str = Field(..., description="The dog's color")
            fav_food: str | None = Field(None, description="The dog's favorite food")


        model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
        structured_model = create_structured_output_runnable(Dog, model, mode="openai-functions")
        structured_model.invoke("Harry was a chubby brown beagle who loved chicken")
        # -> Dog(name="Harry", color="brown", fav_food="chicken")
        ```

    OpenAI functions with prompt example:
        ```python
        from typing import Optional

        from langchain_classic.chains import create_structured_output_runnable
        from langchain_openai import ChatOpenAI
        from langchain_core.prompts import ChatPromptTemplate
        from pydantic import BaseModel, Field


        class Dog(BaseModel):
            '''Identifying information about a dog.'''

            name: str = Field(..., description="The dog's name")
            color: str = Field(..., description="The dog's color")
            fav_food: str | None = Field(None, description="The dog's favorite food")


        model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
        structured_model = create_structured_output_runnable(Dog, model, mode="openai-functions")
        system = '''Extract information about any dogs mentioned in the user input.'''
        prompt = ChatPromptTemplate.from_messages(
            [("system", system), ("human", "{input}"),]
        )
        chain = prompt | structured_model
        chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
        # -> Dog(name="Harry", color="brown", fav_food="chicken")
        ```

    OpenAI json response format example (mode="openai-json"):
        ```python
        from typing import Optional

        from langchain_classic.chains import create_structured_output_runnable
        from langchain_openai import ChatOpenAI
        from langchain_core.prompts import ChatPromptTemplate
        from pydantic import BaseModel, Field


        class Dog(BaseModel):
            '''Identifying information about a dog.'''

            name: str = Field(..., description="The dog's name")
            color: str = Field(..., description="The dog's color")
            fav_food: str | None = Field(None, description="The dog's favorite food")


        model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
        structured_model = create_structured_output_runnable(Dog, model, mode="openai-json")
        system = '''You are a world class assistant for extracting information in structured JSON formats. \
        Extract a valid JSON blob from the user input that matches the following JSON Schema:

        {output_schema}'''
        prompt = ChatPromptTemplate.from_messages(
            [("system", system), ("human", "{input}"),]
        )
        chain = prompt | structured_model
        chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
        ```
    """  # noqa: E501
    # for backwards compatibility
    force_function_usage = kwargs.get(
        "enforce_single_function_usage",
        enforce_function_usage,
    )
    if mode == "openai-tools":
        # Protect against typos in kwargs
        keys_in_kwargs = set(kwargs.keys())
        # Backwards compatibility keys
        unrecognized_keys = keys_in_kwargs - {"enforce_single_function_usage"}
        if unrecognized_keys:
            msg = f"Got an unexpected keyword argument(s): {unrecognized_keys}."
            raise TypeError(msg)
        return _create_openai_tools_runnable(
            output_schema,
            llm,
            prompt=prompt,
            output_parser=output_parser,
            enforce_tool_usage=force_function_usage,
            first_tool_only=return_single,
        )
    if mode == "openai-functions":
        return _create_openai_functions_structured_output_runnable(
            output_schema,
            llm,
            prompt=prompt,
            output_parser=output_parser,
            enforce_single_function_usage=force_function_usage,
            **kwargs,  # llm-specific kwargs
        )
    if mode == "openai-json":
        if force_function_usage:
            msg = (
                "enforce_single_function_usage is not supported for mode='openai-json'."
            )
            raise ValueError(msg)
        return _create_openai_json_runnable(
            output_schema,
            llm,
            prompt=prompt,
            output_parser=output_parser,
            **kwargs,
        )
    msg = (  # type: ignore[unreachable]
        f"Invalid mode {mode}. Expected one of 'openai-tools', 'openai-functions', "
        f"'openai-json'."
    )
    raise ValueError(msg)
def _create_openai_tools_runnable(
    tool: dict[str, Any] | type[BaseModel] | Callable,
    llm: Runnable,
    *,
    prompt: BasePromptTemplate | None,
    output_parser: BaseOutputParser | BaseGenerationOutputParser | None,
    enforce_tool_usage: bool,
    first_tool_only: bool,
) -> Runnable:
    """Bind ``tool`` to ``llm`` as an OpenAI tool and attach an output parser."""
    converted = convert_to_openai_tool(tool)
    bind_kwargs: dict[str, Any] = {"tools": [converted]}
    if enforce_tool_usage:
        # Force the model to call this specific tool on every invocation.
        bind_kwargs["tool_choice"] = {
            "type": "function",
            "function": {"name": converted["function"]["name"]},
        }
    parser = output_parser or _get_openai_tool_output_parser(
        tool,
        first_tool_only=first_tool_only,
    )
    bound_llm = llm.bind(**bind_kwargs)
    if prompt:
        return prompt | bound_llm | parser
    return bound_llm | parser
def _get_openai_tool_output_parser(
    tool: dict[str, Any] | type[BaseModel] | Callable,
    *,
    first_tool_only: bool = False,
) -> BaseOutputParser | BaseGenerationOutputParser:
    """Pick a tool-call output parser appropriate for ``tool``.

    Pydantic model classes get a typed ``PydanticToolsParser``; everything
    else is parsed as JSON keyed by the tool's OpenAI function name.
    """
    if isinstance(tool, type) and is_basemodel_subclass(tool):
        return PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
    tool_name = convert_to_openai_tool(tool)["function"]["name"]
    return JsonOutputKeyToolsParser(
        first_tool_only=first_tool_only,
        key_name=tool_name,
    )
def get_openai_output_parser(
    functions: Sequence[dict[str, Any] | type[BaseModel] | Callable],
) -> BaseOutputParser | BaseGenerationOutputParser:
    """Get the appropriate function output parser given the user functions.

    Args:
        functions: Sequence where element is a dictionary, a pydantic.BaseModel class,
            or a Python function. If a dictionary is passed in, it is assumed to
            already be a valid OpenAI function.

    Returns:
        A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise
        a JsonOutputFunctionsParser. If there's only one function and it is
        not a Pydantic class, then the output parser will automatically extract
        only the function arguments and not the function name.
    """
    head = functions[0]
    if not (isinstance(head, type) and is_basemodel_subclass(head)):
        # Non-pydantic functions are parsed as raw JSON; with a single function
        # its name is implied, so only the arguments are extracted.
        return JsonOutputFunctionsParser(args_only=len(functions) <= 1)
    if len(functions) > 1:
        # Multiple pydantic schemas: map each OpenAI function name to its class.
        schema: dict | type[BaseModel] = {
            convert_to_openai_function(fn)["name"]: fn for fn in functions
        }
    else:
        schema = head
    return PydanticOutputFunctionsParser(pydantic_schema=schema)
def _create_openai_json_runnable(
    output_schema: dict[str, Any] | type[BaseModel],
    llm: Runnable,
    prompt: BasePromptTemplate | None = None,
    *,
    output_parser: BaseOutputParser | BaseGenerationOutputParser | None = None,
) -> Runnable:
    """Build a runnable that relies on OpenAI's JSON response format."""
    if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
        parser = output_parser or PydanticOutputParser(pydantic_object=output_schema)
        schema_dict = convert_to_openai_function(output_schema)["parameters"]
    else:
        parser = output_parser or JsonOutputParser()
        schema_dict = output_schema
    json_llm = llm.bind(response_format={"type": "json_object"})
    if not prompt:
        return json_llm | parser
    if "output_schema" in prompt.input_variables:
        # The prompt asks for the schema: render it as indented JSON.
        prompt = prompt.partial(output_schema=json.dumps(schema_dict, indent=2))
    return prompt | json_llm | parser
def _create_openai_functions_structured_output_runnable(
    output_schema: dict[str, Any] | type[BaseModel],
    llm: Runnable,
    prompt: BasePromptTemplate | None = None,
    *,
    output_parser: BaseOutputParser | BaseGenerationOutputParser | None = None,
    **llm_kwargs: Any,
) -> Runnable:
    """Build an OpenAI-functions runnable emitting the given structured schema.

    Wraps ``output_schema`` in a single "output_formatter" function and
    delegates to ``create_openai_fn_runnable``.
    """
    if isinstance(output_schema, dict):
        # A raw JsonSchema dict becomes the parameters of a synthetic function.
        function: Any = {
            "name": "output_formatter",
            "description": (
                "Output formatter. Should always be used to format your response to the"
                " user."
            ),
            "parameters": output_schema,
        }
    else:

        class _OutputFormatter(BaseModel):
            """Output formatter.

            Should always be used to format your response to the user.
            """

            output: output_schema  # type: ignore[valid-type]

        function = _OutputFormatter
        # Extract only the `output` attribute so callers receive an instance of
        # the target schema rather than the wrapper model.
        output_parser = output_parser or PydanticAttrOutputFunctionsParser(
            pydantic_schema=_OutputFormatter,
            attr_name="output",
        )
    return create_openai_fn_runnable(
        [function],
        llm,
        prompt=prompt,
        output_parser=output_parser,
        **llm_kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/structured_output/base.py",
"license": "MIT License",
"lines": 512,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/summarize/chain.py | """Load summarizing chains."""
from collections.abc import Mapping
from typing import Any, Protocol
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_classic.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_classic.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain_classic.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain_classic.chains.combine_documents.refine import RefineDocumentsChain
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.chains.summarize import (
map_reduce_prompt,
refine_prompts,
stuff_prompt,
)
class LoadingCallable(Protocol):
    """Interface for loading the combine documents chain.

    Structural type satisfied by this module's loader functions: any callable
    taking an LLM (plus loader-specific keyword arguments) and returning a
    combine-documents chain.
    """

    def __call__(
        self,
        llm: BaseLanguageModel,
        **kwargs: Any,
    ) -> BaseCombineDocumentsChain:
        """Callable to load the combine documents chain."""
def _load_stuff_chain(
    llm: BaseLanguageModel,
    *,
    prompt: BasePromptTemplate = stuff_prompt.PROMPT,
    document_variable_name: str = "text",
    verbose: bool | None = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    """Load a StuffDocumentsChain for summarization.

    Args:
        llm: Language Model to use in the chain.
        prompt: Prompt template that controls how the documents are formatted and
            passed into the LLM.
        document_variable_name: Variable name in the prompt template where the
            document text will be inserted.
        verbose: Whether to log progress and intermediate steps.
        **kwargs: Additional keyword arguments passed to the StuffDocumentsChain.

    Returns:
        A StuffDocumentsChain that takes in documents, formats them with the
        given prompt, and runs the chain on the provided LLM.
    """
    # (The docstring above previously sat after this statement, where it was a
    # dead string expression rather than the function's docstring.)
    llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
    return StuffDocumentsChain(
        llm_chain=llm_chain,
        document_variable_name=document_variable_name,
        verbose=verbose,
        **kwargs,
    )
def _load_map_reduce_chain(
    llm: BaseLanguageModel,
    *,
    map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_document_variable_name: str = "text",
    map_reduce_document_variable_name: str = "text",
    collapse_prompt: BasePromptTemplate | None = None,
    reduce_llm: BaseLanguageModel | None = None,
    collapse_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    token_max: int = 3000,
    callbacks: Callbacks = None,
    collapse_max_retries: int | None = None,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    """Load a MapReduceDocumentsChain for summarization.

    This chain first applies a "map" step to summarize each document,
    then applies a "reduce" step to combine the summaries into a
    final result. Optionally, a "collapse" step can be used to handle
    long intermediate results.

    Args:
        llm: Language Model to use for map and reduce steps.
        map_prompt: Prompt used to summarize each document in the map step.
        combine_prompt: Prompt used to combine summaries in the reduce step.
        combine_document_variable_name: Variable name in the `combine_prompt` where
            the mapped summaries are inserted.
        map_reduce_document_variable_name: Variable name in the `map_prompt`
            where document text is inserted.
        collapse_prompt: Optional prompt used to collapse intermediate summaries
            if they exceed the token limit (`token_max`).
        reduce_llm: Optional separate LLM for the reduce step. Defaults to `llm`,
            i.e. the same model as the map step.
        collapse_llm: Optional separate LLM for the collapse step. Defaults to
            `llm`, i.e. the same model as the map step.
        verbose: Whether to log progress and intermediate steps.
        token_max: Token threshold that triggers the collapse step during reduction.
        callbacks: Optional callbacks for logging and tracing.
        collapse_max_retries: Maximum retries for the collapse step if it fails.
        **kwargs: Additional keyword arguments passed to the MapReduceDocumentsChain.

    Returns:
        A MapReduceDocumentsChain that maps each document to a summary,
        then reduces all summaries into a single cohesive result.

    Raises:
        ValueError: If `collapse_llm` is provided without a `collapse_prompt`.
    """
    # (The docstring above previously sat after the first two statements, where
    # it was a dead string expression rather than the function's docstring.)
    map_chain = LLMChain(
        llm=llm,
        prompt=map_prompt,
        verbose=verbose,
        callbacks=callbacks,
    )
    _reduce_llm = reduce_llm or llm
    reduce_chain = LLMChain(
        llm=_reduce_llm,
        prompt=combine_prompt,
        verbose=verbose,
        callbacks=callbacks,
    )
    combine_documents_chain = StuffDocumentsChain(
        llm_chain=reduce_chain,
        document_variable_name=combine_document_variable_name,
        verbose=verbose,
        callbacks=callbacks,
    )
    if collapse_prompt is None:
        collapse_chain = None
        if collapse_llm is not None:
            msg = (
                "collapse_llm provided, but collapse_prompt was not: please "
                "provide one or stop providing collapse_llm."
            )
            raise ValueError(msg)
    else:
        _collapse_llm = collapse_llm or llm
        collapse_chain = StuffDocumentsChain(
            llm_chain=LLMChain(
                llm=_collapse_llm,
                prompt=collapse_prompt,
                verbose=verbose,
                callbacks=callbacks,
            ),
            document_variable_name=combine_document_variable_name,
        )
    reduce_documents_chain = ReduceDocumentsChain(
        combine_documents_chain=combine_documents_chain,
        collapse_documents_chain=collapse_chain,
        token_max=token_max,
        verbose=verbose,
        callbacks=callbacks,
        collapse_max_retries=collapse_max_retries,
    )
    return MapReduceDocumentsChain(
        llm_chain=map_chain,
        reduce_documents_chain=reduce_documents_chain,
        document_variable_name=map_reduce_document_variable_name,
        verbose=verbose,
        callbacks=callbacks,
        **kwargs,
    )
def _load_refine_chain(
    llm: BaseLanguageModel,
    *,
    question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
    refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
    document_variable_name: str = "text",
    initial_response_name: str = "existing_answer",
    refine_llm: BaseLanguageModel | None = None,
    verbose: bool | None = None,
    **kwargs: Any,
) -> RefineDocumentsChain:
    """Load a RefineDocumentsChain for summarization.

    Args:
        llm: Language Model used for the initial summary and, unless
            ``refine_llm`` is given, for each refinement step.
        question_prompt: Prompt used to produce the initial summary.
        refine_prompt: Prompt used to refine the running summary with each
            subsequent document.
        document_variable_name: Variable name in the prompts where document
            text is inserted.
        initial_response_name: Variable name in ``refine_prompt`` where the
            previous (running) summary is inserted.
        refine_llm: Optional separate LLM for the refine step; defaults to
            ``llm``.
        verbose: Whether to log progress and intermediate steps.
        **kwargs: Additional keyword arguments passed to RefineDocumentsChain.

    Returns:
        A RefineDocumentsChain that iteratively refines a summary over the
        documents.
    """
    initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
    _refine_llm = refine_llm or llm
    refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
    return RefineDocumentsChain(
        initial_llm_chain=initial_chain,
        refine_llm_chain=refine_chain,
        document_variable_name=document_variable_name,
        initial_response_name=initial_response_name,
        verbose=verbose,
        **kwargs,
    )
def load_summarize_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    verbose: bool | None = None,  # noqa: FBT001
    **kwargs: Any,
) -> BaseCombineDocumentsChain:
    """Load summarizing chain.

    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", and "refine".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        **kwargs: Additional keyword arguments.

    Returns:
        A chain to use for summarizing.
    """
    loader_mapping: Mapping[str, LoadingCallable] = {
        "stuff": _load_stuff_chain,
        "map_reduce": _load_map_reduce_chain,
        "refine": _load_refine_chain,
    }
    loader = loader_mapping.get(chain_type)
    if loader is None:
        msg = (
            f"Got unsupported chain type: {chain_type}. "
            f"Should be one of {loader_mapping.keys()}"
        )
        raise ValueError(msg)
    return loader(llm, verbose=verbose, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chains/summarize/chain.py",
"license": "MIT License",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_loaders/telegram.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.telegram import TelegramChatLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"TelegramChatLoader": "langchain_community.chat_loaders.telegram"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: delegates to the deprecation-aware
    importer so old import paths keep working with a deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"TelegramChatLoader",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_loaders/telegram.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_loaders/utils.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.utils import (
map_ai_messages,
map_ai_messages_in_session,
merge_chat_runs,
merge_chat_runs_in_session,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location,
# consumed by ``create_importer`` below.
DEPRECATED_LOOKUP = {
    "merge_chat_runs_in_session": "langchain_community.chat_loaders.utils",
    "merge_chat_runs": "langchain_community.chat_loaders.utils",
    "map_ai_messages_in_session": "langchain_community.chat_loaders.utils",
    "map_ai_messages": "langchain_community.chat_loaders.utils",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: resolves the deprecated names listed in
    ``DEPRECATED_LOOKUP`` via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "map_ai_messages",
    "map_ai_messages_in_session",
    "merge_chat_runs",
    "merge_chat_runs_in_session",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_loaders/utils.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/anthropic.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location,
# consumed by ``create_importer`` below.
DEPRECATED_LOOKUP = {
    "convert_messages_to_prompt_anthropic": "langchain_community.chat_models.anthropic",
    "ChatAnthropic": "langchain_community.chat_models.anthropic",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: deprecated names are resolved from their
    ``langchain_community`` location via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "ChatAnthropic",
    "convert_messages_to_prompt_anthropic",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/anthropic.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/anyscale.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anyscale import ChatAnyscale
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location.
DEPRECATED_LOOKUP = {"ChatAnyscale": "langchain_community.chat_models.anyscale"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: deprecated names are resolved from their
    ``langchain_community`` location via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "ChatAnyscale",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/anyscale.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/azure_openai.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location.
DEPRECATED_LOOKUP = {"AzureChatOpenAI": "langchain_community.chat_models.azure_openai"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: deprecated names are resolved from their
    ``langchain_community`` location via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "AzureChatOpenAI",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/azure_openai.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/azureml_endpoint.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.azureml_endpoint import (
AzureMLChatOnlineEndpoint,
LlamaContentFormatter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location,
# consumed by ``create_importer`` below.
DEPRECATED_LOOKUP = {
    "LlamaContentFormatter": "langchain_community.chat_models.azureml_endpoint",
    "AzureMLChatOnlineEndpoint": "langchain_community.chat_models.azureml_endpoint",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: deprecated names are resolved from their
    ``langchain_community`` location via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "AzureMLChatOnlineEndpoint",
    "LlamaContentFormatter",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/azureml_endpoint.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/baidu_qianfan_endpoint.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.baidu_qianfan_endpoint import (
QianfanChatEndpoint,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Deprecated attribute name -> new ``langchain_community`` location.
DEPRECATED_LOOKUP = {
    "QianfanChatEndpoint": "langchain_community.chat_models.baidu_qianfan_endpoint",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    PEP 562 module ``__getattr__``: deprecated names are resolved from their
    ``langchain_community`` location via ``_import_attribute``.
    """
    return _import_attribute(name)
__all__ = [
    "QianfanChatEndpoint",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/baidu_qianfan_endpoint.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/base.py | from __future__ import annotations
import warnings
from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from importlib import util
from typing import Any, Literal, TypeAlias, cast, overload
from langchain_core.language_models import (
BaseChatModel,
LanguageModelInput,
SimpleChatModel,
)
from langchain_core.language_models.chat_models import (
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import AIMessage, AnyMessage
from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
from langchain_core.runnables.schema import StreamEvent
from langchain_core.tools import BaseTool
from langchain_core.tracers import RunLog, RunLogPatch
from pydantic import BaseModel
from typing_extensions import override
__all__ = [
# For backwards compatibility
"BaseChatModel",
"SimpleChatModel",
"agenerate_from_stream",
"generate_from_stream",
"init_chat_model",
]
# Overload 1: a concrete model name with no configurable fields returns a
# ready-to-use BaseChatModel instance.
@overload
def init_chat_model(
    model: str,
    *,
    model_provider: str | None = None,
    configurable_fields: None = None,
    config_prefix: str | None = None,
    **kwargs: Any,
) -> BaseChatModel: ...


# Overload 2: no model name means the model must be chosen at runtime, so a
# configurable proxy is returned.
@overload
def init_chat_model(
    model: None = None,
    *,
    model_provider: str | None = None,
    configurable_fields: None = None,
    config_prefix: str | None = None,
    **kwargs: Any,
) -> _ConfigurableModel: ...


# Overload 3: explicitly requesting configurable fields always yields the
# configurable proxy, regardless of whether a default model is given.
@overload
def init_chat_model(
    model: str | None = None,
    *,
    model_provider: str | None = None,
    configurable_fields: Literal["any"] | list[str] | tuple[str, ...] = ...,
    config_prefix: str | None = None,
    **kwargs: Any,
) -> _ConfigurableModel: ...
# FOR CONTRIBUTORS: If adding support for a new provider, please append the provider
# name to the supported list in the docstring below. Do *not* change the order of the
# existing providers.
def init_chat_model(
    model: str | None = None,
    *,
    model_provider: str | None = None,
    configurable_fields: Literal["any"] | list[str] | tuple[str, ...] | None = None,
    config_prefix: str | None = None,
    **kwargs: Any,
) -> BaseChatModel | _ConfigurableModel:
    """Initialize a chat model from any supported provider using a unified interface.

    **Two main use cases:**

    1. **Fixed model** – specify the model upfront and get back a ready-to-use chat
        model.
    2. **Configurable model** – choose to specify parameters (including model name) at
        runtime via `config`. Makes it easy to switch between models/providers without
        changing your code

    !!! note
        Requires the integration package for the chosen model provider to be installed.
        See the `model_provider` parameter below for specific package names
        (e.g., `pip install langchain-openai`).

    Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
    for supported model parameters to use as `**kwargs`.

    Args:
        model: The name or ID of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
            You can also specify model and model provider in a single argument using
            `'{model_provider}:{model}'` format, e.g. `'openai:o1'`.

            Will attempt to infer `model_provider` from model if not specified.
            The following providers will be inferred based on these model prefixes:

            - `gpt-...` | `o1...` | `o3...` -> `openai`
            - `claude...` -> `anthropic`
            - `amazon...` -> `bedrock`
            - `gemini...` -> `google_vertexai`
            - `command...` -> `cohere`
            - `accounts/fireworks...` -> `fireworks`
            - `mistral...` -> `mistralai`
            - `deepseek...` -> `deepseek`
            - `grok...` -> `xai`
            - `sonar...` -> `perplexity`
            - `solar...` -> `upstage`
        model_provider: The model provider if not specified as part of the model arg
            (see above).

            Supported `model_provider` values and the corresponding integration package
            are:

            - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
            - `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
            - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
            - `azure_ai` -> [`langchain-azure-ai`](https://docs.langchain.com/oss/python/integrations/providers/microsoft)
            - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
            - `google_genai` -> [`langchain-google-genai`](https://docs.langchain.com/oss/python/integrations/providers/google)
            - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
            - `bedrock_converse` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
            - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
            - `fireworks` -> [`langchain-fireworks`](https://docs.langchain.com/oss/python/integrations/providers/fireworks)
            - `together` -> [`langchain-together`](https://docs.langchain.com/oss/python/integrations/providers/together)
            - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
            - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
            - `groq` -> [`langchain-groq`](https://docs.langchain.com/oss/python/integrations/providers/groq)
            - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
            - `google_anthropic_vertex` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
            - `deepseek` -> [`langchain-deepseek`](https://docs.langchain.com/oss/python/integrations/providers/deepseek)
            - `ibm` -> [`langchain-ibm`](https://docs.langchain.com/oss/python/integrations/providers/ibm)
            - `nvidia` -> [`langchain-nvidia-ai-endpoints`](https://docs.langchain.com/oss/python/integrations/providers/nvidia)
            - `xai` -> [`langchain-xai`](https://docs.langchain.com/oss/python/integrations/providers/xai)
            - `perplexity` -> [`langchain-perplexity`](https://docs.langchain.com/oss/python/integrations/providers/perplexity)
            - `upstage` -> [`langchain-upstage`](https://docs.langchain.com/oss/python/integrations/providers/upstage)
        configurable_fields: Which model parameters are configurable at runtime:

            - `None`: No configurable fields (i.e., a fixed model).
            - `'any'`: All fields are configurable. **See security note below.**
            - `list[str] | Tuple[str, ...]`: Specified fields are configurable.

            Fields are assumed to have `config_prefix` stripped if a `config_prefix` is
            specified.

            If `model` is specified, then defaults to `None`.

            If `model` is not specified, then defaults to `("model", "model_provider")`.

            !!! warning "Security note"
                Setting `configurable_fields="any"` means fields like `api_key`,
                `base_url`, etc., can be altered at runtime, potentially redirecting
                model requests to a different service/user.

                Make sure that if you're accepting untrusted configurations that you
                enumerate the `configurable_fields=(...)` explicitly.
        config_prefix: Optional prefix for configuration keys.

            Useful when you have multiple configurable models in the same application.

            If `'config_prefix'` is a non-empty string then `model` will be configurable
            at runtime via the `config["configurable"]["{config_prefix}_{param}"]` keys.
            See examples below.

            If `'config_prefix'` is an empty string then model will be configurable via
            `config["configurable"]["{param}"]`.
        **kwargs: Additional model-specific keyword args to pass to the underlying
            chat model's `__init__` method. Common parameters include:

            - `temperature`: Model temperature for controlling randomness.
            - `max_tokens`: Maximum number of output tokens.
            - `timeout`: Maximum time (in seconds) to wait for a response.
            - `max_retries`: Maximum number of retry attempts for failed requests.
            - `base_url`: Custom API endpoint URL.
            - `rate_limiter`: A
                [`BaseRateLimiter`][langchain_core.rate_limiters.BaseRateLimiter]
                instance to control request rate.

            Refer to the specific model provider's
            [integration reference](https://reference.langchain.com/python/integrations/)
            for all available parameters.

    Returns:
        A [`BaseChatModel`][langchain_core.language_models.BaseChatModel] corresponding
        to the `model_name` and `model_provider` specified if configurability is
        inferred to be `False`. If configurable, a chat model emulator that
        initializes the underlying model at runtime once a config is passed in.

    Raises:
        ValueError: If `model_provider` cannot be inferred or isn't supported.
        ImportError: If the model provider integration package is not installed.

    ???+ example "Initialize a non-configurable model"
        ```python
        # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
        from langchain_classic.chat_models import init_chat_model

        o3_mini = init_chat_model("openai:o3-mini", temperature=0)
        claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
        gemini_2_5_flash = init_chat_model(
            "google_vertexai:gemini-2.5-flash", temperature=0
        )

        o3_mini.invoke("what's your name")
        claude_sonnet.invoke("what's your name")
        gemini_2_5_flash.invoke("what's your name")
        ```

    ??? example "Partially configurable model with no default"
        ```python
        # pip install langchain langchain-openai langchain-anthropic
        from langchain_classic.chat_models import init_chat_model

        # (We don't need to specify configurable=True if a model isn't specified.)
        configurable_model = init_chat_model(temperature=0)

        configurable_model.invoke(
            "what's your name", config={"configurable": {"model": "gpt-4o"}}
        )
        # Use GPT-4o to generate the response
        configurable_model.invoke(
            "what's your name",
            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
        )
        ```

    ??? example "Fully configurable model with a default"
        ```python
        # pip install langchain langchain-openai langchain-anthropic
        from langchain_classic.chat_models import init_chat_model

        configurable_model_with_default = init_chat_model(
            "openai:gpt-4o",
            configurable_fields="any",  # This allows us to configure other params like temperature, max_tokens, etc at runtime.
            config_prefix="foo",
            temperature=0,
        )

        configurable_model_with_default.invoke("what's your name")
        # GPT-4o response with temperature 0 (as set in default)
        configurable_model_with_default.invoke(
            "what's your name",
            config={
                "configurable": {
                    "foo_model": "anthropic:claude-sonnet-4-5-20250929",
                    "foo_temperature": 0.6,
                }
            },
        )
        # Override default to use Sonnet 4.5 with temperature 0.6 to generate response
        ```

    ??? example "Bind tools to a configurable model"
        You can call any chat model declarative methods on a configurable model in the
        same way that you would with a normal model:

        ```python
        # pip install langchain langchain-openai langchain-anthropic
        from langchain_classic.chat_models import init_chat_model
        from pydantic import BaseModel, Field


        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(
                ..., description="The city and state, e.g. San Francisco, CA"
            )


        class GetPopulation(BaseModel):
            '''Get the current population in a given location'''

            location: str = Field(
                ..., description="The city and state, e.g. San Francisco, CA"
            )


        configurable_model = init_chat_model(
            "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
        )

        configurable_model_with_tools = configurable_model.bind_tools(
            [
                GetWeather,
                GetPopulation,
            ]
        )
        configurable_model_with_tools.invoke(
            "Which city is hotter today and which is bigger: LA or NY?"
        )
        # Use GPT-4o
        configurable_model_with_tools.invoke(
            "Which city is hotter today and which is bigger: LA or NY?",
            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
        )
        # Use Sonnet 4.5
        ```

    !!! warning "Behavior changed in `langchain` 0.2.8"
        Support for `configurable_fields` and `config_prefix` added.

    !!! warning "Behavior changed in `langchain` 0.2.12"
        Support for Ollama via langchain-ollama package added
        (`langchain_ollama.ChatOllama`). Previously,
        the now-deprecated langchain-community version of Ollama was imported
        (`langchain_community.chat_models.ChatOllama`).

        Support for AWS Bedrock models via the Converse API added
        (`model_provider="bedrock_converse"`).

    !!! warning "Behavior changed in `langchain` 0.3.5"
        Out of beta.

    !!! warning "Behavior changed in `langchain` 0.3.19"
        Support for Deepseek, IBM, Nvidia, and xAI models added.
    """  # noqa: E501
    # With no fixed model, default to making model/model_provider runtime-configurable.
    if not model and not configurable_fields:
        configurable_fields = ("model", "model_provider")
    config_prefix = config_prefix or ""
    if config_prefix and not configurable_fields:
        # A prefix without configurable fields can never take effect — warn.
        warnings.warn(
            f"{config_prefix=} has been set but no fields are configurable. Set "
            f"`configurable_fields=(...)` to specify the model params that are "
            f"configurable.",
            stacklevel=2,
        )
    if not configurable_fields:
        # Fixed model path: build the concrete chat model immediately.
        return _init_chat_model_helper(
            cast("str", model),
            model_provider=model_provider,
            **kwargs,
        )
    # Configurable path: fold model/provider into the default config and return
    # a proxy that instantiates the real model at invocation time.
    if model:
        kwargs["model"] = model
    if model_provider:
        kwargs["model_provider"] = model_provider
    return _ConfigurableModel(
        default_config=kwargs,
        config_prefix=config_prefix,
        configurable_fields=configurable_fields,
    )
def _init_chat_model_helper(
    model: str,
    *,
    model_provider: str | None = None,
    **kwargs: Any,
) -> BaseChatModel:
    """Instantiate a chat model for a single, resolved provider.

    Args:
        model: Model name/ID, optionally prefixed with ``"provider:"``.
        model_provider: Provider key; inferred from ``model`` when omitted.
        **kwargs: Passed through to the provider's chat-model constructor.

    Returns:
        The constructed ``BaseChatModel``.

    Raises:
        ValueError: If the provider is unknown or cannot be inferred.
        ImportError: If the provider's integration package is not installed.
    """
    model, model_provider = _parse_model(model, model_provider)
    # Each branch imports its integration lazily so only the packages a user
    # actually needs must be installed; _check_pkg raises a friendly install
    # hint before the import itself can fail.
    if model_provider == "openai":
        _check_pkg("langchain_openai", "ChatOpenAI")
        from langchain_openai import ChatOpenAI

        return ChatOpenAI(model=model, **kwargs)
    if model_provider == "anthropic":
        _check_pkg("langchain_anthropic", "ChatAnthropic")
        from langchain_anthropic import ChatAnthropic

        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
    if model_provider == "azure_openai":
        _check_pkg("langchain_openai", "AzureChatOpenAI")
        from langchain_openai import AzureChatOpenAI

        return AzureChatOpenAI(model=model, **kwargs)
    if model_provider == "azure_ai":
        _check_pkg("langchain_azure_ai", "AzureAIChatCompletionsModel")
        from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel

        return AzureAIChatCompletionsModel(model=model, **kwargs)
    if model_provider == "cohere":
        _check_pkg("langchain_cohere", "ChatCohere")
        from langchain_cohere import ChatCohere

        return ChatCohere(model=model, **kwargs)
    if model_provider == "google_vertexai":
        _check_pkg("langchain_google_vertexai", "ChatVertexAI")
        from langchain_google_vertexai import ChatVertexAI

        return ChatVertexAI(model=model, **kwargs)
    if model_provider == "google_genai":
        _check_pkg("langchain_google_genai", "ChatGoogleGenerativeAI")
        from langchain_google_genai import ChatGoogleGenerativeAI

        return ChatGoogleGenerativeAI(model=model, **kwargs)
    if model_provider == "fireworks":
        _check_pkg("langchain_fireworks", "ChatFireworks")
        from langchain_fireworks import ChatFireworks

        return ChatFireworks(model=model, **kwargs)
    if model_provider == "ollama":
        try:
            _check_pkg("langchain_ollama", "ChatOllama")
            from langchain_ollama import ChatOllama
        except ImportError:
            # For backwards compatibility
            try:
                _check_pkg("langchain_community", "ChatOllama")
                from langchain_community.chat_models import ChatOllama
            except ImportError:
                # If both langchain-ollama and langchain-community aren't available,
                # raise an error related to langchain-ollama
                _check_pkg("langchain_ollama", "ChatOllama")

        return ChatOllama(model=model, **kwargs)
    if model_provider == "together":
        _check_pkg("langchain_together", "ChatTogether")
        from langchain_together import ChatTogether

        return ChatTogether(model=model, **kwargs)
    if model_provider == "mistralai":
        _check_pkg("langchain_mistralai", "ChatMistralAI")
        from langchain_mistralai import ChatMistralAI

        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
    if model_provider == "huggingface":
        _check_pkg("langchain_huggingface", "ChatHuggingFace")
        from langchain_huggingface import ChatHuggingFace

        return ChatHuggingFace.from_model_id(model_id=model, **kwargs)
    if model_provider == "groq":
        _check_pkg("langchain_groq", "ChatGroq")
        from langchain_groq import ChatGroq

        return ChatGroq(model=model, **kwargs)
    if model_provider == "bedrock":
        _check_pkg("langchain_aws", "ChatBedrock")
        from langchain_aws import ChatBedrock

        # TODO: update to use model= once ChatBedrock supports
        return ChatBedrock(model_id=model, **kwargs)
    if model_provider == "bedrock_converse":
        _check_pkg("langchain_aws", "ChatBedrockConverse")
        from langchain_aws import ChatBedrockConverse

        return ChatBedrockConverse(model=model, **kwargs)
    if model_provider == "google_anthropic_vertex":
        _check_pkg("langchain_google_vertexai", "ChatAnthropicVertex")
        from langchain_google_vertexai.model_garden import ChatAnthropicVertex

        return ChatAnthropicVertex(model=model, **kwargs)
    if model_provider == "deepseek":
        _check_pkg("langchain_deepseek", "ChatDeepSeek", pkg_kebab="langchain-deepseek")
        from langchain_deepseek import ChatDeepSeek

        return ChatDeepSeek(model=model, **kwargs)
    if model_provider == "nvidia":
        _check_pkg("langchain_nvidia_ai_endpoints", "ChatNVIDIA")
        from langchain_nvidia_ai_endpoints import ChatNVIDIA

        return ChatNVIDIA(model=model, **kwargs)
    if model_provider == "ibm":
        _check_pkg("langchain_ibm", "ChatWatsonx")
        from langchain_ibm import ChatWatsonx

        return ChatWatsonx(model_id=model, **kwargs)
    if model_provider == "xai":
        _check_pkg("langchain_xai", "ChatXAI")
        from langchain_xai import ChatXAI

        return ChatXAI(model=model, **kwargs)
    if model_provider == "perplexity":
        _check_pkg("langchain_perplexity", "ChatPerplexity")
        from langchain_perplexity import ChatPerplexity

        return ChatPerplexity(model=model, **kwargs)
    if model_provider == "upstage":
        _check_pkg("langchain_upstage", "ChatUpstage")
        from langchain_upstage import ChatUpstage

        return ChatUpstage(model=model, **kwargs)
    # Sort for a deterministic message (set iteration order is arbitrary),
    # consistent with the error raised by _parse_model.
    supported = ", ".join(sorted(_SUPPORTED_PROVIDERS))
    msg = (
        f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
    )
    raise ValueError(msg)
# Canonical set of provider keys accepted by init_chat_model and
# _init_chat_model_helper.  NOTE: this is a set, so iteration order is
# arbitrary — sort before displaying it to users.
_SUPPORTED_PROVIDERS = {
    "openai",
    "anthropic",
    "azure_openai",
    "azure_ai",
    "cohere",
    "google_vertexai",
    "google_genai",
    "fireworks",
    "ollama",
    "together",
    "mistralai",
    "huggingface",
    "groq",
    "bedrock",
    "bedrock_converse",
    "google_anthropic_vertex",
    "deepseek",
    "ibm",
    "xai",
    "perplexity",
    "upstage",
}
def _attempt_infer_model_provider(model_name: str) -> str | None:
    """Attempt to infer model provider from model name.

    Args:
        model_name: The name of the model to infer provider for.

    Returns:
        The inferred provider name, or `None` if no provider could be inferred.
    """
    lowered = model_name.lower()
    # Case-insensitive prefix rules, checked in order.
    leading_rules: tuple[tuple[tuple[str, ...], str], ...] = (
        # OpenAI models (including newer models and aliases)
        (("gpt-", "o1", "o3", "chatgpt", "text-davinci"), "openai"),
        # Anthropic models
        (("claude",), "anthropic"),
        # Cohere models
        (("command",), "cohere"),
    )
    for prefixes, provider in leading_rules:
        if lowered.startswith(prefixes):
            return provider
    # Fireworks account paths are matched case-sensitively.
    if model_name.startswith("accounts/fireworks"):
        return "fireworks"
    # Google models
    if lowered.startswith("gemini"):
        return "google_vertexai"
    # AWS Bedrock models: "amazon." is matched case-sensitively, the vendor
    # prefixes are not.
    if model_name.startswith("amazon.") or lowered.startswith(
        ("anthropic.", "meta.")
    ):
        return "bedrock"
    trailing_rules: tuple[tuple[tuple[str, ...], str], ...] = (
        # Mistral models
        (("mistral", "mixtral"), "mistralai"),
        # DeepSeek models
        (("deepseek",), "deepseek"),
        # xAI models
        (("grok",), "xai"),
        # Perplexity models
        (("sonar",), "perplexity"),
        # Upstage models
        (("solar",), "upstage"),
    )
    for prefixes, provider in trailing_rules:
        if lowered.startswith(prefixes):
            return provider
    return None
def _parse_model(model: str, model_provider: str | None) -> tuple[str, str]:
    """Parse model name and provider, inferring provider if necessary."""
    if not model_provider and ":" in model:
        # A "provider:model" spec: accept the prefix if it is a known provider
        # key, or if it at least looks like one we can infer.
        maybe_provider, remainder = model.split(":", 1)
        if maybe_provider in _SUPPORTED_PROVIDERS:
            model_provider, model = maybe_provider, remainder
        else:
            guessed = _attempt_infer_model_provider(maybe_provider)
            if guessed:
                model_provider, model = guessed, remainder
    if not model_provider:
        model_provider = _attempt_infer_model_provider(model)
    if not model_provider:
        supported_list = ", ".join(sorted(_SUPPORTED_PROVIDERS))
        msg = (
            f"Unable to infer model provider for {model=}. "
            f"Please specify 'model_provider' directly.\n\n"
            f"Supported providers: {supported_list}\n\n"
            f"For help with specific providers, see: "
            f"https://docs.langchain.com/oss/python/integrations/providers"
        )
        raise ValueError(msg)
    # Normalize provider name
    normalized_provider = model_provider.replace("-", "_").lower()
    return model, normalized_provider
def _check_pkg(pkg: str, class_name: str, *, pkg_kebab: str | None = None) -> None:
    """Verify an integration package is importable, else raise a helpful error.

    Args:
        pkg: Importable (underscored) module name, e.g. ``langchain_openai``.
        class_name: Class the caller is trying to initialize; used only in the
            error message.
        pkg_kebab: Override for the pip-installable (kebab-case) name; derived
            from ``pkg`` when not given.

    Raises:
        ImportError: If ``pkg`` cannot be found on the current path.
    """
    if util.find_spec(pkg):
        return
    install_name = pkg.replace("_", "-") if pkg_kebab is None else pkg_kebab
    msg = (
        f"Initializing {class_name} requires the {install_name} package. "
        f"Please install it with `pip install {install_name}`"
    )
    raise ImportError(msg)
_DECLARATIVE_METHODS = ("bind_tools", "with_structured_output")
class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
    """Runnable proxy that instantiates the underlying chat model lazily.

    Model parameters come from a default config merged with runtime values
    read from ``config["configurable"]``; declarative operations (see
    ``_DECLARATIVE_METHODS``) are queued and replayed on each fresh model.
    """

    def __init__(
        self,
        *,
        default_config: dict | None = None,
        configurable_fields: Literal["any"] | list[str] | tuple[str, ...] = "any",
        config_prefix: str = "",
        queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (),
    ) -> None:
        """Initialize the configurable-model proxy.

        Args:
            default_config: Baseline model params (may include ``model`` and
                ``model_provider``); overridden by runtime configurable values.
            configurable_fields: ``"any"`` to allow every param to be set at
                runtime, or an explicit allow-list of param names.
            config_prefix: Prefix for configurable keys; normalized below to
                end with ``"_"`` when non-empty.
            queued_declarative_operations: ``(method, args, kwargs)`` tuples
                replayed, in order, on every instantiated model.
        """
        self._default_config: dict = default_config or {}
        # Tuples are normalized to lists; "any" is kept as the sentinel string.
        self._configurable_fields: Literal["any"] | list[str] = (
            configurable_fields
            if configurable_fields == "any"
            else list(configurable_fields)
        )
        self._config_prefix = (
            config_prefix + "_"
            if config_prefix and not config_prefix.endswith("_")
            else config_prefix
        )
        # Copy defensively so callers can't mutate the queue from outside.
        self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
            queued_declarative_operations,
        )
    def __getattr__(self, name: str) -> Any:
        """Proxy attribute access to the (lazily created) underlying model.

        Declarative methods are intercepted and queued; any other attribute is
        looked up on a model built from the default config, when one exists.
        """
        if name in _DECLARATIVE_METHODS:
            # Declarative operations that cannot be applied until after an actual model
            # object is instantiated. So instead of returning the actual operation,
            # we record the operation and its arguments in a queue. This queue is
            # then applied in order whenever we actually instantiate the model (in
            # self._model()).
            def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
                queued_declarative_operations = list(
                    self._queued_declarative_operations,
                )
                queued_declarative_operations.append((name, args, kwargs))
                return _ConfigurableModel(
                    default_config=dict(self._default_config),
                    configurable_fields=list(self._configurable_fields)
                    if isinstance(self._configurable_fields, list)
                    else self._configurable_fields,
                    config_prefix=self._config_prefix,
                    queued_declarative_operations=queued_declarative_operations,
                )

            return queue
        # Otherwise fall back to the default model's attribute, if available.
        if self._default_config and (model := self._model()) and hasattr(model, name):
            return getattr(model, name)
        msg = f"{name} is not a BaseChatModel attribute"
        if self._default_config:
            msg += " and is not implemented on the default model"
        msg += "."
        raise AttributeError(msg)
def _model(self, config: RunnableConfig | None = None) -> Runnable:
params = {**self._default_config, **self._model_params(config)}
model = _init_chat_model_helper(**params)
for name, args, kwargs in self._queued_declarative_operations:
model = getattr(model, name)(*args, **kwargs)
return model
def _model_params(self, config: RunnableConfig | None) -> dict:
config = ensure_config(config)
model_params = {
k.removeprefix(self._config_prefix): v
for k, v in config.get("configurable", {}).items()
if k.startswith(self._config_prefix)
}
if self._configurable_fields != "any":
model_params = {
k: v for k, v in model_params.items() if k in self._configurable_fields
}
return model_params
    def with_config(
        self,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> _ConfigurableModel:
        """Bind config to a `Runnable`, returning a new `Runnable`."""
        config = RunnableConfig(**(config or {}), **cast("RunnableConfig", kwargs))
        # Split the bound config: configurable keys matching our prefix become
        # part of the new proxy's default model params ...
        model_params = self._model_params(config)
        remaining_config = {k: v for k, v in config.items() if k != "configurable"}
        remaining_config["configurable"] = {
            k: v
            for k, v in config.get("configurable", {}).items()
            if k.removeprefix(self._config_prefix) not in model_params
        }
        queued_declarative_operations = list(self._queued_declarative_operations)
        if remaining_config:
            # ... while everything else is deferred as a queued `with_config`
            # call on the eventual concrete model.
            queued_declarative_operations.append(
                (
                    "with_config",
                    (),
                    {"config": remaining_config},
                ),
            )
        return _ConfigurableModel(
            default_config={**self._default_config, **model_params},
            configurable_fields=list(self._configurable_fields)
            if isinstance(self._configurable_fields, list)
            else self._configurable_fields,
            config_prefix=self._config_prefix,
            queued_declarative_operations=queued_declarative_operations,
        )
    @property
    @override
    def InputType(self) -> TypeAlias:
        """Get the input type for this `Runnable`."""
        # Imported locally rather than at module level — presumably to avoid
        # an import cycle with langchain_core; TODO confirm.
        from langchain_core.prompt_values import (
            ChatPromptValueConcrete,
            StringPromptValue,
        )

        # This is a version of LanguageModelInput which replaces the abstract
        # base class BaseMessage with a union of its subclasses, which makes
        # for a much better schema.
        return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
    @override
    def invoke(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Any:
        """Instantiate the configured model and delegate `invoke` to it."""
        return self._model(config).invoke(input, config=config, **kwargs)

    @override
    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Any:
        """Instantiate the configured model and delegate `ainvoke` to it."""
        return await self._model(config).ainvoke(input, config=config, **kwargs)
    @override
    def stream(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        **kwargs: Any | None,
    ) -> Iterator[Any]:
        """Instantiate the configured model and delegate `stream` to it."""
        yield from self._model(config).stream(input, config=config, **kwargs)

    @override
    async def astream(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        **kwargs: Any | None,
    ) -> AsyncIterator[Any]:
        """Instantiate the configured model and delegate `astream` to it."""
        async for x in self._model(config).astream(input, config=config, **kwargs):
            yield x
def batch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return self._model(config).batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
return super().batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
async def abatch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
# If <= 1 config use the underlying models batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return await self._model(config).abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
# If multiple configs default to Runnable.batch which uses executor to invoke
# in parallel.
return await super().abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
    def batch_as_completed(
        self,
        inputs: Sequence[LanguageModelInput],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any,
    ) -> Iterator[tuple[int, Any | Exception]]:
        """Yield `(index, result)` pairs as each input finishes.

        With zero or one config, delegates to the resolved model's own
        ``batch_as_completed``; with several configs, falls back to the base
        ``Runnable`` implementation, which fans out over an executor.
        """
        # Normalize falsy values (e.g. an empty list) to None.
        config = config or None
        # If <= 1 config use the underlying models batch implementation.
        if config is None or isinstance(config, dict) or len(config) <= 1:
            if isinstance(config, list):
                config = config[0]
            yield from self._model(cast("RunnableConfig", config)).batch_as_completed(  # type: ignore[call-overload]
                inputs,
                config=config,
                return_exceptions=return_exceptions,
                **kwargs,
            )
        # If multiple configs default to Runnable.batch which uses executor to invoke
        # in parallel.
        else:
            yield from super().batch_as_completed(  # type: ignore[call-overload]
                inputs,
                config=config,
                return_exceptions=return_exceptions,
                **kwargs,
            )
    async def abatch_as_completed(
        self,
        inputs: Sequence[LanguageModelInput],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any,
    ) -> AsyncIterator[tuple[int, Any]]:
        """Asynchronously yield `(index, result)` pairs as inputs finish.

        With zero or one config, delegates to the resolved model's own
        ``abatch_as_completed``; with several configs, falls back to the base
        ``Runnable`` implementation, which runs the calls in parallel.
        """
        # Normalize falsy values (e.g. an empty list) to None.
        config = config or None
        # If <= 1 config use the underlying models batch implementation.
        if config is None or isinstance(config, dict) or len(config) <= 1:
            if isinstance(config, list):
                config = config[0]
            async for x in self._model(
                cast("RunnableConfig", config),
            ).abatch_as_completed(  # type: ignore[call-overload]
                inputs,
                config=config,
                return_exceptions=return_exceptions,
                **kwargs,
            ):
                yield x
        # If multiple configs default to Runnable.batch which uses executor to invoke
        # in parallel.
        else:
            async for x in super().abatch_as_completed(  # type: ignore[call-overload]
                inputs,
                config=config,
                return_exceptions=return_exceptions,
                **kwargs,
            ):
                yield x
@override
def transform(
self,
input: Iterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Any]:
yield from self._model(config).transform(input, config=config, **kwargs)
@override
async def atransform(
self,
input: AsyncIterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Any]:
async for x in self._model(config).atransform(input, config=config, **kwargs):
yield x
@overload
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[True] = True,
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch]: ...
@overload
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[False],
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLog]: ...
    @override
    async def astream_log(
        self,
        input: Any,
        config: RunnableConfig | None = None,
        *,
        diff: bool = True,
        with_streamed_output_list: bool = True,
        include_names: Sequence[str] | None = None,
        include_types: Sequence[str] | None = None,
        include_tags: Sequence[str] | None = None,
        exclude_names: Sequence[str] | None = None,
        exclude_types: Sequence[str] | None = None,
        exclude_tags: Sequence[str] | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[RunLogPatch] | AsyncIterator[RunLog]:
        """Stream run-log output from the model resolved from `config`.

        Pure delegation: every filter argument is forwarded unchanged. `diff`
        selects between patch deltas (True) and cumulative logs (False), per
        the overloads above.
        """
        async for x in self._model(config).astream_log(  # type: ignore[call-overload, misc]
            input,
            config=config,
            diff=diff,
            with_streamed_output_list=with_streamed_output_list,
            include_names=include_names,
            include_types=include_types,
            include_tags=include_tags,
            exclude_tags=exclude_tags,
            exclude_types=exclude_types,
            exclude_names=exclude_names,
            **kwargs,
        ):
            yield x
@override
async def astream_events(
self,
input: Any,
config: RunnableConfig | None = None,
*,
version: Literal["v1", "v2"] = "v2",
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[StreamEvent]:
async for x in self._model(config).astream_events(
input,
config=config,
version=version,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_tags=exclude_tags,
exclude_types=exclude_types,
exclude_names=exclude_names,
**kwargs,
):
yield x
# Explicitly added to satisfy downstream linters.
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
return self.__getattr__("bind_tools")(tools, **kwargs)
# Explicitly added to satisfy downstream linters.
def with_structured_output(
self,
schema: dict | type[BaseModel],
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
return self.__getattr__("with_structured_output")(schema, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/base.py",
"license": "MIT License",
"lines": 894,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/bedrock.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.bedrock import BedrockChat, ChatPromptAdapter

# Attribute name -> module that now hosts it. Centralizes deprecation
# warnings and optional-dependency handling inside create_importer.
DEPRECATED_LOOKUP = {
    "ChatPromptAdapter": "langchain_community.chat_models.bedrock",
    "BedrockChat": "langchain_community.chat_models.bedrock",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["BedrockChat", "ChatPromptAdapter"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/bedrock.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/cohere.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.cohere import ChatCohere

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"ChatCohere": "langchain_community.chat_models.cohere"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ChatCohere"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/cohere.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/databricks.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.databricks import ChatDatabricks

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"ChatDatabricks": "langchain_community.chat_models.databricks"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ChatDatabricks"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/databricks.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/ernie.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.ernie import ErnieBotChat

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"ErnieBotChat": "langchain_community.chat_models.ernie"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ErnieBotChat"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/ernie.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/fake.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.fake import (
        FakeListChatModel,
        FakeMessagesListChatModel,
    )

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {
    "FakeMessagesListChatModel": "langchain_community.chat_models.fake",
    "FakeListChatModel": "langchain_community.chat_models.fake",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["FakeListChatModel", "FakeMessagesListChatModel"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/fake.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/fireworks.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.fireworks import ChatFireworks

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"ChatFireworks": "langchain_community.chat_models.fireworks"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ChatFireworks"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/fireworks.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/gigachat.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.gigachat import GigaChat

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"GigaChat": "langchain_community.chat_models.gigachat"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["GigaChat"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/gigachat.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/google_palm.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.google_palm import (
        ChatGooglePalm,
        ChatGooglePalmError,
    )

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {
    "ChatGooglePalm": "langchain_community.chat_models.google_palm",
    "ChatGooglePalmError": "langchain_community.chat_models.google_palm",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ChatGooglePalm", "ChatGooglePalmError"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/google_palm.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/human.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.human import HumanInputChatModel

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {"HumanInputChatModel": "langchain_community.chat_models.human"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["HumanInputChatModel"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/human.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/chat_models/javelin_ai_gateway.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.chat_models.javelin_ai_gateway import (
        ChatJavelinAIGateway,
        ChatParams,
    )

# Attribute name -> module that now hosts it; create_importer raises the
# deprecation warning and copes with the optional dependency.
DEPRECATED_LOOKUP = {
    "ChatJavelinAIGateway": "langchain_community.chat_models.javelin_ai_gateway",
    "ChatParams": "langchain_community.chat_models.javelin_ai_gateway",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["ChatJavelinAIGateway", "ChatParams"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/chat_models/javelin_ai_gateway.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.