id
stringlengths
14
15
text
stringlengths
49
2.47k
source
stringlengths
61
166
0bc2fb9d19be-1
if agent_scratchpad: return ( f"This was your previous work " f"(but I haven't seen any of it! I only see what " f"you return as final answer):\n{agent_scratchpad}" ) else: return agent_scratchpad @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: pass @classmethod def _get_default_output_parser( cls, llm: Optional[BaseLanguageModel] = None, **kwargs: Any ) -> AgentOutputParser: return StructuredChatOutputParserWithRetries.from_llm(llm=llm) @property def _stop(self) -> List[str]: return ["Observation:"] [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, human_message_template: str = HUMAN_MESSAGE_TEMPLATE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[BasePromptTemplate]] = None, ) -> BasePromptTemplate: tool_strings = [] for tool in tools: args_schema = re.sub("}", "}}}}", re.sub("{", "{{{{", str(tool.args))) tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}") formatted_tools = "\n".join(tool_strings) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/base.html
0bc2fb9d19be-2
format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix]) if input_variables is None: input_variables = ["input", "agent_scratchpad"] _memory_prompts = memory_prompts or [] messages = [ SystemMessagePromptTemplate.from_template(template), *_memory_prompts, HumanMessagePromptTemplate.from_template(human_message_template), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, human_message_template: str = HUMAN_MESSAGE_TEMPLATE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[BasePromptTemplate]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, prefix=prefix, suffix=suffix, human_message_template=human_message_template, format_instructions=format_instructions, input_variables=input_variables, memory_prompts=memory_prompts, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/base.html
0bc2fb9d19be-3
) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser(llm=llm) return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @property def _agent_type(self) -> str: raise ValueError
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/base.html
27e1c6445686-0
Source code for langchain.agents.structured_chat.output_parser from __future__ import annotations import json import logging import re from typing import Optional, Union from pydantic import Field from langchain.agents.agent import AgentOutputParser from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS from langchain.output_parsers import OutputFixingParser from langchain.schema import AgentAction, AgentFinish, OutputParserException from langchain.schema.language_model import BaseLanguageModel logger = logging.getLogger(__name__) [docs]class StructuredChatOutputParser(AgentOutputParser): """Output parser for the structured chat agent.""" [docs] def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS [docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]: try: action_match = re.search(r"```(.*?)```?", text, re.DOTALL) if action_match is not None: response = json.loads(action_match.group(1).strip(), strict=False) if isinstance(response, list): # gpt turbo frequently ignores the directive to emit a single action logger.warning("Got multiple action responses: %s", response) response = response[0] if response["action"] == "Final Answer": return AgentFinish({"output": response["action_input"]}, text) else: return AgentAction( response["action"], response.get("action_input", {}), text ) else: return AgentFinish({"output": text}, text) except Exception as e: raise OutputParserException(f"Could not parse LLM output: {text}") from e @property def _type(self) -> str: return "structured_chat"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/output_parser.html
27e1c6445686-1
def _type(self) -> str: return "structured_chat" [docs]class StructuredChatOutputParserWithRetries(AgentOutputParser): """Output parser with retries for the structured chat agent.""" base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser) """The base parser to use.""" output_fixing_parser: Optional[OutputFixingParser] = None """The output fixing parser to use.""" [docs] def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS [docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]: try: if self.output_fixing_parser is not None: parsed_obj: Union[ AgentAction, AgentFinish ] = self.output_fixing_parser.parse(text) else: parsed_obj = self.base_parser.parse(text) return parsed_obj except Exception as e: raise OutputParserException(f"Could not parse LLM output: {text}") from e [docs] @classmethod def from_llm( cls, llm: Optional[BaseLanguageModel] = None, base_parser: Optional[StructuredChatOutputParser] = None, ) -> StructuredChatOutputParserWithRetries: if llm is not None: base_parser = base_parser or StructuredChatOutputParser() output_fixing_parser = OutputFixingParser.from_llm( llm=llm, parser=base_parser ) return cls(output_fixing_parser=output_fixing_parser) elif base_parser is not None: return cls(base_parser=base_parser) else: return cls() @property def _type(self) -> str:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/output_parser.html
27e1c6445686-2
return cls() @property def _type(self) -> str: return "structured_chat_with_retries"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/output_parser.html
2e658f0dcbb8-0
Source code for langchain.agents.openai_functions_agent.base """Module implements an agent that uses OpenAI's APIs function enabled API.""" import json from dataclasses import dataclass from json import JSONDecodeError from typing import Any, List, Optional, Sequence, Tuple, Union from pydantic import root_validator from langchain.agents import BaseSingleActionAgent from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chat_models.openai import ChatOpenAI from langchain.prompts.chat import ( BaseMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) from langchain.schema import ( AgentAction, AgentFinish, BasePromptTemplate, OutputParserException, ) from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import ( AIMessage, BaseMessage, FunctionMessage, SystemMessage, ) from langchain.tools import BaseTool from langchain.tools.convert_to_openai import format_tool_to_openai_function @dataclass class _FunctionsAgentAction(AgentAction): message_log: List[BaseMessage] def _convert_agent_action_to_messages( agent_action: AgentAction, observation: str ) -> List[BaseMessage]: """Convert an agent action to a message. This code is used to reconstruct the original AI message from the agent action. Args: agent_action: Agent action to convert. Returns: AIMessage that corresponds to the original tool invocation. """ if isinstance(agent_action, _FunctionsAgentAction): return agent_action.message_log + [ _create_function_message(agent_action, observation) ] else:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-1
_create_function_message(agent_action, observation) ] else: return [AIMessage(content=agent_action.log)] def _create_function_message( agent_action: AgentAction, observation: str ) -> FunctionMessage: """Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation """ if not isinstance(observation, str): try: content = json.dumps(observation, ensure_ascii=False) except Exception: content = str(observation) else: content = observation return FunctionMessage( name=agent_action.tool, content=content, ) def _format_intermediate_steps( intermediate_steps: List[Tuple[AgentAction, str]], ) -> List[BaseMessage]: """Format intermediate steps. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction """ messages = [] for intermediate_step in intermediate_steps: agent_action, observation = intermediate_step messages.extend(_convert_agent_action_to_messages(agent_action, observation)) return messages def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]: """Parse an AI message.""" if not isinstance(message, AIMessage): raise TypeError(f"Expected an AI message got {type(message)}") function_call = message.additional_kwargs.get("function_call", {}) if function_call: function_name = function_call["name"] try:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-2
if function_call: function_name = function_call["name"] try: _tool_input = json.loads(function_call["arguments"]) except JSONDecodeError: raise OutputParserException( f"Could not parse tool input: {function_call} because " f"the `arguments` is not valid JSON." ) # HACK HACK HACK: # The code that encodes tool input into Open AI uses a special variable # name called `__arg1` to handle old style tools that do not expose a # schema and expect a single string argument as an input. # We unpack the argument here if it exists. # Open AI does not support passing in a JSON array as an argument. if "__arg1" in _tool_input: tool_input = _tool_input["__arg1"] else: tool_input = _tool_input content_msg = "responded: {content}\n" if message.content else "\n" return _FunctionsAgentAction( tool=function_name, tool_input=tool_input, log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n", message_log=[message], ) return AgentFinish(return_values={"output": message.content}, log=message.content) [docs]class OpenAIFunctionsAgent(BaseSingleActionAgent): """An Agent driven by OpenAIs function powered API. Args: llm: This should be an instance of ChatOpenAI, specifically a model that supports using `functions`. tools: The tools this agent has access to. prompt: The prompt for this agent, should support agent_scratchpad as one
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-3
prompt: The prompt for this agent, should support agent_scratchpad as one of the variables. For an easy way to construct this prompt, use `OpenAIFunctionsAgent.create_prompt(...)` """ llm: BaseLanguageModel tools: Sequence[BaseTool] prompt: BasePromptTemplate [docs] def get_allowed_tools(self) -> List[str]: """Get allowed tools.""" return list([t.name for t in self.tools]) @root_validator def validate_llm(cls, values: dict) -> dict: if not isinstance(values["llm"], ChatOpenAI): raise ValueError("Only supported with ChatOpenAI models.") return values @root_validator def validate_prompt(cls, values: dict) -> dict: prompt: BasePromptTemplate = values["prompt"] if "agent_scratchpad" not in prompt.input_variables: raise ValueError( "`agent_scratchpad` should be one of the variables in the prompt, " f"got {prompt.input_variables}" ) return values @property def input_keys(self) -> List[str]: """Get input keys. Input refers to user input here.""" return ["input"] @property def functions(self) -> List[dict]: return [dict(format_tool_to_openai_function(t)) for t in self.tools] [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, with_functions: bool = True, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-4
"""Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ agent_scratchpad = _format_intermediate_steps(intermediate_steps) selected_inputs = { k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" } full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) prompt = self.prompt.format_prompt(**full_inputs) messages = prompt.to_messages() if with_functions: predicted_message = self.llm.predict_messages( messages, functions=self.functions, callbacks=callbacks, ) else: predicted_message = self.llm.predict_messages( messages, callbacks=callbacks, ) agent_decision = _parse_ai_message(predicted_message) return agent_decision [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ agent_scratchpad = _format_intermediate_steps(intermediate_steps) selected_inputs = { k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" }
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-5
} full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) prompt = self.prompt.format_prompt(**full_inputs) messages = prompt.to_messages() predicted_message = await self.llm.apredict_messages( messages, functions=self.functions, callbacks=callbacks ) agent_decision = _parse_ai_message(predicted_message) return agent_decision [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) elif early_stopping_method == "generate": # Generate does one final forward pass agent_decision = self.plan( intermediate_steps, with_functions=False, **kwargs ) if type(agent_decision) == AgentFinish: return agent_decision else: raise ValueError( f"got AgentAction with no functions provided: {agent_decision}" ) else: raise ValueError( "early_stopping_method should be one of `force` or `generate`, " f"got {early_stopping_method}" ) [docs] @classmethod def create_prompt( cls, system_message: Optional[SystemMessage] = SystemMessage( content="You are a helpful AI assistant." ), extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-6
), extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None, ) -> BasePromptTemplate: """Create prompt for this agent. Args: system_message: Message to use as the system message that will be the first in the prompt. extra_prompt_messages: Prompt messages that will be placed between the system message and the new human input. Returns: A prompt template to pass into this agent. """ _prompts = extra_prompt_messages or [] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] if system_message: messages = [system_message] else: messages = [] messages.extend( [ *_prompts, HumanMessagePromptTemplate.from_template("{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ] ) return ChatPromptTemplate(messages=messages) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None, system_message: Optional[SystemMessage] = SystemMessage( content="You are a helpful AI assistant." ), **kwargs: Any, ) -> BaseSingleActionAgent: """Construct an agent from an LLM and tools.""" if not isinstance(llm, ChatOpenAI): raise ValueError("Only supported with ChatOpenAI models.") prompt = cls.create_prompt( extra_prompt_messages=extra_prompt_messages, system_message=system_message, ) return cls(
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
2e658f0dcbb8-7
system_message=system_message, ) return cls( llm=llm, prompt=prompt, tools=tools, callback_manager=callback_manager, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
ebf88a3ca73f-0
Source code for langchain.agents.openai_functions_agent.agent_token_buffer_memory """Memory used to save agent output AND intermediate steps.""" from typing import Any, Dict, List from langchain.agents.openai_functions_agent.base import _format_intermediate_steps from langchain.memory.chat_memory import BaseChatMemory from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, get_buffer_string [docs]class AgentTokenBufferMemory(BaseChatMemory): """Memory used to save agent output AND intermediate steps.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 12000 """The max number of tokens to keep in the buffer. Once the buffer exceeds this many tokens, the oldest messages will be pruned.""" return_messages: bool = True output_key = "output" intermediate_steps_key = "intermediate_steps" @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" if self.return_messages: final_buffer: Any = self.buffer else: final_buffer = get_buffer_string( self.buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: final_buffer}
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/agent_token_buffer_memory.html
ebf88a3ca73f-1
) return {self.memory_key: final_buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None: """Save context from this conversation to buffer. Pruned.""" input_str, output_str = self._get_input_output(inputs, outputs) self.chat_memory.add_user_message(input_str) steps = _format_intermediate_steps(outputs[self.intermediate_steps_key]) for msg in steps: self.chat_memory.add_message(msg) self.chat_memory.add_ai_message(output_str) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: while curr_buffer_length > self.max_token_limit: buffer.pop(0) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/agent_token_buffer_memory.html
04d14d462bdc-0
Source code for langchain.agents.conversational.base """An agent designed to hold a conversation in addition to using tools.""" from __future__ import annotations from typing import Any, List, Optional, Sequence from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.conversational.output_parser import ConvoOutputParser from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.utils import validate_tools_single_input from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool [docs]class ConversationalAgent(Agent): """An agent that holds a conversation in addition to using tools.""" ai_prefix: str = "AI" """Prefix to use before AI output.""" output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) """Output parser for the agent.""" @classmethod def _get_default_output_parser( cls, ai_prefix: str = "AI", **kwargs: Any ) -> AgentOutputParser: return ConvoOutputParser(ai_prefix=ai_prefix) @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.CONVERSATIONAL_REACT_DESCRIPTION @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
04d14d462bdc-1
"""Prefix to append the llm call with.""" return "Thought:" [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, ai_prefix: str = "AI", human_prefix: str = "Human", input_variables: Optional[List[str]] = None, ) -> PromptTemplate: """Create prompt in the style of the zero-shot agent. Args: tools: List of tools the agent will have access to, used to format the prompt. prefix: String to put before the list of tools. suffix: String to put after the list of tools. ai_prefix: String to use before AI output. human_prefix: String to use before human output. input_variables: List of input variables the final prompt will expect. Returns: A PromptTemplate with the template assembled from the pieces here. """ tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools] ) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format( tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix ) template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] return PromptTemplate(template=template, input_variables=input_variables) @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
04d14d462bdc-2
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, ai_prefix: str = "AI", human_prefix: str = "Human", input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, ai_prefix=ai_prefix, human_prefix=human_prefix, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser( ai_prefix=ai_prefix ) return cls( llm_chain=llm_chain, allowed_tools=tool_names, ai_prefix=ai_prefix, output_parser=_output_parser, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
5279ede0bb05-0
"""Output parser for the conversational agent.

(Reconstructed from langchain.agents.conversational.output_parser; the
scraped ``[docs]`` link labels and collapsed line structure were removed.)
"""
import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException


class ConvoOutputParser(AgentOutputParser):
    """Output parser for the conversational agent."""

    # Prefix the prompt tells the model to use before a direct reply.
    ai_prefix: str = "AI"

    def get_format_instructions(self) -> str:
        """Return the format instructions the agent prompt embeds."""
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Parse LLM output into an agent action or a final answer.

        Args:
            text: Raw LLM completion.

        Returns:
            AgentFinish when the text contains ``"<ai_prefix>:"`` (the
            model is replying to the user directly; everything after the
            last occurrence is the output), otherwise an AgentAction
            extracted from ``Action:`` / ``Action Input:`` lines.

        Raises:
            OutputParserException: if neither form matches.
        """
        if f"{self.ai_prefix}:" in text:
            return AgentFinish(
                {"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
            )
        regex = r"Action: (.*?)[\n]*Action Input: (.*)"
        match = re.search(regex, text)
        if not match:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")
        action = match.group(1)
        action_input = match.group(2)
        # Strip surrounding quotes the model sometimes adds to the input.
        return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)

    @property
    def _type(self) -> str:
        return "conversational"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/output_parser.html
3d963fc1d07e-0
"""Toolkits for agents.

(Reconstructed from langchain.agents.agent_toolkits.base; the scraped
``[docs]`` link labels and collapsed line structure were removed.)
"""
from abc import ABC, abstractmethod
from typing import List

from pydantic import BaseModel

from langchain.tools import BaseTool


class BaseToolkit(BaseModel, ABC):
    """Base Toolkit representing a collection of related tools."""

    @abstractmethod
    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/base.html
85b71b065fdf-0
Source code for langchain.agents.agent_toolkits.azure_cognitive_services from __future__ import annotations import sys from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools.azure_cognitive_services import ( AzureCogsFormRecognizerTool, AzureCogsImageAnalysisTool, AzureCogsSpeech2TextTool, AzureCogsText2SpeechTool, ) from langchain.tools.base import BaseTool [docs]class AzureCognitiveServicesToolkit(BaseToolkit): """Toolkit for Azure Cognitive Services.""" [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools = [ AzureCogsFormRecognizerTool(), AzureCogsSpeech2TextTool(), AzureCogsText2SpeechTool(), ] # TODO: Remove check once azure-ai-vision supports MacOS. if sys.platform.startswith("linux") or sys.platform.startswith("win"): tools.append(AzureCogsImageAnalysisTool()) return tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/azure_cognitive_services.html
03e5a4f6134d-0
Source code for langchain.agents.agent_toolkits.playwright.toolkit """Playwright web browser toolkit.""" from __future__ import annotations from typing import TYPE_CHECKING, List, Optional, Type, cast from pydantic import Extra, root_validator from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools.base import BaseTool from langchain.tools.playwright.base import ( BaseBrowserTool, lazy_import_playwright_browsers, ) from langchain.tools.playwright.click import ClickTool from langchain.tools.playwright.current_page import CurrentWebPageTool from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool from langchain.tools.playwright.extract_text import ExtractTextTool from langchain.tools.playwright.get_elements import GetElementsTool from langchain.tools.playwright.navigate import NavigateTool from langchain.tools.playwright.navigate_back import NavigateBackTool if TYPE_CHECKING: from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser else: try: # We do this so pydantic can resolve the types when instantiating from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser except ImportError: pass [docs]class PlayWrightBrowserToolkit(BaseToolkit): """Toolkit for PlayWright browser tools.""" sync_browser: Optional["SyncBrowser"] = None async_browser: Optional["AsyncBrowser"] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator def validate_imports_and_browser_provided(cls, values: dict) -> dict: """Check that the arguments are valid.""" lazy_import_playwright_browsers()
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/playwright/toolkit.html
03e5a4f6134d-1
"""Check that the arguments are valid.""" lazy_import_playwright_browsers() if values.get("async_browser") is None and values.get("sync_browser") is None: raise ValueError("Either async_browser or sync_browser must be specified.") return values [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tool_classes: List[Type[BaseBrowserTool]] = [ ClickTool, NavigateTool, NavigateBackTool, ExtractTextTool, ExtractHyperlinksTool, GetElementsTool, CurrentWebPageTool, ] tools = [ tool_cls.from_browser( sync_browser=self.sync_browser, async_browser=self.async_browser ) for tool_cls in tool_classes ] return cast(List[BaseTool], tools) [docs] @classmethod def from_browser( cls, sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None, ) -> PlayWrightBrowserToolkit: """Instantiate the toolkit.""" # This is to raise a better error than the forward ref ones Pydantic would have lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/playwright/toolkit.html
1b15bd794de8-0
"""CSV agent: load CSV file(s) into dataframe(s) and delegate to the pandas agent.

(Reconstructed from langchain.agents.agent_toolkits.csv.base; the scraped
``[docs]`` link labels and collapsed line structure were removed.)
"""
from typing import Any, List, Optional, Union

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.schema.language_model import BaseLanguageModel


def create_csv_agent(
    llm: BaseLanguageModel,
    path: Union[str, List[str]],
    pandas_kwargs: Optional[dict] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Create csv agent by loading to a dataframe and using pandas agent.

    Args:
        llm: Language model driving the agent.
        path: A CSV file path, or a list of CSV file paths.
        pandas_kwargs: Extra keyword arguments forwarded to ``pd.read_csv``.
        **kwargs: Forwarded to ``create_pandas_dataframe_agent``.

    Returns:
        An AgentExecutor over the loaded dataframe(s).

    Raises:
        ImportError: if pandas is not installed.
        ValueError: if ``path`` (or one of its elements) has the wrong type.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            "pandas package not found, please install with `pip install pandas`"
        )

    _kwargs = pandas_kwargs or {}
    if isinstance(path, str):
        df = pd.read_csv(path, **_kwargs)
    elif isinstance(path, list):
        df = []
        for item in path:
            if not isinstance(item, str):
                # BUG FIX: report the offending element's type, not the
                # type of the (list) container.
                raise ValueError(f"Expected str, got {type(item)}")
            df.append(pd.read_csv(item, **_kwargs))
    else:
        raise ValueError(f"Expected str or list, got {type(path)}")
    return create_pandas_dataframe_agent(llm, df, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/csv/base.html
e458a41424af-0
"""Toolkit bundling the local file-management tools."""
from __future__ import annotations

from typing import List, Optional

from pydantic import root_validator

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool

# Registry keyed by each tool class's default name, so tools can be both
# validated and instantiated by name.
_FILE_TOOLS = {
    tool_cls.__fields__["name"].default: tool_cls
    for tool_cls in [
        CopyFileTool,
        DeleteFileTool,
        FileSearchTool,
        MoveFileTool,
        ReadFileTool,
        WriteFileTool,
        ListDirectoryTool,
    ]
}


class FileManagementToolkit(BaseToolkit):
    """Toolkit for interacting with a Local Files."""

    root_dir: Optional[str] = None
    """If specified, all file operations are made relative to root_dir."""
    selected_tools: Optional[List[str]] = None
    """If provided, only provide the selected tools. Defaults to all."""

    @root_validator
    def validate_tools(cls, values: dict) -> dict:
        """Reject any selected tool name that is not a known file tool."""
        for tool_name in values.get("selected_tools") or []:
            if tool_name not in _FILE_TOOLS:
                raise ValueError(
                    f"File Tool of name {tool_name} not supported."
                    f" Permitted tools: {list(_FILE_TOOLS)}"
                )
        return values

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        names = self.selected_tools or _FILE_TOOLS.keys()
        # Every file tool accepts root_dir; scope all of them identically.
        return [
            _FILE_TOOLS[name](root_dir=self.root_dir)  # type: ignore
            for name in names
        ]


__all__ = ["FileManagementToolkit"]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/file_management/toolkit.html
45c72cb886eb-0
"""Toolkit exposing Jira operations as agent tools."""
from typing import Dict, List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.prompt import (
    JIRA_CATCH_ALL_PROMPT,
    JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
    JIRA_GET_ALL_PROJECTS_PROMPT,
    JIRA_ISSUE_CREATE_PROMPT,
    JIRA_JQL_PROMPT,
)
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper


class JiraToolkit(BaseToolkit):
    """Jira Toolkit."""

    tools: List[BaseTool] = []

    @classmethod
    def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
        """Build one JiraAction per supported operation, sharing the wrapper."""
        # (mode, display name, prompt/description) for each supported action.
        specs = [
            ("jql", "JQL Query", JIRA_JQL_PROMPT),
            ("get_projects", "Get Projects", JIRA_GET_ALL_PROJECTS_PROMPT),
            ("create_issue", "Create Issue", JIRA_ISSUE_CREATE_PROMPT),
            ("other", "Catch all Jira API call", JIRA_CATCH_ALL_PROMPT),
            ("create_page", "Create confluence page", JIRA_CONFLUENCE_PAGE_CREATE_PROMPT),
        ]
        actions: List[BaseTool] = []
        for mode, name, description in specs:
            actions.append(
                JiraAction(
                    name=name,
                    description=description,
                    mode=mode,
                    api_wrapper=jira_api_wrapper,
                )
            )
        return cls(tools=actions)

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/jira/toolkit.html
217a5d1ea88c-0
"""Python agent."""
from typing import Any, Dict, Optional

from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain.agents.agent_toolkits.python.prompt import PREFIX
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.types import AgentType
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain.tools.python.tool import PythonREPLTool


def create_python_agent(
    llm: BaseLanguageModel,
    tool: PythonREPLTool,
    agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    callback_manager: Optional[BaseCallbackManager] = None,
    verbose: bool = False,
    prefix: str = PREFIX,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a python agent from an LLM and tool.

    Supports the zero-shot ReAct agent type and the OpenAI-functions
    agent type; any other agent_type raises ValueError.
    """
    tools = [tool]
    agent: BaseSingleActionAgent

    if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
        chain = LLMChain(
            llm=llm,
            prompt=ZeroShotAgent.create_prompt(tools, prefix=prefix),
            callback_manager=callback_manager,
        )
        agent = ZeroShotAgent(
            llm_chain=chain,
            allowed_tools=[t.name for t in tools],
            **kwargs,
        )
    elif agent_type == AgentType.OPENAI_FUNCTIONS:
        # The prefix becomes the system message for the functions agent.
        agent = OpenAIFunctionsAgent(
            llm=llm,
            prompt=OpenAIFunctionsAgent.create_prompt(
                system_message=SystemMessage(content=prefix)
            ),
            tools=tools,
            callback_manager=callback_manager,
            **kwargs,
        )
    else:
        raise ValueError(f"Agent type {agent_type} not supported at the moment.")

    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/python/base.html
ae8fde4dddde-0
"""Spark SQL agent."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel


def create_spark_sql_agent(
    llm: BaseLanguageModel,
    toolkit: SparkSQLToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = SQL_PREFIX,
    suffix: str = SQL_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Spark SQL agent from an LLM and tools.

    Builds a ZeroShotAgent over the toolkit's tools, formatting top_k into
    the prompt prefix, and wraps it in an AgentExecutor.
    """
    sql_tools = toolkit.get_tools()
    # The SQL prefix template expects a row-limit placeholder.
    zero_shot_prompt = ZeroShotAgent.create_prompt(
        sql_tools,
        prefix=prefix.format(top_k=top_k),
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    chain = LLMChain(
        llm=llm,
        prompt=zero_shot_prompt,
        callback_manager=callback_manager,
    )
    agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[t.name for t in sql_tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=sql_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark_sql/base.html
dfbccf728dc0-0
"""Toolkit for interacting with Spark SQL."""
from typing import List

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.spark_sql.tool import (
    InfoSparkSQLTool,
    ListSparkSQLTool,
    QueryCheckerTool,
    QuerySparkSQLTool,
)
from langchain.utilities.spark_sql import SparkSQL


class SparkSQLToolkit(BaseToolkit):
    """Toolkit for interacting with Spark SQL."""

    db: SparkSQL = Field(exclude=True)
    llm: BaseLanguageModel = Field(exclude=True)

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        toolset: List[BaseTool] = [
            QuerySparkSQLTool(db=self.db),
            InfoSparkSQLTool(db=self.db),
            ListSparkSQLTool(db=self.db),
        ]
        # The query checker additionally needs the LLM to review SQL.
        toolset.append(QueryCheckerTool(db=self.db, llm=self.llm))
        return toolset
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark_sql/toolkit.html
c733535c4ff6-0
"""MultiOn agent."""
from __future__ import annotations

from typing import List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.multion.create_session import MultionCreateSession
from langchain.tools.multion.update_session import MultionUpdateSession


class MultionToolkit(BaseToolkit):
    """Toolkit for interacting with the Browser Agent"""

    class Config:
        """Pydantic config."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        # One tool to open a MultiOn session, one to update it.
        session_tools: List[BaseTool] = [
            MultionCreateSession(),
            MultionUpdateSession(),
        ]
        return session_tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/multion/toolkit.html
df0debf8d833-0
"""Power BI agent."""
from typing import Any, Dict, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.agent import AgentOutputParser
from langchain.agents.agent_toolkits.powerbi.prompt import (
    POWERBI_CHAT_PREFIX,
    POWERBI_CHAT_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.utilities.powerbi import PowerBIDataset


def create_pbi_chat_agent(
    llm: BaseChatModel,
    toolkit: Optional[PowerBIToolkit] = None,
    powerbi: Optional[PowerBIDataset] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    output_parser: Optional[AgentOutputParser] = None,
    prefix: str = POWERBI_CHAT_PREFIX,
    suffix: str = POWERBI_CHAT_SUFFIX,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    memory: Optional[BaseChatMemory] = None,
    top_k: int = 10,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Power BI agent from a Chat LLM and tools.

    If you supply only a toolkit and no Power BI dataset, the same LLM
    is used for both.
    """
    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    pbi_tools = toolkit.get_tools()
    # Prefer the dataset passed explicitly; otherwise use the toolkit's.
    tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names

    # The prefix template is formatted twice: first with top_k, then tables.
    agent = ConversationalChatAgent.from_llm_and_tools(
        llm=llm,
        tools=pbi_tools,
        system_message=prefix.format(top_k=top_k).format(tables=tables),
        human_message=suffix,
        input_variables=input_variables,
        callback_manager=callback_manager,
        output_parser=output_parser,
        verbose=verbose,
        **kwargs,
    )
    chat_memory = memory or ConversationBufferMemory(
        memory_key="chat_history", return_messages=True
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=pbi_tools,
        callback_manager=callback_manager,
        memory=chat_memory,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/chat_base.html
f9e5d1248b8a-0
"""Power BI agent."""
from typing import Any, Dict, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits.powerbi.prompt import (
    POWERBI_PREFIX,
    POWERBI_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities.powerbi import PowerBIDataset


def create_pbi_agent(
    llm: BaseLanguageModel,
    toolkit: Optional[PowerBIToolkit] = None,
    powerbi: Optional[PowerBIDataset] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = POWERBI_PREFIX,
    suffix: str = POWERBI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Power BI agent from an LLM and tools.

    Either a ready-made toolkit or a PowerBIDataset must be supplied; when
    only a dataset is given, a toolkit is built around it using this LLM.
    """
    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    pbi_tools = toolkit.get_tools()
    tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names

    # The prefix template is formatted twice: first with top_k, then tables.
    prompt = ZeroShotAgent.create_prompt(
        pbi_tools,
        prefix=prefix.format(top_k=top_k).format(tables=tables),
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,  # type: ignore
        verbose=verbose,
    )
    agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[t.name for t in pbi_tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=pbi_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/base.html
2158dc18d256-0
"""Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional, Union

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.powerbi.prompt import (
    QUESTION_TO_QUERY_BASE,
    SINGLE_QUESTION_TO_QUERY,
    USER_INPUT,
)
from langchain.tools.powerbi.tool import (
    InfoPowerBITool,
    ListPowerBITool,
    QueryPowerBITool,
)
from langchain.utilities.powerbi import PowerBIDataset


class PowerBIToolkit(BaseToolkit):
    """Toolkit for interacting with Power BI dataset."""

    powerbi: PowerBIDataset = Field(exclude=True)
    llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True)
    # Optional few-shot examples injected into the query-generation prompt.
    examples: Optional[str] = None
    max_iterations: int = 5
    callback_manager: Optional[BaseCallbackManager] = None
    output_token_limit: Optional[int] = None
    tiktoken_model_name: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        query_tool = QueryPowerBITool(
            llm_chain=self._get_chain(),
            powerbi=self.powerbi,
            examples=self.examples,
            max_iterations=self.max_iterations,
            output_token_limit=self.output_token_limit,
            tiktoken_model_name=self.tiktoken_model_name,
        )
        return [
            query_tool,
            InfoPowerBITool(powerbi=self.powerbi),
            ListPowerBITool(powerbi=self.powerbi),
        ]

    def _get_chain(self) -> LLMChain:
        """Construct the chain based on the callback manager and model type."""
        manager = self.callback_manager if self.callback_manager else None
        # NOTE(review): this branch fires whenever the llm is a
        # BaseLanguageModel; if BaseChatModel also subclasses it, chat models
        # would take the single-prompt path too — confirm intended dispatch.
        if isinstance(self.llm, BaseLanguageModel):
            single_prompt = PromptTemplate(
                template=SINGLE_QUESTION_TO_QUERY,
                input_variables=["tool_input", "tables", "schemas", "examples"],
            )
            return LLMChain(
                llm=self.llm,
                callback_manager=manager,
                prompt=single_prompt,
            )

        # Chat-model path: system prompt carries the schema context, the
        # human prompt carries the user's question.
        chat_messages = [
            SystemMessagePromptTemplate(
                prompt=PromptTemplate(
                    template=QUESTION_TO_QUERY_BASE,
                    input_variables=["tables", "schemas", "examples"],
                )
            ),
            HumanMessagePromptTemplate(
                prompt=PromptTemplate(
                    template=USER_INPUT,
                    input_variables=["tool_input"],
                )
            ),
        ]
        return LLMChain(
            llm=self.llm,
            callback_manager=manager,
            prompt=ChatPromptTemplate.from_messages(chat_messages),
        )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/toolkit.html
f856b847b6ed-0
"""Toolkit exposing Gmail operations through a shared API resource."""
from __future__ import annotations

from typing import TYPE_CHECKING, List

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.gmail.create_draft import GmailCreateDraft
from langchain.tools.gmail.get_message import GmailGetMessage
from langchain.tools.gmail.get_thread import GmailGetThread
from langchain.tools.gmail.search import GmailSearch
from langchain.tools.gmail.send_message import GmailSendMessage
from langchain.tools.gmail.utils import build_resource_service

if TYPE_CHECKING:
    # This is for linting and IDE typehints
    from googleapiclient.discovery import Resource
else:
    try:
        # We do this so pydantic can resolve the types when instantiating
        from googleapiclient.discovery import Resource
    except ImportError:
        pass

SCOPES = ["https://mail.google.com/"]


class GmailToolkit(BaseToolkit):
    """Toolkit for interacting with Gmail."""

    # Lazily built Google API client shared by every tool instance.
    api_resource: Resource = Field(default_factory=build_resource_service)

    class Config:
        """Pydantic config."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        tool_classes = [
            GmailCreateDraft,
            GmailSendMessage,
            GmailSearch,
            GmailGetMessage,
            GmailGetThread,
        ]
        return [tool_cls(api_resource=self.api_resource) for tool_cls in tool_classes]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/gmail/toolkit.html
2f822648ea7a-0
"""Zapier Toolkit."""
from typing import List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper


class ZapierToolkit(BaseToolkit):
    """Zapier Toolkit."""

    tools: List[BaseTool] = []

    @staticmethod
    def _actions_to_tools(
        actions: List[dict], zapier_nla_wrapper: ZapierNLAWrapper
    ) -> List[BaseTool]:
        """Turn listed NLA actions into run-action tools (shared helper)."""
        return [
            ZapierNLARunAction(
                action_id=action["id"],
                zapier_description=action["description"],
                params_schema=action["params"],
                api_wrapper=zapier_nla_wrapper,
            )
            for action in actions
        ]

    @classmethod
    def from_zapier_nla_wrapper(
        cls, zapier_nla_wrapper: ZapierNLAWrapper
    ) -> "ZapierToolkit":
        """Create a toolkit from a ZapierNLAWrapper."""
        actions = zapier_nla_wrapper.list()
        return cls(tools=cls._actions_to_tools(actions, zapier_nla_wrapper))

    @classmethod
    async def async_from_zapier_nla_wrapper(
        cls, zapier_nla_wrapper: ZapierNLAWrapper
    ) -> "ZapierToolkit":
        """Create a toolkit from a ZapierNLAWrapper."""
        actions = await zapier_nla_wrapper.alist()
        return cls(tools=cls._actions_to_tools(actions, zapier_nla_wrapper))

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/zapier/toolkit.html
13a72f5be346-0
"""Tool for interacting with a single API with natural language definition."""
from typing import Any, Optional

from langchain.agents.tools import Tool
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.openapi.utils.api_models import APIOperation
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain.utilities.requests import Requests


class NLATool(Tool):
    """Natural Language API Tool."""

    @classmethod
    def from_open_api_endpoint_chain(
        cls, chain: OpenAPIEndpointChain, api_title: str
    ) -> "NLATool":
        """Convert an endpoint chain to an API endpoint tool."""
        operation = chain.api_operation
        # Tool name encodes both the API and the specific operation.
        expanded_name = f'{api_title.replace(" ", "_")}.{operation.operation_id}'
        description = (
            f"I'm an AI from {api_title}. Instruct what you want,"
            " and I'll assist via an API with description:"
            f" {operation.description}"
        )
        return cls(name=expanded_name, func=chain.run, description=description)

    @classmethod
    def from_llm_and_method(
        cls,
        llm: BaseLanguageModel,
        path: str,
        method: str,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        return_intermediate_steps: bool = False,
        **kwargs: Any,
    ) -> "NLATool":
        """Instantiate the tool from the specified path and method."""
        operation = APIOperation.from_openapi_spec(spec, path, method)
        endpoint_chain = OpenAPIEndpointChain.from_api_operation(
            operation,
            llm,
            requests=requests,
            verbose=verbose,
            return_intermediate_steps=return_intermediate_steps,
            **kwargs,
        )
        return cls.from_open_api_endpoint_chain(endpoint_chain, spec.info.title)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/nla/tool.html
421db8f2404b-0
"""Natural-language API toolkit built from OpenAPI specs or AI plugins."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.agent_toolkits.nla.tool import NLATool
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain.tools.plugin import AIPlugin
from langchain.utilities.requests import Requests


class NLAToolkit(BaseToolkit):
    """Natural Language API Toolkit."""

    nla_tools: Sequence[NLATool] = Field(...)
    """List of API Endpoint Tools."""

    def get_tools(self) -> List[BaseTool]:
        """Get the tools for all the API operations."""
        return list(self.nla_tools)

    @staticmethod
    def _get_http_operation_tools(
        llm: BaseLanguageModel,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> List[NLATool]:
        """Get the tools for all the API operations."""
        if not spec.paths:
            return []
        # One NLATool per (path, method) pair declared by the spec.
        return [
            NLATool.from_llm_and_method(
                llm=llm,
                path=path,
                method=method,
                spec=spec,
                requests=requests,
                verbose=verbose,
                **kwargs,
            )
            for path in spec.paths
            for method in spec.get_methods_for_path(path)
        ]

    @classmethod
    def from_llm_and_spec(
        cls,
        llm: BaseLanguageModel,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit by creating tools for each operation."""
        endpoint_tools = cls._get_http_operation_tools(
            llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
        )
        return cls(nla_tools=endpoint_tools)

    @classmethod
    def from_llm_and_url(
        cls,
        llm: BaseLanguageModel,
        open_api_url: str,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an OpenAPI Spec URL"""
        return cls.from_llm_and_spec(
            llm=llm,
            spec=OpenAPISpec.from_url(open_api_url),
            requests=requests,
            verbose=verbose,
            **kwargs,
        )

    @classmethod
    def from_llm_and_ai_plugin(
        cls,
        llm: BaseLanguageModel,
        ai_plugin: AIPlugin,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an OpenAPI Spec URL"""
        spec = OpenAPISpec.from_url(ai_plugin.api.url)
        # TODO: Merge optional Auth information with the `requests` argument
        return cls.from_llm_and_spec(
            llm=llm,
            spec=spec,
            requests=requests,
            verbose=verbose,
            **kwargs,
        )

    @classmethod
    def from_llm_and_ai_plugin_url(
        cls,
        llm: BaseLanguageModel,
        ai_plugin_url: str,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an OpenAPI Spec URL"""
        plugin = AIPlugin.from_url(ai_plugin_url)
        return cls.from_llm_and_ai_plugin(
            llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs
        )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/nla/toolkit.html
6ff438d79f5f-0
"""Agent for working with xorbits objects."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.xorbits.prompt import (
    NP_PREFIX,
    NP_SUFFIX,
    PD_PREFIX,
    PD_SUFFIX,
)
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.tools.python.tool import PythonAstREPLTool


def create_xorbits_agent(
    llm: BaseLLM,
    data: Any,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = "",
    suffix: str = "",
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a xorbits agent from an LLM and dataframe.

    Accepts either a xorbits DataFrame or a xorbits ndarray; a preview of
    the data is baked into the prompt so the LLM can see its structure.
    """
    try:
        from xorbits import numpy as np
        from xorbits import pandas as pd
    except ImportError:
        raise ImportError(
            "Xorbits package not installed, please install with `pip install xorbits`"
        )

    if not isinstance(data, (pd.DataFrame, np.ndarray)):
        raise ValueError(
            f"Expected Xorbits DataFrame or ndarray object, got {type(data)}"
        )

    if input_variables is None:
        input_variables = ["data", "input", "agent_scratchpad"]
    tools = [PythonAstREPLTool(locals={"data": data})]

    # Choose the prompt family and data preview by object kind; an empty
    # prefix/suffix argument means "use the default for that kind".
    if isinstance(data, pd.DataFrame):
        default_prefix, default_suffix = PD_PREFIX, PD_SUFFIX
        preview = str(data.head())
    else:
        default_prefix, default_suffix = NP_PREFIX, NP_SUFFIX
        preview = str(data[: len(data) // 2])

    base_prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=default_prefix if prefix == "" else prefix,
        suffix=default_suffix if suffix == "" else suffix,
        input_variables=input_variables,
    )
    prompt_with_data = base_prompt.partial(data=preview)

    chain = LLMChain(
        llm=llm,
        prompt=prompt_with_data,
        callback_manager=callback_manager,
    )
    agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[t.name for t in tools],
        callback_manager=callback_manager,
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/xorbits/base.html
1d13fcfdc162-0
"""VectorStore agent."""
from typing import Any, Dict, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel


def _build_zero_shot_executor(
    llm: BaseLanguageModel,
    tools: list,
    prefix: str,
    callback_manager: Optional[BaseCallbackManager],
    verbose: bool,
    agent_executor_kwargs: Optional[Dict[str, Any]],
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Shared assembly: ZeroShotAgent over the given tools and prefix."""
    chain = LLMChain(
        llm=llm,
        prompt=ZeroShotAgent.create_prompt(tools, prefix=prefix),
        callback_manager=callback_manager,
    )
    agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[t.name for t in tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )


def create_vectorstore_agent(
    llm: BaseLanguageModel,
    toolkit: VectorStoreToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = PREFIX,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a VectorStore agent from an LLM and tools."""
    return _build_zero_shot_executor(
        llm,
        toolkit.get_tools(),
        prefix,
        callback_manager,
        verbose,
        agent_executor_kwargs,
        **kwargs,
    )


def create_vectorstore_router_agent(
    llm: BaseLanguageModel,
    toolkit: VectorStoreRouterToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = ROUTER_PREFIX,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a VectorStore router agent from an LLM and tools."""
    return _build_zero_shot_executor(
        llm,
        toolkit.get_tools(),
        prefix,
        callback_manager,
        verbose,
        agent_executor_kwargs,
        **kwargs,
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/vectorstore/base.html
a6f9e4056d9a-0
"""Toolkit for interacting with a vector store."""
from typing import List

from pydantic import BaseModel, Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.llms.openai import OpenAI
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
    VectorStoreQATool,
    VectorStoreQAWithSourcesTool,
)
from langchain.vectorstores.base import VectorStore


class VectorStoreInfo(BaseModel):
    """Information about a VectorStore."""

    vectorstore: VectorStore = Field(exclude=True)
    name: str
    description: str

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True


class VectorStoreToolkit(BaseToolkit):
    """Toolkit for interacting with a Vector Store."""

    vectorstore_info: VectorStoreInfo = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        info = self.vectorstore_info
        # Plain QA tool plus a variant that also returns sources.
        plain_qa = VectorStoreQATool(
            name=info.name,
            description=VectorStoreQATool.get_description(
                info.name, info.description
            ),
            vectorstore=info.vectorstore,
            llm=self.llm,
        )
        sourced_qa = VectorStoreQAWithSourcesTool(
            name=f"{info.name}_with_sources",
            description=VectorStoreQAWithSourcesTool.get_description(
                info.name, info.description
            ),
            vectorstore=info.vectorstore,
            llm=self.llm,
        )
        return [plain_qa, sourced_qa]


class VectorStoreRouterToolkit(BaseToolkit):
    """Toolkit for routing between Vector Stores."""

    vectorstores: List[VectorStoreInfo] = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        # One QA tool per registered store; routing happens via tool choice.
        return [
            VectorStoreQATool(
                name=info.name,
                description=VectorStoreQATool.get_description(
                    info.name, info.description
                ),
                vectorstore=info.vectorstore,
                llm=self.llm,
            )
            for info in self.vectorstores
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/vectorstore/toolkit.html
62a5557c1c3c-0
"""Agent for working with pandas objects."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.spark.prompt import PREFIX, SUFFIX
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.tools.python.tool import PythonAstREPLTool


def _validate_spark_df(df: Any) -> bool:
    """Return True when ``df`` is a classic (non-Connect) Spark DataFrame."""
    try:
        from pyspark.sql import DataFrame as SparkLocalDataFrame
    except ImportError:
        return False
    return isinstance(df, SparkLocalDataFrame)


def _validate_spark_connect_df(df: Any) -> bool:
    """Return True when ``df`` is a Spark Connect DataFrame."""
    try:
        from pyspark.sql.connect.dataframe import DataFrame as SparkConnectDataFrame
    except ImportError:
        return False
    return isinstance(df, SparkConnectDataFrame)


def create_spark_dataframe_agent(
    llm: BaseLLM,
    df: Any,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = PREFIX,
    suffix: str = SUFFIX,
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Spark agent from an LLM and dataframe."""
    if not (_validate_spark_df(df) or _validate_spark_connect_df(df)):
        # NOTE(review): this branch is also taken when pyspark *is* installed
        # but ``df`` is not a Spark DataFrame, in which case the message below
        # is misleading — confirm before changing the exception/message.
        raise ImportError("Spark is not installed. run `pip install pyspark`.")
    if input_variables is None:
        input_variables = ["df", "input", "agent_scratchpad"]
    repl_tools = [PythonAstREPLTool(locals={"df": df})]
    base_prompt = ZeroShotAgent.create_prompt(
        repl_tools, prefix=prefix, suffix=suffix, input_variables=input_variables
    )
    # Bake the first row of the dataframe into the prompt as context.
    prompt_with_df = base_prompt.partial(df=str(df.first()))
    chain = LLMChain(
        llm=llm,
        prompt=prompt_with_df,
        callback_manager=callback_manager,
    )
    zero_shot_agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[tool.name for tool in repl_tools],
        callback_manager=callback_manager,
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=zero_shot_agent,
        tools=repl_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark/base.html
d9072948678e-0
from typing import Any, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models.openai import ChatOpenAI
from langchain.memory.token_buffer import ConversationTokenBufferMemory
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.memory import BaseMemory
from langchain.schema.messages import SystemMessage
from langchain.tools.base import BaseTool


def _get_default_system_message() -> SystemMessage:
    """Fallback system message used when the caller supplies none."""
    return SystemMessage(
        content=(
            "Do your best to answer the questions. "
            "Feel free to use any tools available to look up "
            "relevant information, only if necessary"
        )
    )


def create_conversational_retrieval_agent(
    llm: BaseLanguageModel,
    tools: List[BaseTool],
    remember_intermediate_steps: bool = True,
    memory_key: str = "chat_history",
    system_message: Optional[SystemMessage] = None,
    verbose: bool = False,
    max_token_limit: int = 2000,
    **kwargs: Any
) -> AgentExecutor:
    """A convenience method for creating a conversational retrieval agent.

    Args:
        llm: The language model to use, should be ChatOpenAI.
        tools: A list of tools the agent has access to.
        remember_intermediate_steps: Whether the agent should remember
            intermediate steps (prior action/observation pairs from previous
            questions). Remembering them lets the agent reuse relevant
            information for follow-ups, at the cost of more tokens.
        memory_key: The name of the memory key in the prompt.
        system_message: The system message to use. By default, a basic one
            will be used.
        verbose: Whether or not the final AgentExecutor should be verbose.
            Defaults to False.
        max_token_limit: The max number of tokens to keep around in memory.
            Defaults to 2000.

    Returns:
        An agent executor initialized appropriately.
    """
    if not isinstance(llm, ChatOpenAI):
        raise ValueError("Only supported with ChatOpenAI models.")

    # Two memory flavors: one that also stores action/observation steps,
    # one that only keeps the conversation itself.
    memory: BaseMemory
    if remember_intermediate_steps:
        memory = AgentTokenBufferMemory(
            memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
        )
    else:
        memory = ConversationTokenBufferMemory(
            memory_key=memory_key,
            return_messages=True,
            output_key="output",
            llm=llm,
            max_token_limit=max_token_limit,
        )

    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=system_message or _get_default_system_message(),
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
    )
    functions_agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
    return AgentExecutor(
        agent=functions_agent,
        tools=tools,
        memory=memory,
        verbose=verbose,
        return_intermediate_steps=remember_intermediate_steps,
        **kwargs
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.html
adf935124e84-0
from langchain.schema import BaseRetriever
from langchain.tools import Tool


def create_retriever_tool(
    retriever: BaseRetriever, name: str, description: str
) -> Tool:
    """Create a tool to do retrieval of documents.

    Args:
        retriever: The retriever to use for the retrieval
        name: The name for the tool. This will be passed to the language
            model, so should be unique and somewhat descriptive.
        description: The description for the tool. This will be passed to the
            language model, so should be descriptive.

    Returns:
        Tool class to pass to an agent
    """
    return Tool(
        name=name,
        description=description,
        func=retriever.get_relevant_documents,
        # Also wire the async retrieval path so the tool works inside async
        # agent executors instead of only having a sync implementation.
        coroutine=retriever.aget_relevant_documents,
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/conversational_retrieval/tool.html
d050c7c8a3af-0
"""GitHub Toolkit."""
from typing import Dict, List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.github.prompt import (
    COMMENT_ON_ISSUE_PROMPT,
    CREATE_FILE_PROMPT,
    CREATE_PULL_REQUEST_PROMPT,
    DELETE_FILE_PROMPT,
    GET_ISSUE_PROMPT,
    GET_ISSUES_PROMPT,
    READ_FILE_PROMPT,
    UPDATE_FILE_PROMPT,
)
from langchain.tools.github.tool import GitHubAction
from langchain.utilities.github import GitHubAPIWrapper


class GitHubToolkit(BaseToolkit):
    """GitHub Toolkit."""

    tools: List[BaseTool] = []

    @classmethod
    def from_github_api_wrapper(
        cls, github_api_wrapper: GitHubAPIWrapper
    ) -> "GitHubToolkit":
        """Build a toolkit with one GitHubAction per supported operation."""
        # (mode, display name, prompt/description) for every supported action.
        operations = [
            ("get_issues", "Get Issues", GET_ISSUES_PROMPT),
            ("get_issue", "Get Issue", GET_ISSUE_PROMPT),
            ("comment_on_issue", "Comment on Issue", COMMENT_ON_ISSUE_PROMPT),
            (
                "create_pull_request",
                "Create Pull Request",
                CREATE_PULL_REQUEST_PROMPT,
            ),
            ("create_file", "Create File", CREATE_FILE_PROMPT),
            ("read_file", "Read File", READ_FILE_PROMPT),
            ("update_file", "Update File", UPDATE_FILE_PROMPT),
            ("delete_file", "Delete File", DELETE_FILE_PROMPT),
        ]
        actions = [
            GitHubAction(
                name=name,
                description=prompt,
                mode=mode,
                api_wrapper=github_api_wrapper,
            )
            for mode, name, prompt in operations
        ]
        return cls(tools=actions)

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/github/toolkit.html
11b8e9bb2e26-0
"""Json agent."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel


def create_json_agent(
    llm: BaseLanguageModel,
    toolkit: JsonToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = JSON_PREFIX,
    suffix: str = JSON_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a json agent from an LLM and tools."""
    json_tools = toolkit.get_tools()
    json_prompt = ZeroShotAgent.create_prompt(
        json_tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    chain = LLMChain(
        llm=llm,
        prompt=json_prompt,
        callback_manager=callback_manager,
    )
    zero_shot_agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[tool.name for tool in json_tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=zero_shot_agent,
        tools=json_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/json/base.html
daab9c7194ea-0
Source code for langchain.agents.agent_toolkits.json.toolkit from __future__ import annotations from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec [docs]class JsonToolkit(BaseToolkit): """Toolkit for interacting with a JSON spec.""" spec: JsonSpec [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ JsonListKeysTool(spec=self.spec), JsonGetValueTool(spec=self.spec), ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/json/toolkit.html
c8f09dfe4f2c-0
"""OpenAPI spec agent."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.prompt import (
    OPENAPI_PREFIX,
    OPENAPI_SUFFIX,
)
from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel


def create_openapi_agent(
    llm: BaseLanguageModel,
    toolkit: OpenAPIToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = OPENAPI_PREFIX,
    suffix: str = OPENAPI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct an OpenAPI agent from an LLM and tools."""
    api_tools = toolkit.get_tools()
    api_prompt = ZeroShotAgent.create_prompt(
        api_tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    chain = LLMChain(
        llm=llm,
        prompt=api_prompt,
        callback_manager=callback_manager,
    )
    zero_shot_agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[tool.name for tool in api_tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=zero_shot_agent,
        tools=api_tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/base.html
3fd96de0f1a2-0
"""Quick and dirty representation for OpenAPI specs."""
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple, Union


def dereference_refs(spec_obj: dict, full_spec: dict) -> Union[dict, list]:
    """Try to substitute $refs.

    The goal is to get the complete docs for each endpoint in context for now.

    In the few OpenAPI specs I studied, $refs referenced models
    (or in OpenAPI terms, components) and could be nested.

    This code most likely misses lots of cases.
    """

    def _retrieve_ref_path(path: str, full_spec: dict) -> dict:
        # A $ref like "#/components/schemas/Pet" is a URI fragment: walk the
        # full spec one path component at a time.
        components = path.split("/")
        if components[0] != "#":
            raise RuntimeError(
                "All $refs I've seen so far are uri fragments (start with hash)."
            )
        out = full_spec
        for component in components[1:]:
            out = out[component]
        return out

    def _dereference_refs(
        obj: Union[dict, list], stop: bool = False
    ) -> Union[dict, list]:
        if stop:
            return obj
        obj_out: Dict[str, Any] = {}
        if isinstance(obj, dict):
            for k, v in obj.items():
                if k == "$ref":
                    # A "$ref" key replaces the whole object with its target;
                    # stop=True => don't dereference recursively.
                    return _dereference_refs(
                        _retrieve_ref_path(v, full_spec), stop=True
                    )
                elif isinstance(v, list):
                    obj_out[k] = [_dereference_refs(el) for el in v]
                elif isinstance(v, dict):
                    obj_out[k] = _dereference_refs(v)
                else:
                    obj_out[k] = v
            return obj_out
        elif isinstance(obj, list):
            return [_dereference_refs(el) for el in obj]
        else:
            return obj

    return _dereference_refs(spec_obj)


@dataclass(frozen=True)
class ReducedOpenAPISpec:
    """Reduced spec: servers, top-level description, and per-endpoint docs."""

    servers: List[dict]
    description: str
    # Each endpoint is ("METHOD /route", description-or-None, docs dict).
    endpoints: List[Tuple[str, str, dict]]


def reduce_openapi_spec(spec: dict, dereference: bool = True) -> ReducedOpenAPISpec:
    """Simplify/distill/minify a spec somehow.

    I want a smaller target for retrieval and (more importantly) I want smaller
    results from retrieval. I was hoping https://openapi.tools/ would have some
    useful bits to this end, but doesn't seem so.

    Args:
        spec: A parsed OpenAPI spec (must have "paths", "servers", "info").
        dereference: Whether to substitute $refs into each endpoint's docs.
            Note this blows up the size of the per-endpoint docs.
    """
    # 1. Consider only get, post, patch, delete endpoints.
    endpoints = [
        (f"{operation_name.upper()} {route}", docs.get("description"), docs)
        for route, operation in spec["paths"].items()
        for operation_name, docs in operation.items()
        if operation_name in ["get", "post", "patch", "delete"]
    ]

    # 2. Replace any refs so that complete docs are retrieved.
    # Note: probably want to do this post-retrieval, it blows up the size
    # of the spec.
    if dereference:
        endpoints = [
            (name, description, dereference_refs(docs, spec))
            for name, description, docs in endpoints
        ]

    # 3. Strip docs down to required request args + happy path response.
    def reduce_endpoint_docs(docs: dict) -> dict:
        out = {}
        if docs.get("description"):
            out["description"] = docs.get("description")
        if docs.get("parameters"):
            out["parameters"] = [
                parameter
                for parameter in docs.get("parameters", [])
                if parameter.get("required")
            ]
        # Use .get() so an operation without a "responses" section doesn't
        # raise a KeyError (previously: docs["responses"]).
        if "200" in docs.get("responses", {}):
            out["responses"] = docs["responses"]["200"]
        if docs.get("requestBody"):
            out["requestBody"] = docs.get("requestBody")
        return out

    endpoints = [
        (name, description, reduce_endpoint_docs(docs))
        for name, description, docs in endpoints
    ]
    return ReducedOpenAPISpec(
        servers=spec["servers"],
        description=spec["info"].get("description", ""),
        endpoints=endpoints,
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/spec.html
f62169aaf50d-0
"""Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from functools import partial
from typing import Any, Callable, Dict, List, Optional

import yaml
from pydantic import Field

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.planner_prompt import (
    API_CONTROLLER_PROMPT,
    API_CONTROLLER_TOOL_DESCRIPTION,
    API_CONTROLLER_TOOL_NAME,
    API_ORCHESTRATOR_PROMPT,
    API_PLANNER_PROMPT,
    API_PLANNER_TOOL_DESCRIPTION,
    API_PLANNER_TOOL_NAME,
    PARSING_DELETE_PROMPT,
    PARSING_GET_PROMPT,
    PARSING_PATCH_PROMPT,
    PARSING_POST_PROMPT,
    REQUESTS_DELETE_TOOL_DESCRIPTION,
    REQUESTS_GET_TOOL_DESCRIPTION,
    REQUESTS_PATCH_TOOL_DESCRIPTION,
    REQUESTS_POST_TOOL_DESCRIPTION,
)
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool
from langchain.utilities.requests import RequestsWrapper

#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
"""Maximum length of the response to be returned."""


def _get_default_llm_chain(prompt: BasePromptTemplate) -> LLMChain:
    """Build an LLMChain over ``prompt`` using a default OpenAI LLM."""
    return LLMChain(
        llm=OpenAI(),
        prompt=prompt,
    )


def _get_default_llm_chain_factory(
    prompt: BasePromptTemplate,
) -> Callable[[], LLMChain]:
    """Returns a default LLMChain factory."""
    return partial(_get_default_llm_chain, prompt)


class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
    """Requests GET tool with LLM-instructed extraction of truncated responses."""

    name = "requests_get"
    """Tool name."""
    description = REQUESTS_GET_TOOL_DESCRIPTION
    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        """Issue the GET described by the JSON in ``text`` and summarize it.

        ``text`` must be a JSON object with "url", optional "params", and
        "output_instructions".
        """
        # json.loads raises json.JSONDecodeError on malformed input; the
        # previous try/except only re-raised it, so let it propagate directly.
        data = json.loads(text)
        data_params = data.get("params")
        response = self.requests_wrapper.get(data["url"], params=data_params)
        # Truncate bluntly to keep a single inference step (see module note).
        response = response[: self.response_length]
        return self.llm_chain.predict(
            response=response, instructions=data["output_instructions"]
        ).strip()

    async def _arun(self, text: str) -> str:
        """Async execution is not supported."""
        raise NotImplementedError()


class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
    """Requests POST tool with LLM-instructed extraction of truncated responses."""

    name = "requests_post"
    """Tool name."""
    description = REQUESTS_POST_TOOL_DESCRIPTION
    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        """Issue the POST described by the JSON in ``text`` and summarize it."""
        # Malformed JSON propagates as json.JSONDecodeError (was a no-op
        # try/except re-raise).
        data = json.loads(text)
        response = self.requests_wrapper.post(data["url"], data["data"])
        response = response[: self.response_length]
        return self.llm_chain.predict(
            response=response, instructions=data["output_instructions"]
        ).strip()

    async def _arun(self, text: str) -> str:
        """Async execution is not supported."""
        raise NotImplementedError()


class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
    """Requests PATCH tool with LLM-instructed extraction of truncated responses."""

    name = "requests_patch"
    """Tool name."""
    description = REQUESTS_PATCH_TOOL_DESCRIPTION
    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        """Issue the PATCH described by the JSON in ``text`` and summarize it."""
        # Malformed JSON propagates as json.JSONDecodeError (was a no-op
        # try/except re-raise).
        data = json.loads(text)
        response = self.requests_wrapper.patch(data["url"], data["data"])
        response = response[: self.response_length]
        return self.llm_chain.predict(
            response=response, instructions=data["output_instructions"]
        ).strip()

    async def _arun(self, text: str) -> str:
        """Async execution is not supported."""
        raise NotImplementedError()


class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
    """A tool that sends a DELETE request and parses the response."""

    name = "requests_delete"
    """The name of the tool."""
    description = REQUESTS_DELETE_TOOL_DESCRIPTION
    """The description of the tool."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """The maximum length of the response."""
    llm_chain: LLMChain = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT)
    )
    """The LLM chain used to parse the response."""

    def _run(self, text: str) -> str:
        """Issue the DELETE described by the JSON in ``text`` and summarize it."""
        # Malformed JSON propagates as json.JSONDecodeError (was a no-op
        # try/except re-raise).
        data = json.loads(text)
        response = self.requests_wrapper.delete(data["url"])
        response = response[: self.response_length]
        return self.llm_chain.predict(
            response=response, instructions=data["output_instructions"]
        ).strip()

    async def _arun(self, text: str) -> str:
        """Async execution is not supported."""
        raise NotImplementedError()


#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
    api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
    """Expose the planner LLM chain as a Tool that knows every endpoint."""
    endpoint_descriptions = [
        f"{name} {description}" for name, description, _ in api_spec.endpoints
    ]
    prompt = PromptTemplate(
        template=API_PLANNER_PROMPT,
        input_variables=["query"],
        partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    tool = Tool(
        name=API_PLANNER_TOOL_NAME,
        description=API_PLANNER_TOOL_DESCRIPTION,
        func=chain.run,
    )
    return tool


def _create_api_controller_agent(
    api_url: str,
    api_docs: str,
    requests_wrapper: RequestsWrapper,
    llm: BaseLanguageModel,
) -> AgentExecutor:
    """Build a ZeroShotAgent with GET/POST parsing tools scoped to one API."""
    get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
    post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
    tools: List[BaseTool] = [
        RequestsGetToolWithParsing(
            requests_wrapper=requests_wrapper, llm_chain=get_llm_chain
        ),
        RequestsPostToolWithParsing(
            requests_wrapper=requests_wrapper, llm_chain=post_llm_chain
        ),
    ]
    prompt = PromptTemplate(
        template=API_CONTROLLER_PROMPT,
        input_variables=["input", "agent_scratchpad"],
        partial_variables={
            "api_url": api_url,
            "api_docs": api_docs,
            "tool_names": ", ".join([tool.name for tool in tools]),
            "tool_descriptions": "\n".join(
                [f"{tool.name}: {tool.description}" for tool in tools]
            ),
        },
    )
    agent = ZeroShotAgent(
        llm_chain=LLMChain(llm=llm, prompt=prompt),
        allowed_tools=[tool.name for tool in tools],
    )
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)


def _create_api_controller_tool(
    api_spec: ReducedOpenAPISpec,
    requests_wrapper: RequestsWrapper,
    llm: BaseLanguageModel,
) -> Tool:
    """Expose controller as a tool.

    The tool is invoked with a plan from the planner, and dynamically
    creates a controller agent with relevant documentation only
    to constrain the context.
    """
    base_url = api_spec.servers[0]["url"]  # TODO: do better.

    def _create_and_run_api_controller_agent(plan_str: str) -> str:
        # Extract "<METHOD> /<route>" pairs from the plan text.
        # NOTE(review): the trailing "*" makes the route group optional, so a
        # method token with no "/route" after it matches with an empty route,
        # which then fails the lookup below — presumably unintended; confirm
        # before tightening the pattern.
        pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*"
        matches = re.findall(pattern, plan_str)
        endpoint_names = [
            "{method} {route}".format(method=method, route=route.split("?")[0])
            for method, route in matches
        ]
        endpoint_docs_by_name = {name: docs for name, _, docs in api_spec.endpoints}
        docs_str = ""
        for endpoint_name in endpoint_names:
            docs = endpoint_docs_by_name.get(endpoint_name)
            if not docs:
                raise ValueError(f"{endpoint_name} endpoint does not exist.")
            docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"

        agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
        return agent.run(plan_str)

    return Tool(
        name=API_CONTROLLER_TOOL_NAME,
        func=_create_and_run_api_controller_agent,
        description=API_CONTROLLER_TOOL_DESCRIPTION,
    )


def create_openapi_agent(
    api_spec: ReducedOpenAPISpec,
    requests_wrapper: RequestsWrapper,
    llm: BaseLanguageModel,
    shared_memory: Optional[ReadOnlySharedMemory] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    verbose: bool = True,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Instantiate OpenAI API planner and controller for a given spec.

    Inject credentials via requests_wrapper.

    We use a top-level "orchestrator" agent to invoke the planner and
    controller, rather than a top-level planner
    that invokes a controller with its plan. This is to keep the planner simple.
    """
    tools = [
        _create_api_planner_tool(api_spec, llm),
        _create_api_controller_tool(api_spec, requests_wrapper, llm),
    ]
    prompt = PromptTemplate(
        template=API_ORCHESTRATOR_PROMPT,
        input_variables=["input", "agent_scratchpad"],
        partial_variables={
            "tool_names": ", ".join([tool.name for tool in tools]),
            "tool_descriptions": "\n".join(
                [f"{tool.name}: {tool.description}" for tool in tools]
            ),
        },
    )
    agent = ZeroShotAgent(
        llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory),
        allowed_tools=[tool.name for tool in tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/planner.html
b4f01bfe1743-0
Source code for langchain.agents.agent_toolkits.openapi.toolkit """Requests toolkit.""" from __future__ import annotations from typing import Any, List from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.agent_toolkits.json.base import create_json_agent from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.agent_toolkits.openapi.prompt import DESCRIPTION from langchain.agents.tools import Tool from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.json.tool import JsonSpec from langchain.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) from langchain.utilities.requests import TextRequestsWrapper [docs]class RequestsToolkit(BaseToolkit): """Toolkit for making REST requests.""" requests_wrapper: TextRequestsWrapper [docs] def get_tools(self) -> List[BaseTool]: """Return a list of tools.""" return [ RequestsGetTool(requests_wrapper=self.requests_wrapper), RequestsPostTool(requests_wrapper=self.requests_wrapper), RequestsPatchTool(requests_wrapper=self.requests_wrapper), RequestsPutTool(requests_wrapper=self.requests_wrapper), RequestsDeleteTool(requests_wrapper=self.requests_wrapper), ] [docs]class OpenAPIToolkit(BaseToolkit): """Toolkit for interacting with an OpenAPI API.""" json_agent: AgentExecutor requests_wrapper: TextRequestsWrapper [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" json_agent_tool = Tool( name="json_explorer", func=self.json_agent.run,
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/toolkit.html
b4f01bfe1743-1
name="json_explorer", func=self.json_agent.run, description=DESCRIPTION, ) request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper) return [*request_toolkit.get_tools(), json_agent_tool] [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, json_spec: JsonSpec, requests_wrapper: TextRequestsWrapper, **kwargs: Any, ) -> OpenAPIToolkit: """Create json agent from llm, then initialize.""" json_agent = create_json_agent(llm, JsonToolkit(spec=json_spec), **kwargs) return cls(json_agent=json_agent, requests_wrapper=requests_wrapper)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/toolkit.html
79e5854eea85-0
Source code for langchain.agents.agent_toolkits.sql.base """SQL agent.""" from typing import Any, Dict, List, Optional from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.agent_toolkits.sql.prompt import ( SQL_FUNCTIONS_SUFFIX, SQL_PREFIX, SQL_SUFFIX, ) from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, SystemMessage [docs]def create_sql_agent( llm: BaseLanguageModel, toolkit: SQLDatabaseToolkit, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = SQL_PREFIX, suffix: Optional[str] = None, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/base.html
79e5854eea85-1
**kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct an SQL agent from an LLM and tools.""" tools = toolkit.get_tools() prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k) agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix or SQL_SUFFIX, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) elif agent_type == AgentType.OPENAI_FUNCTIONS: messages = [ SystemMessage(content=prefix), HumanMessagePromptTemplate.from_template("{input}"), AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX), MessagesPlaceholder(variable_name="agent_scratchpad"), ] input_variables = ["input", "agent_scratchpad"] _prompt = ChatPromptTemplate(input_variables=input_variables, messages=messages) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose,
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/base.html
79e5854eea85-2
tools=tools, callback_manager=callback_manager, verbose=verbose, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/base.html
06ad740505e3-0
Source code for langchain.agents.agent_toolkits.sql.toolkit """Toolkit for interacting with an SQL database.""" from typing import List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.sql_database.tool import ( InfoSQLDatabaseTool, ListSQLDatabaseTool, QuerySQLCheckerTool, QuerySQLDataBaseTool, ) from langchain.utilities.sql_database import SQLDatabase [docs]class SQLDatabaseToolkit(BaseToolkit): """Toolkit for interacting with SQL databases.""" db: SQLDatabase = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) @property def dialect(self) -> str: """Return string representation of SQL dialect to use.""" return self.db.dialect class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" list_sql_database_tool = ListSQLDatabaseTool(db=self.db) info_sql_database_tool_description = ( "Input to this tool is a comma-separated list of tables, output is the " "schema and sample rows for those tables. " "Be sure that the tables actually exist by calling " f"{list_sql_database_tool.name} first! " "Example Input: 'table1, table2, table3'" ) info_sql_database_tool = InfoSQLDatabaseTool( db=self.db, description=info_sql_database_tool_description ) query_sql_database_tool_description = (
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/toolkit.html
06ad740505e3-1
) query_sql_database_tool_description = ( "Input to this tool is a detailed and correct SQL query, output is a " "result from the database. If the query is not correct, an error message " "will be returned. If an error is returned, rewrite the query, check the " "query, and try again. If you encounter an issue with Unknown column " f"'xxxx' in 'field list', using {info_sql_database_tool.name} " "to query the correct table fields." ) query_sql_database_tool = QuerySQLDataBaseTool( db=self.db, description=query_sql_database_tool_description ) query_sql_checker_tool_description = ( "Use this tool to double check if your query is correct before executing " "it. Always use this tool before executing a query with " f"{query_sql_database_tool.name}!" ) query_sql_checker_tool = QuerySQLCheckerTool( db=self.db, llm=self.llm, description=query_sql_checker_tool_description ) return [ query_sql_database_tool, info_sql_database_tool, list_sql_database_tool, query_sql_checker_tool, ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/toolkit.html
d0222ea7096e-0
Source code for langchain.agents.agent_toolkits.amadeus.toolkit from __future__ import annotations from typing import TYPE_CHECKING, List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.amadeus.closest_airport import AmadeusClosestAirport from langchain.tools.amadeus.flight_search import AmadeusFlightSearch from langchain.tools.amadeus.utils import authenticate if TYPE_CHECKING: from amadeus import Client [docs]class AmadeusToolkit(BaseToolkit): """Toolkit for interacting with Office365.""" client: Client = Field(default_factory=authenticate) class Config: """Pydantic config.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ AmadeusClosestAirport(), AmadeusFlightSearch(), ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/amadeus/toolkit.html
67bc67c9a133-0
Source code for langchain.agents.agent_toolkits.office365.toolkit from __future__ import annotations from typing import TYPE_CHECKING, List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.office365.create_draft_message import O365CreateDraftMessage from langchain.tools.office365.events_search import O365SearchEvents from langchain.tools.office365.messages_search import O365SearchEmails from langchain.tools.office365.send_event import O365SendEvent from langchain.tools.office365.send_message import O365SendMessage from langchain.tools.office365.utils import authenticate if TYPE_CHECKING: from O365 import Account [docs]class O365Toolkit(BaseToolkit): """Toolkit for interacting with Office 365.""" account: Account = Field(default_factory=authenticate) class Config: """Pydantic config.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ O365SearchEvents(), O365CreateDraftMessage(), O365SearchEmails(), O365SendEvent(), O365SendMessage(), ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/office365/toolkit.html
ba0b8d6b9dbd-0
Source code for langchain.agents.agent_toolkits.pandas.base """Agent for working with pandas objects.""" from typing import Any, Dict, List, Optional, Tuple from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.agent_toolkits.pandas.prompt import ( FUNCTIONS_WITH_DF, FUNCTIONS_WITH_MULTI_DF, MULTI_DF_PREFIX, MULTI_DF_PREFIX_FUNCTIONS, PREFIX, PREFIX_FUNCTIONS, SUFFIX_NO_DF, SUFFIX_WITH_DF, SUFFIX_WITH_MULTI_DF, ) from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import SystemMessage from langchain.tools.python.tool import PythonAstREPLTool def _get_multi_prompt( dfs: List[Any], prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: num_dfs = len(dfs) if suffix is not None: suffix_to_use = suffix include_dfs_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_MULTI_DF include_dfs_head = True else: suffix_to_use = SUFFIX_NO_DF
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-1
else: suffix_to_use = SUFFIX_NO_DF include_dfs_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad", "num_dfs"] if include_dfs_head: input_variables += ["dfs_head"] if prefix is None: prefix = MULTI_DF_PREFIX df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "dfs_head" in input_variables: dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=dfs_head) if "num_dfs" in input_variables: partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs)) return partial_prompt, tools def _get_single_prompt( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix include_df_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_DF include_df_head = True
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-2
suffix_to_use = SUFFIX_WITH_DF include_df_head = True else: suffix_to_use = SUFFIX_NO_DF include_df_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad"] if include_df_head: input_variables += ["df_head"] if prefix is None: prefix = PREFIX tools = [PythonAstREPLTool(locals={"df": df})] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "df_head" in input_variables: partial_prompt = partial_prompt.partial( df_head=str(df.head(number_of_head_rows).to_markdown()) ) return partial_prompt, tools def _get_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd pd.set_option("display.max_columns", None) except ImportError: raise ImportError( "pandas package not found, please install with `pip install pandas`" ) if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame):
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-3
for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_multi_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_single_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) def _get_functions_single_prompt( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: suffix_to_use = suffix_to_use.format( df_head=str(df.head(number_of_head_rows).to_markdown()) ) elif include_df_in_prompt: suffix_to_use = FUNCTIONS_WITH_DF.format( df_head=str(df.head(number_of_head_rows).to_markdown()) ) else: suffix_to_use = "" if prefix is None: prefix = PREFIX_FUNCTIONS tools = [PythonAstREPLTool(locals={"df": df})]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-4
tools = [PythonAstREPLTool(locals={"df": df})] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_multi_prompt( dfs: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: dfs_head = "\n\n".join( [d.head(number_of_head_rows).to_markdown() for d in dfs] ) suffix_to_use = suffix_to_use.format( dfs_head=dfs_head, ) elif include_df_in_prompt: dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format( dfs_head=dfs_head, ) else: suffix_to_use = "" if prefix is None: prefix = MULTI_DF_PREFIX_FUNCTIONS prefix = prefix.format(num_dfs=str(len(dfs))) df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-5
return prompt, tools def _get_functions_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd pd.set_option("display.max_columns", None) except ImportError: raise ImportError( "pandas package not found, please install with `pip install pandas`" ) if input_variables is not None: raise ValueError("`input_variables` is not supported at the moment.") if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_multi_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_single_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) [docs]def create_pandas_dataframe_agent(
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-6
) [docs]def create_pandas_dataframe_agent( llm: BaseLanguageModel, df: Any, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, number_of_head_rows: int = 5, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a pandas agent from an LLM and dataframe.""" agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt, tools = _get_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent( llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
ba0b8d6b9dbd-7
callback_manager=callback_manager, **kwargs, ) elif agent_type == AgentType.OPENAI_FUNCTIONS: _prompt, tools = _get_functions_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, number_of_head_rows=number_of_head_rows, ) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
55f6e020fda4-0
Source code for langchain.agents.chat.base from typing import Any, List, Optional, Sequence, Tuple from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.chat.output_parser import ChatOutputParser from langchain.agents.chat.prompt import ( FORMAT_INSTRUCTIONS, HUMAN_MESSAGE, SYSTEM_MESSAGE_PREFIX, SYSTEM_MESSAGE_SUFFIX, ) from langchain.agents.utils import validate_tools_single_input from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain.schema import AgentAction, BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool [docs]class ChatAgent(Agent): """Chat Agent.""" output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser) """Output parser for the agent.""" @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> str: agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError("agent_scratchpad should be of type string.") if agent_scratchpad: return ( f"This was your previous work "
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html
55f6e020fda4-1
return ( f"This was your previous work " f"(but I haven't seen any of it! I only see what " f"you return as final answer):\n{agent_scratchpad}" ) else: return agent_scratchpad @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ChatOutputParser() @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(class_name=cls.__name__, tools=tools) @property def _stop(self) -> List[str]: return ["Observation:"] [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, human_message: str = HUMAN_MESSAGE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, ) -> BasePromptTemplate: tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join( [ system_message_prefix, tool_strings, format_instructions, system_message_suffix, ] ) messages = [ SystemMessagePromptTemplate.from_template(template), HumanMessagePromptTemplate.from_template(human_message), ] if input_variables is None:
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html
55f6e020fda4-2
] if input_variables is None: input_variables = ["input", "agent_scratchpad"] return ChatPromptTemplate(input_variables=input_variables, messages=messages) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, human_message: str = HUMAN_MESSAGE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, system_message_prefix=system_message_prefix, system_message_suffix=system_message_suffix, human_message=human_message, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @property def _agent_type(self) -> str: raise ValueError
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html