| id (string, 14-15 chars) | text (string, 35-2.51k chars) | source (string, 61-154 chars) |
|---|---|---|
04835fe85928-3
|
f"a description must always be provided."
)
super()._validate_tools(tools)
[docs]class MRKLChain(AgentExecutor):
"""Chain that implements the MRKL system.
Example:
.. code-block:: python
from langchain import OpenAI, MRKLChain
from langchain.chains.mrkl.base import ChainConfig
llm = OpenAI(temperature=0)
chains = [ChainConfig(...), ...]
mrkl = MRKLChain.from_chains(llm=llm, chains=chains)
"""
[docs] @classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
) -> AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain
from langchain.chains.mrkl.base import ChainConfig
llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain(llm=llm)
chains = [
ChainConfig(
action_name = "Search",
action=search.search,
action_description="useful for searching"
),
ChainConfig(
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
|
04835fe85928-4
|
action_description="useful for searching"
),
ChainConfig(
action_name="Calculator",
action=llm_math_chain.run,
action_description="useful for doing math"
)
]
mrkl = MRKLChain.from_chains(llm, chains)
"""
tools = [
Tool(
name=c.action_name,
func=c.action,
description=c.action_description,
)
for c in chains
]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
|
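Under the hood, from_chains simply turns each ChainConfig into a Tool and delegates to ZeroShotAgent.from_llm_and_tools, so the equivalent manual construction looks roughly like the sketch below (assuming OpenAI and SerpAPI keys are configured; the tool callables mirror the docstring example above):

from langchain import LLMMathChain, OpenAI, SerpAPIWrapper
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool

llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain(llm=llm)
tools = [
    Tool(name="Search", func=search.run, description="useful for searching"),
    Tool(name="Calculator", func=llm_math_chain.run, description="useful for doing math"),
]
# This is exactly what MRKLChain.from_chains does internally.
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
mrkl = AgentExecutor(agent=agent, tools=tools)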
34ca081a3eeb-0
|
Source code for langchain.agents.openai_functions_multi_agent.base
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import root_validator
from langchain.agents import BaseMultiActionAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain.schema import (
AgentAction,
AgentFinish,
AIMessage,
BaseMessage,
FunctionMessage,
OutputParserException,
SystemMessage,
)
from langchain.tools import BaseTool
@dataclass
class _FunctionsAgentAction(AgentAction):
message_log: List[BaseMessage]
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, _FunctionsAgentAction):
return agent_action.message_log + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-1
|
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def _format_intermediate_steps(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
"""Format intermediate steps.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
"""
messages = []
for intermediate_step in intermediate_steps:
agent_action, observation = intermediate_step
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_call = message.additional_kwargs["function_call"]
try:
tools = json.loads(function_call["arguments"])["actions"]
except JSONDecodeError:
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-2
|
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
final_tools: List[AgentAction] = []
for tool_schema in tools:
_tool_input = tool_schema["action"]
function_name = tool_schema["action_name"]
# HACK HACK HACK:
# The code that encodes tool input into OpenAI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# OpenAI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = "responded: {content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
_tool = _FunctionsAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
final_tools.append(_tool)
return final_tools
return AgentFinish(return_values={"output": message.content}, log=message.content)
[docs]class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
"""An Agent driven by OpenAIs function powered API.
Args:
llm: This should be an instance of ChatOpenAI, specifically a model
that supports using `functions`.
tools: The tools this agent has access to.
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-3
|
that supports using `functions`.
tools: The tools this agent has access to.
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLanguageModel
tools: Sequence[BaseTool]
prompt: BasePromptTemplate
[docs] def get_allowed_tools(self) -> List[str]:
"""Get allowed tools."""
return [t.name for t in self.tools]
[docs] @root_validator
def validate_llm(cls, values: dict) -> dict:
if not isinstance(values["llm"], ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
return values
[docs] @root_validator
def validate_prompt(cls, values: dict) -> dict:
prompt: BasePromptTemplate = values["prompt"]
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
"`agent_scratchpad` should be one of the variables in the prompt, "
f"got {prompt.input_variables}"
)
return values
@property
def input_keys(self) -> List[str]:
"""Get input keys. Input refers to user input here."""
return ["input"]
@property
def functions(self) -> List[dict]:
enum_vals = [t.name for t in self.tools]
tool_selection = {
# OpenAI functions returns a single tool invocation
# Here we force the single tool invocation it returns to
# itself be a list of tool invocations. We do this by constructing
# a new tool that has one argument which is a list of tools
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-4
|
# a new tool that has one argument which is a list of tools
# to use.
"name": "tool_selection",
"description": "A list of actions to take.",
"parameters": {
"title": "tool_selection",
"description": "A list of actions to take.",
"type": "object",
"properties": {
"actions": {
"title": "actions",
"type": "array",
"items": {
# This is a custom item which bundles the action_name
# and the action. We do this because some actions
# could have the same schema, and without this there
# is no way to differentiate them.
"title": "tool_call",
"type": "object",
"properties": {
# This is the name of the action to take
"action_name": {
"title": "action_name",
"enum": enum_vals,
"type": "string",
"description": (
"Name of the action to take. The name "
"provided here should match up with the "
"parameters for the action below."
),
},
# This is the action to take.
"action": {
"title": "Action",
"anyOf": [
{
"title": t.name,
"type": "object",
"properties": t.args,
}
for t in self.tools
],
},
},
"required": ["action_name", "action"],
},
}
},
"required": ["actions"],
},
}
return [tool_selection]
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-5
|
},
}
return [tool_selection]
[docs] def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = self.llm.predict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
[docs] async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-6
|
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
[docs] @classmethod
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
) -> BasePromptTemplate:
"""Create prompt for this agent.
Args:
system_message: Message to use as the system message that will be the
first in the prompt.
extra_prompt_messages: Prompt messages that will be placed between the
system message and the new human input.
Returns:
A prompt template to pass into this agent.
"""
_prompts = extra_prompt_messages or []
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
if system_message:
messages = [system_message]
else:
messages = []
messages.extend(
[
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
return ChatPromptTemplate(messages=messages)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
34ca081a3eeb-7
|
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
**kwargs: Any,
) -> BaseMultiActionAgent:
"""Construct an agent from an LLM and tools."""
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
return cls(
llm=llm,
prompt=prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html
|
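A minimal construction sketch (assuming an OpenAI API key is configured and a function-calling ChatOpenAI model; the tool below is a placeholder). The distinctive part of the multi-functions agent is the single tool_selection function schema, which makes the model return a list of actions per step:

from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
from langchain.agents.tools import Tool
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)
tools = [Tool(name="Search", func=lambda q: "stub result", description="useful for searching")]
agent = OpenAIMultiFunctionsAgent.from_llm_and_tools(llm=llm, tools=tools)

# The wrapper schema sent to the OpenAI API: one function whose "actions"
# argument is a list of {action_name, action} objects.
print(agent.functions[0]["name"])                    # 'tool_selection'
print(agent.functions[0]["parameters"]["required"])  # ['actions']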
3c6e30d19095-0
|
Source code for langchain.agents.chat.output_parser
import json
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FINAL_ANSWER_ACTION = "Final Answer:"
[docs]class ChatOutputParser(AgentOutputParser):
[docs] def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
[docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
action = text.split("```")[1]
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception:
if not includes_answer:
raise OutputParserException(f"Could not parse LLM output: {text}")
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
@property
def _type(self) -> str:
return "chat"
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/output_parser.html
|
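ChatOutputParser expects either a JSON action blob wrapped in triple backticks or a "Final Answer:" marker; a small self-contained sketch of both paths:

from langchain.agents.chat.output_parser import ChatOutputParser

parser = ChatOutputParser()

# Tool call: the JSON blob between the ``` fences becomes an AgentAction.
action_text = 'I should search.\n```\n{"action": "Search", "action_input": "LangChain"}\n```'
print(parser.parse(action_text))  # AgentAction(tool='Search', tool_input='LangChain', ...)

# Final answer: no parse-able action, so everything after "Final Answer:" is returned.
finish_text = "Final Answer: LangChain is a framework for building LLM applications."
print(parser.parse(finish_text))  # AgentFinish({'output': 'LangChain is a framework ...'}, ...)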
d87bd02f7a94-0
|
Source code for langchain.agents.chat.base
from typing import Any, List, Optional, Sequence, Tuple
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.chat.output_parser import ChatOutputParser
from langchain.agents.chat.prompt import (
FORMAT_INSTRUCTIONS,
HUMAN_MESSAGE,
SYSTEM_MESSAGE_PREFIX,
SYSTEM_MESSAGE_SUFFIX,
)
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import AgentAction
from langchain.tools.base import BaseTool
[docs]class ChatAgent(Agent):
output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser)
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
def _construct_scratchpad(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> str:
agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
if not isinstance(agent_scratchpad, str):
raise ValueError("agent_scratchpad should be of type string.")
if agent_scratchpad:
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html
|
d87bd02f7a94-1
|
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{agent_scratchpad}"
)
else:
return agent_scratchpad
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return ChatOutputParser()
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(class_name=cls.__name__, tools=tools)
@property
def _stop(self) -> List[str]:
return ["Observation:"]
[docs] @classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
human_message: str = HUMAN_MESSAGE,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> BasePromptTemplate:
tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join(
[
system_message_prefix,
tool_strings,
format_instructions,
system_message_suffix,
]
)
messages = [
SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template(human_message),
]
if input_variables is None:
input_variables = ["input", "agent_scratchpad"]
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html
|
d87bd02f7a94-2
|
input_variables = ["input", "agent_scratchpad"]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
human_message: str = HUMAN_MESSAGE,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
system_message_prefix=system_message_prefix,
system_message_suffix=system_message_suffix,
human_message=human_message,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@property
def _agent_type(self) -> str:
raise ValueError
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html
|
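ChatAgent.create_prompt assembles a two-message chat prompt: a system message holding the tool descriptions plus format instructions, and a human message template. A quick sketch with a placeholder tool:

from langchain.agents.chat.base import ChatAgent
from langchain.agents.tools import Tool

tools = [Tool(name="Search", func=lambda q: "stub", description="useful for searching")]
prompt = ChatAgent.create_prompt(tools)
print(prompt.input_variables)                       # ['input', 'agent_scratchpad']
print([type(m).__name__ for m in prompt.messages])  # ['SystemMessagePromptTemplate', 'HumanMessagePromptTemplate']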
2da8aef2e1b7-0
|
Source code for langchain.agents.conversational_chat.output_parser
from __future__ import annotations
from typing import Union
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_json_markdown
from langchain.schema import AgentAction, AgentFinish, OutputParserException
[docs]class ConvoOutputParser(AgentOutputParser):
[docs] def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
[docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
response = parse_json_markdown(text)
action, action_input = response["action"], response["action_input"]
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "conversational_chat"
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/output_parser.html
|
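Because this parser goes through parse_json_markdown, the model output can be a raw JSON blob or one wrapped in a fenced ```json block; a minimal sketch:

from langchain.agents.conversational_chat.output_parser import ConvoOutputParser

parser = ConvoOutputParser()

tool_call = '```json\n{"action": "Search", "action_input": "weather in Berlin"}\n```'
print(parser.parse(tool_call))  # AgentAction(tool='Search', tool_input='weather in Berlin', ...)

final = '{"action": "Final Answer", "action_input": "It is sunny in Berlin."}'
print(parser.parse(final))      # AgentFinish({'output': 'It is sunny in Berlin.'}, ...)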
371e00018284-0
|
Source code for langchain.agents.conversational_chat.base
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain.agents.conversational_chat.prompt import (
PREFIX,
SUFFIX,
TEMPLATE_TOOL_RESPONSE,
)
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AgentAction,
AIMessage,
BaseMessage,
BaseOutputParser,
HumanMessage,
)
from langchain.tools.base import BaseTool
[docs]class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""
output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return ConvoOutputParser()
@property
def _agent_type(self) -> str:
raise NotImplementedError
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
|
371e00018284-1
|
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, tools)
[docs] @classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
)
tool_names = ", ".join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = human_message.format(
format_instructions=_output_parser.get_format_instructions()
)
final_prompt = format_instructions.format(
tool_names=tool_names, tools=tool_strings
)
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
messages = [
SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
def _construct_scratchpad(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> List[BaseMessage]:
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
|
371e00018284-2
|
) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=self.template_tool_response.format(observation=observation)
)
thoughts.append(human_message)
return thoughts
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=_output_parser,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
|
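A construction sketch (assuming an OpenAI API key and a placeholder tool). The resulting prompt expects chat_history and agent_scratchpad message placeholders, so in practice the agent is wrapped in an AgentExecutor with a message-returning memory:

from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.tools import Tool
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)
tools = [Tool(name="Search", func=lambda q: "stub", description="useful for searching")]
agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
print(agent.llm_chain.prompt.input_variables)  # ['input', 'chat_history', 'agent_scratchpad']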
9acb55703625-0
|
Source code for langchain.agents.react.output_parser
import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
[docs]class ReActOutputParser(AgentOutputParser):
[docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
action_prefix = "Action: "
if not text.strip().split("\n")[-1].startswith(action_prefix):
raise OutputParserException(f"Could not parse LLM Output: {text}")
action_block = text.strip().split("\n")[-1]
action_str = action_block[len(action_prefix) :]
# Parse out the action and the directive.
re_matches = re.search(r"(.*?)\[(.*?)\]", action_str)
if re_matches is None:
raise OutputParserException(
f"Could not parse action directive: {action_str}"
)
action, action_input = re_matches.group(1), re_matches.group(2)
if action == "Finish":
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
@property
def _type(self) -> str:
return "react"
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/output_parser.html
|
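ReActOutputParser only inspects the final line and its "Action: Tool[input]" directive; a quick sketch of the tool-call and Finish paths:

from langchain.agents.react.output_parser import ReActOutputParser

parser = ReActOutputParser()

step = "Thought: I need to look up the Colorado orogeny.\nAction: Search[Colorado orogeny]"
print(parser.parse(step))  # AgentAction(tool='Search', tool_input='Colorado orogeny', ...)

done = "Thought: I now know the answer.\nAction: Finish[high plains]"
print(parser.parse(done))  # AgentFinish({'output': 'high plains'}, ...)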
20836c481a47-0
|
Source code for langchain.agents.react.base
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from typing import Any, List, Optional, Sequence
from pydantic import Field
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.react.output_parser import ReActOutputParser
from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT
from langchain.agents.react.wiki_prompt import WIKI_PROMPT
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool
[docs]class ReActDocstoreAgent(Agent):
"""Agent for the ReAct chain."""
output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return ReActOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.REACT_DOCSTORE
[docs] @classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
"""Return default prompt."""
return WIKI_PROMPT
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 2:
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
|
20836c481a47-1
|
super()._validate_tools(tools)
if len(tools) != 2:
raise ValueError(f"Exactly two tools must be specified, but got {tools}")
tool_names = {tool.name for tool in tools}
if tool_names != {"Lookup", "Search"}:
raise ValueError(
f"Tool names should be Lookup and Search, got {tool_names}"
)
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def _stop(self) -> List[str]:
return ["\nObservation:"]
@property
def llm_prefix(self) -> str:
"""Prefix to append the LLM call with."""
return "Thought:"
class DocstoreExplorer:
"""Class to assist with exploration of a document store."""
def __init__(self, docstore: Docstore):
"""Initialize with a docstore, and set initial document to None."""
self.docstore = docstore
self.document: Optional[Document] = None
self.lookup_str = ""
self.lookup_index = 0
def search(self, term: str) -> str:
"""Search for a term in the docstore, and if found save."""
result = self.docstore.search(term)
if isinstance(result, Document):
self.document = result
return self._summary
else:
self.document = None
return result
def lookup(self, term: str) -> str:
"""Lookup a term in document (if saved)."""
if self.document is None:
raise ValueError("Cannot lookup without a successful search first")
if term.lower() != self.lookup_str:
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
|
20836c481a47-2
|
if term.lower() != self.lookup_str:
self.lookup_str = term.lower()
self.lookup_index = 0
else:
self.lookup_index += 1
lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]
if len(lookups) == 0:
return "No Results"
elif self.lookup_index >= len(lookups):
return "No More Results"
else:
result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})"
return f"{result_prefix} {lookups[self.lookup_index]}"
@property
def _summary(self) -> str:
return self._paragraphs[0]
@property
def _paragraphs(self) -> List[str]:
if self.document is None:
raise ValueError("Cannot get paragraphs without a document")
return self.document.page_content.split("\n\n")
[docs]class ReActTextWorldAgent(ReActDocstoreAgent):
"""Agent for the ReAct TextWorld chain."""
[docs] @classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
"""Return default prompt."""
return TEXTWORLD_PROMPT
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 1:
raise ValueError(f"Exactly one tool must be specified, but got {tools}")
tool_names = {tool.name for tool in tools}
if tool_names != {"Play"}:
raise ValueError(f"Tool name should be Play, got {tool_names}")
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
|
20836c481a47-3
|
raise ValueError(f"Tool name should be Play, got {tool_names}")
[docs]class ReActChain(AgentExecutor):
"""Chain that implements the ReAct paper.
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
docstore = ...  # any langchain.docstore.base.Docstore implementation
react = ReActChain(llm=OpenAI(), docstore=docstore)
"""
def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any):
"""Initialize with the LLM and a docstore."""
docstore_explorer = DocstoreExplorer(docstore)
tools = [
Tool(
name="Search",
func=docstore_explorer.search,
description="Search for a term in the docstore.",
),
Tool(
name="Lookup",
func=docstore_explorer.lookup,
description="Lookup a term in the docstore.",
),
]
agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
super().__init__(agent=agent, tools=tools, **kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
|
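A fuller usage sketch than the docstring, assuming the Wikipedia docstore wrapper (langchain.docstore.wikipedia, which needs the wikipedia package) and an OpenAI API key are available:

from langchain import OpenAI
from langchain.agents.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia

llm = OpenAI(temperature=0)
react = ReActChain(llm=llm, docstore=Wikipedia())
# ReActChain is an AgentExecutor, so it exposes the usual Chain interface.
answer = react.run("What year was the author of 'Alice in Wonderland' born?")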
ef3bf11a48b2-0
|
Source code for langchain.agents.conversational.output_parser
import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
[docs]class ConvoOutputParser(AgentOutputParser):
ai_prefix: str = "AI"
[docs] def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
[docs] def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/output_parser.html
|
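This parser works on plain "Action:/Action Input:" text and treats anything after the ai_prefix as a final answer; a small sketch:

from langchain.agents.conversational.output_parser import ConvoOutputParser

parser = ConvoOutputParser(ai_prefix="AI")

step = "Thought: I should use a tool.\nAction: Search\nAction Input: current weather in Paris"
print(parser.parse(step))   # AgentAction(tool='Search', tool_input='current weather in Paris', ...)

final = "AI: It is sunny in Paris right now."
print(parser.parse(final))  # AgentFinish({'output': 'It is sunny in Paris right now.'}, ...)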
2a188175a89b-0
|
Source code for langchain.agents.conversational.base
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.tools.base import BaseTool
[docs]class ConversationalAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""
ai_prefix: str = "AI"
output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
@classmethod
def _get_default_output_parser(
cls, ai_prefix: str = "AI", **kwargs: Any
) -> AgentOutputParser:
return ConvoOutputParser(ai_prefix=ai_prefix)
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
[docs] @classmethod
def create_prompt(
cls,
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
|
2a188175a89b-1
|
[docs] @classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
ai_prefix: str = "AI",
human_prefix: str = "Human",
input_variables: Optional[List[str]] = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
human_prefix: String to use before human output.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
)
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(
tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix
)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
return PromptTemplate(template=template, input_variables=input_variables)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, tools)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
|
2a188175a89b-2
|
validate_tools_single_input(cls.__name__, tools)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
ai_prefix: str = "AI",
human_prefix: str = "Human",
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser(
ai_prefix=ai_prefix
)
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
ai_prefix=ai_prefix,
output_parser=_output_parser,
**kwargs,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
|
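A prompt-construction sketch with a placeholder tool; note the assembled PromptTemplate expects chat_history, so conversation memory is needed when the agent is actually executed:

from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.tools import Tool

tools = [Tool(name="Search", func=lambda q: "stub", description="useful for searching")]
prompt = ConversationalAgent.create_prompt(tools, ai_prefix="Assistant", human_prefix="User")
print(prompt.input_variables)  # ['input', 'chat_history', 'agent_scratchpad']
print(type(prompt).__name__)   # 'PromptTemplate'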
9592a1d8c1c9-0
|
Source code for langchain.agents.openai_functions_agent.base
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import root_validator
from langchain.agents import BaseSingleActionAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain.schema import (
AgentAction,
AgentFinish,
AIMessage,
BaseMessage,
FunctionMessage,
OutputParserException,
SystemMessage,
)
from langchain.tools import BaseTool
from langchain.tools.convert_to_openai import format_tool_to_openai_function
@dataclass
class _FunctionsAgentAction(AgentAction):
message_log: List[BaseMessage]
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, _FunctionsAgentAction):
return agent_action.message_log + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-1
|
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def _format_intermediate_steps(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
"""Format intermediate steps.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
"""
messages = []
for intermediate_step in intermediate_steps:
agent_action, observation = intermediate_step
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_call = message.additional_kwargs["function_call"]
function_name = function_call["name"]
try:
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-2
|
function_name = function_call["name"]
try:
_tool_input = json.loads(function_call["arguments"])
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
# HACK HACK HACK:
# The code that encodes tool input into OpenAI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# OpenAI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = "responded: {content}\n" if message.content else "\n"
return _FunctionsAgentAction(
tool=function_name,
tool_input=tool_input,
log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n",
message_log=[message],
)
return AgentFinish(return_values={"output": message.content}, log=message.content)
[docs]class OpenAIFunctionsAgent(BaseSingleActionAgent):
"""An Agent driven by OpenAIs function powered API.
Args:
llm: This should be an instance of ChatOpenAI, specifically a model
that supports using `functions`.
tools: The tools this agent has access to.
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-3
|
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLanguageModel
tools: Sequence[BaseTool]
prompt: BasePromptTemplate
[docs] def get_allowed_tools(self) -> List[str]:
"""Get allowed tools."""
return [t.name for t in self.tools]
[docs] @root_validator
def validate_llm(cls, values: dict) -> dict:
if not isinstance(values["llm"], ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
return values
[docs] @root_validator
def validate_prompt(cls, values: dict) -> dict:
prompt: BasePromptTemplate = values["prompt"]
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
"`agent_scratchpad` should be one of the variables in the prompt, "
f"got {prompt.input_variables}"
)
return values
@property
def input_keys(self) -> List[str]:
"""Get input keys. Input refers to user input here."""
return ["input"]
@property
def functions(self) -> List[dict]:
return [dict(format_tool_to_openai_function(t)) for t in self.tools]
[docs] def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-4
|
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = self.llm.predict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
[docs] async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = _parse_ai_message(predicted_message)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-5
|
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
[docs] @classmethod
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
) -> BasePromptTemplate:
"""Create prompt for this agent.
Args:
system_message: Message to use as the system message that will be the
first in the prompt.
extra_prompt_messages: Prompt messages that will be placed between the
system message and the new human input.
Returns:
A prompt template to pass into this agent.
"""
_prompts = extra_prompt_messages or []
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
if system_message:
messages = [system_message]
else:
messages = []
messages.extend(
[
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
return ChatPromptTemplate(messages=messages)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
**kwargs: Any,
) -> BaseSingleActionAgent:
"""Construct an agent from an LLM and tools."""
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
9592a1d8c1c9-6
|
"""Construct an agent from an LLM and tools."""
if not isinstance(llm, ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
return cls(
llm=llm,
prompt=prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
|
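A construction sketch assuming a function-calling ChatOpenAI model and an OpenAI API key; each tool is converted to an OpenAI function schema via format_tool_to_openai_function and exposed through the functions property:

from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.tools import Tool
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)
tools = [Tool(name="Search", func=lambda q: "stub", description="useful for searching")]
agent = OpenAIFunctionsAgent.from_llm_and_tools(llm=llm, tools=tools)
print([f["name"] for f in agent.functions])  # ['Search']
# An AgentExecutor normally drives plan()/aplan() in a loop until an AgentFinish.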
41a69e91d865-0
|
Source code for langchain.graphs.networkx_graph
"""Networkx wrapper for graph operations."""
from __future__ import annotations
from typing import Any, List, NamedTuple, Optional, Tuple
KG_TRIPLE_DELIMITER = "<|>"
[docs]class KnowledgeTriple(NamedTuple):
"""A triple in the graph."""
subject: str
predicate: str
object_: str
[docs] @classmethod
def from_string(cls, triple_string: str) -> "KnowledgeTriple":
"""Create a KnowledgeTriple from a string."""
subject, predicate, object_ = triple_string.strip().split(", ")
subject = subject[1:]
object_ = object_[:-1]
return cls(subject, predicate, object_)
[docs]def parse_triples(knowledge_str: str) -> List[KnowledgeTriple]:
"""Parse knowledge triples from the knowledge string."""
knowledge_str = knowledge_str.strip()
if not knowledge_str or knowledge_str == "NONE":
return []
triple_strs = knowledge_str.split(KG_TRIPLE_DELIMITER)
results = []
for triple_str in triple_strs:
try:
kg_triple = KnowledgeTriple.from_string(triple_str)
except ValueError:
continue
results.append(kg_triple)
return results
[docs]def get_entities(entity_str: str) -> List[str]:
"""Extract entities from entity string."""
if entity_str.strip() == "NONE":
return []
else:
return [w.strip() for w in entity_str.split(",")]
class NetworkxEntityGraph:
"""Networkx wrapper for entity graph operations."""
def __init__(self, graph: Optional[Any] = None) -> None:
"""Create a new graph."""
try:
|
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
|
41a69e91d865-1
|
"""Create a new graph."""
try:
import networkx as nx
except ImportError:
raise ImportError(
"Could not import networkx python package. "
"Please install it with `pip install networkx`."
)
if graph is not None:
if not isinstance(graph, nx.DiGraph):
raise ValueError("Passed in graph is not of correct shape")
self._graph = graph
else:
self._graph = nx.DiGraph()
@classmethod
def from_gml(cls, gml_path: str) -> NetworkxEntityGraph:
try:
import networkx as nx
except ImportError:
raise ImportError(
"Could not import networkx python package. "
"Please install it with `pip install networkx`."
)
graph = nx.read_gml(gml_path)
return cls(graph)
def add_triple(self, knowledge_triple: KnowledgeTriple) -> None:
"""Add a triple to the graph."""
# Creates nodes if they don't exist
# Overwrites existing edges
if not self._graph.has_node(knowledge_triple.subject):
self._graph.add_node(knowledge_triple.subject)
if not self._graph.has_node(knowledge_triple.object_):
self._graph.add_node(knowledge_triple.object_)
self._graph.add_edge(
knowledge_triple.subject,
knowledge_triple.object_,
relation=knowledge_triple.predicate,
)
def delete_triple(self, knowledge_triple: KnowledgeTriple) -> None:
"""Delete a triple from the graph."""
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
|
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
|
41a69e91d865-2
|
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_)
def get_triples(self) -> List[Tuple[str, str, str]]:
"""Get all triples in the graph."""
return [(u, v, d["relation"]) for u, v, d in self._graph.edges(data=True)]
def get_entity_knowledge(self, entity: str, depth: int = 1) -> List[str]:
"""Get information about an entity."""
import networkx as nx
# TODO: Have more information-specific retrieval methods
if not self._graph.has_node(entity):
return []
results = []
for src, sink in nx.dfs_edges(self._graph, entity, depth_limit=depth):
relation = self._graph[src][sink]["relation"]
results.append(f"{src} {relation} {sink}")
return results
def write_to_gml(self, path: str) -> None:
import networkx as nx
nx.write_gml(self._graph, path)
def clear(self) -> None:
"""Clear the graph."""
self._graph.clear()
|
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
|
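A self-contained sketch of the graph API above (requires the networkx package):

from langchain.graphs.networkx_graph import KnowledgeTriple, NetworkxEntityGraph

graph = NetworkxEntityGraph()
graph.add_triple(KnowledgeTriple("LangChain", "is written in", "Python"))
graph.add_triple(KnowledgeTriple("Python", "was created by", "Guido van Rossum"))

print(graph.get_triples())
# [('LangChain', 'Python', 'is written in'), ('Python', 'Guido van Rossum', 'was created by')]
print(graph.get_entity_knowledge("LangChain", depth=2))
# ['LangChain is written in Python', 'Python was created by Guido van Rossum']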
d7082c128b0b-0
|
Source code for langchain.chat_models.google_palm
"""Wrapper around Google's PaLM Chat API."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import google.generativeai as genai
logger = logging.getLogger(__name__)
[docs]class ChatGooglePalmError(Exception):
"""Error raised when there is an issue with the Google PaLM API."""
pass
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates text at the earliest stop token found."""
if stop is None:
return text
for stop_token in stop:
stop_token_idx = text.find(stop_token)
if stop_token_idx != -1:
text = text[:stop_token_idx]
return text
def _response_to_result(
response: genai.types.ChatResponse,
stop: Optional[List[str]],
) -> ChatResult:
"""Converts a PaLM API response into a LangChain ChatResult."""
if not response.candidates:
|
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
|
d7082c128b0b-1
|
if not response.candidates:
raise ChatGooglePalmError("ChatResponse must have at least one candidate.")
generations: List[ChatGeneration] = []
for candidate in response.candidates:
author = candidate.get("author")
if author is None:
raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}")
content = _truncate_at_stop_tokens(candidate.get("content", ""), stop)
if content is None:
raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}")
if author == "ai":
generations.append(
ChatGeneration(text=content, message=AIMessage(content=content))
)
elif author == "human":
generations.append(
ChatGeneration(
text=content,
message=HumanMessage(content=content),
)
)
else:
generations.append(
ChatGeneration(
text=content,
message=ChatMessage(role=author, content=content),
)
)
return ChatResult(generations=generations)
def _messages_to_prompt_dict(
input_messages: List[BaseMessage],
) -> genai.types.MessagePromptDict:
"""Converts a list of LangChain messages into a PaLM API MessagePrompt structure."""
import google.generativeai as genai
context: str = ""
examples: List[genai.types.MessageDict] = []
messages: List[genai.types.MessageDict] = []
remaining = list(enumerate(input_messages))
while remaining:
index, input_message = remaining.pop(0)
if isinstance(input_message, SystemMessage):
if index != 0:
|
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
|
d7082c128b0b-2
|
if isinstance(input_message, SystemMessage):
if index != 0:
raise ChatGooglePalmError("System message must be first input message.")
context = input_message.content
elif isinstance(input_message, HumanMessage) and input_message.example:
if messages:
raise ChatGooglePalmError(
"Message examples must come before other messages."
)
_, next_input_message = remaining.pop(0)
if isinstance(next_input_message, AIMessage) and next_input_message.example:
examples.extend(
[
genai.types.MessageDict(
author="human", content=input_message.content
),
genai.types.MessageDict(
author="ai", content=next_input_message.content
),
]
)
else:
raise ChatGooglePalmError(
"Human example message must be immediately followed by an "
" AI example response."
)
elif isinstance(input_message, AIMessage) and input_message.example:
raise ChatGooglePalmError(
"AI example message must be immediately preceded by a Human "
"example message."
)
elif isinstance(input_message, AIMessage):
messages.append(
genai.types.MessageDict(author="ai", content=input_message.content)
)
elif isinstance(input_message, HumanMessage):
messages.append(
genai.types.MessageDict(author="human", content=input_message.content)
)
elif isinstance(input_message, ChatMessage):
messages.append(
genai.types.MessageDict(
author=input_message.role, content=input_message.content
)
)
else:
raise ChatGooglePalmError(
"Messages without an explicit role not supported by PaLM API."
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
|
d7082c128b0b-3
|
"Messages without an explicit role not supported by PaLM API."
)
return genai.types.MessagePromptDict(
context=context,
examples=examples,
messages=messages,
)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _chat_with_retry(**kwargs: Any) -> Any:
return llm.client.chat(**kwargs)
return _chat_with_retry(**kwargs)
async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
async def _achat_with_retry(**kwargs: Any) -> Any:
|
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
|
d7082c128b0b-4
|
async def _achat_with_retry(**kwargs: Any) -> Any:
# Use the PaLM client's async chat API.
return await llm.client.chat_async(**kwargs)
return await _achat_with_retry(**kwargs)
[docs]class ChatGooglePalm(BaseChatModel, BaseModel):
"""Wrapper around Google's PaLM Chat API.
To use, you must have the google.generativeai Python package installed and
either:
1. The ``GOOGLE_API_KEY`` environment variable set with your API key, or
2. Pass your API key using the ``google_api_key`` kwarg to the ChatGooglePalm
constructor.
Example:
.. code-block:: python
from langchain.chat_models import ChatGooglePalm
chat = ChatGooglePalm()
"""
client: Any #: :meta private:
model_name: str = "models/chat-bison-001"
"""Model name to use."""
google_api_key: Optional[str] = None
temperature: Optional[float] = None
"""Run inference with this temperature. Must by in the closed
interval [0.0, 1.0]."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key)
except ImportError:
raise ChatGooglePalmError(
"Could not import google.generativeai python package. "
"Please install it with `pip install google-generativeai`"
)
values["client"] = genai
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = chat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
**kwargs,
)
return _response_to_result(response, stop)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = await achat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
)
return _response_to_result(response, stop)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "google-palm-chat"
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
[docs]class ChatAnthropic(BaseChatModel, _AnthropicCommon):
r"""Wrapper around Anthropic's large language model.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.llms import Anthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
@property
def lc_serializable(self) -> bool:
return True
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"{self.HUMAN_PROMPT} {message.content}"
elif isinstance(message, AIMessage):
message_text = f"{self.AI_PROMPT} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format a list of strings into a single string with necessary newlines.
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary newlines.
"""
return "".join(
self._convert_one_message_to_text(message) for message in messages
)
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
messages = messages.copy() # don't mutate the original list
if not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))
text = self._convert_messages_to_text(messages)
return (
text.rstrip()
) # trim off the trailing ' ' that might come from the "Assistant: "
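A hedged sketch of what this conversion produces; it assumes the ``anthropic`` package is installed and ``ANTHROPIC_API_KEY`` is set, and it calls the private helper purely for illustration:

from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic()
prompt = chat._convert_messages_to_prompt([HumanMessage(content="Hello!")])
# Because the list does not end with an AIMessage, an empty one is appended,
# so the prompt ends with the assistant tag, ready for completion:
#   "\n\nHuman: Hello!\n\nAssistant:"
print(prompt)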
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
run_manager.on_llm_new_token(
delta,
)
else:
response = self.client.completion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = await self.client.acompletion_stream(**params)
async for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
await run_manager.on_llm_new_token(
delta,
)
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
[docs] def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text)
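A streaming usage sketch for the class above, assuming the ``anthropic`` package and API key are available; the stdout callback handler is the stock one shipped with LangChain:

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],  # prints each new token as it arrives
)
chat([HumanMessage(content="Write a haiku about retries.")])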
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html
Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult
[docs]class PromptLayerChatOpenAI(ChatOpenAI):
"""Wrapper around OpenAI Chat large language models and PromptLayer.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerChatOpenAI adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
params = {**params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerChatOpenAI",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any
) -> ChatResult:
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
params = {**params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerChatOpenAI.async",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
@property
def _llm_type(self) -> str:
return "promptlayer-openai-chat"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**super()._identifying_params,
"pl_tags": self.pl_tags,
"return_pl_id": self.return_pl_id,
}
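A usage sketch showing both extra parameters described in the docstring; it assumes ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` are set, and the tag name is arbitrary:

from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["docs-example"], return_pl_id=True)
result = chat.generate([[HumanMessage(content="Say hi.")]])
generation = result.generations[0][0]
print(generation.text)
print(generation.generation_info["pl_request_id"])  # populated because return_pl_id=True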
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html
Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import ChatResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
deployment_name: str = ""
openai_api_type: str = "azure"
openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
openai_proxy: str = ""
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
values["openai_api_type"] = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
openai_creds = {
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_creds, **super()._invocation_params}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError(
"Azure has not provided the response due to a content"
" filter being triggered"
)
return super()._create_chat_result(response)
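A configuration sketch for the class above; the resource URL, deployment name, and key are placeholders, and all of them can equally be supplied through the environment variables listed in the docstring:

from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage

chat = AzureChatOpenAI(
    deployment_name="35-turbo-dev",                           # your deployment name
    openai_api_base="https://my-resource.openai.azure.com/",  # your Azure endpoint
    openai_api_version="2023-03-15-preview",
    openai_api_key="...",                                     # or set OPENAI_API_KEY
)
print(chat([HumanMessage(content="Hello from Azure!")]).content)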
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html
Source code for langchain.chat_models.vertexai
"""Wrapper around Google VertexAI chat-based models."""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.vertexai import _VertexAICommon, is_codey_model
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utilities.vertexai import raise_vertex_import_error
@dataclass
class _MessagePair:
"""InputOutputTextPair represents a pair of input and output texts."""
question: HumanMessage
answer: AIMessage
@dataclass
class _ChatHistory:
"""InputOutputTextPair represents a pair of input and output texts."""
history: List[_MessagePair] = field(default_factory=list)
system_message: Optional[SystemMessage] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a sequence of messages into history.
A sequence should be either (SystemMessage, HumanMessage, AIMessage,
HumanMessage, AIMessage, ...) or (HumanMessage, AIMessage, HumanMessage,
AIMessage, ...). CodeChat does not support SystemMessage.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If a sequence of message is odd, or a human message is not followed
by a message from AI (e.g., Human, Human, AI or AI, AI, Human).
"""
if not history:
return _ChatHistory()
first_message = history[0]
system_message = first_message if isinstance(first_message, SystemMessage) else None
chat_history = _ChatHistory(system_message=system_message)
messages_left = history[1:] if system_message else history
if len(messages_left) % 2 != 0:
raise ValueError(
f"Amount of messages in history should be even, got {len(messages_left)}!"
)
for question, answer in zip(messages_left[::2], messages_left[1::2]):
if not isinstance(question, HumanMessage) or not isinstance(answer, AIMessage):
raise ValueError(
"A human message should follow a bot one, "
f"got {question.type}, {answer.type}."
)
chat_history.history.append(_MessagePair(question=question, answer=answer))
return chat_history
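A small sketch of a history that parses cleanly under the rules above: an optional system message followed by an even number of alternating human/AI turns.

from langchain.schema import AIMessage, HumanMessage, SystemMessage

history = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hi"),
    AIMessage(content="Hello! How can I help?"),
]
parsed = _parse_chat_history(history)
# parsed.system_message is the SystemMessage above; parsed.history holds one
# _MessagePair(question=HumanMessage(...), answer=AIMessage(...)).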
[docs]class ChatVertexAI(_VertexAICommon, BaseChatModel):
"""Wrapper around Vertex AI large language models."""
model_name: str = "chat-bison"
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
if is_codey_model(values["model_name"]):
from vertexai.preview.language_models import CodeChatModel
values["client"] = CodeChatModel.from_pretrained(values["model_name"])
else:
from vertexai.preview.language_models import ChatModel
values["client"] = ChatModel.from_pretrained(values["model_name"])
except ImportError:
raise_vertex_import_error()
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
)
history = _parse_chat_history(messages[:-1])
context = history.system_message.content if history.system_message else None
params = {**self._default_params, **kwargs}
if not self.is_codey_model:
chat = self.client.start_chat(context=context, **params)
else:
chat = self.client.start_chat(**params)
for pair in history.history:
chat._history.append((pair.question.content, pair.answer.content))
response = chat.send_message(question.content, **params)
text = self._enforce_stop_words(response.text, stop)
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
raise NotImplementedError(
"""Vertex AI doesn't support async requests at the moment."""
)
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html
Source code for langchain.chat_models.base
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.load.dump import dumpd, dumps
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
LLMResult,
PromptValue,
RunInfo,
)
def _get_verbosity() -> bool:
return langchain.verbose
[docs]class BaseChatModel(BaseLanguageModel, ABC):
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
tags: Optional[List[str]] = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
[docs] @root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
return {}
def _get_invocation_params(
self,
stop: Optional[List[str]] = None,
) -> dict:
params = self.dict()
params["stop"] = stop
return params
def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
if self.lc_serializable:
params = {**kwargs, **{"stop": stop}}
param_string = str(sorted([(k, v) for k, v in params.items()]))
llm_string = dumps(self)
return llm_string + "---" + param_string
else:
params = self._get_invocation_params(stop=stop)
params = {**params, **kwargs}
return str(sorted([(k, v) for k, v in params.items()]))
[docs] def generate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop)
options = {"stop": stop}
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
)
run_managers = callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = []
for i, m in enumerate(messages):
try:
results.append(
self._generate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
)
except (KeyboardInterrupt, Exception) as e:
if run_managers:
run_managers[i].on_llm_error(e)
raise e
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
if run_managers:
run_infos = []
for manager, flattened_output in zip(run_managers, flattened_outputs):
manager.on_llm_end(flattened_output)
run_infos.append(RunInfo(run_id=manager.run_id))
output.run = run_infos
return output
[docs] async def agenerate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop)
options = {"stop": stop}
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
)
run_managers = await callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = await asyncio.gather(
*[
self._agenerate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
for i, m in enumerate(messages)
],
return_exceptions=True,
)
exceptions = []
for i, res in enumerate(results):
if isinstance(res, Exception):
if run_managers:
await run_managers[i].on_llm_error(res)
exceptions.append(res)
if exceptions:
if run_managers:
await asyncio.gather(
*[
run_manager.on_llm_end(
LLMResult(
generations=[res.generations], llm_output=res.llm_output
)
)
for run_manager, res in zip(run_managers, results)
if not isinstance(res, Exception)
]
)
raise exceptions[0]
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
await asyncio.gather(
*[
run_manager.on_llm_end(flattened_output)
for run_manager, flattened_output in zip(
run_managers, flattened_outputs
)
]
)
if run_managers:
output.run = [
RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
]
return output
[docs] def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
[docs] async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return await self.agenerate(
prompt_messages, stop=stop, callbacks=callbacks, **kwargs
)
def _generate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return self._generate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = self._generate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
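The cached branch above keys on the serialized messages plus an "llm string" of invocation parameters. A minimal sketch, assuming the in-memory cache shipped with LangChain and a configured OpenAI key:

import langchain
from langchain.cache import InMemoryCache
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

langchain.llm_cache = InMemoryCache()  # enables the cache lookup/update path above
chat = ChatOpenAI(temperature=0)       # assumes OPENAI_API_KEY is set

messages = [HumanMessage(content="What is 2 + 2?")]
first = chat(messages)   # hits the API and stores the generations in the cache
second = chat(messages)  # same prompt and llm_string, so served from the cache
assert first.content == second.content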
async def _agenerate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return await self._agenerate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = await self._agenerate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
@abstractmethod
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
@abstractmethod
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
[docs] def __call__(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
generation = self.generate(
[messages], stop=stop, callbacks=callbacks, **kwargs
).generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
async def _call_async(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
result = await self.agenerate(
[messages], stop=stop, callbacks=callbacks, **kwargs
)
generation = result.generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
[docs] def call_as_llm(
self, message: str, stop: Optional[List[str]] = None, **kwargs: Any
) -> str:
return self.predict(message, stop=stop, **kwargs)
[docs] def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
return result.content
[docs] def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(messages, stop=_stop, **kwargs)
[docs] async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = await self._call_async(
[HumanMessage(content=text)], stop=_stop, **kwargs
)
return result.content
[docs] async def apredict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(messages, stop=_stop, **kwargs)
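The convenience methods above differ only in their input and output types; a quick sketch with ChatOpenAI (assuming ``OPENAI_API_KEY`` is set):

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(temperature=0)

# predict: str in, str out (the text is wrapped in a single HumanMessage).
text = chat.predict("Translate 'bonjour' to English.")

# predict_messages: messages in, BaseMessage out.
message = chat.predict_messages(
    [
        SystemMessage(content="Reply with a single word."),
        HumanMessage(content="Translate 'bonjour' to English."),
    ]
)
print(text, message.content)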
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of chat model."""
[docs] def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
[docs]class SimpleChatModel(BaseChatModel):
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@abstractmethod
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Simpler interface."""
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
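Because SimpleChatModel funnels everything through ``_call``, a custom chat model only needs that method plus ``_llm_type``. A toy, purely hypothetical echo model:

from typing import Any, List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import BaseMessage, HumanMessage


class EchoChatModel(SimpleChatModel):
    """Toy model that echoes the last message back; for illustration only."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat-model"

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return messages[-1].content


echo = EchoChatModel()
print(echo([HumanMessage(content="hello")]).content)  # -> "hello"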
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
Source code for langchain.chat_models.fake
"""Fake ChatModel for testing purposes."""
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import BaseMessage
[docs]class FakeListChatModel(SimpleChatModel):
"""Fake ChatModel for testing purposes."""
responses: List
i: int = 0
@property
def _llm_type(self) -> str:
return "fake-list-chat-model"
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
self.i += 1
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/fake.html
Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting with
# 1 second and capping the wait at 60 seconds.
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
content = _dict["content"] or "" # OpenAI returns None for tool invocations
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
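A round-trip sketch for the two converters above; the function name and arguments inside ``function_call`` are hypothetical values, used only to show how they survive the conversion:

from langchain.schema import AIMessage

message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {"name": "get_weather", "arguments": '{"city": "Paris"}'}
    },
)
as_dict = _convert_message_to_dict(message)
# {'role': 'assistant', 'content': '', 'function_call': {...}}
round_tripped = _convert_dict_to_message(as_dict)
assert round_tripped.additional_kwargs["function_call"]["name"] == "get_weather"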
[docs]class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the chat model name. However, there are some cases
where you may want to use this chat model class with a model name not
supported by tiktoken. This can include when using Azure OpenAI or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
[docs] class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
[docs] @root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting with
# 1 second and capping the wait at 60 seconds.
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
function_call: Optional[dict] = None
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content") or ""
inner_completion += token
_function_call = stream_resp["choices"][0]["delta"].get("function_call")
if _function_call:
if function_call is None:
function_call = _function_call
else:
function_call["arguments"] += _function_call["arguments"]
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{
"content": inner_completion,
"role": role,
"function_call": function_call,
}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
function_call: Optional[dict] = None
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token or ""
_function_call = stream_resp["choices"][0]["delta"].get("function_call")
if _function_call:
if function_call is None:
function_call = _function_call
else:
function_call["arguments"] += _function_call["arguments"]
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{
"content": inner_completion,
"role": role,
"function_call": function_call,
}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
"model": self.model_name,
}
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
[docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model.startswith("gpt-3.5-turbo"):
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model.startswith("gpt-4"):
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
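A quick sketch of the token counter above; it assumes ``tiktoken`` is installed and ``OPENAI_API_KEY`` is set (construction validates the key), and the exact count depends on the model revision:

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo")
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="How many tokens is this?"),
]
print(chat.get_num_tokens_from_messages(messages))  # counted with tiktoken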
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html
Source code for langchain.llms.gpt4all
"""Wrapper for the GPT4All model."""
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
[docs]class GPT4All(LLM):
r"""Wrapper around GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.3
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(1, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
context_erase: float = 0.5
"""Leave (n_ctx * context_erase) tokens
starting from beginning if the context has run out."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
client: Any = None #: :meta private:
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"n_ctx",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
"context_erase",
}
def _default_params(self) -> Dict[str, Any]:
return {
"n_ctx": self.n_ctx,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
"context_erase": self.context_erase,
}
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
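A hedged usage sketch of the ``GPT4All`` wrapper above; the model path, sampling parameters, and stop sequence are assumptions for illustration only.
.. code-block:: python

    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.llms import GPT4All

    llm = GPT4All(
        model="./models/gpt4all-model.bin",  # hypothetical local model file
        n_ctx=512,
        n_threads=8,
        temp=0.7,
        callbacks=[StreamingStdOutCallbackHandler()],  # print tokens as they arrive
        verbose=True,
    )

    # _call (above) streams tokens through the callback manager and applies
    # enforce_stop_tokens when a stop list is supplied.
    text = llm("Once upon a time, ", stop=["\n\n"])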
Source code for langchain.llms.utils
"""Common utility functions for working with LLM APIs."""
import re
from typing import List
[docs]def enforce_stop_tokens(text: str, stop: List[str]) -> str:
"""Cut off the text as soon as any stop words occur."""
return re.split("|".join(stop), text)[0]
https://api.python.langchain.com/en/latest/_modules/langchain/llms/utils.html
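A small sketch of how ``enforce_stop_tokens`` behaves; the strings are examples only. Because the stop words are joined into a regular expression, escaping metacharacters is assumed to be the caller's responsibility.
.. code-block:: python

    import re

    from langchain.llms.utils import enforce_stop_tokens

    text = "Answer: 42\nObservation: tool finished"
    print(enforce_stop_tokens(text, ["Observation:"]))  # -> "Answer: 42\n"

    # Stop strings containing regex metacharacters can be escaped first
    # (an assumption about caller usage, not something the function does).
    stop = [re.escape(s) for s in ["What?", "(end)"]]
    print(enforce_stop_tokens("Done. What? (end)", stop))  # -> "Done. "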
Source code for langchain.llms.databricks
import os
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
__all__ = ["Databricks"]
class _DatabricksClientBase(BaseModel, ABC):
"""A base JSON API client that talks to Databricks."""
api_url: str
api_token: str
def post_raw(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_token}"}
response = requests.post(self.api_url, headers=headers, json=request)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()
@abstractmethod
def post(self, request: Any) -> Any:
...
class _DatabricksServingEndpointClient(_DatabricksClientBase):
"""An API client that talks to a Databricks serving endpoint."""
host: str
endpoint_name: str
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
endpoint_name = values["endpoint_name"]
api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
# See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
wrapped_request = {"dataframe_records": [request]}
response = self.post_raw(wrapped_request)["predictions"]
# For a single-record query, the result is not a list.
if isinstance(response, list):
response = response[0]
return response
class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):
"""An API client that talks to a Databricks cluster driver proxy app."""
host: str
cluster_id: str
cluster_driver_port: str
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
cluster_id = values["cluster_id"]
port = values["cluster_driver_port"]
api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
return self.post_raw(request)
[docs]def get_repl_context() -> Any:
"""Gets the notebook REPL context if running inside a Databricks notebook.
Raises a ``ValueError`` if ``dbruntime`` cannot be imported, i.e. when not
running inside a Databricks notebook.
"""
try:
from dbruntime.databricks_repl_context import get_context
return get_context()
except ImportError:
raise ValueError(
"Cannot access dbruntime, not running inside a Databricks notebook."
)
[docs]def get_default_host() -> str:
"""Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv("DATABRICKS_HOST")
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
"host was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
# TODO: support Databricks CLI profile
host = host.lstrip("https://").lstrip("http://").rstrip("/")
return host
[docs]def get_default_api_token() -> str:
"""Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if api_token := os.getenv("DATABRICKS_TOKEN"):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
"api_token was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
# TODO: support Databricks CLI profile
return api_token
[docs]class Databricks(LLM):
"""LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app.
It supports two endpoint types:
* **Serving endpoint** (recommended for both production and development).
We assume that an LLM was registered and deployed to a serving endpoint.
To wrap it as an LLM you must have "Can Query" permission to the endpoint.
Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
``cluster_driver_port``.
The expected model signature is:
* inputs::
[{"name": "prompt", "type": "string"},
{"name": "stop", "type": "list[string]"}]
* outputs: ``[{"type": "string"}]``
* **Cluster driver proxy app** (recommended for interactive development).
One can load an LLM on a Databricks interactive cluster and start a local HTTP
server on the driver node to serve the model at ``/`` using HTTP POST method
with JSON input/output.
Please use a port number between ``3000`` and ``8000`` and have the server listen on
the driver IP address, or simply ``0.0.0.0``, instead of localhost only.
To wrap it as an LLM you must have "Can Attach To" permission to the cluster.
Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``.
The expected server schema (using JSON schema) is:
* inputs::
{"type": "object",
"properties": {
"prompt": {"type": "string"},
"stop": {"type": "array", "items": {"type": "string"}}},
"required": ["prompt"]}`
* outputs: ``{"type": "string"}``
If the endpoint model signature is different or you want to set extra params,
you can use `transform_input_fn` and `transform_output_fn` to apply necessary
transformations before and after the query.
"""
host: str = Field(default_factory=get_default_host)
"""Databricks workspace hostname.
If not provided, the default value is determined by
* the ``DATABRICKS_HOST`` environment variable if present, or
* the hostname of the current Databricks workspace if running inside
a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode.
"""
api_token: str = Field(default_factory=get_default_api_token)
"""Databricks personal access token.
If not provided, the default value is determined by
* the ``DATABRICKS_TOKEN`` environment variable if present, or
* an automatically generated temporary token if running inside a Databricks
notebook attached to an interactive cluster in "single user" or
"no isolation shared" mode.
"""
endpoint_name: Optional[str] = None
"""Name of the model serving endpont.
You must specify the endpoint name to connect to a model serving endpoint.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_id: Optional[str] = None
"""ID of the cluster if connecting to a cluster driver proxy app.
If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs
inside a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode, the current cluster ID is used as the default.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_driver_port: Optional[str] = None
"""The port number used by the HTTP server running on the cluster driver node.
The server should listen on the driver IP address, or simply ``0.0.0.0``, so that
it is reachable from outside localhost.
We recommend using a port number between ``3000`` and ``8000``.
"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Extra parameters to pass to the endpoint."""
transform_input_fn: Optional[Callable] = None
"""A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible
request object that the endpoint accepts.
For example, you can apply a prompt template to the input prompt.
"""
transform_output_fn: Optional[Callable[..., str]] = None
"""A function that transforms the output from the endpoint to the generated text.
"""
_client: _DatabricksClientBase = PrivateAttr()
[docs] class Config:
extra = Extra.forbid
underscore_attrs_are_private = True
[docs] @validator("cluster_id", always=True)
def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_id.")
elif values["endpoint_name"]:
return None
elif v:
return v
else:
try:
if v := get_repl_context().clusterId:
return v
raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
"Neither endpoint_name nor cluster_id was set. "
"And the cluster_id cannot be automatically determined. Received"
f" error: {e}"
)
[docs] @validator("cluster_driver_port", always=True)
def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_driver_port.")
elif values["endpoint_name"]:
return None
elif v is None:
raise ValueError(
"Must set cluster_driver_port to connect to a cluster driver."
)
elif int(v) <= 0:
raise ValueError(f"Invalid cluster_driver_port: {v}")
else:
return v
[docs] @validator("model_kwargs", always=True)
def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if v:
assert "prompt" not in v, "model_kwargs must not contain key 'prompt'"
assert "stop" not in v, "model_kwargs must not contain key 'stop'"
return v
def __init__(self, **data: Any):
super().__init__(**data)
if self.endpoint_name:
self._client = _DatabricksServingEndpointClient(
host=self.host,
api_token=self.api_token,
endpoint_name=self.endpoint_name,
)
elif self.cluster_id and self.cluster_driver_port:
self._client = _DatabricksClusterDriverProxyClient(
host=self.host,
api_token=self.api_token,
cluster_id=self.cluster_id,
cluster_driver_port=self.cluster_driver_port,
)
else:
raise ValueError(
"Must specify either endpoint_name or cluster_id/cluster_driver_port."
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "databricks"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Queries the LLM endpoint with the given prompt and stop sequence."""
# TODO: support callbacks
request = {"prompt": prompt, "stop": stop}
request.update(kwargs)
if self.model_kwargs:
request.update(self.model_kwargs)
if self.transform_input_fn:
request = self.transform_input_fn(**request)
response = self._client.post(request)
if self.transform_output_fn:
response = self.transform_output_fn(response)
return response
https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
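A hedged sketch of wrapping Databricks endpoints with the class above; the endpoint name, cluster ID, port, and transformation functions are placeholder assumptions.
.. code-block:: python

    from langchain.llms import Databricks

    # Serving endpoint mode: host and api_token fall back to DATABRICKS_HOST /
    # DATABRICKS_TOKEN (or the notebook context) via the default factories above.
    llm = Databricks(endpoint_name="my-llm-endpoint")  # hypothetical endpoint name
    print(llm("How are you?"))

    # Cluster driver proxy mode, with optional request/response transformations
    # for servers whose schema differs from {"prompt": ..., "stop": ...}.
    def transform_input(**request):
        request["prompt"] = f"Answer briefly: {request['prompt']}"
        return request

    proxy_llm = Databricks(
        cluster_id="0123-456789-abcdefgh",   # hypothetical cluster ID
        cluster_driver_port="7777",          # hypothetical port
        transform_input_fn=transform_input,
        transform_output_fn=lambda resp: str(resp).strip(),
    )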
Source code for langchain.llms.huggingface_endpoint
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
[docs]class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
params = {**_model_kwargs, **kwargs}
parameter_payload = {"inputs": prompt, "parameters": params}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
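A minimal sketch of calling a Hugging Face Inference Endpoint through the wrapper above; the endpoint URL is the placeholder from the class docstring, and the token and generation parameters are assumptions.
.. code-block:: python

    from langchain.llms import HuggingFaceEndpoint

    hf = HuggingFaceEndpoint(
        endpoint_url=(
            "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
        ),
        huggingfacehub_api_token="my-api-key",  # or set HUGGINGFACEHUB_API_TOKEN
        task="text-generation",
        model_kwargs={"max_new_tokens": 64, "temperature": 0.7},  # assumed params
    )

    # The prompt is posted as {"inputs": ..., "parameters": ...}; for
    # text-generation the echoed prompt is stripped from the returned text.
    print(hf("Tell me a joke.", stop=["\n\n"]))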