| id | text | source |
|---|---|---|
e535e2ad4e9f-1
|
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
model = jsonformer.Jsonformer(
model=pipeline.model,
tokenizer=pipeline.tokenizer,
json_schema=self.json_schema,
prompt=prompt,
max_number_tokens=self.max_new_tokens,
debug=self.debug,
)
text = model()
return json.dumps(text)
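
A hedged usage sketch for the JsonFormer wrapper this fragment belongs to (it is the tail of JsonFormer._call in jsonformer_decoder). The model id and schema below are illustrative assumptions, and the jsonformer package must be installed:

```python
# Sketch only: model id and schema are illustrative, not from the source above.
from transformers import pipeline

from langchain_experimental.llms import JsonFormer

hf_pipeline = pipeline("text2text-generation", model="google/flan-t5-small")
schema = {"type": "object", "properties": {"answer": {"type": "string"}}}
llm = JsonFormer(pipeline=hf_pipeline, json_schema=schema)
print(llm("What is the capital of France?"))  # a JSON string matching the schema
```
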
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/llms/jsonformer_decoder.html
|
63dabf763904-0
|
Source code for langchain_experimental.llms.anthropic_functions
import json
from collections import defaultdict
from html.parser import HTMLParser
from typing import Any, DefaultDict, Dict, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
SystemMessage,
)
from pydantic import root_validator
prompt = """In addition to responding, you can use tools. \
You have access to the following tools.
{tools}
In order to use a tool, you can use <tool></tool> to specify the name, \
and the <tool_input></tool_input> tags to specify the parameters. \
Each parameter should be passed in as <$param_name>$value</$param_name>, \
where $param_name is the name of the specific parameter, and $value \
is the value for that parameter.
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that accepts a single \
parameter 'query' that could run a google search, in order to search \
for the weather in SF you would respond:
<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>"""
class TagParser(HTMLParser):
def __init__(self) -> None:
"""A heavy-handed solution, but it's fast for prototyping.
Might be re-implemented later to restrict scope to the limited grammar and
to improve efficiency.
Uses an HTML parser to parse a limited grammar that allows
for syntax of the form:
INPUT -> JUNK? VALUE*
JUNK -> JUNK_CHARACTER+
JUNK_CHARACTER -> whitespace | ,
VALUE -> <IDENTIFIER>DATA</IDENTIFIER> | OBJECT
OBJECT -> <IDENTIFIER>VALUE+</IDENTIFIER>
IDENTIFIER -> [a-Z][a-Z0-9_]*
DATA -> .*
Interprets the data to allow repetition of tags and recursion
to support representation of complex types.
^ Just another approximately wrong grammar specification.
"""
super().__init__()
self.parse_data: DefaultDict[str, List[Any]] = defaultdict(list)
self.stack: List[DefaultDict[str, List[str]]] = [self.parse_data]
self.success = True
self.depth = 0
self.data: Optional[str] = None
def handle_starttag(self, tag: str, attrs: Any) -> None:
"""Hook when a new tag is encountered."""
self.depth += 1
self.stack.append(defaultdict(list))
self.data = None
def handle_endtag(self, tag: str) -> None:
"""Hook when a tag is closed."""
self.depth -= 1
top_of_stack = dict(self.stack.pop(-1))  # Pop the dictionary; we no longer need it
# If a leaf node
is_leaf = self.data is not None
# Annoying to type here, code is tested, hopefully OK
value = self.data if is_leaf else top_of_stack
# Difficult to type this correctly with mypy (maybe impossible?)
# Can be nested indefinitely, so requires self referencing type
self.stack[-1][tag].append(value) # type: ignore
# Reset the data so that if we encounter a sequence of end tags, we
# don't confuse an outer end tag for belonging to a leaf node.
self.data = None
def handle_data(self, data: str) -> None:
"""Hook when handling data."""
stripped_data = data.strip()
# The only data that's allowed is whitespace or a comma surrounded by whitespace
if self.depth == 0 and stripped_data not in (",", ""):
# If this is triggered the parse should be considered invalid.
self.success = False
if stripped_data: # ignore whitespace-only strings
self.data = stripped_data
def _destrip(tool_input: Any) -> Any:
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
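
A quick illustration of how TagParser and _destrip cooperate: the parser groups repeated tags into lists, and _destrip unwraps single-element lists back into plain values. The input string is illustrative:

```python
# Illustrative round trip from the tag syntax to JSON-ready arguments.
parser = TagParser()
parser.feed("<tool_input><query>weather in SF</query></tool_input>")
raw = parser.parse_data["tool_input"][0]  # {'query': ['weather in SF']}
print(_destrip(raw))                      # {'query': 'weather in SF'}
```
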
class AnthropicFunctions(BaseChatModel):
model: ChatAnthropic
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
return {"model": ChatAnthropic(**values)}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
forced = False
function_call = ""
if "functions" in kwargs:
content = prompt.format(tools=json.dumps(kwargs["functions"], indent=2))
system = SystemMessage(content=content)
messages = [system] + messages
del kwargs["functions"]
if stop is None:
stop = ["</tool_input>"]
else:
stop.append("</tool_input>")
if "function_call" in kwargs:
forced = True
function_call = kwargs["function_call"]["name"]
# Prime the assistant with the forced tool tag so it emits the tool input next
messages = messages + [AIMessage(content=f"<tool>{function_call}</tool>")]
del kwargs["function_call"]
else:
if "function_call" in kwargs:
raise ValueError(
"if `function_call` provided, `functions` must also be"
)
response = self.model.predict_messages(
messages, stop=stop, callbacks=run_manager, **kwargs
)
completion = response.content
if forced:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": function_call,
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content="", additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
elif "<tool>" in completion:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
msg = completion.split("<tool>")[0]
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": tag_parser.parse_data["tool"][0],
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content=msg, additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
return ChatResult(generations=[ChatGeneration(message=response)])
async def agenerate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
raise NotImplementedError
@property
def _llm_type(self) -> str:
return "anthropic_functions"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/llms/anthropic_functions.html
|
47bc7c7a3d66-0
|
Source code for langchain_experimental.llms.llamaapi
import json
import logging
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Tuple,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
logger = logging.getLogger(__name__)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content") or ""
if _dict.get("function_call"):
_dict["function_call"]["arguments"] = json.dumps(
_dict["function_call"]["arguments"]
)
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatLlamaAPI(BaseChatModel):
client: Any #: :meta private:
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
_params = {"messages": message_dicts}
final_params = {**params, **kwargs, **_params}
response = self.client.run(final_params).json()
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._client_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
return ChatResult(generations=generations)
@property
def _client_params(self) -> Mapping[str, Any]:
"""Get the parameters used for the client."""
return {}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "llama-api"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/llms/llamaapi.html
|
653f1019df89-0
|
Source code for langchain_experimental.autonomous_agents.autogpt.agent
from __future__ import annotations
from typing import List, Optional
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ChatMessageHistory
from langchain.schema import (
BaseChatMessageHistory,
Document,
)
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
class AutoGPT:
"""Agent class for interacting with Auto-GPT."""
def __init__(
self,
ai_name: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseAutoGPTOutputParser,
tools: List[BaseTool],
feedback_tool: Optional[HumanInputRun] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
):
self.ai_name = ai_name
self.memory = memory
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
self.chat_history_memory = chat_history_memory or ChatMessageHistory()
@classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
) -> AutoGPT:
prompt = AutoGPTPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
input_variables=["memory", "messages", "goals", "user_input"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
memory,
chain,
output_parser or AutoGPTOutputParser(),
tools,
feedback_tool=human_feedback_tool,
chat_history_memory=chat_history_memory,
)
def run(self, goals: List[str]) -> str:
user_input = (
"Determine which next command to use, "
"and respond using the format specified above:"
)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
# Send message to AI, get response
assistant_reply = self.chain.run(
goals=goals,
messages=self.chat_history_memory.messages,
memory=self.memory,
user_input=user_input,
)
# Print Assistant thoughts
print(assistant_reply)
self.chat_history_memory.add_message(HumanMessage(content=user_input))
self.chat_history_memory.add_message(AIMessage(content=assistant_reply))
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
return action.args["response"]
if action.name in tools:
tool = tools[action.name]
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
)
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
self.memory.add_documents([Document(page_content=memory_to_add)])
self.chat_history_memory.add_message(SystemMessage(content=result))
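
A minimal sketch of wiring AutoGPT together; the tool, embeddings, vector store, and LLM below are illustrative stand-ins (FAISS requires the faiss package):

```python
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import Tool
from langchain.vectorstores import FAISS

tools = [Tool(name="noop", func=lambda x: x, description="echoes its input")]
vectorstore = FAISS.from_texts([""], OpenAIEmbeddings())
agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    tools=tools,
    llm=ChatOpenAI(temperature=0),
    memory=vectorstore.as_retriever(),
)
agent.run(["write a weather report for SF today"])
```
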
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/autogpt/agent.html
|
5e844f6d3623-0
|
Source code for langchain_experimental.autonomous_agents.autogpt.prompt
import time
from typing import Any, Callable, List
from langchain.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import BaseModel
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
"""Prompt for AutoGPT."""
ai_name: str
ai_role: str
tools: List[BaseTool]
token_counter: Callable[[str], int]
send_token_limit: int = 4196
def construct_full_prompt(self, goals: List[str]) -> str:
prompt_start = (
"Your decisions must always be made independently "
"without seeking user assistance.\n"
"Play to your strengths as an LLM and pursue simple "
"strategies with no legal complications.\n"
"If you have completed all your tasks, make sure to "
'use the "finish" command.'
)
# Construct full prompt
full_prompt = (
f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
)
for i, goal in enumerate(goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{get_prompt(self.tools)}"
return full_prompt
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
time_prompt = SystemMessage(
content=f"The current time and date is {time.strftime('%c')}"
)
used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
time_prompt.content
)
memory: VectorStoreRetriever = kwargs["memory"]
previous_messages = kwargs["messages"]
relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
relevant_memory = [d.page_content for d in relevant_docs]
relevant_memory_tokens = sum(
[self.token_counter(doc) for doc in relevant_memory]
)
while used_tokens + relevant_memory_tokens > 2500:
relevant_memory = relevant_memory[:-1]
relevant_memory_tokens = sum(
[self.token_counter(doc) for doc in relevant_memory]
)
content_format = (
f"This reminds you of these events "
f"from your past:\n{relevant_memory}\n\n"
)
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(memory_message.content)
historical_messages: List[BaseMessage] = []
for message in previous_messages[-10:][::-1]:
message_tokens = self.token_counter(message.content)
if used_tokens + message_tokens > self.send_token_limit - 1000:
break
historical_messages = [message] + historical_messages
used_tokens += message_tokens
input_message = HumanMessage(content=kwargs["user_input"])
messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
messages += historical_messages
messages.append(input_message)
return messages
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/autogpt/prompt.html
|
cf829621bce1-0
|
Source code for langchain_experimental.autonomous_agents.autogpt.output_parser
import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple
from langchain.schema import BaseOutputParser
class AutoGPTAction(NamedTuple):
"""Action returned by AutoGPTOutputParser."""
name: str
args: Dict
class BaseAutoGPTOutputParser(BaseOutputParser):
"""Base output parser for AutoGPT."""
@abstractmethod
def parse(self, text: str) -> AutoGPTAction:
"""Return an AutoGPTAction."""
def preprocess_json_input(input_str: str) -> str:
"""Preprocesses a string to be parsed as json.
Replace single backslashes with double backslashes,
while leaving already escaped ones intact.
Args:
input_str: String to be preprocessed
Returns:
Preprocessed string
"""
corrected_str = re.sub(
r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
)
return corrected_str
class AutoGPTOutputParser(BaseAutoGPTOutputParser):
"""Output parser for AutoGPT."""
def parse(self, text: str) -> AutoGPTAction:
try:
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
preprocessed_text = preprocess_json_input(text)
try:
parsed = json.loads(preprocessed_text, strict=False)
except Exception:
return AutoGPTAction(
name="ERROR",
args={"error": f"Could not parse invalid json: {text}"},
)
try:
return AutoGPTAction(
name=parsed["command"]["name"],
args=parsed["command"]["args"],
)
except (KeyError, TypeError):
# If the command is null or incomplete, return an erroneous tool
return AutoGPTAction(
name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
)
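
A worked example of the parser on an illustrative reply, plus the backslash repair that preprocess_json_input performs:

```python
# A well-formed reply parses straight into an AutoGPTAction.
reply = '{"command": {"name": "search", "args": {"query": "weather in SF"}}}'
action = AutoGPTOutputParser().parse(reply)
assert action == AutoGPTAction(name="search", args={"query": "weather in SF"})

# Stray single backslashes (invalid JSON escapes) get doubled.
assert preprocess_json_input('"C:\\Users\\me"') == '"C:\\\\Users\\\\me"'
```
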
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/autogpt/output_parser.html
|
7b63d671d445-0
|
Source code for langchain_experimental.autonomous_agents.autogpt.prompt_generator
import json
from typing import List
from langchain.tools.base import BaseTool
FINISH_NAME = "finish"
class PromptGenerator:
"""A class for generating custom prompt strings.
Does this based on constraints, commands, resources, and performance evaluations.
"""
def __init__(self) -> None:
"""Initialize the PromptGenerator object.
Starts with empty lists of constraints, commands, resources,
and performance evaluations.
"""
self.constraints: List[str] = []
self.commands: List[BaseTool] = []
self.resources: List[str] = []
self.performance_evaluation: List[str] = []
self.response_format = {
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user",
},
"command": {"name": "command name", "args": {"arg name": "value"}},
}
def add_constraint(self, constraint: str) -> None:
"""
Add a constraint to the constraints list.
Args:
constraint (str): The constraint to be added.
"""
self.constraints.append(constraint)
def add_tool(self, tool: BaseTool) -> None:
self.commands.append(tool)
def _generate_command_string(self, tool: BaseTool) -> str:
output = f"{tool.name}: {tool.description}"
output += f", args json schema: {json.dumps(tool.args)}"
return output
def add_resource(self, resource: str) -> None:
"""
Add a resource to the resources list.
Args:
resource (str): The resource to be added.
"""
self.resources.append(resource)
def add_performance_evaluation(self, evaluation: str) -> None:
"""
Add a performance evaluation item to the performance_evaluation list.
Args:
evaluation (str): The evaluation item to be added.
"""
self.performance_evaluation.append(evaluation)
def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
"""
Generate a numbered list from given items based on the item_type.
Args:
items (list): A list of items to be numbered.
item_type (str, optional): The type of items in the list.
Defaults to 'list'.
Returns:
str: The formatted numbered list.
"""
if item_type == "command":
command_strings = [
f"{i + 1}. {self._generate_command_string(item)}"
for i, item in enumerate(items)
]
finish_description = (
"use this to signal that you have finished all your objectives"
)
finish_args = (
'"response": "final response to let '
'people know you have finished your objectives"'
)
finish_string = (
f"{len(items) + 1}. {FINISH_NAME}: "
f"{finish_description}, args: {finish_args}"
)
return "\n".join(command_strings + [finish_string])
else:
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
def generate_prompt_string(self) -> str:
"""Generate a prompt string.
Returns:
str: The generated prompt string.
"""
formatted_response_format = json.dumps(self.response_format, indent=4)
prompt_string = (
f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
f"Commands:\n"
f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
f"Performance Evaluation:\n"
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
f"You should only respond in JSON format as described below "
f"\nResponse Format: \n{formatted_response_format} "
f"\nEnsure the response can be parsed by Python json.loads"
)
return prompt_string
def get_prompt(tools: List[BaseTool]) -> str:
"""Generates a prompt string.
It includes various constraints, commands, resources, and performance evaluations.
Returns:
str: The generated prompt string.
"""
# Initialize the PromptGenerator object
prompt_generator = PromptGenerator()
# Add constraints to the PromptGenerator object
prompt_generator.add_constraint(
"~4000 word limit for short term memory. "
"Your short term memory is short, "
"so immediately save important information to files."
)
prompt_generator.add_constraint(
"If you are unsure how you previously did something "
"or want to recall past events, "
"thinking about similar events will help you remember."
)
prompt_generator.add_constraint("No user assistance")
prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"'
)
# Add commands to the PromptGenerator object
for tool in tools:
prompt_generator.add_tool(tool)
# Add resources to the PromptGenerator object
prompt_generator.add_resource(
"Internet access for searches and information gathering."
)
prompt_generator.add_resource("Long Term memory management.")
prompt_generator.add_resource(
"GPT-3.5 powered Agents for delegation of simple tasks."
)
prompt_generator.add_resource("File output.")
# Add performance evaluations to the PromptGenerator object
prompt_generator.add_performance_evaluation(
"Continuously review and analyze your actions "
"to ensure you are performing to the best of your abilities."
)
prompt_generator.add_performance_evaluation(
"Constructively self-criticize your big-picture behavior constantly."
)
prompt_generator.add_performance_evaluation(
"Reflect on past decisions and strategies to refine your approach."
)
prompt_generator.add_performance_evaluation(
"Every command has a cost, so be smart and efficient. "
"Aim to complete tasks in the least number of steps."
)
# Generate the prompt string
prompt_string = prompt_generator.generate_prompt_string()
return prompt_string
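
An illustrative render of the generated prompt body for one hypothetical tool:

```python
from langchain.tools import Tool

search = Tool(name="search", func=lambda q: q, description="run a web search")
body = get_prompt([search])
print(body.splitlines()[0])  # "Constraints:", then commands, resources, evaluations
```
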
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/autogpt/prompt_generator.html
|
e645453ce2b4-0
|
Source code for langchain_experimental.autonomous_agents.autogpt.memory
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):
"""Memory for AutoGPT."""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
@property
def memory_variables(self) -> List[str]:
return ["chat_history", "relevant_context"]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.get_relevant_documents(query)
return {
"chat_history": self.chat_memory.messages[-10:],
"relevant_context": docs,
}
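
A sketch of the memory in use: it returns the last ten chat messages plus retriever hits for the current input. The vector store here is an assumed stand-in:

```python
memory = AutoGPTMemory(retriever=vectorstore.as_retriever())
variables = memory.load_memory_variables({"input": "What did we decide yesterday?"})
print(variables["chat_history"], variables["relevant_context"])
```
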
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/autogpt/memory.html
|
0502b20d8357-0
|
Source code for langchain_experimental.autonomous_agents.baby_agi.task_prioritization
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Create the chain from an LLM."""
task_prioritization_template = (
"You are a task prioritization AI tasked with cleaning the formatting of "
"and reprioritizing the following tasks: {task_names}."
" Consider the ultimate objective of your team: {objective}."
" Do not remove any tasks. Return the result as a numbered list, like:"
" #. First task"
" #. Second task"
" Start the task list with number {next_task_id}."
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/baby_agi/task_prioritization.html
|
90df457249ad-0
|
Source code for langchain_experimental.autonomous_agents.baby_agi.task_execution
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskExecutionChain(LLMChain):
"""Chain to execute tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Create the chain from an LLM."""
execution_template = (
"You are an AI who performs one task based on the following objective: "
"{objective}."
"Take into account these previously completed tasks: {context}."
" Your task: {task}. Response:"
)
prompt = PromptTemplate(
template=execution_template,
input_variables=["objective", "context", "task"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/baby_agi/task_execution.html
|
5ddddfa1c363-0
|
Source code for langchain_experimental.autonomous_agents.baby_agi.task_creation
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskCreationChain(LLMChain):
"""Chain to generate tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Create the chain from an LLM."""
task_creation_template = (
"You are a task creation AI that uses the result of an execution agent"
" to create new tasks with the following objective: {objective},"
" The last completed task has the result: {result}."
" This result was based on this task description: {task_description}."
" These are incomplete tasks: {incomplete_tasks}."
" Based on the result, create new tasks to be completed"
" by the AI system that do not overlap with incomplete tasks."
" Return the tasks as an array."
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=[
"result",
"task_description",
"incomplete_tasks",
"objective",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
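
Each of the three BabyAGI helpers is an ordinary LLMChain; a sketch with illustrative inputs (llm is assumed to be defined):

```python
creation_chain = TaskCreationChain.from_llm(llm)
new_tasks = creation_chain.run(
    result="Collected today's SF weather data.",
    task_description="Gather weather data for San Francisco",
    incomplete_tasks="write summary, post report",
    objective="Write a weather report for SF today",
)
print(new_tasks)  # the model's list of proposed follow-up tasks
```
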
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/baby_agi/task_creation.html
|
ccf23a59ea04-0
|
Source code for langchain_experimental.autonomous_agents.baby_agi.baby_agi
"""BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: Chain = Field(...)
task_prioritization_chain: Chain = Field(...)
execution_chain: Chain = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict) -> None:
self.task_list.append(task)
def print_task_list(self) -> None:
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict) -> None:
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str) -> None:
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def get_next_task(
self, result: str, task_description: str, objective: str, **kwargs: Any
) -> List[Dict]:
"""Get the next task."""
task_names = [t["task_name"] for t in self.task_list]
incomplete_tasks = ", ".join(task_names)
response = self.task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
return [
{"task_name": task_name} for task_name in new_tasks if task_name.strip()
]
def prioritize_tasks(
self, this_task_id: int, objective: str, **kwargs: Any
) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in list(self.task_list)]
next_task_id = int(this_task_id) + 1
response = self.task_prioritization_chain.run(
task_names=", ".join(task_names),
next_task_id=str(next_task_id),
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append(
{"task_id": task_id, "task_name": task_name}
)
return prioritized_task_list
def _get_top_tasks(self, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = self.vectorstore.similarity_search(query, k=k)
if not results:
return []
return [str(item.metadata["task"]) for item in results]
def execute_task(self, objective: str, task: str, k: int = 5, **kwargs: Any) -> str:
"""Execute a task."""
context = self._get_top_tasks(query=objective, k=k)
return self.execution_chain.run(
objective=objective, context="\n".join(context), task=task, **kwargs
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the agent."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = self.execute_task(
objective, task["task_name"], callbacks=_run_manager.get_child()
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
# Step 3: Store the result in Pinecone
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = self.get_next_task(
result,
task["task_name"],
objective,
callbacks=_run_manager.get_child(),
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
self.prioritize_tasks(
this_task_id, objective, callbacks=_run_manager.get_child()
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
verbose: bool = False,
task_execution_chain: Optional[Chain] = None,
**kwargs: Dict[str, Any],
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
if task_execution_chain is None:
execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
else:
execution_chain = task_execution_chain
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=execution_chain,
vectorstore=vectorstore,
**kwargs,
)
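
A usage sketch in the style of the LangChain docs; llm and vectorstore are assumed to exist, and max_iterations bounds the loop in _call:

```python
baby_agi = BabyAGI.from_llm(llm=llm, vectorstore=vectorstore, max_iterations=3)
baby_agi({"objective": "Write a weather report for SF today"})
```
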
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/baby_agi/baby_agi.html
|
26015b0788fd-0
|
Source code for langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator
from typing import Any, List, Optional
from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
class ResponseGenerationChain(LLMChain):
"""Chain to generate the final response from task results."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
execution_template = (
"The AI assistant has parsed the user input into several tasks"
"and executed them. The results are as follows:\n"
"{task_execution}"
"\nPlease summarize the results and generate a response."
)
prompt = PromptTemplate(
template=execution_template,
input_variables=["task_execution"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class ResponseGenerator:
def __init__(self, llm_chain: LLMChain, stop: Optional[List] = None):
self.llm_chain = llm_chain
self.stop = stop
def generate(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Generate a response from the task execution results."""
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
return llm_response
def load_response_generator(llm: BaseLanguageModel) -> ResponseGenerator:
llm_chain = ResponseGenerationChain.from_llm(llm)
return ResponseGenerator(
llm_chain=llm_chain,
)
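
A sketch of the generator summarizing an executed plan; llm and the task_execution string are illustrative:

```python
generator = load_response_generator(llm)
answer = generator.generate(
    {"task_execution": "image_generator -> sunset.png (completed)"}
)
print(answer)
```
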
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/hugginggpt/repsonse_generator.html
|
a129e297f97a-0
|
Source code for langchain_experimental.autonomous_agents.hugginggpt.task_executor
import copy
import uuid
from typing import Dict, List
import numpy as np
from langchain.tools.base import BaseTool
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import Plan
class Task:
def __init__(self, task: str, id: int, dep: List[int], args: Dict, tool: BaseTool):
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
self.status = "pending"
self.message = ""
self.result = ""
def __str__(self) -> str:
return f"{self.task}({self.args})"
def save_product(self) -> None:
import cv2
if self.task == "video_generator":
# ndarray to video
product = np.array(self.product)
nframe, height, width, _ = product.shape
video_filename = uuid.uuid4().hex[:6] + ".mp4"
fps = 30 # Frames per second
fourcc = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore
video_out = cv2.VideoWriter(video_filename, fourcc, fps, (width, height))
for frame in self.product:
video_out.write(frame)
video_out.release()
self.result = video_filename
elif self.task == "image_generator":
# PIL.Image to image
filename = uuid.uuid4().hex[:6] + ".png"
self.product.save(filename) # type: ignore
self.result = filename
def completed(self) -> bool:
return self.status == "completed"
def failed(self) -> bool:
return self.status == "failed"
def pending(self) -> bool:
return self.status == "pending"
def run(self) -> str:
from diffusers.utils import load_image
try:
new_args = copy.deepcopy(self.args)
for k, v in new_args.items():
if k == "image":
new_args["image"] = load_image(v)
if self.task in ["video_generator", "image_generator", "text_reader"]:
self.product = self.tool(**new_args)
else:
self.result = self.tool(**new_args)
except Exception as e:
self.status = "failed"
self.message = str(e)
else:
# Only mark the task completed and save its product if the tool ran cleanly
self.status = "completed"
self.save_product()
return self.result
class TaskExecutor:
"""Load tools to execute tasks."""
def __init__(self, plan: Plan):
self.plan = plan
self.tasks = []
self.id_task_map = {}
self.status = "pending"
for step in self.plan.steps:
task = Task(step.task, step.id, step.dep, step.args, step.tool)
self.tasks.append(task)
self.id_task_map[step.id] = task
def completed(self) -> bool:
return all(task.completed() for task in self.tasks)
def failed(self) -> bool:
return any(task.failed() for task in self.tasks)
def pending(self) -> bool:
return any(task.pending() for task in self.tasks)
def check_dependency(self, task: Task) -> bool:
for dep_id in task.dep:
if dep_id == -1:
continue
dep_task = self.id_task_map[dep_id]
if dep_task.failed() or dep_task.pending():
return False
return True
def update_args(self, task: Task) -> None:
for dep_id in task.dep:
if dep_id == -1:
continue
dep_task = self.id_task_map[dep_id]
for k, v in task.args.items():
if f"<resource-{dep_id}>" in v:
task.args[k] = v.replace(f"<resource-{dep_id}>", dep_task.result)
def run(self) -> str:
for task in self.tasks:
print(f"running {task}")
if task.pending() and self.check_dependency(task):
self.update_args(task)
task.run()
if self.completed():
self.status = "completed"
elif self.failed():
self.status = "failed"
else:
self.status = "pending"
return self.status
def __str__(self) -> str:
result = ""
for task in self.tasks:
result += f"{task}\n"
result += f"status: {task.status}\n"
if task.failed():
result += f"message: {task.message}\n"
if task.completed():
result += f"result: {task.result}\n"
return result
def __repr__(self) -> str:
return self.__str__()
def describe(self) -> str:
return self.__str__()
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/hugginggpt/task_executor.html
|
74c0159ac945-0
|
Source code for langchain_experimental.autonomous_agents.hugginggpt.hugginggpt
from typing import List
from langchain.base_language import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
load_response_generator,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_executor import (
TaskExecutor,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import (
load_chat_planner,
)
class HuggingGPT:
def __init__(self, llm: BaseLanguageModel, tools: List[BaseTool]):
self.llm = llm
self.tools = tools
self.chat_planner = load_chat_planner(llm)
self.response_generator = load_response_generator(llm)
self.task_executor: TaskExecutor
def run(self, input: str) -> str:
plan = self.chat_planner.plan(inputs={"input": input, "hf_tools": self.tools})
self.task_executor = TaskExecutor(plan)
self.task_executor.run()
response = self.response_generator.generate(
{"task_execution": self.task_executor}
)
return response
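
A minimal sketch; llm and hf_tools are assumed to be defined elsewhere:

```python
agent = HuggingGPT(llm, hf_tools)
print(agent.run("please generate a video based on 'a boy is running'"))
```
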
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/hugginggpt/hugginggpt.html
|
c43fa74789a4-0
|
Source code for langchain_experimental.autonomous_agents.hugginggpt.task_planner
import json
import re
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Union
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.tools.base import BaseTool
from pydantic import BaseModel
DEMONSTRATIONS = [
{
"role": "user",
"content": "please show me a video and an image of (based on the text) 'a boy is running' and dub it", # noqa: E501
},
{
"role": "assistant",
"content": '[{{"task": "video_generator", "id": 0, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}, {{"task": "text_reader", "id": 1, "dep": [-1], "args": {{"text": "a boy is running" }}}}, {{"task": "image_generator", "id": 2, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}]', # noqa: E501
},
{
"role": "user",
"content": "Give you some pictures e1.jpg, e2.png, e3.jpg, help me count the number of sheep?", # noqa: E501
},
{
"role": "assistant",
"content": '[ {{"task": "image_qa", "id": 0, "dep": [-1], "args": {{"image": "e1.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 1, "dep": [-1], "args": {{"image": "e2.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 2, "dep": [-1], "args": {{"image": "e3.jpg", "question": "How many sheep in the picture"}}}}]', # noqa: E501
},
]
class TaskPlaningChain(LLMChain):
"""Chain to plan tasks."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
demos: List[Dict] = DEMONSTRATIONS,
verbose: bool = True,
) -> LLMChain:
"""Get the response parser."""
system_template = """#1 Task Planning Stage: The AI assistant can parse user input to several tasks: [{{"task": task, "id": task_id, "dep": dependency_task_id, "args": {{"input name": text may contain <resource-dep_id>}}}}]. The special tag "dep_id" refer to the one generated text/image/audio in the dependency task (Please consider whether the dependency task generates resources of this type.) and "dep_id" must be in "dep" list. The "dep" field denotes the ids of the previous prerequisite tasks which generate a new resource that the current task relies on. The task MUST be selected from the following tools (along with tool description, input name and output type): {tools}. There may be multiple tasks of the same type. Think step by step about all the tasks needed to resolve the user's request. Parse out as few tasks as possible while ensuring that the user request can be resolved. Pay attention to the dependencies and order among tasks. If the user input can't be parsed, you need to reply empty JSON [].""" # noqa: E501
human_template = """Now I input: {input}."""
system_message_prompt = SystemMessagePromptTemplate.from_template(
system_template
)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
demo_messages: List[
Union[HumanMessagePromptTemplate, AIMessagePromptTemplate]
] = []
for demo in demos:
if demo["role"] == "user":
demo_messages.append(
HumanMessagePromptTemplate.from_template(demo["content"])
)
else:
demo_messages.append(
AIMessagePromptTemplate.from_template(demo["content"])
)
# demo_messages.append(message)
prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, *demo_messages, human_message_prompt]
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class Step:
def __init__(
self, task: str, id: int, dep: List[int], args: Dict[str, str], tool: BaseTool
):
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
class Plan:
def __init__(self, steps: List[Step]):
self.steps = steps
def __str__(self) -> str:
return str([str(step) for step in self.steps])
def __repr__(self) -> str:
return str(self)
class BasePlanner(BaseModel):
@abstractmethod
def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
@abstractmethod
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, decide what to do."""
class PlanningOutputParser(BaseModel):
def parse(self, text: str, hf_tools: List[BaseTool]) -> Plan:
steps = []
for v in json.loads(re.findall(r"\[.*\]", text)[0]):
choose_tool = None
for tool in hf_tools:
if tool.name == v["task"]:
choose_tool = tool
break
if choose_tool:
steps.append(Step(v["task"], v["id"], v["dep"], v["args"], choose_tool))
return Plan(steps=steps)
class TaskPlanner(BasePlanner):
llm_chain: LLMChain
output_parser: PlanningOutputParser
stop: Optional[List] = None
def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
inputs["tools"] = [
f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"]
]
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
return self.output_parser.parse(llm_response, inputs["hf_tools"])
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, decide what to do."""
inputs["hf_tools"] = [
f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"]
]
llm_response = await self.llm_chain.arun(
**inputs, stop=self.stop, callbacks=callbacks
)
return self.output_parser.parse(llm_response, inputs["hf_tools"])
def load_chat_planner(llm: BaseLanguageModel) -> TaskPlanner:
llm_chain = TaskPlaningChain.from_llm(llm)
return TaskPlanner(llm_chain=llm_chain, output_parser=PlanningOutputParser())
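
A sketch of PlanningOutputParser in isolation, with an illustrative tool and plan text in the format the demonstrations above establish:

```python
from langchain.tools import Tool

hf_tools = [
    Tool(
        name="image_generator",
        func=lambda p: "out.png",
        description="generate an image from a text prompt",
    )
]
plan_text = '[{"task": "image_generator", "id": 0, "dep": [-1], "args": {"prompt": "a sunset"}}]'
plan = PlanningOutputParser().parse(plan_text, hf_tools)
print(len(plan.steps))  # 1
```
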
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/autonomous_agents/hugginggpt/task_planner.html
|
b4fa6a571454-0
|
Source code for langchain_experimental.tot.thought
from __future__ import annotations
from enum import Enum
from typing import Set
from pydantic import BaseModel, Field
class ThoughtValidity(Enum):
VALID_INTERMEDIATE = 0
VALID_FINAL = 1
INVALID = 2
class Thought(BaseModel):
text: str
validity: ThoughtValidity
children: Set[Thought] = Field(default_factory=set)
def __hash__(self) -> int:
return id(self)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/thought.html
|
94b17952d25c-0
|
Source code for langchain_experimental.tot.checker
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain_experimental.tot.thought import ThoughtValidity
class ToTChecker(Chain, ABC):
"""
Tree of Thought (ToT) checker.
This is an abstract ToT checker that must be implemented by the user. You
can implement a simple rule-based checker or a more sophisticated
neural network based classifier.
"""
output_key: str = "validity" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""The checker input keys.
:meta private:
"""
return ["problem_description", "thoughts"]
@property
def output_keys(self) -> List[str]:
"""The checker output keys.
:meta private:
"""
return [self.output_key]
@abstractmethod
def evaluate(
self,
problem_description: str,
thoughts: Tuple[str, ...] = (),
) -> ThoughtValidity:
"""
Evaluate the response to the problem description and return the solution type.
"""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, ThoughtValidity]:
return {self.output_key: self.evaluate(**inputs)}
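
A toy rule-based checker of the kind the docstring describes; the rule itself is illustrative:

```python
from typing import Tuple

class MyChecker(ToTChecker):
    def evaluate(
        self, problem_description: str, thoughts: Tuple[str, ...] = ()
    ) -> ThoughtValidity:
        last_thought = thoughts[-1] if thoughts else ""
        if last_thought.strip() == "42":
            return ThoughtValidity.VALID_FINAL
        return ThoughtValidity.VALID_INTERMEDIATE
```
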
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/checker.html
|
b9db61e8f8be-0
|
Source code for langchain_experimental.tot.base
"""
This is a Tree of Thought (ToT) chain based on the paper "Large Language Model
Guided Tree-of-Thought"
https://arxiv.org/pdf/2305.08291.pdf
The Tree of Thought (ToT) chain uses a tree structure to explore the space of
possible solutions to a problem.
"""
from __future__ import annotations
from textwrap import indent
from typing import Any, Dict, List, Optional, Type
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from pydantic import Extra
from langchain_experimental.tot.checker import ToTChecker
from langchain_experimental.tot.controller import ToTController
from langchain_experimental.tot.memory import ToTDFSMemory
from langchain_experimental.tot.thought import Thought, ThoughtValidity
from langchain_experimental.tot.thought_generation import (
BaseThoughtGenerationStrategy,
ProposePromptStrategy,
)
class ToTChain(Chain):
"""
A Chain implementing the Tree of Thought (ToT).
"""
llm: BaseLanguageModel
"""
Language model to use. It must be set to produce different variations for
the same prompt.
"""
checker: ToTChecker
"""ToT Checker to use."""
output_key: str = "response" #: :meta private:
k: int = 10
"""The maximmum number of conversation rounds"""
c: int = 3
"""The number of children to explore at each node"""
tot_memory: ToTDFSMemory = ToTDFSMemory()
tot_controller: ToTController = ToTController()
tot_strategy_class: Type[BaseThoughtGenerationStrategy] = ProposePromptStrategy
verbose_llm: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> ToTChain:
"""
Create a ToTChain from a language model.
:param llm: The language model to use.
:param kwargs: Additional arguments to pass to the ToTChain constructor.
"""
return cls(llm=llm, **kwargs)
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self.tot_controller.c = self.c
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return ["problem_description"]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
def log_thought(
self,
thought: Thought,
level: int,
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> None:
if run_manager:
colors = {
ThoughtValidity.VALID_FINAL: "green",
ThoughtValidity.VALID_INTERMEDIATE: "yellow",
ThoughtValidity.INVALID: "red",
}
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/base.html
|
b9db61e8f8be-2
|
text = indent(f"Thought: {thought.text}\n", prefix=" " * level)
run_manager.on_text(
text=text, color=colors[thought.validity], verbose=self.verbose
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
if run_manager:
run_manager.on_text(text="Starting the ToT solve procedure.\n")
problem_description = inputs["problem_description"]
checker_inputs = {"problem_description": problem_description}
thoughts_path: tuple[str, ...] = ()
thought_generator = self.tot_strategy_class(
llm=self.llm, c=self.c, verbose=self.verbose_llm
)
level = 0
for _ in range(self.k):
level = self.tot_memory.level
thought_text = thought_generator.next_thought(
problem_description, thoughts_path, callbacks=_run_manager.get_child()
)
checker_inputs["thoughts"] = thoughts_path + (thought_text,)
thought_validity = self.checker(
checker_inputs, callbacks=_run_manager.get_child()
)["validity"]
thought = Thought(text=thought_text, validity=thought_validity)
if thought.validity == ThoughtValidity.VALID_FINAL:
self.log_thought(thought, level, run_manager)
return {self.output_key: thought.text}
self.tot_memory.store(thought)
self.log_thought(thought, level, run_manager)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/base.html
|
b9db61e8f8be-3
|
thoughts_path = self.tot_controller(self.tot_memory)
return {self.output_key: "No solution found"}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
raise NotImplementedError("Async not implemented yet")
@property
def _chain_type(self) -> str:
return "tot"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/base.html
|
9ec7325497ce-0
|
Source code for langchain_experimental.tot.controller
from typing import Tuple
from langchain_experimental.tot.memory import ToTDFSMemory
from langchain_experimental.tot.thought import ThoughtValidity
[docs]class ToTController:
"""
Tree of Thought (ToT) controller.
This is a version of a ToT controller, dubbed in the paper as a "Simple
Controller".
It has one parameter `c` which is the number of children to explore for each
thought.
"""
[docs] def __init__(self, c: int = 3):
"""
Initialize the controller.
Args:
c: The number of children to explore at each node.
"""
self.c = c
def __call__(self, memory: ToTDFSMemory) -> Tuple[str, ...]:
next_thought = memory.top()
parent_thought = memory.top_parent()
validity = (
ThoughtValidity.VALID_INTERMEDIATE
if next_thought is None
else next_thought.validity
)
# 1. If the current partial solution is invalid, backtrack to the
# parent thought.
if validity == ThoughtValidity.INVALID:
memory.pop()
next_thought = memory.top()
if next_thought and len(next_thought.children) >= self.c:
memory.pop()
# 2. If the current partial solution is valid but c children have
# already been explored without finding a final solution, backtrack
# to the parent thought.
elif (
validity == ThoughtValidity.VALID_INTERMEDIATE
and parent_thought
and len(parent_thought.children) >= self.c
):
memory.pop(2)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/controller.html
|
9ec7325497ce-1
|
return tuple(thought.text for thought in memory.current_path())
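# --- Illustrative walk-through (not part of the module) ----------------
# Rule 1 in action: the INVALID thought on top of the stack is popped,
# so the next path to expand backtracks to its parent. Thought texts
# are made up.
from langchain_experimental.tot.thought import Thought

memory = ToTDFSMemory()
memory.store(Thought(text="a", validity=ThoughtValidity.VALID_INTERMEDIATE))
memory.store(Thought(text="b", validity=ThoughtValidity.INVALID))
assert ToTController(c=3)(memory) == ("a",)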
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/controller.html
|
b5978489ce09-0
|
Source code for langchain_experimental.tot.prompts
import json
from textwrap import dedent
from typing import List
from langchain.prompts import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain_experimental.tot.thought import ThoughtValidity
COT_PROMPT = PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts"],
template=dedent(
"""
You are an intelligent agent that is generating one thought at a time in
a tree of thoughts setting.
PROBLEM
{{problem_description}}
{% if thoughts %}
THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
{% endif %}
Let's think step by step.
"""
).strip(),
)
[docs]class JSONListOutputParser(BaseOutputParser):
"""Class to parse the output of a PROPOSE_PROMPT response."""
@property
def _type(self) -> str:
return "json_list"
[docs] def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
json_string = text.split("```json")[1].strip().strip("```").strip()
try:
return json.loads(json_string)
except json.JSONDecodeError:
return []
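# --- Illustrative example (not part of the module) ---------------------
# parse() expects the fenced format requested by PROPOSE_PROMPT below.
# Malformed JSON degrades to an empty list; note, however, that input
# with no "```json" fence at all would raise an IndexError on the split.
parser = JSONListOutputParser()
text = 'Sure:\n```json\n["thought-1", "thought-2"]\n```'
assert parser.parse(text) == ["thought-1", "thought-2"]
assert parser.parse("```json\nnot json\n```") == []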
PROPOSE_PROMPT = PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts", "n"],
output_parser=JSONListOutputParser(),
template=dedent(
"""
You are an intelligent agent that is generating thoughts in a tree of
thoughts setting.
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/prompts.html
|
b5978489ce09-1
|
The output should be a markdown code snippet formatted as a JSON list of
strings, including the leading and trailing "```json" and "```":
```json
[
"<thought-1>",
"<thought-2>",
"<thought-3>"
]
```
PROBLEM
{{ problem_description }}
{% if thoughts %}
VALID THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
Possible next {{ n }} valid thoughts based on the last valid thought:
{% else %}
Possible next {{ n }} valid thoughts based on the PROBLEM:
{%- endif -%}
"""
).strip(),
)
[docs]class CheckerOutputParser(BaseOutputParser):
[docs] def parse(self, text: str) -> ThoughtValidity:
"""Parse the output of the language model."""
text = text.upper()
if "INVALID" in text:
return ThoughtValidity.INVALID
elif "INTERMEDIATE" in text:
return ThoughtValidity.VALID_INTERMEDIATE
elif "VALID" in text:
return ThoughtValidity.VALID_FINAL
else:
return ThoughtValidity.INVALID
@property
def _type(self) -> str:
return "tot_llm_checker_output"
CHECKER_PROMPT = PromptTemplate(
input_variables=["problem_description", "thoughts"],
template=dedent(
"""
You are an intelligent agent, validating thoughts of another intelligent agent.
PROBLEM
{problem_description}
THOUGHTS
{thoughts}
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/prompts.html
|
b5978489ce09-2
|
Evaluate the thoughts and respond with one word.
- Respond VALID if the last thought is a valid final solution to the
problem.
- Respond INVALID if the last thought is invalid.
- Respond INTERMEDIATE if the last thought is valid but not the final
solution to the problem.
This chain of thoughts is"""
).strip(),
output_parser=CheckerOutputParser(),
)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/prompts.html
|
a8b1c436ba96-0
|
Source code for langchain_experimental.tot.memory
from __future__ import annotations
from typing import List, Optional
from langchain_experimental.tot.thought import Thought
[docs]class ToTDFSMemory:
"""
Memory for the Tree of Thought (ToT) chain. Implemented as a stack of
thoughts. This allows for a depth first search (DFS) of the ToT.
"""
[docs] def __init__(self, stack: Optional[List[Thought]] = None):
self.stack: list[Thought] = stack or []
[docs] def top(self) -> Optional[Thought]:
"Get the top of the stack without popping it."
return self.stack[-1] if len(self.stack) > 0 else None
[docs] def pop(self, n: int = 1) -> Optional[Thought]:
"Pop the top n elements of the stack and return the last one."
if len(self.stack) < n:
return None
for _ in range(n):
node = self.stack.pop()
return node
[docs] def top_parent(self) -> Optional[Thought]:
"Get the parent of the top of the stack without popping it."
return self.stack[-2] if len(self.stack) > 1 else None
[docs] def store(self, node: Thought) -> None:
"Add a node on the top of the stack."
if len(self.stack) > 0:
self.stack[-1].children.add(node)
self.stack.append(node)
@property
def level(self) -> int:
"Return the current level of the stack."
return len(self.stack)
[docs] def current_path(self) -> List[Thought]:
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/memory.html
|
a8b1c436ba96-1
|
"Return the thoughts path."
return self.stack[:]
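# --- Illustrative example (not part of the module) ---------------------
# Stack semantics of the DFS memory; thought texts are made up.
from langchain_experimental.tot.thought import ThoughtValidity

memory = ToTDFSMemory()
memory.store(Thought(text="root", validity=ThoughtValidity.VALID_INTERMEDIATE))
memory.store(Thought(text="child", validity=ThoughtValidity.VALID_INTERMEDIATE))
assert memory.level == 2 and memory.top().text == "child"
assert memory.top_parent().text == "root"
memory.pop()
assert [t.text for t in memory.current_path()] == ["root"]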
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/memory.html
|
d4134f71d99f-0
|
Source code for langchain_experimental.tot.thought_generation
"""
We provide two strategies for generating thoughts in the Tree of Thoughts (ToT)
framework to avoid repetition: sampling thoughts independently from a
Chain-of-Thought (CoT) prompt, and proposing several distinct thoughts in a
single completion with a "propose prompt".
These strategies ensure that the language model generates diverse and
non-repeating thoughts, which are crucial for problem-solving tasks that require
exploration.
"""
from abc import abstractmethod
from typing import Any, Dict, List, Tuple
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from pydantic import Field
from langchain_experimental.tot.prompts import COT_PROMPT, PROPOSE_PROMPT
[docs]class BaseThoughtGenerationStrategy(LLMChain):
"""
Base class for a thought generation strategy.
"""
c: int = 3
"""The number of children thoughts to propose at each step."""
[docs] @abstractmethod
def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
"""
Generate the next thought given the problem description and the thoughts
generated so far.
"""
[docs]class SampleCoTStrategy(BaseThoughtGenerationStrategy):
"""
Sample thoughts from a Chain-of-Thought (CoT) prompt.
This strategy works better when the thought space is rich, such as when each
thought is a paragraph. Independent and identically distributed samples
lead to diversity, which helps to avoid repetition.
"""
prompt: BasePromptTemplate = COT_PROMPT
[docs] def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/thought_generation.html
|
d4134f71d99f-1
|
response_text = self.predict_and_parse(
problem_description=problem_description, thoughts=thoughts_path, **kwargs
)
return response_text if isinstance(response_text, str) else ""
[docs]class ProposePromptStrategy(BaseThoughtGenerationStrategy):
"""
Propose thoughts sequentially using a "propose prompt".
This strategy works better when the thought space is more constrained, such
as when each thought is just a word or a line. Proposing different thoughts
in the same prompt completion helps to avoid duplication.
"""
prompt: BasePromptTemplate = PROPOSE_PROMPT
tot_memory: Dict[Tuple[str, ...], List[str]] = Field(default_factory=dict)
[docs] def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
if thoughts_path not in self.tot_memory or not self.tot_memory[thoughts_path]:
new_thoughts = self.predict_and_parse(
problem_description=problem_description,
thoughts=thoughts_path,
n=self.c,
**kwargs
)
if not new_thoughts:
return ""
if isinstance(new_thoughts, list):
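# Reverse so that pop() below returns the thoughts in their original order.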
self.tot_memory[thoughts_path] = new_thoughts[::-1]
else:
return ""
return self.tot_memory[thoughts_path].pop()
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/tot/thought_generation.html
|
0294013b318c-0
|
Source code for langchain_experimental.plan_and_execute.agent_executor
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from pydantic import Field
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from langchain_experimental.plan_and_execute.schema import (
BaseStepContainer,
ListStepContainer,
)
[docs]class PlanAndExecute(Chain):
"""Plan and execute a chain of steps."""
planner: BasePlanner
"""The planner to use."""
executor: BaseExecutor
"""The executor to use."""
step_container: BaseStepContainer = Field(default_factory=ListStepContainer)
"""The step container to use."""
input_key: str = "input"
output_key: str = "output"
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
plan = self.planner.plan(
inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
_new_inputs = {
"previous_steps": self.step_container,
"current_step": step,
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/agent_executor.html
|
0294013b318c-1
|
"previous_steps": self.step_container,
"current_step": step,
"objective": inputs[self.input_key],
}
new_inputs = {**_new_inputs, **inputs}
response = self.executor.step(
new_inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
run_manager.on_text(
f"*****\n\nStep: {step.value}", verbose=self.verbose
)
run_manager.on_text(
f"\n\nResponse: {response.response}", verbose=self.verbose
)
self.step_container.add_step(step, response)
return {self.output_key: self.step_container.get_final_response()}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
plan = await self.planner.aplan(
inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
await run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
_new_inputs = {
"previous_steps": self.step_container,
"current_step": step,
"objective": inputs[self.input_key],
}
new_inputs = {**_new_inputs, **inputs}
response = await self.executor.astep(
new_inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
await run_manager.on_text(
f"*****\n\nStep: {step.value}", verbose=self.verbose
)
await run_manager.on_text(
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/agent_executor.html
|
0294013b318c-2
|
f"\n\nResponse: {response.response}", verbose=self.verbose
)
self.step_container.add_step(step, response)
return {self.output_key: self.step_container.get_final_response()}
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/agent_executor.html
|
504ac70b8b19-0
|
Source code for langchain_experimental.plan_and_execute.schema
from abc import abstractmethod
from typing import List, Tuple
from langchain.schema import BaseOutputParser
from pydantic import BaseModel, Field
[docs]class Step(BaseModel):
"""Step."""
value: str
"""The value."""
[docs]class Plan(BaseModel):
"""Plan."""
steps: List[Step]
"""The steps."""
[docs]class StepResponse(BaseModel):
"""Step response."""
response: str
"""The response."""
[docs]class BaseStepContainer(BaseModel):
"""Base step container."""
[docs] @abstractmethod
def add_step(self, step: Step, step_response: StepResponse) -> None:
"""Add step and step response to the container."""
[docs] @abstractmethod
def get_final_response(self) -> str:
"""Return the final response based on steps taken."""
[docs]class ListStepContainer(BaseStepContainer):
"""List step container."""
steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)
"""The steps."""
[docs] def add_step(self, step: Step, step_response: StepResponse) -> None:
self.steps.append((step, step_response))
[docs] def get_steps(self) -> List[Tuple[Step, StepResponse]]:
return self.steps
[docs] def get_final_response(self) -> str:
return self.steps[-1][1].response
[docs]class PlanOutputParser(BaseOutputParser):
"""Plan output parser."""
[docs] @abstractmethod
def parse(self, text: str) -> Plan:
"""Parse into a plan."""
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/schema.html
|
45a6af9bd197-0
|
Source code for langchain_experimental.plan_and_execute.planners.chat_planner
import re
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
from langchain_experimental.plan_and_execute.schema import (
Plan,
PlanOutputParser,
Step,
)
SYSTEM_PROMPT = (
"Let's first understand the problem and devise a plan to solve the problem."
" Please output the plan starting with the header 'Plan:' "
"and then followed by a numbered list of steps. "
"Please make the plan the minimum number of steps required "
"to accurately complete the task. If the task is a question, "
"the final step should almost always be 'Given the above steps taken, "
"please respond to the users original question'. "
"At the end of your plan, say '<END_OF_PLAN>'"
)
[docs]class PlanningOutputParser(PlanOutputParser):
"""Planning output parser."""
[docs] def parse(self, text: str) -> Plan:
steps = [Step(value=v) for v in re.split(r"\n\s*\d+\. ", text)[1:]]
return Plan(steps=steps)
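# --- Illustrative example (not part of the module) ---------------------
# The split drops the "Plan:" header and yields one Step per numbered
# item. (In practice the "<END_OF_PLAN>" marker is consumed by the
# planner's stop list before parsing.)
plan = PlanningOutputParser().parse(
    "Plan:\n1. Find the population of Canada.\n2. Respond to the user."
)
assert [s.value for s in plan.steps] == [
    "Find the population of Canada.",
    "Respond to the user.",
]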
[docs]def load_chat_planner(
llm: BaseLanguageModel, system_prompt: str = SYSTEM_PROMPT
) -> LLMPlanner:
"""
Load a chat planner.
Args:
llm: Language model.
system_prompt: System prompt.
Returns:
LLMPlanner
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/planners/chat_planner.html
|
45a6af9bd197-1
|
prompt_template = ChatPromptTemplate.from_messages(
[
SystemMessage(content=system_prompt),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
llm_chain = LLMChain(llm=llm, prompt=prompt_template)
return LLMPlanner(
llm_chain=llm_chain,
output_parser=PlanningOutputParser(),
stop=["<END_OF_PLAN>"],
)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/planners/chat_planner.html
|
fe10c9ba737e-0
|
Source code for langchain_experimental.plan_and_execute.planners.base
from abc import abstractmethod
from typing import Any, List, Optional
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
[docs]class BasePlanner(BaseModel):
"""Base planner."""
[docs] @abstractmethod
def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
[docs] @abstractmethod
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, asynchronously decide what to do."""
[docs]class LLMPlanner(BasePlanner):
"""LLM planner."""
llm_chain: LLMChain
"""The LLM chain to use."""
output_parser: PlanOutputParser
"""The output parser to use."""
stop: Optional[List] = None
"""The stop list to use."""
[docs] def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
return self.output_parser.parse(llm_response)
[docs] async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, asynchronously decide what to do."""
llm_response = await self.llm_chain.arun(
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/planners/base.html
|
fe10c9ba737e-1
|
**inputs, stop=self.stop, callbacks=callbacks
)
return self.output_parser.parse(llm_response)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/planners/base.html
|
960e9217fca7-0
|
Source code for langchain_experimental.plan_and_execute.executors.base
from abc import abstractmethod
from typing import Any
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from pydantic import BaseModel
from langchain_experimental.plan_and_execute.schema import StepResponse
[docs]class BaseExecutor(BaseModel):
"""Base executor."""
[docs] @abstractmethod
def step(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> StepResponse:
"""Take step."""
[docs] @abstractmethod
async def astep(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> StepResponse:
"""Take async step."""
[docs]class ChainExecutor(BaseExecutor):
"""Chain executor."""
chain: Chain
"""The chain to use."""
[docs] def step(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> StepResponse:
"""Take step."""
response = self.chain.run(**inputs, callbacks=callbacks)
return StepResponse(response=response)
[docs] async def astep(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> StepResponse:
"""Take step."""
response = await self.chain.arun(**inputs, callbacks=callbacks)
return StepResponse(response=response)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/executors/base.html
|
443bd942af33-0
|
Source code for langchain_experimental.plan_and_execute.executors.agent_executor
from typing import List
from langchain.agents.agent import AgentExecutor
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain_experimental.plan_and_execute.executors.base import ChainExecutor
HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps}
Current objective: {current_step}
{agent_scratchpad}"""
TASK_PREFIX = """{objective}
"""
[docs]def load_agent_executor(
llm: BaseLanguageModel,
tools: List[BaseTool],
verbose: bool = False,
include_task_in_prompt: bool = False,
) -> ChainExecutor:
"""
Load an agent executor.
Args:
llm: BaseLanguageModel
tools: List[BaseTool]
verbose: bool. Defaults to False.
include_task_in_prompt: bool. Defaults to False.
Returns:
ChainExecutor
"""
input_variables = ["previous_steps", "current_step", "agent_scratchpad"]
template = HUMAN_MESSAGE_TEMPLATE
if include_task_in_prompt:
input_variables.append("objective")
template = TASK_PREFIX + template
agent = StructuredChatAgent.from_llm_and_tools(
llm,
tools,
human_message_template=template,
input_variables=input_variables,
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=verbose
)
return ChainExecutor(chain=agent_executor)
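# --- Illustrative sketch (not part of the module) ----------------------
# End-to-end wiring of a plan-and-execute agent. The model and the tool
# list are placeholders.
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.planners.chat_planner import (
    load_chat_planner,
)

llm = ChatOpenAI(temperature=0)
tools: List[BaseTool] = []  # populate with tools, e.g. a search tool
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)
# agent.run("What is the population of Canada divided by two?")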
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/plan_and_execute/executors/agent_executor.html
|
21c6b61d8208-0
|
Source code for langchain_experimental.cpal.constants
from enum import Enum
[docs]class Constant(Enum):
"""Enum for constants used in the CPAL."""
narrative_input = "narrative_input"
chain_answer = "chain_answer" # natural language answer
chain_data = "chain_data" # pydantic instance
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/cpal/constants.html
|
1971c83e74d0-0
|
Source code for langchain_experimental.sql.base
"""Chain for interacting with SQL Database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from pydantic import Extra, Field, root_validator
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
[docs]class SQLDatabaseChain(Chain):
"""Chain for interacting with SQL Database.
Example:
.. code-block:: python
from langchain_experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
database: SQLDatabase = Field(exclude=True)
"""SQL Database to connect to."""
prompt: Optional[BasePromptTemplate] = None
"""[Deprecated] Prompt to use to translate natural language to SQL."""
top_k: int = 5
"""Number of results to return from the query"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_sql: bool = False
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-1
|
"""Will return sql-command directly without executing it"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an SQLDatabaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-2
|
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
sql_cmd = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
if self.return_sql:
return {self.output_key: sql_cmd}
if not self.use_query_checker:
_run_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
intermediate_steps.append(
sql_cmd
) # output: sql generation (no checker)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-3
|
intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec
result = self.database.run(sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm, prompt=query_checker_prompt
)
query_checker_inputs = {
"query": sql_cmd,
"dialect": self.database.dialect,
}
checked_sql_command: str = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
).strip()
intermediate_steps.append(
checked_sql_command
) # output: sql generation (checker)
_run_manager.on_text(
checked_sql_command, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_sql_command}
) # input: sql exec
result = self.database.run(checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(result, color="yellow", verbose=self.verbose)
# If return direct, we just set the final result equal to
# the result of the sql query result, otherwise try to get a human readable
# final answer
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-4
|
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "sql_database_chain"
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabase,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> SQLDatabaseChain:
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, **kwargs)
[docs]class SQLDatabaseSequentialChain(Chain):
"""Chain for querying SQL database that is a sequential chain.
The chain is as follows:
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-5
|
1. Based on the query, determine which tables to use.
2. Based on those tables, call the normal SQL database chain.
This is useful in cases where the number of tables in the database is large.
"""
decider_chain: LLMChain
sql_chain: SQLDatabaseChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
database: SQLDatabase,
query_prompt: BasePromptTemplate = PROMPT,
decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
**kwargs: Any,
) -> SQLDatabaseSequentialChain:
"""Load the necessary chains."""
sql_chain = SQLDatabaseChain.from_llm(
llm, database, prompt=query_prompt, **kwargs
)
decider_chain = LLMChain(
llm=llm, prompt=decider_prompt, output_key="table_names"
)
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
1971c83e74d0-6
|
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ", ".join(_table_names)
llm_inputs = {
"query": inputs[self.input_key],
"table_names": table_names,
}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [
name
for name in table_names_from_chain
if name.lower() in _lowercased_table_names
]
_run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(
new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/sql/base.html
|
076f150f7566-0
|
Source code for langchain_experimental.generative_agents.generative_agent
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from pydantic import BaseModel, Field
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
[docs]class GenerativeAgent(BaseModel):
"""An Agent as a character with memory and innate characteristics."""
name: str
"""The character's name."""
age: Optional[int] = None
"""The optional age of the character."""
traits: str = "N/A"
"""Permanent traits to ascribe to the character."""
status: str
"""The traits of the character you wish not to change."""
memory: GenerativeAgentMemory
"""The memory object that combines relevance, recency, and 'importance'."""
llm: BaseLanguageModel
"""The underlying language model."""
verbose: bool = False
summary: str = "" #: :meta private:
"""Stateful self-summary generated via reflection on the character's memory."""
summary_refresh_seconds: int = 3600 #: :meta private:
"""How frequently to re-generate the summary."""
last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private:
"""The last time the character's summary was regenerated."""
daily_summaries: List[str] = Field(default_factory=list) # : :meta private:
"""Summary of the events in the plan that the agent took."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# LLM-related methods
@staticmethod
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-1
|
def _parse_list(text: str) -> List[str]:
"""Parse a newline-separated string into a list of strings."""
lines = re.split(r"\n", text.strip())
return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
[docs] def chain(self, prompt: PromptTemplate) -> LLMChain:
return LLMChain(
llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
)
def _get_entity_from_observation(self, observation: str) -> str:
prompt = PromptTemplate.from_template(
"What is the observed entity in the following observation? {observation}"
+ "\nEntity="
)
return self.chain(prompt).run(observation=observation).strip()
def _get_entity_action(self, observation: str, entity_name: str) -> str:
prompt = PromptTemplate.from_template(
"What is the {entity} doing in the following observation? {observation}"
+ "\nThe {entity} is"
)
return (
self.chain(prompt).run(entity=entity_name, observation=observation).strip()
)
[docs] def summarize_related_memories(self, observation: str) -> str:
"""Summarize memories that are most relevant to an observation."""
prompt = PromptTemplate.from_template(
"""
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
)
entity_name = self._get_entity_from_observation(observation)
entity_action = self._get_entity_action(observation, entity_name)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-2
|
q1 = f"What is the relationship between {self.name} and {entity_name}"
q2 = f"{entity_name} is {entity_action}"
return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
def _generate_reaction(
self, observation: str, suffix: str, now: Optional[datetime] = None
) -> str:
"""React to a given observation or dialogue act."""
prompt = PromptTemplate.from_template(
"{agent_summary_description}"
+ "\nIt is {current_time}."
+ "\n{agent_name}'s status: {agent_status}"
+ "\nSummary of relevant context from {agent_name}'s memory:"
+ "\n{relevant_memories}"
+ "\nMost recent observations: {most_recent_memories}"
+ "\nObservation: {observation}"
+ "\n\n"
+ suffix
)
agent_summary_description = self.get_summary(now=now)
relevant_memories_str = self.summarize_related_memories(observation)
current_time_str = (
datetime.now().strftime("%B %d, %Y, %I:%M %p")
if now is None
else now.strftime("%B %d, %Y, %I:%M %p")
)
kwargs: Dict[str, Any] = dict(
agent_summary_description=agent_summary_description,
current_time=current_time_str,
relevant_memories=relevant_memories_str,
agent_name=self.name,
observation=observation,
agent_status=self.status,
)
consumed_tokens = self.llm.get_num_tokens(
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-3
|
prompt.format(most_recent_memories="", **kwargs)
)
kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
return self.chain(prompt=prompt).run(**kwargs).strip()
def _clean_response(self, text: str) -> str:
return re.sub(f"^{self.name} ", "", text.strip()).strip()
[docs] def generate_reaction(
self, observation: str, now: Optional[datetime] = None
) -> Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = (
"Should {agent_name} react to the observation, and if so,"
+ " what would be an appropriate reaction? Respond in one line."
+ ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
+ "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
+ "\nEither do nothing, react, or say something but not both.\n\n"
)
full_result = self._generate_reaction(
observation, call_to_action_template, now=now
)
result = full_result.strip().split("\n")[0]
# Save the observation and the agent's reaction to memory.
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and reacted by {result}",
self.memory.now_key: now,
},
)
if "REACT:" in result:
reaction = self._clean_response(result.split("REACT:")[-1])
return False, f"{self.name} {reaction}"
if "SAY:" in result:
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-4
|
if "SAY:" in result:
said_value = self._clean_response(result.split("SAY:")[-1])
return True, f"{self.name} said {said_value}"
else:
return False, result
[docs] def generate_dialogue_response(
self, observation: str, now: Optional[datetime] = None
) -> Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = (
"What would {agent_name} say? To end the conversation, write:"
' GOODBYE: "what to say". Otherwise to continue the conversation,'
' write: SAY: "what to say next"\n\n'
)
full_result = self._generate_reaction(
observation, call_to_action_template, now=now
)
result = full_result.strip().split("\n")[0]
if "GOODBYE:" in result:
farewell = self._clean_response(result.split("GOODBYE:")[-1])
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and said {farewell}",
self.memory.now_key: now,
},
)
return False, f"{self.name} said {farewell}"
if "SAY:" in result:
response_text = self._clean_response(result.split("SAY:")[-1])
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and said {response_text}",
self.memory.now_key: now,
},
)
return True, f"{self.name} said {response_text}"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-5
|
else:
return False, result
######################################################
# Agent's stateful summary methods. #
# Each dialog or response prompt includes a header #
# summarizing the agent's self-description. This is #
# updated periodically through probing its memories. #
######################################################
def _compute_agent_summary(self) -> str:
""""""
prompt = PromptTemplate.from_template(
"How would you summarize {name}'s core characteristics given the"
+ " following statements:\n"
+ "{relevant_memories}"
+ "Do not embellish."
+ "\n\nSummary: "
)
# The agent seeks to think about their core characteristics.
return (
self.chain(prompt)
.run(name=self.name, queries=[f"{self.name}'s core characteristics"])
.strip()
)
[docs] def get_summary(
self, force_refresh: bool = False, now: Optional[datetime] = None
) -> str:
"""Return a descriptive summary of the agent."""
current_time = datetime.now() if now is None else now
since_refresh = (current_time - self.last_refreshed).seconds
if (
not self.summary
or since_refresh >= self.summary_refresh_seconds
or force_refresh
):
self.summary = self._compute_agent_summary()
self.last_refreshed = current_time
age = self.age if self.age is not None else "N/A"
return (
f"Name: {self.name} (age: {age})"
+ f"\nInnate traits: {self.traits}"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
076f150f7566-6
|
+ f"\nInnate traits: {self.traits}"
+ f"\n{self.summary}"
)
[docs] def get_full_header(
self, force_refresh: bool = False, now: Optional[datetime] = None
) -> str:
"""Return a full header of the agent's status, summary, and current time."""
now = datetime.now() if now is None else now
summary = self.get_summary(force_refresh=force_refresh, now=now)
current_time_str = now.strftime("%B %d, %Y, %I:%M %p")
return (
f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/generative_agent.html
|
bca8c092d05a-0
|
Source code for langchain_experimental.generative_agents.memory
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.utils import mock_now
logger = logging.getLogger(__name__)
[docs]class GenerativeAgentMemory(BaseMemory):
"""Memory for the generative agent."""
llm: BaseLanguageModel
"""The core language model."""
memory_retriever: TimeWeightedVectorStoreRetriever
"""The retriever to fetch related memories."""
verbose: bool = False
reflection_threshold: Optional[float] = None
"""When aggregate_importance exceeds reflection_threshold, stop to reflect."""
current_plan: List[str] = []
"""The current plan of the agent."""
# A weight of 0.15 makes this less important than it
# would be otherwise, relative to salience and time
importance_weight: float = 0.15
"""How much weight to assign the memory importance."""
aggregate_importance: float = 0.0 # : :meta private:
"""Track the sum of the 'importance' of recent memories.
Triggers reflection when it reaches reflection_threshold."""
max_tokens_limit: int = 1200 # : :meta private:
# input keys
queries_key: str = "queries"
most_recent_memories_token_key: str = "recent_memories_token"
add_memory_key: str = "add_memory"
# output keys
relevant_memories_key: str = "relevant_memories"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-1
|
relevant_memories_simple_key: str = "relevant_memories_simple"
most_recent_memories_key: str = "most_recent_memories"
now_key: str = "now"
reflecting: bool = False
[docs] def chain(self, prompt: PromptTemplate) -> LLMChain:
return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
@staticmethod
def _parse_list(text: str) -> List[str]:
"""Parse a newline-separated string into a list of strings."""
lines = re.split(r"\n", text.strip())
lines = [line for line in lines if line.strip()] # remove empty lines
return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
"""Return the 3 most salient high-level questions about recent observations."""
prompt = PromptTemplate.from_template(
"{observations}\n\n"
"Given only the information above, what are the 3 most salient "
"high-level questions we can answer about the subjects in the statements?\n"
"Provide each question on a new line."
)
observations = self.memory_retriever.memory_stream[-last_k:]
observation_str = "\n".join(
[self._format_memory_detail(o) for o in observations]
)
result = self.chain(prompt).run(observations=observation_str)
return self._parse_list(result)
def _get_insights_on_topic(
self, topic: str, now: Optional[datetime] = None
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-2
|
) -> List[str]:
"""Generate 'insights' on a topic of reflection, based on pertinent memories."""
prompt = PromptTemplate.from_template(
"Statements relevant to: '{topic}'\n"
"---\n"
"{related_statements}\n"
"---\n"
"What 5 high-level novel insights can you infer from the above statements "
"that are relevant for answering the following question?\n"
"Do not include any insights that are not relevant to the question.\n"
"Do not repeat any insights that have already been made.\n\n"
"Question: {topic}\n\n"
"(example format: insight (because of 1, 5, 3))\n"
)
related_memories = self.fetch_memories(topic, now=now)
related_statements = "\n".join(
[
self._format_memory_detail(memory, prefix=f"{i+1}. ")
for i, memory in enumerate(related_memories)
]
)
result = self.chain(prompt).run(
topic=topic, related_statements=related_statements
)
# TODO: Parse the connections between memories and insights
return self._parse_list(result)
[docs] def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
"""Reflect on recent observations and generate 'insights'."""
if self.verbose:
logger.info("Character is reflecting")
new_insights = []
topics = self._get_topics_of_reflection()
for topic in topics:
insights = self._get_insights_on_topic(topic, now=now)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-3
|
for insight in insights:
self.add_memory(insight, now=now)
new_insights.extend(insights)
return new_insights
def _score_memory_importance(self, memory_content: str) -> float:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"On the scale of 1 to 10, where 1 is purely mundane"
+ " (e.g., brushing teeth, making bed) and 10 is"
+ " extremely poignant (e.g., a break up, college"
+ " acceptance), rate the likely poignancy of the"
+ " following piece of memory. Respond with a single integer."
+ "\nMemory: {memory_content}"
+ "\nRating: "
)
score = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance score: {score}")
match = re.search(r"^\D*(\d+)", score)
if match:
return (float(match.group(1)) / 10) * self.importance_weight
else:
return 0.0
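# Worked example of the scaling above: with the default
# importance_weight of 0.15, a reply such as "Rating: 8" parses to 8
# via the regex and becomes (8 / 10) * 0.15 == 0.12; an unparseable
# reply falls back to 0.0.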
def _score_memories_importance(self, memory_content: str) -> List[float]:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"On the scale of 1 to 10, where 1 is purely mundane"
+ " (e.g., brushing teeth, making bed) and 10 is"
+ " extremely poignant (e.g., a break up, college"
+ " acceptance), rate the likely poignancy of the"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-4
|
+ " acceptance), rate the likely poignancy of the"
+ " following piece of memory. Always answer with only a list of numbers."
+ " If just given one memory still respond in a list."
+ " Memories are separated by semi colans (;)"
+ "\Memories: {memory_content}"
+ "\nRating: "
)
scores = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance scores: {scores}")
# Split into list of strings and convert to floats
scores_list = [float(x) for x in scores.split(";")]
return scores_list
[docs] def add_memories(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observations or memories to the agent's memory."""
importance_scores = self._score_memories_importance(memory_content)
self.aggregate_importance += max(importance_scores)
memory_list = memory_content.split(";")
documents = []
for i in range(len(memory_list)):
documents.append(
Document(
page_content=memory_list[i],
metadata={"importance": importance_scores[i]},
)
)
result = self.memory_retriever.add_documents(documents, current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-5
|
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
[docs] def add_memory(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observation or memory to the agent's memory."""
importance_score = self._score_memory_importance(memory_content)
self.aggregate_importance += importance_score
document = Document(
page_content=memory_content, metadata={"importance": importance_score}
)
result = self.memory_retriever.add_documents([document], current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
[docs] def fetch_memories(
self, observation: str, now: Optional[datetime] = None
) -> List[Document]:
"""Fetch related memories."""
if now is not None:
with mock_now(now):
return self.memory_retriever.get_relevant_documents(observation)
else:
return self.memory_retriever.get_relevant_documents(observation)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-6
|
[docs] def format_memories_detail(self, relevant_memories: List[Document]) -> str:
content = []
for mem in relevant_memories:
content.append(self._format_memory_detail(mem, prefix="- "))
return "\n".join([f"{mem}" for mem in content])
def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
return f"{prefix}[{created_time}] {memory.page_content.strip()}"
[docs] def format_memories_simple(self, relevant_memories: List[Document]) -> str:
return "; ".join([f"{mem.page_content}" for mem in relevant_memories])
def _get_memories_until_limit(self, consumed_tokens: int) -> str:
"""Reduce the number of tokens in the documents."""
result = []
for doc in self.memory_retriever.memory_stream[::-1]:
if consumed_tokens >= self.max_tokens_limit:
break
consumed_tokens += self.llm.get_num_tokens(doc.page_content)
if consumed_tokens < self.max_tokens_limit:
result.append(doc)
return self.format_memories_simple(result)
@property
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
return []
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
bca8c092d05a-7
|
if queries is not None:
relevant_memories = [
mem for query in queries for mem in self.fetch_memories(query, now=now)
]
return {
self.relevant_memories_key: self.format_memories_detail(
relevant_memories
),
self.relevant_memories_simple_key: self.format_memories_simple(
relevant_memories
),
}
most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
if most_recent_memories_token is not None:
return {
self.most_recent_memories_key: self._get_memories_until_limit(
most_recent_memories_token
)
}
return {}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
"""Save the context of this model run to memory."""
# TODO: fix the save memory key
mem = outputs.get(self.add_memory_key)
now = outputs.get(self.now_key)
if mem:
self.add_memory(mem, now=now)
[docs] def clear(self) -> None:
"""Clear memory contents."""
# TODO
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/generative_agents/memory.html
|
252bf1f7620a-0
|
Source code for langchain_experimental.pal_chain.base
"""Implements Program-Aided Language Models.
This module implements the Program-Aided Language Models (PAL) for generating code
solutions. PAL is a technique described in the paper "Program-Aided Language Models"
(https://arxiv.org/pdf/2211.10435.pdf).
"""
from __future__ import annotations
import ast
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import PythonREPL
from pydantic import Extra, Field
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval"]
[docs]class PALValidation:
SOLUTION_EXPRESSION_TYPE_FUNCTION = ast.FunctionDef
SOLUTION_EXPRESSION_TYPE_VARIABLE = ast.Name
[docs] def __init__(
self,
solution_expression_name: Optional[str] = None,
solution_expression_type: Optional[type] = None,
allow_imports: bool = False,
allow_command_exec: bool = False,
):
"""Initialize a PALValidation instance.
Args:
solution_expression_name (str): Name of the expected solution expression.
If passed, solution_expression_type must be passed as well.
solution_expression_type (type): AST type of the expected solution
expression. If passed, solution_expression_name must be passed as well.
Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE.
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-1
|
allow_imports (bool): Allow import statements.
allow_command_exec (bool): Allow using known command execution functions.
"""
self.solution_expression_name = solution_expression_name
self.solution_expression_type = solution_expression_type
if solution_expression_name is not None:
if not isinstance(self.solution_expression_name, str):
raise ValueError(
f"Expected solution_expression_name to be str, "
f"instead found {type(self.solution_expression_name)}"
)
if solution_expression_type is not None:
if (
self.solution_expression_type
is not self.SOLUTION_EXPRESSION_TYPE_FUNCTION
and self.solution_expression_type
is not self.SOLUTION_EXPRESSION_TYPE_VARIABLE
):
raise ValueError(
f"Expected solution_expression_type to be one of "
f"({self.SOLUTION_EXPRESSION_TYPE_FUNCTION},"
f"{self.SOLUTION_EXPRESSION_TYPE_VARIABLE}),"
f"instead found {self.solution_expression_type}"
)
if solution_expression_name is not None and solution_expression_type is None:
raise TypeError(
"solution_expression_name "
"requires solution_expression_type to be passed as well"
)
if solution_expression_name is None and solution_expression_type is not None:
raise TypeError(
"solution_expression_type "
"requires solution_expression_name to be passed as well"
)
self.allow_imports = allow_imports
self.allow_command_exec = allow_command_exec
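Concretely, the pairing rule above means a validation must name both the solution expression and its AST type, or neither. A minimal example grounded in the checks shown:
# Require the generated program to define a function named "solution",
# with imports and command execution both disallowed (the defaults).
validation = PALValidation(
    solution_expression_name="solution",
    solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
    allow_imports=False,
    allow_command_exec=False,
)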
[docs]class PALChain(Chain):
"""Implements Program-Aided Language Models (PAL).
This class implements the Program-Aided Language Models (PAL) for generating code
solutions. PAL is a technique described in the paper "Program-Aided Language Models"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-2
|
(https://arxiv.org/pdf/2211.10435.pdf).
"""
llm_chain: LLMChain
stop: str = "\n\n"
"""Stop token to use when generating code."""
get_answer_expr: str = "print(solution())"
"""Expression to use to get the answer from the generated code."""
python_globals: Optional[Dict[str, Any]] = None
"""Python globals to use when executing the generated code."""
python_locals: Optional[Dict[str, Any]] = None
"""Python locals to use when executing the generated code."""
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
"""Whether to return intermediate steps in the generated code."""
code_validations: PALValidation = Field(default_factory=PALValidation)
"""Validations to perform on the generated code."""
timeout: Optional[int] = 10
"""Timeout in seconds for the generated code to execute."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return self.llm_chain.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _call(
self,
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-3
|
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
code = self.llm_chain.predict(
stop=[self.stop], callbacks=_run_manager.get_child(), **inputs
)
_run_manager.on_text(code, color="green", end="\n", verbose=self.verbose)
PALChain.validate_code(code, self.code_validations)
repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals)
res = repl.run(code + f"\n{self.get_answer_expr}", timeout=self.timeout)
output = {self.output_key: res.strip()}
if self.return_intermediate_steps:
output["intermediate_steps"] = code
return output
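The execution step above amounts to appending get_answer_expr to the generated program and running it in a PythonREPL. An illustrative standalone sketch, with a hard-coded string standing in for the LLM output:
from langchain.utilities import PythonREPL
code = "def solution():\n    return 23 - 5 * 3"  # stand-in for llm_chain.predict(...)
repl = PythonREPL()
print(repl.run(code + "\nprint(solution())"))  # prints 8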
[docs] @classmethod
def validate_code(cls, code: str, code_validations: PALValidation) -> None:
try:
code_tree = ast.parse(code)
except (SyntaxError, UnicodeDecodeError):
raise ValueError(f"Generated code is not valid python code: {code}")
except TypeError:
raise ValueError(
f"Generated code is expected to be a string, "
f"instead found {type(code)}"
)
except OverflowError:
raise ValueError(
f"Generated code too long / complex to be parsed by ast: {code}"
)
found_solution_expr = False
if code_validations.solution_expression_name is None:
# Skip validation if no solution_expression_name was given
found_solution_expr = True
has_imports = False
top_level_nodes = list(ast.iter_child_nodes(code_tree))
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-4
|
for node in top_level_nodes:
if (
code_validations.solution_expression_name is not None
and code_validations.solution_expression_type is not None
):
# Check root nodes (like func def)
if (
isinstance(node, code_validations.solution_expression_type)
and hasattr(node, "name")
and node.name == code_validations.solution_expression_name
):
found_solution_expr = True
# Check assigned nodes (like answer variable)
if isinstance(node, ast.Assign):
for target_node in node.targets:
if (
isinstance(
target_node, code_validations.solution_expression_type
)
and hasattr(target_node, "id")
and target_node.id
== code_validations.solution_expression_name
):
found_solution_expr = True
if isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom):
has_imports = True
if not found_solution_expr:
raise ValueError(
f"Generated code is missing the solution expression: "
f"{code_validations.solution_expression_name} of type: "
f"{code_validations.solution_expression_type}"
)
if not code_validations.allow_imports and has_imports:
raise ValueError(f"Generated code has disallowed imports: {code}")
if (
not code_validations.allow_command_exec
or not code_validations.allow_imports
):
for node in ast.walk(code_tree):
if (
(not code_validations.allow_command_exec)
and isinstance(node, ast.Call)
and (
(
hasattr(node.func, "id")
and node.func.id in COMMAND_EXECUTION_FUNCTIONS
)
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-5
|
or (
isinstance(node.func, ast.Attribute)
and node.func.attr in COMMAND_EXECUTION_FUNCTIONS
)
)
):
# node.func has no .id when it is an ast.Attribute (e.g. os.system),
# so resolve the function name defensively for the error message.
func_name = getattr(node.func, "id", getattr(node.func, "attr", "?"))
raise ValueError(
f"Found illegal command execution function "
f"{func_name} in code {code}"
)
if (not code_validations.allow_imports) and (
isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)
):
raise ValueError(f"Generated code has disallowed imports: {code}")
[docs] @classmethod
def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
"""Load PAL from math prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
"""
llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT)
code_validations = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
)
return cls(
llm_chain=llm_chain,
stop="\n\n",
get_answer_expr="print(solution())",
code_validations=code_validations,
**kwargs,
)
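A hedged usage sketch; any BaseLanguageModel works where OpenAI is shown, and the question is illustrative rather than taken from this page.
from langchain.llms import OpenAI
pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0))
answer = pal_chain.run(
    "Olivia has $23. She bought five bagels for $3 each. "
    "How much money does she have left?"
)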
[docs] @classmethod
def from_colored_object_prompt(
cls, llm: BaseLanguageModel, **kwargs: Any
) -> PALChain:
"""Load PAL from colored object prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
252bf1f7620a-6
|
llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
code_validations = PALValidation(
solution_expression_name="answer",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE,
)
return cls(
llm_chain=llm_chain,
stop="\n\n\n",
get_answer_expr="print(answer)",
code_validations=code_validations,
**kwargs,
)
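Usage mirrors from_math_prompt; the difference is that the generated program assigns its result to a variable named "answer" rather than defining a solution() function. A brief sketch, with llm assumed to be any BaseLanguageModel instance:
pal_colored = PALChain.from_colored_object_prompt(llm)
result = pal_colored.run(
    "On the desk there are two red pens, a blue mug, and a red notebook. "
    "How many red objects are there?"
)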
@property
def _chain_type(self) -> str:
return "pal_chain"
|
https://api.python.langchain.com/en/latest/_modules/langchain_experimental/pal_chain/base.html
|
2258e317d7da-0
|
langchain.storage.exceptions.InvalidKeyException
class langchain.storage.exceptions.InvalidKeyException
Raised when a key is invalid; e.g., uses incorrect characters.
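A hypothetical validation helper showing where this exception fits; the allowed character set is an assumption for illustration, not part of the langchain API.
import re
from langchain.storage.exceptions import InvalidKeyException
def check_key(key: str) -> None:
    # Assumed policy: alphanumerics plus a few common separators.
    if not re.fullmatch(r"[A-Za-z0-9_.\-/]+", key):
        raise InvalidKeyException(f"Invalid characters in key: {key!r}")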
|
https://api.python.langchain.com/en/latest/storage/langchain.storage.exceptions.InvalidKeyException.html
|
24ced2ad9ef3-0
|
langchain.storage.in_memory.InMemoryStore
class langchain.storage.in_memory.InMemoryStore
In-memory implementation of the BaseStore using a dictionary.
store
The underlying dictionary that stores the key-value pairs.
Type
Dict[str, Any]
Examples
from langchain.storage import InMemoryStore
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
store.mget(['key1', 'key2'])
# ['value1', 'value2']
store.mdelete(['key1'])
list(store.yield_keys())
# ['key2']
list(store.yield_keys(prefix='k'))
# ['key2']
Initialize an empty store.
Methods
__init__()
Initialize an empty store.
mdelete(keys)
Delete the given keys and their associated values.
mget(keys)
Get the values associated with the given keys.
mset(key_value_pairs)
Set the values for the given keys.
yield_keys([prefix])
Get an iterator over keys that match the given prefix.
__init__() → None
Initialize an empty store.
mdelete(keys: Sequence[str]) → None
Delete the given keys and their associated values.
Parameters
keys (Sequence[str]) – A sequence of keys to delete.
mget(keys: Sequence[str]) → List[Optional[Any]]
Get the values associated with the given keys.
Parameters
keys (Sequence[str]) – A sequence of keys.
Returns
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
mset(key_value_pairs: Sequence[Tuple[str, Any]]) → None
|
https://api.python.langchain.com/en/latest/storage/langchain.storage.in_memory.InMemoryStore.html
|
24ced2ad9ef3-1
|
Set the values for the given keys.
Parameters
key_value_pairs (Sequence[Tuple[str, V]]) – A sequence of key-value pairs.
Returns
None
yield_keys(prefix: Optional[str] = None) → Iterator[str]
Get an iterator over keys that match the given prefix.
Parameters
prefix (str, optional) – The prefix to match. Defaults to None.
Returns
An iterator over keys that match the given prefix.
Return type
Iterator[str]
|
https://api.python.langchain.com/en/latest/storage/langchain.storage.in_memory.InMemoryStore.html
|
549a5b53223c-0
|
langchain.storage.encoder_backed.EncoderBackedStore
class langchain.storage.encoder_backed.EncoderBackedStore(store: BaseStore[str, Any], key_encoder: Callable[[K], str], value_serializer: Callable[[V], bytes], value_deserializer: Callable[[Any], V])
Wraps a store with key and value encoders/decoders.
Example that uses JSON for encoding/decoding:
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
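A runnable variant of the sketch above, swapping the placeholder MyCustomStore for the InMemoryStore documented on the previous page:
import json
from langchain.storage.in_memory import InMemoryStore
from langchain.storage.encoder_backed import EncoderBackedStore
store = EncoderBackedStore(
    store=InMemoryStore(),
    key_encoder=lambda key: json.dumps(key),
    value_serializer=lambda value: json.dumps(value),
    value_deserializer=lambda blob: json.loads(blob),
)
store.mset([(1, 3.14), (2, 2.718)])
print(store.mget([1, 2]))  # [3.14, 2.718]
store.mdelete([1, 2])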
Initialize an EncoderBackedStore.
Methods
__init__(store, key_encoder, ...)
Initialize an EncoderBackedStore.
mdelete(keys)
Delete the given keys and their associated values.
mget(keys)
Get the values associated with the given keys.
mset(key_value_pairs)
Set the values for the given keys.
yield_keys(*[, prefix])
Get an iterator over keys that match the given prefix.
|
https://api.python.langchain.com/en/latest/storage/langchain.storage.encoder_backed.EncoderBackedStore.html
|