Source code for langchain.llms.openlm

from typing import Any, Dict

from pydantic import root_validator

from langchain.llms.openai import BaseOpenAI


class OpenLM(BaseOpenAI):
    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        try:
            import openlm

            values["client"] = openlm.Completion
        except ImportError:
            raise ValueError(
                "Could not import openlm python package. "
                "Please install it with `pip install openlm`."
            )
        if values["streaming"]:
            raise ValueError("Streaming not supported with openlm")
        return values
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openlm.html
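A minimal usage sketch, not part of the original page: OpenLM routes completions through the openlm client, so the model name below is illustrative and assumes the corresponding provider key is set in the environment.

# Hypothetical usage sketch - assumes `openlm` is installed and the
# relevant provider API key is available in the environment.
from langchain.llms import OpenLM

llm = OpenLM(model_name="text-davinci-003")  # illustrative model name
print(llm("What is the capital of France?"))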
Source code for langchain.llms.aviary

"""Wrapper around Aviary"""
import dataclasses
import os
from typing import Any, Dict, List, Mapping, Optional, Union, cast

import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

TIMEOUT = 60


@dataclasses.dataclass
class AviaryBackend:
    backend_url: str
    bearer: str

    def __post_init__(self) -> None:
        self.header = {"Authorization": self.bearer}

    @classmethod
    def from_env(cls) -> "AviaryBackend":
        aviary_url = os.getenv("AVIARY_URL")
        assert aviary_url, "AVIARY_URL must be set"

        aviary_token = os.getenv("AVIARY_TOKEN", "")
        bearer = f"Bearer {aviary_token}" if aviary_token else ""
        aviary_url += "/" if not aviary_url.endswith("/") else ""

        return cls(aviary_url, bearer)


def get_models() -> List[str]:
    """List available models"""
    backend = AviaryBackend.from_env()
    request_url = backend.backend_url + "-/routes"
    response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT)
    try:
        result = response.json()
    except requests.JSONDecodeError as e:
        raise RuntimeError(
            f"Error decoding JSON from {request_url}. Text response: {response.text}"
        ) from e
    result = sorted(
        [k.lstrip("/").replace("--", "/") for k in result.keys() if "--" in k]
    )
    return result


def get_completions(
    model: str,
    prompt: str,
    use_prompt_format: bool = True,
    version: str = "",
) -> Dict[str, Union[str, float, int]]:
    """Get completions from Aviary models."""
    backend = AviaryBackend.from_env()
    url = backend.backend_url + model.replace("/", "--") + "/" + version + "query"
    response = requests.post(
        url,
        headers=backend.header,
        json={"prompt": prompt, "use_prompt_format": use_prompt_format},
        timeout=TIMEOUT,
    )
    try:
        return response.json()
    except requests.JSONDecodeError as e:
        raise RuntimeError(
            f"Error decoding JSON from {url}. Text response: {response.text}"
        ) from e


class Aviary(LLM):
    """Lets you use models hosted on an Aviary backend.

    Aviary is a backend for hosted models. You can find out more about Aviary at
    http://github.com/ray-project/aviary

    To get a list of the models supported on an Aviary deployment, follow the
    instructions on the website to install the Aviary CLI and then use:
    `aviary models`

    The AVIARY_URL and AVIARY_TOKEN environment variables must be set.

    Example:
        .. code-block:: python

            from langchain.llms import Aviary

            os.environ["AVIARY_URL"] = "<URL>"
            os.environ["AVIARY_TOKEN"] = "<TOKEN>"

            light = Aviary(model='amazon/LightGPT')

            output = light('How do you make fried rice?')
    """

    model: str = "amazon/LightGPT"
    aviary_url: Optional[str] = None
    aviary_token: Optional[str] = None
    # If True the prompt template for the model will be ignored.
    use_prompt_format: bool = True
    # API version to use for Aviary
    version: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        aviary_url = get_from_dict_or_env(values, "aviary_url", "AVIARY_URL")
        aviary_token = get_from_dict_or_env(values, "aviary_token", "AVIARY_TOKEN")

        # Set env variables for the Aviary SDK
        os.environ["AVIARY_URL"] = aviary_url
        os.environ["AVIARY_TOKEN"] = aviary_token

        try:
            aviary_models = get_models()
        except requests.exceptions.RequestException as e:
            raise ValueError(e)

        model = values.get("model")
        if model and model not in aviary_models:
            raise ValueError(f"{aviary_url} does not support model {values['model']}.")

        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_name": self.model,
            "aviary_url": self.aviary_url,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return f"aviary-{self.model.replace('/', '-')}"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Aviary.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = aviary("Tell me a joke.")
        """
        kwargs = {"use_prompt_format": self.use_prompt_format}
        if self.version:
            kwargs["version"] = self.version

        output = get_completions(
            model=self.model,
            prompt=prompt,
            **kwargs,
        )

        text = cast(str, output["generated_text"])
        if stop:
            text = enforce_stop_tokens(text, stop)

        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aviary.html
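The module-level helpers can also be called directly, bypassing the LLM wrapper. A brief sketch, not from the original page; the endpoint URL is a placeholder and assumes a live Aviary deployment:

# Illustrative sketch - AVIARY_URL (and optionally AVIARY_TOKEN) must
# point at a running Aviary backend.
import os

from langchain.llms.aviary import get_completions, get_models

os.environ["AVIARY_URL"] = "http://localhost:8000"  # hypothetical endpoint

print(get_models())  # e.g. ["amazon/LightGPT", ...]
result = get_completions(model="amazon/LightGPT", prompt="Hello!")
print(result["generated_text"])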
Source code for langchain.llms.deepinfra

"""Wrapper around DeepInfra APIs."""
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

DEFAULT_MODEL_ID = "google/flan-t5-xl"


class DeepInfra(LLM):
    """Wrapper around DeepInfra deployed models.

    To use, you should have the ``requests`` python package installed,
    and the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token,
    or pass it as a named parameter to the constructor.

    Only supports `text-generation` and `text2text-generation` for now.

    Example:
        .. code-block:: python

            from langchain.llms import DeepInfra
            di = DeepInfra(model_id="google/flan-t5-xl",
                           deepinfra_api_token="my-api-key")
    """

    model_id: str = DEFAULT_MODEL_ID
    model_kwargs: Optional[dict] = None
    deepinfra_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        deepinfra_api_token = get_from_dict_or_env(
            values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
        )
        values["deepinfra_api_token"] = deepinfra_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_id": self.model_id},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "deepinfra"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to DeepInfra's inference API endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = di("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}
        _model_kwargs = {**_model_kwargs, **kwargs}

        # HTTP headers for authorization
        headers = {
            "Authorization": f"bearer {self.deepinfra_api_token}",
            "Content-Type": "application/json",
        }

        try:
            res = requests.post(
                f"https://api.deepinfra.com/v1/inference/{self.model_id}",
                headers=headers,
                json={"input": prompt, **_model_kwargs},
            )
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        if res.status_code != 200:
            raise ValueError(
                "Error raised by inference API HTTP code: %s, %s"
                % (res.status_code, res.text)
            )
        try:
            t = res.json()
            text = t["results"][0]["generated_text"]
        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {res.text}"
            )

        if stop is not None:
            # I believe this is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
Source code for langchain.llms.writer

"""Wrapper around Writer APIs."""
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env


class Writer(LLM):
    """Wrapper around Writer large language models.

    To use, you should have the environment variables ``WRITER_API_KEY`` and
    ``WRITER_ORG_ID`` set with your API key and organization ID respectively.

    Example:
        .. code-block:: python

            from langchain import Writer
            writer = Writer(model_id="palmyra-base")
    """

    writer_org_id: Optional[str] = None
    """Writer organization ID."""

    model_id: str = "palmyra-instruct"
    """Model name to use."""

    min_tokens: Optional[int] = None
    """Minimum number of tokens to generate."""

    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    temperature: Optional[float] = None
    """What sampling temperature to use."""

    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""

    stop: Optional[List[str]] = None
    """Sequences when completion generation will stop."""

    presence_penalty: Optional[float] = None
    """Penalizes repeated tokens regardless of frequency."""

    repetition_penalty: Optional[float] = None
    """Penalizes repeated tokens according to frequency."""

    best_of: Optional[int] = None
    """Generates this many completions server-side and returns the "best"."""

    logprobs: bool = False
    """Whether to return log probabilities."""

    n: Optional[int] = None
    """How many completions to generate."""

    writer_api_key: Optional[str] = None
    """Writer API key."""

    base_url: Optional[str] = None
    """Base url to use, if None decides based on model name."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and organization id exist in environment."""

        writer_api_key = get_from_dict_or_env(
            values, "writer_api_key", "WRITER_API_KEY"
        )
        values["writer_api_key"] = writer_api_key

        writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID")
        values["writer_org_id"] = writer_org_id

        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling Writer API."""
        return {
            "minTokens": self.min_tokens,
            "maxTokens": self.max_tokens,
            "temperature": self.temperature,
            "topP": self.top_p,
            "stop": self.stop,
            "presencePenalty": self.presence_penalty,
            "repetitionPenalty": self.repetition_penalty,
            "bestOf": self.best_of,
            "logprobs": self.logprobs,
            "n": self.n,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_id": self.model_id, "writer_org_id": self.writer_org_id},
            **self._default_params,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "writer"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Writer's completions endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = writer("Tell me a joke.")
        """
        if self.base_url is not None:
            base_url = self.base_url
        else:
            base_url = (
                "https://enterprise-api.writer.com/llm"
                f"/organization/{self.writer_org_id}"
                f"/model/{self.model_id}/completions"
            )
        params = {**self._default_params, **kwargs}
        response = requests.post(
            url=base_url,
            headers={
                "Authorization": f"{self.writer_api_key}",
                "Content-Type": "application/json",
                "Accept": "application/json",
            },
            json={"prompt": prompt, **params},
        )
        text = response.text
        if stop is not None:
            # I believe this is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
https://api.python.langchain.com/en/latest/_modules/langchain/llms/writer.html
Source code for langchain.indexes.vectorstore

from typing import Any, List, Optional, Type

from pydantic import BaseModel, Extra, Field

from langchain.base_language import BaseLanguageModel
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.document_loaders.base import BaseLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.openai import OpenAI
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.chroma import Chroma


def _get_default_text_splitter() -> TextSplitter:
    return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)


class VectorStoreIndexWrapper(BaseModel):
    """Wrapper around a vectorstore for easy access."""

    vectorstore: VectorStore

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def query(
        self, question: str, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
    ) -> str:
        """Query the vectorstore."""
        llm = llm or OpenAI(temperature=0)
        chain = RetrievalQA.from_chain_type(
            llm, retriever=self.vectorstore.as_retriever(), **kwargs
        )
        return chain.run(question)

    def query_with_sources(
        self, question: str, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
    ) -> dict:
        """Query the vectorstore and get back sources."""
        llm = llm or OpenAI(temperature=0)
        chain = RetrievalQAWithSourcesChain.from_chain_type(
            llm, retriever=self.vectorstore.as_retriever(), **kwargs
        )
        return chain({chain.question_key: question})


class VectorstoreIndexCreator(BaseModel):
    """Logic for creating indexes."""

    vectorstore_cls: Type[VectorStore] = Chroma
    embedding: Embeddings = Field(default_factory=OpenAIEmbeddings)
    text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
    vectorstore_kwargs: dict = Field(default_factory=dict)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
        """Create a vectorstore index from loaders."""
        docs = []
        for loader in loaders:
            docs.extend(loader.load())
        return self.from_documents(docs)

    def from_documents(self, documents: List[Document]) -> VectorStoreIndexWrapper:
        """Create a vectorstore index from documents."""
        sub_docs = self.text_splitter.split_documents(documents)
        vectorstore = self.vectorstore_cls.from_documents(
            sub_docs, self.embedding, **self.vectorstore_kwargs
        )
        return VectorStoreIndexWrapper(vectorstore=vectorstore)
https://api.python.langchain.com/en/latest/_modules/langchain/indexes/vectorstore.html
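A brief usage sketch, not part of the original page: VectorstoreIndexCreator splits, embeds, and stores documents, and the returned wrapper exposes the query helpers above. The loader and file path are illustrative.

# Hypothetical usage sketch - assumes OPENAI_API_KEY is set and
# `state_of_the_union.txt` exists locally.
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator

loader = TextLoader("state_of_the_union.txt")  # illustrative document
index = VectorstoreIndexCreator().from_loaders([loader])
print(index.query("What did the president say about the economy?"))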
Source code for langchain.indexes.graph

"""Graph Index Creator."""
from typing import Optional, Type

from pydantic import BaseModel

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples
from langchain.indexes.prompts.knowledge_triplet_extraction import (
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)


class GraphIndexCreator(BaseModel):
    """Functionality to create graph index."""

    llm: Optional[BaseLanguageModel] = None
    graph_type: Type[NetworkxEntityGraph] = NetworkxEntityGraph

    def from_text(self, text: str) -> NetworkxEntityGraph:
        """Create graph index from text."""
        if self.llm is None:
            raise ValueError("llm should not be None")
        graph = self.graph_type()
        chain = LLMChain(llm=self.llm, prompt=KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT)
        output = chain.predict(text=text)
        knowledge = parse_triples(output)
        for triple in knowledge:
            graph.add_triple(triple)
        return graph

    async def afrom_text(self, text: str) -> NetworkxEntityGraph:
        """Create graph index from text asynchronously."""
        if self.llm is None:
            raise ValueError("llm should not be None")
        graph = self.graph_type()
        chain = LLMChain(llm=self.llm, prompt=KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT)
        output = await chain.apredict(text=text)
        knowledge = parse_triples(output)
        for triple in knowledge:
            graph.add_triple(triple)
        return graph
https://api.python.langchain.com/en/latest/_modules/langchain/indexes/graph.html
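A short usage sketch, not from the original page; the input sentence is made up and an OpenAI key is assumed to be configured:

# Hypothetical usage sketch for GraphIndexCreator.
from langchain.indexes import GraphIndexCreator
from langchain.llms import OpenAI

creator = GraphIndexCreator(llm=OpenAI(temperature=0))
graph = creator.from_text("Marie Curie won the Nobel Prize in Physics.")
print(graph.get_triples())  # list of (subject, object, predicate) triples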
Source code for langchain.experimental.autonomous_agents.autogpt.prompt

import time
from typing import Any, Callable, List

from pydantic import BaseModel

from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = (
            "Your decisions must always be made independently "
            "without seeking user assistance.\n"
            "Play to your strengths as an LLM and pursue simple "
            "strategies with no legal complications.\n"
            "If you have completed all your tasks, make sure to "
            'use the "finish" command.'
        )
        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        while used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += self.token_counter(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
            used_tokens += message_tokens
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/prompt.html
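A rough sketch of rendering the system prompt without invoking a model; not from the original page. The agent name, role, and the word-splitting token counter are stand-in assumptions, and `input_variables` mirrors how AutoGPT wires this template up:

# Hypothetical usage sketch for AutoGPTPrompt.
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt

prompt = AutoGPTPrompt(
    ai_name="Tom",
    ai_role="an assistant that researches topics",
    tools=[],  # would normally hold the agent's BaseTool instances
    input_variables=["memory", "messages", "goals", "user_input"],
    token_counter=lambda s: len(s.split()),  # crude stand-in for a real tokenizer
)
print(prompt.construct_full_prompt(goals=["Summarize today's news"]))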
Source code for langchain.experimental.autonomous_agents.autogpt.output_parser

import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple

from langchain.schema import BaseOutputParser


class AutoGPTAction(NamedTuple):
    name: str
    args: Dict


class BaseAutoGPTOutputParser(BaseOutputParser):
    @abstractmethod
    def parse(self, text: str) -> AutoGPTAction:
        """Return AutoGPTAction"""


def preprocess_json_input(input_str: str) -> str:
    """Preprocesses a string to be parsed as json.

    Replace single backslashes with double backslashes,
    while leaving already escaped ones intact.

    Args:
        input_str: String to be preprocessed

    Returns:
        Preprocessed string
    """
    corrected_str = re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )
    return corrected_str


class AutoGPTOutputParser(BaseAutoGPTOutputParser):
    def parse(self, text: str) -> AutoGPTAction:
        try:
            parsed = json.loads(text, strict=False)
        except json.JSONDecodeError:
            preprocessed_text = preprocess_json_input(text)
            try:
                parsed = json.loads(preprocessed_text, strict=False)
            except Exception:
                return AutoGPTAction(
                    name="ERROR",
                    args={"error": f"Could not parse invalid json: {text}"},
                )
        try:
            return AutoGPTAction(
                name=parsed["command"]["name"],
                args=parsed["command"]["args"],
            )
        except (KeyError, TypeError):
            # If the command is null or incomplete, return an erroneous tool
            return AutoGPTAction(
                name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
            )
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/output_parser.html
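A quick illustration of the parser's behavior on a well-formed response; the JSON payload below is made up:

# Illustrative sketch of AutoGPTOutputParser.
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
)

parser = AutoGPTOutputParser()
action = parser.parse('{"command": {"name": "search", "args": {"query": "llms"}}}')
print(action.name, action.args)  # search {'query': 'llms'}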
Source code for langchain.experimental.autonomous_agents.autogpt.prompt_generator

import json
from typing import List

from langchain.tools.base import BaseTool

FINISH_NAME = "finish"


class PromptGenerator:
    """A class for generating custom prompt strings.

    Builds the prompt from constraints, commands, resources,
    and performance evaluations.
    """

    def __init__(self) -> None:
        """Initialize the PromptGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {"name": "command name", "args": {"arg name": "value"}},
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        output = f"{tool.name}: {tool.description}"
        output += f", args json schema: {json.dumps(tool.args)}"
        return output

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below "
            f"\nResponse Format: \n{formatted_response_format} "
            f"\nEnsure the response can be parsed by Python json.loads"
        )
        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """Generate a prompt string that includes various constraints,
    commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """
    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/prompt_generator.html
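Rendering the prompt with no tools shows the fixed skeleton that get_prompt always emits; a minimal sketch, not from the original page:

# Illustrative sketch - prints constraints, the built-in "finish" command,
# resources, performance evaluations, and the JSON response format.
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
    get_prompt,
)

print(get_prompt([]))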
Source code for langchain.experimental.autonomous_agents.autogpt.memory

from typing import Any, Dict, List

from pydantic import Field

from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTMemory(BaseChatMemory):
    retriever: VectorStoreRetriever = Field(exclude=True)
    """VectorStoreRetriever object to connect to."""

    @property
    def memory_variables(self) -> List[str]:
        return ["chat_history", "relevant_context"]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        input_key = self._get_prompt_input_key(inputs)
        query = inputs[input_key]
        docs = self.retriever.get_relevant_documents(query)
        return {
            "chat_history": self.chat_memory.messages[-10:],
            "relevant_context": docs,
        }
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/memory.html
Source code for langchain.experimental.autonomous_agents.baby_agi.task_creation

from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel


class TaskCreationChain(LLMChain):
    """Chain to generate tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Create a TaskCreationChain from an LLM."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective},"
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/task_creation.html
Source code for langchain.experimental.autonomous_agents.baby_agi.task_prioritization

from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel


class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Create a TaskPrioritizationChain from an LLM."""
        task_prioritization_template = (
            "You are a task prioritization AI tasked with cleaning the formatting of "
            "and reprioritizing the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}."
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.html
Source code for langchain.experimental.autonomous_agents.baby_agi.baby_agi

"""BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
    TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
    TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
    TaskPrioritizationChain,
)
from langchain.vectorstores.base import VectorStore


class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: Chain = Field(...)
    task_prioritization_chain: Chain = Field(...)
    execution_chain: Chain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict) -> None:
        self.task_list.append(task)

    def print_task_list(self) -> None:
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict) -> None:
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str) -> None:
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def get_next_task(
        self, result: str, task_description: str, objective: str
    ) -> List[Dict]:
        """Get the next task."""
        task_names = [t["task_name"] for t in self.task_list]

        incomplete_tasks = ", ".join(task_names)
        response = self.task_creation_chain.run(
            result=result,
            task_description=task_description,
            incomplete_tasks=incomplete_tasks,
            objective=objective,
        )
        new_tasks = response.split("\n")
        return [
            {"task_name": task_name} for task_name in new_tasks if task_name.strip()
        ]

    def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in list(self.task_list)]
        next_task_id = int(this_task_id) + 1
        response = self.task_prioritization_chain.run(
            task_names=", ".join(task_names),
            next_task_id=str(next_task_id),
            objective=objective,
        )
        new_tasks = response.split("\n")
        prioritized_task_list = []
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append(
                    {"task_id": task_id, "task_name": task_name}
                )
        return prioritized_task_list

    def _get_top_tasks(self, query: str, k: int) -> List[str]:
        """Get the top k tasks based on the query."""
        results = self.vectorstore.similarity_search(query, k=k)
        if not results:
            return []
        return [str(item.metadata["task"]) for item in results]

    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
        """Execute a task."""
        context = self._get_top_tasks(query=objective, k=k)
        return self.execution_chain.run(
            objective=objective, context="\n".join(context), task=task
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run the agent."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = self.execute_task(objective, task["task_name"])
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vectorstore
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = self.get_next_task(result, task["task_name"], objective)
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        verbose: bool = False,
        task_execution_chain: Optional[Chain] = None,
        **kwargs: Dict[str, Any],
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        if task_execution_chain is None:
            execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
        else:
            execution_chain = task_execution_chain
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
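A condensed usage sketch, not from the original page: the FAISS/OpenAI wiring below is one assumption among many valid setups, since any vectorstore with add_texts and similarity_search will do.

# Hypothetical usage sketch - requires `faiss-cpu` and an OpenAI key.
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # dimensionality of OpenAI embeddings
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    max_iterations=3,  # stop after a few task loops
)
baby_agi({"objective": "Write a weather report for SF today"})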
Source code for langchain.experimental.autonomous_agents.baby_agi.task_execution

from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel


class TaskExecutionChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Create a TaskExecutionChain from an LLM."""
        execution_template = (
            "You are an AI who performs one task based on the following objective: "
            "{objective}."
            " Take into account these previously completed tasks: {context}."
            " Your task: {task}. Response:"
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["objective", "context", "task"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/task_execution.html
Source code for langchain.experimental.generative_agents.generative_agent

import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate


class GenerativeAgent(BaseModel):
    """A character with memory and innate characteristics."""

    name: str
    """The character's name."""

    age: Optional[int] = None
    """The optional age of the character."""

    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""

    status: str
    """The traits of the character you wish not to change."""

    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""

    llm: BaseLanguageModel
    """The underlying language model."""

    verbose: bool = False

    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""

    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""

    last_refreshed: datetime = Field(default_factory=datetime.now)  # : :meta private:
    """The last time the character's summary was regenerated."""

    daily_summaries: List[str] = Field(default_factory=list)  # : :meta private:
    """Summary of the events in the plan that the agent took."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(
            llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
        )

    def _get_entity_from_observation(self, observation: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the observed entity in the following observation? {observation}"
            + "\nEntity="
        )
        return self.chain(prompt).run(observation=observation).strip()

    def _get_entity_action(self, observation: str, entity_name: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the {entity} doing in the following observation? {observation}"
            + "\nThe {entity} is"
        )
        return (
            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
        )

    def summarize_related_memories(self, observation: str) -> str:
        """Summarize memories that are most relevant to an observation."""
        prompt = PromptTemplate.from_template(
            """
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
        )
        entity_name = self._get_entity_from_observation(observation)
        entity_action = self._get_entity_action(observation, entity_name)
        q1 = f"What is the relationship between {self.name} and {entity_name}"
        q2 = f"{entity_name} is {entity_action}"
        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

    def _generate_reaction(
        self, observation: str, suffix: str, now: Optional[datetime] = None
    ) -> str:
        """React to a given observation or dialogue act."""
        prompt = PromptTemplate.from_template(
            "{agent_summary_description}"
            + "\nIt is {current_time}."
            + "\n{agent_name}'s status: {agent_status}"
            + "\nSummary of relevant context from {agent_name}'s memory:"
            + "\n{relevant_memories}"
            + "\nMost recent observations: {most_recent_memories}"
            + "\nObservation: {observation}"
            + "\n\n"
            + suffix
        )
        agent_summary_description = self.get_summary(now=now)
        relevant_memories_str = self.summarize_related_memories(observation)
        current_time_str = (
            datetime.now().strftime("%B %d, %Y, %I:%M %p")
            if now is None
            else now.strftime("%B %d, %Y, %I:%M %p")
        )
        kwargs: Dict[str, Any] = dict(
            agent_summary_description=agent_summary_description,
            current_time=current_time_str,
            relevant_memories=relevant_memories_str,
            agent_name=self.name,
            observation=observation,
            agent_status=self.status,
        )
        consumed_tokens = self.llm.get_num_tokens(
            prompt.format(most_recent_memories="", **kwargs)
        )
        kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
        return self.chain(prompt=prompt).run(**kwargs).strip()

    def _clean_response(self, text: str) -> str:
        return re.sub(f"^{self.name} ", "", text.strip()).strip()

    def generate_reaction(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "Should {agent_name} react to the observation, and if so,"
            + " what would be an appropriate reaction? Respond in one line."
            + ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
            + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
            + "\nEither do nothing, react, or say something but not both.\n\n"
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        self.memory.save_context(
            {},
            {
                self.memory.add_memory_key: f"{self.name} observed "
                f"{observation} and reacted by {result}",
                self.memory.now_key: now,
            },
        )
        if "REACT:" in result:
            reaction = self._clean_response(result.split("REACT:")[-1])
            return False, f"{self.name} {reaction}"
        if "SAY:" in result:
            said_value = self._clean_response(result.split("SAY:")[-1])
            return True, f"{self.name} said {said_value}"
        else:
            return False, result

    def generate_dialogue_response(
        self, observation: str, now: Optional[datetime] = None
    ) -> Tuple[bool, str]:
        """React to a given observation with a dialogue response."""
        call_to_action_template = (
            "What would {agent_name} say? To end the conversation, write:"
            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
            ' write: SAY: "what to say next"\n\n'
        )
        full_result = self._generate_reaction(
            observation, call_to_action_template, now=now
        )
        result = full_result.strip().split("\n")[0]
        if "GOODBYE:" in result:
            farewell = self._clean_response(result.split("GOODBYE:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {farewell}",
                    self.memory.now_key: now,
                },
            )
            return False, f"{self.name} said {farewell}"
        if "SAY:" in result:
            response_text = self._clean_response(result.split("SAY:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {response_text}",
                    self.memory.now_key: now,
                },
            )
            return True, f"{self.name} said {response_text}"
        else:
            return False, result

    ######################################################
    # Agent's stateful summary methods.                  #
    # Each dialog or response prompt includes a header   #
    # summarizing the agent's self-description. This is  #
    # updated periodically through probing its memories. #
    ######################################################
    def _compute_agent_summary(self) -> str:
        """Compute a summary of the agent's core characteristics."""
        prompt = PromptTemplate.from_template(
            "How would you summarize {name}'s core characteristics given the"
            + " following statements:\n"
            + "{relevant_memories}"
            + "Do not embellish."
            + "\n\nSummary: "
        )
        # The agent seeks to think about their core characteristics.
        return (
            self.chain(prompt)
            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
            .strip()
        )

    def get_summary(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a descriptive summary of the agent."""
        current_time = datetime.now() if now is None else now
        since_refresh = (current_time - self.last_refreshed).seconds
        if (
            not self.summary
            or since_refresh >= self.summary_refresh_seconds
            or force_refresh
        ):
            self.summary = self._compute_agent_summary()
            self.last_refreshed = current_time
        age = self.age if self.age is not None else "N/A"
        return (
            f"Name: {self.name} (age: {age})"
            + f"\nInnate traits: {self.traits}"
            + f"\n{self.summary}"
        )

    def get_full_header(
        self, force_refresh: bool = False, now: Optional[datetime] = None
    ) -> str:
        """Return a full header of the agent's status, summary, and current time."""
        now = datetime.now() if now is None else now
        summary = self.get_summary(force_refresh=force_refresh, now=now)
        current_time_str = now.strftime("%B %d, %Y, %I:%M %p")
        return (
            f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
        )
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
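A hedged usage sketch, not from the original page. The name, traits, and status are made up, and `retriever` is assumed to be a TimeWeightedVectorStoreRetriever you have already built; a full construction sketch follows the memory module below.

# Hypothetical usage sketch for GenerativeAgent.
from langchain.experimental.generative_agents.generative_agent import GenerativeAgent
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.llms import OpenAI

llm = OpenAI(max_tokens=1500)
memory = GenerativeAgentMemory(
    llm=llm,
    memory_retriever=retriever,  # assumed to exist; see sketch after the memory module
    reflection_threshold=8,
)
tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    llm=llm,
    memory=memory,
)
print(tommie.get_summary(force_refresh=True))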
285ba5cee8c3-0
Source code for langchain.experimental.generative_agents.memory import logging import re from datetime import datetime from typing import Any, Dict, List, Optional from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.prompts import PromptTemplate from langchain.retrievers import TimeWeightedVectorStoreRetriever from langchain.schema import BaseMemory, Document from langchain.utils import mock_now logger = logging.getLogger(__name__) [docs]class GenerativeAgentMemory(BaseMemory): llm: BaseLanguageModel """The core language model.""" memory_retriever: TimeWeightedVectorStoreRetriever """The retriever to fetch related memories.""" verbose: bool = False reflection_threshold: Optional[float] = None """When aggregate_importance exceeds reflection_threshold, stop to reflect.""" current_plan: List[str] = [] """The current plan of the agent.""" # A weight of 0.15 makes this less important than it # would be otherwise, relative to salience and time importance_weight: float = 0.15 """How much weight to assign the memory importance.""" aggregate_importance: float = 0.0 # : :meta private: """Track the sum of the 'importance' of recent memories. Triggers reflection when it reaches reflection_threshold.""" max_tokens_limit: int = 1200 # : :meta private: # input keys queries_key: str = "queries" most_recent_memories_token_key: str = "recent_memories_token" add_memory_key: str = "add_memory" # output keys relevant_memories_key: str = "relevant_memories"
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-1
# output keys relevant_memories_key: str = "relevant_memories" relevant_memories_simple_key: str = "relevant_memories_simple" most_recent_memories_key: str = "most_recent_memories" now_key: str = "now" reflecting: bool = False [docs] def chain(self, prompt: PromptTemplate) -> LLMChain: return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose) @staticmethod def _parse_list(text: str) -> List[str]: """Parse a newline-separated string into a list of strings.""" lines = re.split(r"\n", text.strip()) lines = [line for line in lines if line.strip()] # remove empty lines return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]: """Return the 3 most salient high-level questions about recent observations.""" prompt = PromptTemplate.from_template( "{observations}\n\n" "Given only the information above, what are the 3 most salient " "high-level questions we can answer about the subjects in the statements?\n" "Provide each question on a new line." ) observations = self.memory_retriever.memory_stream[-last_k:] observation_str = "\n".join( [self._format_memory_detail(o) for o in observations] ) result = self.chain(prompt).run(observations=observation_str) return self._parse_list(result) def _get_insights_on_topic( self, topic: str, now: Optional[datetime] = None
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-2
self, topic: str, now: Optional[datetime] = None ) -> List[str]: """Generate 'insights' on a topic of reflection, based on pertinent memories.""" prompt = PromptTemplate.from_template( "Statements relevant to: '{topic}'\n" "---\n" "{related_statements}\n" "---\n" "What 5 high-level novel insights can you infer from the above statements " "that are relevant for answering the following question?\n" "Do not include any insights that are not relevant to the question.\n" "Do not repeat any insights that have already been made.\n\n" "Question: {topic}\n\n" "(example format: insight (because of 1, 5, 3))\n" ) related_memories = self.fetch_memories(topic, now=now) related_statements = "\n".join( [ self._format_memory_detail(memory, prefix=f"{i+1}. ") for i, memory in enumerate(related_memories) ] ) result = self.chain(prompt).run( topic=topic, related_statements=related_statements ) # TODO: Parse the connections between memories and insights return self._parse_list(result) [docs] def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]: """Reflect on recent observations and generate 'insights'.""" if self.verbose: logger.info("Character is reflecting") new_insights = [] topics = self._get_topics_of_reflection() for topic in topics: insights = self._get_insights_on_topic(topic, now=now)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-3
insights = self._get_insights_on_topic(topic, now=now) for insight in insights: self.add_memory(insight, now=now) new_insights.extend(insights) return new_insights def _score_memory_importance(self, memory_content: str) -> float: """Score the absolute importance of the given memory.""" prompt = PromptTemplate.from_template( "On the scale of 1 to 10, where 1 is purely mundane" + " (e.g., brushing teeth, making bed) and 10 is" + " extremely poignant (e.g., a break up, college" + " acceptance), rate the likely poignancy of the" + " following piece of memory. Respond with a single integer." + "\nMemory: {memory_content}" + "\nRating: " ) score = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f"Importance score: {score}") match = re.search(r"^\D*(\d+)", score) if match: return (float(match.group(1)) / 10) * self.importance_weight else: return 0.0 def _score_memories_importance(self, memory_content: str) -> List[float]: """Score the absolute importance of the given memory.""" prompt = PromptTemplate.from_template( "On the scale of 1 to 10, where 1 is purely mundane" + " (e.g., brushing teeth, making bed) and 10 is" + " extremely poignant (e.g., a break up, college" + " acceptance), rate the likely poignancy of the"
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-4
+ " acceptance), rate the likely poignancy of the" + " following piece of memory. Always answer with only a list of numbers." + " If just given one memory still respond in a list." + " Memories are separated by semi colans (;)" + "\Memories: {memory_content}" + "\nRating: " ) scores = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f"Importance scores: {scores}") # Split into list of strings and convert to floats scores_list = [float(x) for x in scores.split(";")] return scores_list [docs] def add_memories( self, memory_content: str, now: Optional[datetime] = None ) -> List[str]: """Add an observations or memories to the agent's memory.""" importance_scores = self._score_memories_importance(memory_content) self.aggregate_importance += max(importance_scores) memory_list = memory_content.split(";") documents = [] for i in range(len(memory_list)): documents.append( Document( page_content=memory_list[i], metadata={"importance": importance_scores[i]}, ) ) result = self.memory_retriever.add_documents(documents, current_time=now) # After an agent has processed a certain amount of memories (as measured by # aggregate importance), it is time to reflect on recent events to add # more synthesized memories to the agent's memory stream. if ( self.reflection_threshold is not None and self.aggregate_importance > self.reflection_threshold and not self.reflecting ): self.reflecting = True
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-5
and not self.reflecting ): self.reflecting = True self.pause_to_reflect(now=now) # Hack to clear the importance from reflection self.aggregate_importance = 0.0 self.reflecting = False return result [docs] def add_memory( self, memory_content: str, now: Optional[datetime] = None ) -> List[str]: """Add an observation or memory to the agent's memory.""" importance_score = self._score_memory_importance(memory_content) self.aggregate_importance += importance_score document = Document( page_content=memory_content, metadata={"importance": importance_score} ) result = self.memory_retriever.add_documents([document], current_time=now) # After an agent has processed a certain amount of memories (as measured by # aggregate importance), it is time to reflect on recent events to add # more synthesized memories to the agent's memory stream. if ( self.reflection_threshold is not None and self.aggregate_importance > self.reflection_threshold and not self.reflecting ): self.reflecting = True self.pause_to_reflect(now=now) # Hack to clear the importance from reflection self.aggregate_importance = 0.0 self.reflecting = False return result [docs] def fetch_memories( self, observation: str, now: Optional[datetime] = None ) -> List[Document]: """Fetch related memories.""" if now is not None: with mock_now(now): return self.memory_retriever.get_relevant_documents(observation) else: return self.memory_retriever.get_relevant_documents(observation)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-6
else:
return self.memory_retriever.get_relevant_documents(observation)
[docs] def format_memories_detail(self, relevant_memories: List[Document]) -> str:
content = []
for mem in relevant_memories:
content.append(self._format_memory_detail(mem, prefix="- "))
return "\n".join(content)
def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
return f"{prefix}[{created_time}] {memory.page_content.strip()}"
[docs] def format_memories_simple(self, relevant_memories: List[Document]) -> str:
return "; ".join(mem.page_content for mem in relevant_memories)
def _get_memories_until_limit(self, consumed_tokens: int) -> str:
"""Return the most recent memories that fit within the token limit."""
result = []
for doc in self.memory_retriever.memory_stream[::-1]:
if consumed_tokens >= self.max_tokens_limit:
break
consumed_tokens += self.llm.get_num_tokens(doc.page_content)
if consumed_tokens < self.max_tokens_limit:
result.append(doc)
return self.format_memories_simple(result)
@property
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
return []
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
285ba5cee8c3-7
now = inputs.get(self.now_key) if queries is not None: relevant_memories = [ mem for query in queries for mem in self.fetch_memories(query, now=now) ] return { self.relevant_memories_key: self.format_memories_detail( relevant_memories ), self.relevant_memories_simple_key: self.format_memories_simple( relevant_memories ), } most_recent_memories_token = inputs.get(self.most_recent_memories_token_key) if most_recent_memories_token is not None: return { self.most_recent_memories_key: self._get_memories_until_limit( most_recent_memories_token ) } return {} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None: """Save the context of this model run to memory.""" # TODO: fix the save memory key mem = outputs.get(self.add_memory_key) now = outputs.get(self.now_key) if mem: self.add_memory(mem, now=now) [docs] def clear(self) -> None: """Clear memory contents.""" # TODO
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
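A minimal usage sketch of the memory class in this module (the class header is in an earlier chunk of this page; it is assumed here to be GenerativeAgentMemory). The FAISS/OpenAI wiring below mirrors the generative-agents tutorial and is an assumption, not part of this module:

from datetime import datetime

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.generative_agents import GenerativeAgentMemory
from langchain.llms import OpenAI
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # OpenAI embeddings are 1536-dimensional
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=5
)
memory = GenerativeAgentMemory(
    llm=OpenAI(temperature=0),
    memory_retriever=retriever,
    reflection_threshold=8,  # pause_to_reflect fires once aggregate importance crosses this
)
memory.add_memory("Tommie feels tired from driving so far", now=datetime.now())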
797fc32fc2c9-0
Source code for langchain.experimental.llms.rellm_decoder """Experimental implementation of RELLM wrapped LLM.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Optional, cast from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain.llms.utils import enforce_stop_tokens if TYPE_CHECKING: import rellm from regex import Pattern as RegexPattern else: try: from regex import Pattern as RegexPattern except ImportError: pass [docs]def import_rellm() -> rellm: """Lazily import rellm.""" try: import rellm except ImportError: raise ValueError( "Could not import rellm python package. " "Please install it with `pip install rellm`." ) return rellm [docs]class RELLM(HuggingFacePipeline): regex: RegexPattern = Field(..., description="The structured format to complete.") max_new_tokens: int = Field( default=200, description="Maximum number of new tokens to generate." ) [docs] @root_validator def check_rellm_installation(cls, values: dict) -> dict: import_rellm() return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: rellm = import_rellm() from transformers import Text2TextGenerationPipeline
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/llms/rellm_decoder.html
797fc32fc2c9-1
from transformers import Text2TextGenerationPipeline pipeline = cast(Text2TextGenerationPipeline, self.pipeline) text = rellm.complete_re( prompt, self.regex, tokenizer=pipeline.tokenizer, model=pipeline.model, max_new_tokens=self.max_new_tokens, ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/llms/rellm_decoder.html
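A hedged sketch of driving the RELLM wrapper above. The gpt2 model and the regex are illustrative assumptions; any causal text-generation pipeline should work:

import regex  # rellm expects the third-party `regex` package, not the stdlib `re`
from transformers import pipeline

hf_pipeline = pipeline("text-generation", model="gpt2", max_new_tokens=16)
year_pattern = regex.compile(r"\d{4}")  # constrain the completion to a 4-digit year

llm = RELLM(pipeline=hf_pipeline, regex=year_pattern, max_new_tokens=8)
print(llm("The Eiffel Tower was completed in the year "))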
11877270ce07-0
Source code for langchain.experimental.llms.jsonformer_decoder """Experimental implementation of jsonformer wrapped LLM.""" from __future__ import annotations import json from typing import TYPE_CHECKING, Any, List, Optional, cast from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.huggingface_pipeline import HuggingFacePipeline if TYPE_CHECKING: import jsonformer [docs]def import_jsonformer() -> jsonformer: """Lazily import jsonformer.""" try: import jsonformer except ImportError: raise ValueError( "Could not import jsonformer python package. " "Please install it with `pip install jsonformer`." ) return jsonformer [docs]class JsonFormer(HuggingFacePipeline): json_schema: dict = Field(..., description="The JSON Schema to complete.") max_new_tokens: int = Field( default=200, description="Maximum number of new tokens to generate." ) debug: bool = Field(default=False, description="Debug mode.") [docs] @root_validator def check_jsonformer_installation(cls, values: dict) -> dict: import_jsonformer() return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: jsonformer = import_jsonformer() from transformers import Text2TextGenerationPipeline pipeline = cast(Text2TextGenerationPipeline, self.pipeline) model = jsonformer.Jsonformer( model=pipeline.model, tokenizer=pipeline.tokenizer,
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/llms/jsonformer_decoder.html
11877270ce07-1
model=pipeline.model, tokenizer=pipeline.tokenizer, json_schema=self.json_schema, prompt=prompt, max_number_tokens=self.max_new_tokens, debug=self.debug, ) text = model() return json.dumps(text)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/llms/jsonformer_decoder.html
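A similar hedged sketch for the JsonFormer wrapper; the schema and model choice are illustrative assumptions:

from transformers import pipeline

hf_pipeline = pipeline("text-generation", model="gpt2")

json_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
    },
}
llm = JsonFormer(pipeline=hf_pipeline, json_schema=json_schema)
result = llm("Generate a record for a fictional person:")
print(result)  # a JSON string conforming to json_schema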
6b253142c513-0
Source code for langchain.experimental.plan_and_execute.agent_executor from typing import Any, Dict, List, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.experimental.plan_and_execute.executors.base import BaseExecutor from langchain.experimental.plan_and_execute.planners.base import BasePlanner from langchain.experimental.plan_and_execute.schema import ( BaseStepContainer, ListStepContainer, ) [docs]class PlanAndExecute(Chain): planner: BasePlanner executor: BaseExecutor step_container: BaseStepContainer = Field(default_factory=ListStepContainer) input_key: str = "input" output_key: str = "output" @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: plan = self.planner.plan( inputs, callbacks=run_manager.get_child() if run_manager else None, ) if run_manager: run_manager.on_text(str(plan), verbose=self.verbose) for step in plan.steps: _new_inputs = { "previous_steps": self.step_container, "current_step": step, "objective": inputs[self.input_key], } new_inputs = {**_new_inputs, **inputs} response = self.executor.step( new_inputs, callbacks=run_manager.get_child() if run_manager else None, )
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/agent_executor.html
6b253142c513-1
callbacks=run_manager.get_child() if run_manager else None, ) if run_manager: run_manager.on_text( f"*****\n\nStep: {step.value}", verbose=self.verbose ) run_manager.on_text( f"\n\nResponse: {response.response}", verbose=self.verbose ) self.step_container.add_step(step, response) return {self.output_key: self.step_container.get_final_response()}
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/agent_executor.html
fe94faadb51e-0
Source code for langchain.experimental.plan_and_execute.schema from abc import abstractmethod from typing import List, Tuple from pydantic import BaseModel, Field from langchain.schema import BaseOutputParser [docs]class Step(BaseModel): value: str [docs]class Plan(BaseModel): steps: List[Step] [docs]class StepResponse(BaseModel): response: str [docs]class BaseStepContainer(BaseModel): [docs] @abstractmethod def add_step(self, step: Step, step_response: StepResponse) -> None: """Add step and step response to the container.""" [docs] @abstractmethod def get_final_response(self) -> str: """Return the final response based on steps taken.""" [docs]class ListStepContainer(BaseModel): steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list) [docs] def add_step(self, step: Step, step_response: StepResponse) -> None: self.steps.append((step, step_response)) [docs] def get_steps(self) -> List[Tuple[Step, StepResponse]]: return self.steps [docs] def get_final_response(self) -> str: return self.steps[-1][1].response [docs]class PlanOutputParser(BaseOutputParser): [docs] @abstractmethod def parse(self, text: str) -> Plan: """Parse into a plan."""
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/schema.html
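The step container is plain data; with the classes above in scope, a quick illustration of how PlanAndExecute accumulates (step, response) pairs and reports the last response as the final answer:

container = ListStepContainer()
container.add_step(
    Step(value="Look up the population of Paris"),
    StepResponse(response="About 2.1 million people"),
)
container.add_step(
    Step(value="Answer the user's question"),
    StepResponse(response="Paris has roughly 2.1 million residents."),
)
# The final response is simply the response of the last step taken.
assert container.get_final_response() == "Paris has roughly 2.1 million residents."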
dc68bc85faa4-0
Source code for langchain.experimental.plan_and_execute.planners.base from abc import abstractmethod from typing import Any, List, Optional from pydantic import BaseModel from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser [docs]class BasePlanner(BaseModel): [docs] @abstractmethod def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: """Given input, decide what to do.""" [docs] @abstractmethod async def aplan( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> Plan: """Given input, decide what to do.""" [docs]class LLMPlanner(BasePlanner): llm_chain: LLMChain output_parser: PlanOutputParser stop: Optional[List] = None [docs] def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: """Given input, decide what to do.""" llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks) return self.output_parser.parse(llm_response) [docs] async def aplan( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> Plan: """Given input, decide what to do.""" llm_response = await self.llm_chain.arun( **inputs, stop=self.stop, callbacks=callbacks ) return self.output_parser.parse(llm_response)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/planners/base.html
5028c53af037-0
Source code for langchain.experimental.plan_and_execute.planners.chat_planner
import re
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.experimental.plan_and_execute.planners.base import LLMPlanner
from langchain.experimental.plan_and_execute.schema import (
Plan,
PlanOutputParser,
Step,
)
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import SystemMessage
SYSTEM_PROMPT = (
"Let's first understand the problem and devise a plan to solve the problem."
" Please output the plan starting with the header 'Plan:' "
"followed by a numbered list of steps. "
"Please make the plan the minimum number of steps required "
"to accurately complete the task. If the task is a question, "
"the final step should almost always be 'Given the above steps taken, "
"please respond to the user's original question'. "
"At the end of your plan, say '<END_OF_PLAN>'"
)
[docs]class PlanningOutputParser(PlanOutputParser):
[docs] def parse(self, text: str) -> Plan:
steps = [Step(value=v) for v in re.split(r"\n\s*\d+\. ", text)[1:]]
return Plan(steps=steps)
[docs]def load_chat_planner(
llm: BaseLanguageModel, system_prompt: str = SYSTEM_PROMPT
) -> LLMPlanner:
"""
Load a chat planner.
Args:
llm: Language model.
system_prompt: System prompt.
Returns:
LLMPlanner
"""
prompt_template = ChatPromptTemplate.from_messages(
[
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/planners/chat_planner.html
5028c53af037-1
""" prompt_template = ChatPromptTemplate.from_messages( [ SystemMessage(content=system_prompt), HumanMessagePromptTemplate.from_template("{input}"), ] ) llm_chain = LLMChain(llm=llm, prompt=prompt_template) return LLMPlanner( llm_chain=llm_chain, output_parser=PlanningOutputParser(), stop=["<END_OF_PLAN>"], )
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/planners/chat_planner.html
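Grounded in the parser above: it splits the LLM output on numbered-list markers and drops the "Plan:" header; the <END_OF_PLAN> marker never reaches the parser because the planner uses it as a stop sequence. The sample plan text here is an assumption:

parser = PlanningOutputParser()
plan = parser.parse(
    "Plan:\n"
    "1. Look up the 2023 population of Paris.\n"
    "2. Given the above steps taken, please respond to the user's original question."
)
print([step.value for step in plan.steps])
# ['Look up the 2023 population of Paris.',
#  "Given the above steps taken, please respond to the user's original question."]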
3307acd638dd-0
Source code for langchain.experimental.plan_and_execute.executors.agent_executor from typing import List from langchain.agents.agent import AgentExecutor from langchain.agents.structured_chat.base import StructuredChatAgent from langchain.base_language import BaseLanguageModel from langchain.experimental.plan_and_execute.executors.base import ChainExecutor from langchain.tools import BaseTool HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps} Current objective: {current_step} {agent_scratchpad}""" TASK_PREFIX = """{objective} """ [docs]def load_agent_executor( llm: BaseLanguageModel, tools: List[BaseTool], verbose: bool = False, include_task_in_prompt: bool = False, ) -> ChainExecutor: """ Load an agent executor. Args: llm: BaseLanguageModel tools: List[BaseTool] verbose: bool. Defaults to False. include_task_in_prompt: bool. Defaults to False. Returns: ChainExecutor """ input_variables = ["previous_steps", "current_step", "agent_scratchpad"] template = HUMAN_MESSAGE_TEMPLATE if include_task_in_prompt: input_variables.append("objective") template = TASK_PREFIX + template agent = StructuredChatAgent.from_llm_and_tools( llm, tools, human_message_template=template, input_variables=input_variables, ) agent_executor = AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, verbose=verbose ) return ChainExecutor(chain=agent_executor)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/executors/agent_executor.html
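Putting the pieces above together: a sketch wiring PlanAndExecute with load_chat_planner and load_agent_executor. The Calculator tool and the OpenAI models are illustrative assumptions:

from langchain.chains import LLMMathChain
from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)
from langchain.llms import OpenAI
from langchain.tools import Tool

llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm)
tools = [
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for when you need to answer questions about math",
    )
]

model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = load_agent_executor(model, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)
agent.run("What is 3 to the power of 0.43?")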
a201fe07733d-0
Source code for langchain.experimental.plan_and_execute.executors.base from abc import abstractmethod from typing import Any from pydantic import BaseModel from langchain.callbacks.manager import Callbacks from langchain.chains.base import Chain from langchain.experimental.plan_and_execute.schema import StepResponse [docs]class BaseExecutor(BaseModel): [docs] @abstractmethod def step( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> StepResponse: """Take step.""" [docs] @abstractmethod async def astep( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> StepResponse: """Take step.""" [docs]class ChainExecutor(BaseExecutor): chain: Chain [docs] def step( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> StepResponse: """Take step.""" response = self.chain.run(**inputs, callbacks=callbacks) return StepResponse(response=response) [docs] async def astep( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> StepResponse: """Take step.""" response = await self.chain.arun(**inputs, callbacks=callbacks) return StepResponse(response=response)
https://api.python.langchain.com/en/latest/_modules/langchain/experimental/plan_and_execute/executors/base.html
d4c36500784e-0
Source code for langchain.tools.plugin
from __future__ import annotations
import json
from typing import Optional, Type
import requests
import yaml
from pydantic import BaseModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
[docs]class ApiConfig(BaseModel):
type: str
url: str
has_user_authentication: Optional[bool] = False
[docs]class AIPlugin(BaseModel):
"""AI Plugin Definition."""
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: Optional[dict] = None
api: ApiConfig
logo_url: Optional[str]
contact_email: Optional[str]
legal_info_url: Optional[str]
[docs] @classmethod
def from_url(cls, url: str) -> AIPlugin:
"""Instantiate AIPlugin from a URL."""
response = requests.get(url).json()
return cls(**response)
[docs]def marshal_spec(txt: str) -> dict:
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
[docs]class AIPluginToolSchema(BaseModel):
"""AIPluginToolSchema."""
tool_input: Optional[str] = ""
[docs]class AIPluginTool(BaseTool):
plugin: AIPlugin
api_spec: str
https://api.python.langchain.com/en/latest/_modules/langchain/tools/plugin.html
d4c36500784e-1
plugin: AIPlugin api_spec: str args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema [docs] @classmethod def from_plugin_url(cls, url: str) -> AIPluginTool: plugin = AIPlugin.from_url(url) description = ( f"Call this tool to get the OpenAPI spec (and usage guide) " f"for interacting with the {plugin.name_for_human} API. " f"You should only call this ONCE! What is the " f"{plugin.name_for_human} API useful for? " ) + plugin.description_for_human open_api_spec_str = requests.get(plugin.api.url).text open_api_spec = marshal_spec(open_api_spec_str) api_spec = ( f"Usage Guide: {plugin.description_for_model}\n\n" f"OpenAPI Spec: {open_api_spec}" ) return cls( name=plugin.name_for_model, description=description, plugin=plugin, api_spec=api_spec, ) def _run( self, tool_input: Optional[str] = "", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return self.api_spec async def _arun( self, tool_input: Optional[str] = None, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" return self.api_spec
https://api.python.langchain.com/en/latest/_modules/langchain/tools/plugin.html
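A short sketch of the tool above; the Klarna plugin URL is the illustrative example commonly used in LangChain docs and is an assumption here:

tool = AIPluginTool.from_plugin_url(
    "https://www.klarna.com/.well-known/ai-plugin.json"
)
print(tool.name)           # the plugin's name_for_model, e.g. "KlarnaProducts"
print(tool.run("")[:200])  # the usage guide plus OpenAPI spec, truncated here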
99c0d61aae16-0
Source code for langchain.tools.ifttt """From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services. # Creating a webhook - Go to https://ifttt.com/create # Configuring the "If This" - Click on the "If This" button in the IFTTT interface. - Search for "Webhooks" in the search bar. - Choose the first option for "Receive a web request with a JSON payload." - Choose an Event Name that is specific to the service you plan to connect to. This will make it easier for you to manage the webhook URL. For example, if you're connecting to Spotify, you could use "Spotify" as your Event Name. - Click the "Create Trigger" button to save your settings and create your webhook. # Configuring the "Then That" - Tap on the "Then That" button in the IFTTT interface. - Search for the service you want to connect, such as Spotify. - Choose an action from the service, such as "Add track to a playlist". - Configure the action by specifying the necessary details, such as the playlist name, e.g., "Songs from AI". - Reference the JSON Payload received by the Webhook in your action. For the Spotify scenario, choose "{{JsonPayload}}" as your search query. - Tap the "Create Action" button to save your action settings. - Once you have finished configuring your action, click the "Finish" button to complete the setup. - Congratulations! You have successfully connected the Webhook to the desired service, and you're ready to start receiving data and triggering actions 🎉 # Finishing up - To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
https://api.python.langchain.com/en/latest/_modules/langchain/tools/ifttt.html
99c0d61aae16-1
- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings - Copy the IFTTT key value from there. The URL is of the form https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value. """ from typing import Optional import requests from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool [docs]class IFTTTWebhook(BaseTool): """IFTTT Webhook. Args: name: name of the tool description: description of the tool url: url to hit with the json event. """ url: str def _run( self, tool_input: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: body = {"this": tool_input} response = requests.post(self.url, data=body) return response.text async def _arun( self, tool_input: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: raise NotImplementedError("Not implemented.")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/ifttt.html
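Grounded in the setup guide above, a sketch of instantiating and firing the webhook tool. The "spotify" event name and the IFTTT_KEY environment variable are illustrative assumptions; the trigger-URL shape follows IFTTT's maker-webhook convention:

import os

key = os.environ["IFTTT_KEY"]  # hypothetical env var holding YOUR_IFTTT_KEY
url = f"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}"
tool = IFTTTWebhook(
    name="Spotify",
    description="Add a track to a Spotify playlist",
    url=url,
)
print(tool.run("taylor swift"))  # posts {"this": "taylor swift"} to the webhook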
060a5f581749-0
Source code for langchain.tools.convert_to_openai from typing import TypedDict from langchain.tools import BaseTool, StructuredTool [docs]class FunctionDescription(TypedDict): """Representation of a callable function to the OpenAI API.""" name: str """The name of the function.""" description: str """A description of the function.""" parameters: dict """The parameters of the function.""" [docs]def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription: """Format tool into the OpenAI function API.""" if isinstance(tool, StructuredTool): schema_ = tool.args_schema.schema() # Bug with required missing for structured tools. required = sorted(schema_["properties"]) # BUG WORKAROUND return { "name": tool.name, "description": tool.description, "parameters": { "type": "object", "properties": schema_["properties"], "required": required, }, } else: if tool.args_schema: parameters = tool.args_schema.schema() else: parameters = { # This is a hack to get around the fact that some tools # do not expose an args_schema, and expect an argument # which is a string. # And Open AI does not support an array type for the # parameters. "properties": { "__arg1": {"title": "__arg1", "type": "string"}, }, "required": ["__arg1"], "type": "object", } return { "name": tool.name, "description": tool.description, "parameters": parameters, }
https://api.python.langchain.com/en/latest/_modules/langchain/tools/convert_to_openai.html
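A small sketch of the converter above, using the @tool decorator from langchain.tools.base (shown later on this page) to build a StructuredTool whose inferred schema is translated into the OpenAI function format:

from langchain.tools.base import tool

@tool
def get_word_length(word: str) -> str:
    """Get the number of characters in a word."""
    return str(len(word))

fn = format_tool_to_openai_function(get_word_length)
print(fn["name"])        # "get_word_length"
print(fn["parameters"])  # JSON-schema object inferred from the signature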
67c9cccc59c5-0
Source code for langchain.tools.base
"""Base implementation for tools or skills."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Type, Union
from pydantic import (
BaseModel,
Extra,
Field,
create_model,
root_validator,
validate_arguments,
)
from pydantic.main import ModelMetaclass
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForToolRun,
CallbackManager,
CallbackManagerForToolRun,
Callbacks,
)
[docs]class SchemaAnnotationError(TypeError):
"""Raised when 'args_schema' is missing or has an incorrect type annotation."""
[docs]class ToolMetaclass(ModelMetaclass):
"""Metaclass for BaseTool to ensure the provided args_schema isn't silently ignored."""
def __new__(
cls: Type[ToolMetaclass], name: str, bases: Tuple[Type, ...], dct: dict
) -> ToolMetaclass:
"""Create the definition of the new tool class."""
schema_type: Optional[Type[BaseModel]] = dct.get("args_schema")
if schema_type is not None:
schema_annotations = dct.get("__annotations__", {})
args_schema_type = schema_annotations.get("args_schema", None)
if args_schema_type is None or args_schema_type == BaseModel:
# Throw errors for common mis-annotations.
# TODO: Use get_args / get_origin and fully
# specify valid annotations.
typehint_mandate = """
class ChildTool(BaseTool):
...
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-1
typehint_mandate = """ class ChildTool(BaseTool): ... args_schema: Type[BaseModel] = SchemaClass ...""" raise SchemaAnnotationError( f"Tool definition for {name} must include valid type annotations" f" for argument 'args_schema' to behave as expected.\n" f"Expected annotation of 'Type[BaseModel]'" f" but got '{args_schema_type}'.\n" f"Expected class looks like:\n" f"{typehint_mandate}" ) # Pass through to Pydantic's metaclass return super().__new__(cls, name, bases, dct) def _create_subset_model( name: str, model: BaseModel, field_names: list ) -> Type[BaseModel]: """Create a pydantic model with only a subset of model's fields.""" fields = {} for field_name in field_names: field = model.__fields__[field_name] fields[field_name] = (field.type_, field.field_info) return create_model(name, **fields) # type: ignore def _get_filtered_args( inferred_model: Type[BaseModel], func: Callable, ) -> dict: """Get the arguments from a function's signature.""" schema = inferred_model.schema()["properties"] valid_keys = signature(func).parameters return {k: schema[k] for k in valid_keys if k not in ("run_manager", "callbacks")} class _SchemaConfig: """Configuration for the pydantic model.""" extra = Extra.forbid arbitrary_types_allowed = True [docs]def create_schema_from_function( model_name: str, func: Callable,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-2
model_name: str,
func: Callable,
) -> Type[BaseModel]:
"""Create a pydantic schema from a function's signature.
Args:
model_name: Name to assign to the generated pydantic schema
func: Function to generate the schema from
Returns:
A pydantic model with the same arguments as the function
"""
# https://docs.pydantic.dev/latest/usage/validation_decorator/
validated = validate_arguments(func, config=_SchemaConfig) # type: ignore
inferred_model = validated.model # type: ignore
if "run_manager" in inferred_model.__fields__:
del inferred_model.__fields__["run_manager"]
if "callbacks" in inferred_model.__fields__:
del inferred_model.__fields__["callbacks"]
# Pydantic adds placeholder virtual fields we need to strip
valid_properties = _get_filtered_args(inferred_model, func)
return _create_subset_model(
f"{model_name}Schema", inferred_model, list(valid_properties)
)
[docs]class ToolException(Exception):
"""An optional exception that a tool can raise when a tool execution error occurs.
When this exception is raised, the agent does not stop working; it handles
the exception according to the tool's handle_tool_error setting, and the
result is returned to the agent as an observation and printed in red on
the console.
"""
pass
[docs]class BaseTool(ABC, BaseModel, metaclass=ToolMetaclass):
"""Interface LangChain tools must implement."""
name: str
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool.
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-3
"""Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. """ args_schema: Optional[Type[BaseModel]] = None """Pydantic model class to validate and parse the tool's input arguments.""" return_direct: bool = False """Whether to return the tool's output directly. Setting this to True means that after the tool is called, the AgentExecutor will stop looping. """ verbose: bool = False """Whether to log the tool's progress.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to be called during tool execution.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Deprecated. Please use callbacks instead.""" handle_tool_error: Optional[ Union[bool, str, Callable[[ToolException], str]] ] = False """Handle the content of the ToolException thrown.""" [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def is_single_input(self) -> bool: """Whether the tool only accepts a single input.""" keys = {k for k in self.args if k != "kwargs"} return len(keys) == 1 @property def args(self) -> dict: if self.args_schema is not None: return self.args_schema.schema()["properties"] else: schema = create_schema_from_function(self.name, self._run) return schema.schema()["properties"] def _parse_input( self, tool_input: Union[str, Dict],
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-4
self,
tool_input: Union[str, Dict],
) -> Union[str, Dict[str, Any]]:
"""Convert tool input to pydantic model."""
input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
key_ = next(iter(input_args.__fields__.keys()))
input_args.validate({key_: tool_input})
return tool_input
else:
if input_args is not None:
result = input_args.parse_obj(tool_input)
return {k: v for k, v in result.dict().items() if k in tool_input}
return tool_input
[docs] @root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@abstractmethod
def _run(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool.
Add run_manager: Optional[CallbackManagerForToolRun] = None
to child implementations to enable tracing.
"""
@abstractmethod
async def _arun(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool asynchronously.
Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None
to child implementations to enable tracing.
"""
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-5
to child implementations to enable tracing.
"""
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:
# For backwards compatibility, if run_input is a string,
# pass as a positional argument.
if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
[docs] def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool."""
parsed_input = self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, verbose=verbose_
)
# TODO: maybe also pass through run_manager if _run supports kwargs
new_arg_supported = signature(self._run).parameters.get("run_manager")
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = (
self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else self._run(*tool_args, **tool_kwargs)
)
except ToolException as e:
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-6
) except ToolException as e: if not self.handle_tool_error: run_manager.on_tool_error(e) raise e elif isinstance(self.handle_tool_error, bool): if e.args: observation = e.args[0] else: observation = "Tool execution error" elif isinstance(self.handle_tool_error, str): observation = self.handle_tool_error elif callable(self.handle_tool_error): observation = self.handle_tool_error(e) else: raise ValueError( f"Got unexpected type of `handle_tool_error`. Expected bool, str " f"or callable. Received: {self.handle_tool_error}" ) run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs ) return observation except (Exception, KeyboardInterrupt) as e: run_manager.on_tool_error(e) raise e else: run_manager.on_tool_end( str(observation), color=color, name=self.name, **kwargs ) return observation [docs] async def arun( self, tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = "green", color: Optional[str] = "green", callbacks: Callbacks = None, **kwargs: Any, ) -> Any: """Run the tool asynchronously.""" parsed_input = self._parse_input(tool_input) if not self.verbose and verbose is not None: verbose_ = verbose else: verbose_ = self.verbose callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, verbose=verbose_ )
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-7
callbacks, self.callbacks, verbose=verbose_ ) new_arg_supported = signature(self._arun).parameters.get("run_manager") run_manager = await callback_manager.on_tool_start( {"name": self.name, "description": self.description}, tool_input if isinstance(tool_input, str) else str(tool_input), color=start_color, **kwargs, ) try: # We then call the tool on the tool input to get an observation tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs) if new_arg_supported else await self._arun(*tool_args, **tool_kwargs) ) except ToolException as e: if not self.handle_tool_error: await run_manager.on_tool_error(e) raise e elif isinstance(self.handle_tool_error, bool): if e.args: observation = e.args[0] else: observation = "Tool execution error" elif isinstance(self.handle_tool_error, str): observation = self.handle_tool_error elif callable(self.handle_tool_error): observation = self.handle_tool_error(e) else: raise ValueError( f"Got unexpected type of `handle_tool_error`. Expected bool, str " f"or callable. Received: {self.handle_tool_error}" ) await run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs ) return observation except (Exception, KeyboardInterrupt) as e: await run_manager.on_tool_error(e) raise e else: await run_manager.on_tool_end(
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-8
raise e else: await run_manager.on_tool_end( str(observation), color=color, name=self.name, **kwargs ) return observation [docs] def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str: """Make tool callable.""" return self.run(tool_input, callbacks=callbacks) [docs]class Tool(BaseTool): """Tool that takes in function or coroutine directly.""" description: str = "" func: Callable[..., str] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[str]]] = None """The asynchronous version of the function.""" @property def args(self) -> dict: """The tool's input arguments.""" if self.args_schema is not None: return self.args_schema.schema()["properties"] # For backwards compatibility, if the function signature is ambiguous, # assume it takes a single string input. return {"tool_input": {"type": "string"}} def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) # For backwards compatibility. The tool must be run with a single input all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ToolException( f"Too many arguments to single-input tool {self.name}." f" Args: {all_args}" ) return tuple(all_args), {} def _run( self, *args: Any,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-9
def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) raise NotImplementedError("Tool does not support async") # TODO: this is for backwards compatibility, remove in future def __init__( self, name: str, func: Callable, description: str, **kwargs: Any ) -> None: """Initialize tool.""" super(Tool, self).__init__( name=name, func=func, description=description, **kwargs ) [docs] @classmethod def from_function( cls, func: Callable,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-10
def from_function( cls, func: Callable, name: str, # We keep these required to support backwards compatibility description: str, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, **kwargs: Any, ) -> Tool: """Initialize tool from a function.""" return cls( name=name, func=func, description=description, return_direct=return_direct, args_schema=args_schema, **kwargs, ) [docs]class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" args_schema: Type[BaseModel] = Field(..., description="The tool schema.") """The input arguments' schema.""" func: Callable[..., Any] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[Any]]] = None """The asynchronous version of the function.""" @property def args(self) -> dict: """The tool's input arguments.""" return self.args_schema.schema()["properties"] def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) async def _arun(
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-11
) async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> str: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) raise NotImplementedError("Tool does not support async") [docs] @classmethod def from_function( cls, func: Callable, name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, **kwargs: Any, ) -> StructuredTool: """Create tool from a given function. A classmethod that helps to create a tool from a function. Args: func: The function from which to create a tool name: The name of the tool. Defaults to the function name description: The description of the tool. Defaults to the function docstring return_direct: Whether to return the result directly or as a callback args_schema: The schema of the tool's input arguments infer_schema: Whether to infer the schema from the function's signature **kwargs: Additional arguments to pass to the tool Returns: The tool Examples: ... code-block:: python
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-12
Returns:
The tool
Examples:
.. code-block:: python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers\"\"\"
return a + b
tool = StructuredTool.from_function(add)
tool.run({"a": 1, "b": 2}) # 3
"""
name = name or func.__name__
description = description or func.__doc__
assert (
description is not None
), "Function must have a docstring if description not provided."
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{name}{signature(func)} - {description.strip()}"
_args_schema = args_schema
if _args_schema is None and infer_schema:
_args_schema = create_schema_from_function(f"{name}Schema", func)
return cls(
name=name,
func=func,
args_schema=_args_schema,
description=description,
return_direct=return_direct,
**kwargs,
)
[docs]def tool(
*args: Union[str, Callable],
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
infer_schema: bool = True,
) -> Callable:
"""Make tools out of functions; can be used with or without arguments.
Args:
*args: The arguments to the tool.
return_direct: Whether to return directly from the tool rather
than continuing the agent loop.
args_schema: optional argument schema for user to specify
infer_schema: Whether to infer the schema of the arguments from
the function's signature. This also makes the resultant tool
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-13
the function's signature. This also makes the resultant tool
accept a dictionary input to its `run()` function.
Requires:
- Function must be of type (str) -> str
- Function must have a docstring
Examples:
.. code-block:: python
@tool
def search_api(query: str) -> str:
\"\"\"Searches the API for the query.\"\"\"
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
\"\"\"Searches the API for the query.\"\"\"
return
"""
def _make_with_name(tool_name: str) -> Callable:
def _make_tool(func: Callable) -> BaseTool:
if infer_schema or args_schema is not None:
return StructuredTool.from_function(
func,
name=tool_name,
return_direct=return_direct,
args_schema=args_schema,
infer_schema=infer_schema,
)
# If someone doesn't want a schema applied, we must treat it as
# a simple string->string function
assert func.__doc__ is not None, "Function must have a docstring"
return Tool(
name=tool_name,
func=func,
description=f"{tool_name} tool",
return_direct=return_direct,
)
return _make_tool
if len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name
# Example usage: @tool("search", return_direct=True)
return _make_with_name(args[0])
elif len(args) == 1 and callable(args[0]):
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
67c9cccc59c5-14
elif len(args) == 1 and callable(args[0]): # if the argument is a function, then we use the function name as the tool name # Example usage: @tool return _make_with_name(args[0].__name__)(args[0]) elif len(args) == 0: # if there are no arguments, then we use the function name as the tool name # Example usage: @tool(return_direct=True) def _partial(func: Callable[[str], str]) -> BaseTool: return _make_with_name(func.__name__)(func) return _partial else: raise ValueError("Too many arguments for tool decorator")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/base.html
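Grounded in the decorator above, a small end-to-end example: with infer_schema enabled (the default), a pydantic schema is built from the signature, so the resulting StructuredTool accepts a dict input to run():

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

print(multiply.name)  # "multiply"
print(multiply.args)  # properties inferred from the signature: a, b
print(multiply.run({"a": 6, "b": 7}))  # 42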
a694b319b5b6-0
Source code for langchain.tools.zapier.tool """## Zapier Natural Language Actions API \ Full docs here: https://nla.zapier.com/start/ **Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface. NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API. NLA offers both API Key and OAuth for signing NLA API requests. 1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com) 2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/start/) for user-facing oauth developer support. Typically, you'd use SequentialChain, here's a basic example: 1. Use NLA to find an email in Gmail 2. Use LLMChain to generate a draft reply to (1)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
a694b319b5b6-1
2. Use LLMChain to generate a draft reply to (1)
3. Use NLA to send the draft reply (2) to someone in Slack via direct message
In code, below:
```python
import os
# get from https://platform.openai.com/
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
# get from https://nla.zapier.com/docs/authentication/
os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "")
from langchain.llms import OpenAI
from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper
## step 0. expose gmail 'find email' and slack 'send channel message' actions
# first go here, log in, expose (enable) the two actions:
# https://nla.zapier.com/demo/start
# -- for this example, can leave all fields "Have AI guess"
# in an oauth scenario, you'd get your own <provider> id (instead of 'demo')
# which you route your users through first
llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
## To leverage OAuth you may pass the value `nla_oauth_access_token` to
## the ZapierNLAWrapper. If you do this there is no need to initialize
## the ZAPIER_NLA_API_KEY env variable
# zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE")
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
toolkit.get_tools(),
llm,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
a694b319b5b6-2
agent = initialize_agent(
toolkit.get_tools(),
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
agent.run(("Summarize the last email I received regarding Silicon Valley Bank. "
"Send the summary to the #test-zapier channel in Slack."))
```
"""
from typing import Any, Dict, Optional
from pydantic import Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain.utilities.zapier import ZapierNLAWrapper
[docs]class ZapierNLARunAction(BaseTool):
"""
Args:
action_id: a specific action ID (from list actions) of the action to execute
(the set api_key must be associated with the action owner)
instructions: a natural language instruction string for using the action
(e.g. "get the latest email from Mike Knoop" for "Gmail: find email" action)
params: a dict, optional. Any params provided will *override* AI guesses
from `instructions` (see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper)
action_id: str
params: Optional[dict] = None
base_prompt: str = BASE_ZAPIER_TOOL_PROMPT
zapier_description: str
params_schema: Dict[str, str] = Field(default_factory=dict)
name = ""
description = ""
https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
a694b319b5b6-3
name = "" description = "" [docs] @root_validator def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]: zapier_description = values["zapier_description"] params_schema = values["params_schema"] if "instructions" in params_schema: del params_schema["instructions"] # Ensure base prompt (if overrided) contains necessary input fields necessary_fields = {"{zapier_description}", "{params}"} if not all(field in values["base_prompt"] for field in necessary_fields): raise ValueError( "Your custom base Zapier prompt must contain input fields for " "{zapier_description} and {params}." ) values["name"] = zapier_description values["description"] = values["base_prompt"].format( zapier_description=zapier_description, params=str(list(params_schema.keys())), ) return values def _run( self, instructions: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.run_as_str(self.action_id, instructions, self.params) async def _arun( self, instructions: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return await self.api_wrapper.arun_as_str( self.action_id, instructions, self.params, ) ZapierNLARunAction.__doc__ = (
https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
a694b319b5b6-4
) ZapierNLARunAction.__doc__ = ( ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore ) # other useful actions [docs]class ZapierNLAListActions(BaseTool): """ Args: None """ name = "ZapierNLA_list_actions" description = BASE_ZAPIER_TOOL_PROMPT + ( "This tool returns a list of the user's exposed actions." ) api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) def _run( self, _: str = "", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.list_as_str() async def _arun( self, _: str = "", run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return await self.api_wrapper.alist_as_str() ZapierNLAListActions.__doc__ = ( ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore )
https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
1d38c1a54394-0
Source code for langchain.tools.bing_search.tool
"""Tool for the Bing search API."""
from typing import Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.bing_search import BingSearchAPIWrapper
[docs]class BingSearchRun(BaseTool):
"""Tool that adds the capability to query the Bing search API."""
name = "bing_search"
description = (
"A wrapper around Bing Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
api_wrapper: BingSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
[docs]class BingSearchResults(BaseTool):
"""Tool that queries the Bing Search API and returns the results as JSON."""
name = "Bing Search Results JSON"
description = (
"A wrapper around Bing Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query. Output is a JSON array of the query results."
)
num_results: int = 4
api_wrapper: BingSearchAPIWrapper
def _run(
self,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/bing_search/tool.html
1d38c1a54394-1
api_wrapper: BingSearchAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.results(query, self.num_results)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("BingSearchResults does not support async")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/bing_search/tool.html
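A hedged sketch of wiring the search tool above. BingSearchAPIWrapper is expected to read its subscription key and endpoint from the environment; the variable names below follow the wrapper's documented configuration and the placeholder values are assumptions:

import os
from langchain.utilities.bing_search import BingSearchAPIWrapper

os.environ["BING_SUBSCRIPTION_KEY"] = "<your-subscription-key>"
os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search"

search = BingSearchRun(api_wrapper=BingSearchAPIWrapper())
print(search.run("current weather in Paris"))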
ef1abdf64c5b-0
Source code for langchain.tools.python.tool
"""A tool for running python code in a REPL."""
import ast
import re
import sys
from contextlib import redirect_stdout
from io import StringIO
from typing import Any, Dict, Optional
from pydantic import Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities import PythonREPL
def _get_default_python_repl() -> PythonREPL:
return PythonREPL(_globals=globals(), _locals=None)
[docs]def sanitize_input(query: str) -> str:
"""Sanitize input to the Python REPL.
Removes leading/trailing whitespace, backticks, and a leading "python"
keyword (in case the LLM mistakes the Python console for a terminal).
Args:
query: The query to sanitize
Returns:
str: The sanitized query
"""
# Removes `, whitespace & python from start
query = re.sub(r"^(\s|`)*(?i:python)?\s*", "", query)
# Removes whitespace & ` from end
query = re.sub(r"(\s|`)*$", "", query)
return query
[docs]class PythonREPLTool(BaseTool):
"""A tool for running python code in a REPL."""
name = "Python_REPL"
description = (
"A Python shell. Use this to execute python commands. "
"Input should be a valid python command. "
"If you want to see the output of a value, you should print it out "
"with `print(...)`."
)
python_repl: PythonREPL = Field(default_factory=_get_default_python_repl)
sanitize_input: bool = True
def _run(
https://api.python.langchain.com/en/latest/_modules/langchain/tools/python/tool.html
ef1abdf64c5b-1
sanitize_input: bool = True
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Any:
"""Use the tool."""
if self.sanitize_input:
query = sanitize_input(query)
return self.python_repl.run(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> Any:
"""Use the tool asynchronously."""
raise NotImplementedError("PythonREPLTool does not support async")
[docs]class PythonAstREPLTool(BaseTool):
"""A tool for running python code in a REPL."""
name = "python_repl_ast"
description = (
"A Python shell. Use this to execute python commands. "
"Input should be a valid python command. "
"When using this tool, sometimes output is abbreviated - "
"make sure it does not look abbreviated before using it in your answer."
)
globals: Optional[Dict] = Field(default_factory=dict)
locals: Optional[Dict] = Field(default_factory=dict)
sanitize_input: bool = True
[docs] @root_validator(pre=True)
def validate_python_version(cls, values: Dict) -> Dict:
"""Validate that the Python version is 3.9 or higher."""
if sys.version_info < (3, 9):
raise ValueError(
"This tool relies on Python 3.9 or higher "
"(as it uses new functionality in the `ast` module); "
f"you have Python version: {sys.version}"
)
return values
def _run(
self,
query: str,
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            if self.sanitize_input:
                query = sanitize_input(query)
            tree = ast.parse(query)
            # Execute everything except the last statement, then handle the
            # last statement separately so its value can be returned.
            module = ast.Module(tree.body[:-1], type_ignores=[])
            exec(ast.unparse(module), self.globals, self.locals)  # type: ignore
            module_end = ast.Module(tree.body[-1:], type_ignores=[])
            module_end_str = ast.unparse(module_end)  # type: ignore
            io_buffer = StringIO()
            try:
                with redirect_stdout(io_buffer):
                    ret = eval(module_end_str, self.globals, self.locals)
                    if ret is None:
                        return io_buffer.getvalue()
                    else:
                        return ret
            except Exception:
                # The last statement was not an expression; fall back to exec
                # and return whatever was printed.
                with redirect_stdout(io_buffer):
                    exec(module_end_str, self.globals, self.locals)
                return io_buffer.getvalue()
        except Exception as e:
            return "{}: {}".format(type(e).__name__, str(e))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("PythonAstREPLTool does not support async")
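To make the difference between the two REPL tools concrete, a short hedged sketch (the input strings are illustrative): PythonREPLTool only returns what the code prints, while PythonAstREPLTool evaluates the last statement and returns its value directly.

from langchain.tools.python.tool import PythonAstREPLTool, PythonREPLTool

# Output must be printed to be captured.
repl_tool = PythonREPLTool()
print(repl_tool.run("print(2 + 2)"))  # -> "4\n"

# The last expression is evaluated and its value returned.
ast_tool = PythonAstREPLTool()
print(ast_tool.run("x = 21\nx * 2"))  # -> 42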
https://api.python.langchain.com/en/latest/_modules/langchain/tools/python/tool.html
Source code for langchain.tools.arxiv.tool

"""Tool for the Arxiv API."""

from typing import Optional

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.arxiv import ArxivAPIWrapper


[docs]class ArxivQueryRun(BaseTool):
    """Tool that adds the capability to search using the Arxiv API."""

    name = "arxiv"
    description = (
        "A wrapper around Arxiv.org "
        "Useful for when you need to answer questions about Physics, Mathematics, "
        "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, "
        "Electrical Engineering, and Economics "
        "from scientific articles on arxiv.org. "
        "Input should be a search query."
    )
    api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Arxiv tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Arxiv tool asynchronously."""
        raise NotImplementedError("ArxivAPIWrapper does not support async")
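A minimal usage sketch (assuming the optional arxiv package that ArxivAPIWrapper depends on is installed; the query is illustrative):

from langchain.tools.arxiv.tool import ArxivQueryRun

# The default_factory builds an ArxivAPIWrapper with its default settings.
tool = ArxivQueryRun()
print(tool.run("attention is all you need"))  # summaries of top matching papers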
https://api.python.langchain.com/en/latest/_modules/langchain/tools/arxiv/tool.html
Source code for langchain.tools.vectorstore.tool

"""Tools for interacting with vectorstores."""

import json
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain
from langchain.llms.openai import OpenAI
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStore


[docs]class BaseVectorStoreTool(BaseModel):
    """Base class for tools that use a VectorStore."""

    vectorstore: VectorStore = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))

    [docs] class Config(BaseTool.Config):
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True


def _create_description_from_template(values: Dict[str, Any]) -> Dict[str, Any]:
    values["description"] = values["template"].format(name=values["name"])
    return values


[docs]class VectorStoreQATool(BaseVectorStoreTool, BaseTool):
    """Tool for the VectorDBQA chain. To be initialized with name and chain."""

    [docs] @staticmethod
    def get_description(name: str, description: str) -> str:
        template: str = (
            "Useful for when you need to answer questions about {name}. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            "Input should be a fully formed question."
        )
        return template.format(name=name, description=description)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        chain = RetrievalQA.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        return chain.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("VectorStoreQATool does not support async")


[docs]class VectorStoreQAWithSourcesTool(BaseVectorStoreTool, BaseTool):
    """Tool for the VectorDBQAWithSources chain."""

    [docs] @staticmethod
    def get_description(name: str, description: str) -> str:
        template: str = (
            "Useful for when you need to answer questions about {name} and the "
            "sources used to construct the answer. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            "Input should be a fully formed question. "
            "Output is a json serialized dictionary with keys `answer` and `sources`. "
            "Only use this tool if the user explicitly asks for sources."
        )
        return template.format(name=name, description=description)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        chain = RetrievalQAWithSourcesChain.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        return json.dumps(chain({chain.question_key: query}, return_only_outputs=True))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("VectorStoreQAWithSourcesTool does not support async")
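A hedged usage sketch for VectorStoreQATool (the FAISS store, OpenAI embeddings, and sample text below are stand-ins; any VectorStore and BaseLanguageModel work, and an OPENAI_API_KEY plus the faiss package are assumed for the defaults used here):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.tools.vectorstore.tool import VectorStoreQATool
from langchain.vectorstores import FAISS

# Build a toy store; in practice this would hold your real documents.
store = FAISS.from_texts(
    ["LangChain provides tools for querying vector stores."],
    OpenAIEmbeddings(),
)
tool = VectorStoreQATool(
    name="langchain_docs",
    description=VectorStoreQATool.get_description(
        "langchain_docs", "the LangChain documentation"
    ),
    vectorstore=store,
)
print(tool.run("What does LangChain provide?"))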
https://api.python.langchain.com/en/latest/_modules/langchain/tools/vectorstore/tool.html
Source code for langchain.tools.gmail.utils

"""Gmail tool utils."""

from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING, List, Optional, Tuple

if TYPE_CHECKING:
    from google.auth.transport.requests import Request
    from google.oauth2.credentials import Credentials
    from google_auth_oauthlib.flow import InstalledAppFlow
    from googleapiclient.discovery import Resource
    from googleapiclient.discovery import build as build_resource

logger = logging.getLogger(__name__)


[docs]def import_google() -> Tuple[Request, Credentials]:
    """Import google libraries.

    Returns:
        Tuple[Request, Credentials]: Request and Credentials classes.
    """
    # google-auth-httplib2
    try:
        from google.auth.transport.requests import Request  # noqa: F401
        from google.oauth2.credentials import Credentials  # noqa: F401
    except ImportError:
        raise ImportError(
            "You need to install google-auth-httplib2 to use this toolkit. "
            "Try running pip install --upgrade google-auth-httplib2"
        )
    return Request, Credentials


[docs]def import_installed_app_flow() -> InstalledAppFlow:
    """Import InstalledAppFlow class.

    Returns:
        InstalledAppFlow: InstalledAppFlow class.
    """
    try:
        from google_auth_oauthlib.flow import InstalledAppFlow
    except ImportError:
        raise ValueError(
            "You need to install google-auth-oauthlib to use this toolkit. "
            "Try running pip install --upgrade google-auth-oauthlib"
        )
    return InstalledAppFlow


[docs]def import_googleapiclient_resource_builder() -> build_resource:
"""Import googleapiclient.discovery.build function. Returns: build_resource: googleapiclient.discovery.build function. """ try: from googleapiclient.discovery import build except ImportError: raise ValueError( "You need to install googleapiclient to use this toolkit. " "Try running pip install --upgrade google-api-python-client" ) return build DEFAULT_SCOPES = ["https://mail.google.com/"] DEFAULT_CREDS_TOKEN_FILE = "token.json" DEFAULT_CLIENT_SECRETS_FILE = "credentials.json" [docs]def get_gmail_credentials( token_file: Optional[str] = None, client_secrets_file: Optional[str] = None, scopes: Optional[List[str]] = None, ) -> Credentials: """Get credentials.""" # From https://developers.google.com/gmail/api/quickstart/python Request, Credentials = import_google() InstalledAppFlow = import_installed_app_flow() creds = None scopes = scopes or DEFAULT_SCOPES token_file = token_file or DEFAULT_CREDS_TOKEN_FILE client_secrets_file = client_secrets_file or DEFAULT_CLIENT_SECRETS_FILE # The file token.json stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists(token_file): creds = Credentials.from_authorized_user_file(token_file, scopes) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else:
            creds.refresh(Request())
        else:
            # https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application  # noqa
            flow = InstalledAppFlow.from_client_secrets_file(
                client_secrets_file, scopes
            )
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(token_file, "w") as token:
            token.write(creds.to_json())
    return creds


[docs]def build_resource_service(
    credentials: Optional[Credentials] = None,
    service_name: str = "gmail",
    service_version: str = "v1",
) -> Resource:
    """Build a Gmail service."""
    credentials = credentials or get_gmail_credentials()
    builder = import_googleapiclient_resource_builder()
    return builder(service_name, service_version, credentials=credentials)


[docs]def clean_email_body(body: str) -> str:
    """Clean email body."""
    try:
        from bs4 import BeautifulSoup

        try:
            soup = BeautifulSoup(str(body), "html.parser")
            body = soup.get_text()
            return str(body)
        except Exception as e:
            logger.error(e)
            return str(body)
    except ImportError:
        logger.warning("BeautifulSoup not installed. Skipping cleaning.")
        return str(body)
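Putting the helpers together, a hedged end-to-end sketch (the file names are the defaults defined above; the first run opens a browser window for the OAuth consent flow):

from langchain.tools.gmail.utils import build_resource_service, get_gmail_credentials

creds = get_gmail_credentials(
    token_file="token.json",
    client_secrets_file="credentials.json",
    scopes=["https://mail.google.com/"],
)
api_resource = build_resource_service(credentials=creds)

# Smoke test: fetch the authenticated user's profile.
profile = api_resource.users().getProfile(userId="me").execute()
print(profile["emailAddress"])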
https://api.python.langchain.com/en/latest/_modules/langchain/tools/gmail/utils.html
Source code for langchain.tools.gmail.search

import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body


[docs]class Resource(str, Enum):
    """Enumerator of Resources to search."""

    THREADS = "threads"
    MESSAGES = "messages"


[docs]class SearchArgsSchema(BaseModel):
    # From https://support.google.com/mail/answer/7190?hl=en
    query: str = Field(
        ...,
        description="The Gmail query. Example filters include from:sender,"
        " to:recipient, subject:subject, -filtered_term,"
        " in:folder, is:important|read|starred, after:year/mo/date, "
        "before:year/mo/date, label:label_name"
        ' "exact phrase".'
        " Search newer/older than using d (day), m (month), and y (year): "
        "newer_than:2d, older_than:1y."
        " Attachments with extension example: filename:pdf. Multiple term"
        " matching example: from:amy OR from:david.",
    )
    resource: Resource = Field(
        default=Resource.MESSAGES,
        description="Whether to search for threads or messages.",
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )


[docs]class GmailSearch(GmailBaseTool):
name: str = "search_gmail" description: str = ( "Use this tool to search for email messages or threads." " The input must be a valid Gmail query." " The output is a JSON list of the requested resource." ) args_schema: Type[SearchArgsSchema] = SearchArgsSchema def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Add the thread message snippets to the thread results results = [] for thread in threads: thread_id = thread["id"] thread_data = ( self.api_resource.users() .threads() .get(userId="me", id=thread_id) .execute() ) messages = thread_data["messages"] thread["messages"] = [] for message in messages: snippet = message["snippet"] thread["messages"].append({"snippet": snippet, "id": message["id"]}) results.append(thread) return results def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: results = [] for message in messages: message_id = message["id"] message_data = ( self.api_resource.users() .messages() .get(userId="me", format="raw", id=message_id) .execute() ) raw_message = base64.urlsafe_b64decode(message_data["raw"]) email_msg = email.message_from_bytes(raw_message) subject = email_msg["Subject"] sender = email_msg["From"] message_body = email_msg.get_payload() body = clean_email_body(message_body) results.append( {
            body = clean_email_body(message_body)
            results.append(
                {
                    "id": message["id"],
                    "threadId": message_data["threadId"],
                    "snippet": message_data["snippet"],
                    "body": body,
                    "subject": subject,
                    "sender": sender,
                }
            )
        return results

    def _run(
        self,
        query: str,
        resource: Resource = Resource.MESSAGES,
        max_results: int = 10,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        """Run the tool."""
        users = self.api_resource.users()
        # Threads and messages live in different Gmail API collections; list
        # from the one that matches the requested resource so thread searches
        # do not silently return an empty list.
        collection = (
            users.threads() if resource == Resource.THREADS else users.messages()
        )
        results = (
            collection.list(userId="me", q=query, maxResults=max_results)
            .execute()
            .get(resource.value, [])
        )
        if resource == Resource.THREADS:
            return self._parse_threads(results)
        elif resource == Resource.MESSAGES:
            return self._parse_messages(results)
        else:
            raise NotImplementedError(f"Resource of type {resource} not implemented.")

    async def _arun(
        self,
        query: str,
        resource: Resource = Resource.MESSAGES,
        max_results: int = 10,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        """Run the tool."""
        raise NotImplementedError
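A hedged usage sketch building on the credential helpers from langchain.tools.gmail.utils (the query is illustrative, and run is given a dict here because the tool takes multiple arguments):

from langchain.tools.gmail.search import GmailSearch, Resource
from langchain.tools.gmail.utils import build_resource_service, get_gmail_credentials

api_resource = build_resource_service(credentials=get_gmail_credentials())
search = GmailSearch(api_resource=api_resource)

# Search messages from the last week; each hit is a parsed message dict.
hits = search.run(
    {"query": "newer_than:7d", "resource": Resource.MESSAGES, "max_results": 5}
)
for hit in hits:
    print(hit["subject"], "-", hit["snippet"])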
https://api.python.langchain.com/en/latest/_modules/langchain/tools/gmail/search.html
Source code for langchain.tools.gmail.get_thread

from typing import Dict, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.gmail.base import GmailBaseTool


[docs]class GetThreadSchema(BaseModel):
    # From https://support.google.com/mail/answer/7190?hl=en
    thread_id: str = Field(
        ...,
        description="The thread ID.",
    )


[docs]class GmailGetThread(GmailBaseTool):
    name: str = "get_gmail_thread"
    description: str = (
        "Use this tool to fetch an entire email thread."
        " The input must be a valid thread ID."
        " The output is the thread data with the id and snippet of each message."
    )
    args_schema: Type[GetThreadSchema] = GetThreadSchema

    def _run(
        self,
        thread_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Run the tool."""
        query = self.api_resource.users().threads().get(userId="me", id=thread_id)
        thread_data = query.execute()
        if not isinstance(thread_data, dict):
            raise ValueError("The output of the query must be a dict.")
        messages = thread_data["messages"]
        thread_data["messages"] = []
        keys_to_keep = ["id", "snippet"]
        # TODO: Parse body.
        for message in messages:
            thread_data["messages"].append(
                {k: message[k] for k in keys_to_keep if k in message}
            )
        return thread_data

    async def _arun(
        self,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/gmail/get_thread.html