1d81dac07857-4
    async def _arun(
        self,
        url: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Run the tool asynchronously."""
        return await self.requests_wrapper.adelete(_clean_url(url))
https://api.python.langchain.com/en/latest/_modules/langchain/tools/requests/tool.html
d41630dabb12-0
Source code for langchain.tools.brave_search.tool

from __future__ import annotations

from typing import Any, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.brave_search import BraveSearchWrapper


class BraveSearch(BaseTool):
    name = "brave_search"
    description = (
        "a search engine. "
        "useful for when you need to answer questions about current events."
        " input should be a search query."
    )
    search_wrapper: BraveSearchWrapper

    @classmethod
    def from_api_key(
        cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
    ) -> BraveSearch:
        wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {})
        return cls(search_wrapper=wrapper, **kwargs)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.search_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("BraveSearch does not support async")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/brave_search/tool.html
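A minimal usage sketch for the tool above, assuming a valid Brave Search API key; the key string and search_kwargs values are placeholders:

from langchain.tools.brave_search.tool import BraveSearch

# "<brave-api-key>" is a placeholder; supply your own key.
tool = BraveSearch.from_api_key(api_key="<brave-api-key>", search_kwargs={"count": 3})
print(tool.run("latest developments in fusion energy"))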
020fce48dd9e-0
Source code for langchain.tools.sleep.tool

"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


class SleepInput(BaseModel):
    """Input for SleepTool."""

    sleep_time: int = Field(..., description="Time to sleep in seconds")


class SleepTool(BaseTool):
    """Tool that adds the capability to sleep."""

    name = "sleep"
    args_schema: Type[BaseModel] = SleepInput
    description = "Make agent sleep for a specified number of seconds."

    def _run(
        self,
        sleep_time: int,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Sleep tool."""
        sleep(sleep_time)
        return f"Agent slept for {sleep_time} seconds."

    async def _arun(
        self,
        sleep_time: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the sleep tool asynchronously."""
        await asleep(sleep_time)
        return f"Agent slept for {sleep_time} seconds."
https://api.python.langchain.com/en/latest/_modules/langchain/tools/sleep/tool.html
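A quick sketch of invoking the sleep tool directly, passing the argument as a dict matching SleepInput:

from langchain.tools.sleep.tool import SleepTool

tool = SleepTool()
# Blocks for about 2 seconds, then returns the confirmation string.
print(tool.run({"sleep_time": 2}))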
4a63ccd5f4e7-0
Source code for langchain.tools.sql_database.tool

# flake8: noqa
"""Tools for interacting with a SQL database."""
from typing import Any, Dict, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.sql_database import SQLDatabase
from langchain.tools.base import BaseTool
from langchain.tools.sql_database.prompt import QUERY_CHECKER


class BaseSQLDatabaseTool(BaseModel):
    """Base tool for interacting with a SQL database."""

    db: SQLDatabase = Field(exclude=True)

    # Override BaseTool.Config to appease mypy
    # See https://github.com/pydantic/pydantic/issues/4173
    class Config(BaseTool.Config):
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True
        extra = Extra.forbid


class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool):
    """Tool for querying a SQL database."""

    name = "sql_db_query"
    description = """
    Input to this tool is a detailed and correct SQL query, output is a result from the database.
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Execute the query, return the results or an error message."""
https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html
4a63ccd5f4e7-1
"""Execute the query, return the results or an error message.""" return self.db.run_no_throw(query) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: raise NotImplementedError("QuerySqlDbTool does not support async") [docs]class InfoSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool): """Tool for getting metadata about a SQL database.""" name = "sql_db_schema" description = """ Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Example Input: "table1, table2, table3" """ def _run( self, table_names: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Get the schema for tables in a comma-separated list.""" return self.db.get_table_info_no_throw(table_names.split(", ")) async def _arun( self, table_name: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: raise NotImplementedError("SchemaSqlDbTool does not support async") [docs]class ListSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool): """Tool for getting tables names.""" name = "sql_db_list_tables" description = "Input is an empty string, output is a comma separated list of tables in the database." def _run( self, tool_input: str = "", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str:
https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html
4a63ccd5f4e7-2
    ) -> str:
        """Get a comma-separated list of the usable table names."""
        return ", ".join(self.db.get_usable_table_names())

    async def _arun(
        self,
        tool_input: str = "",
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("ListTablesSqlDbTool does not support async")


class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool):
    """Use an LLM to check if a query is correct.
    Adapted from https://www.patterns.app/blog/2023/01/18/crunchbot-sql-analyst-gpt/"""

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    llm_chain: LLMChain = Field(init=False)
    name = "sql_db_query_checker"
    description = """
    Use this tool to double check if your query is correct before executing it.
    Always use this tool before executing a query with query_sql_db!
    """

    @root_validator(pre=True)
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "llm_chain" not in values:
            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),
                prompt=PromptTemplate(
                    template=QUERY_CHECKER, input_variables=["query", "dialect"]
                ),
            )
        if values["llm_chain"].prompt.input_variables != ["query", "dialect"]:
            raise ValueError(
                "LLM chain for QueryCheckerTool must have input variables "
                "['query', 'dialect']"
            )
        return values

    def _run(
        self,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html
4a63ccd5f4e7-3
            )
        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the LLM to check the query."""
        return self.llm_chain.predict(query=query, dialect=self.db.dialect)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return await self.llm_chain.apredict(query=query, dialect=self.db.dialect)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html
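A minimal sketch of wiring two of the tools above to a database; the SQLite file path is a placeholder:

from langchain.sql_database import SQLDatabase
from langchain.tools.sql_database.tool import ListSQLDatabaseTool, QuerySQLDataBaseTool

db = SQLDatabase.from_uri("sqlite:///example.db")   # placeholder database path
print(ListSQLDatabaseTool(db=db).run(""))           # comma-separated table names
print(QuerySQLDataBaseTool(db=db).run("SELECT 1"))  # query results or error text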
e50ba1e48ae8-0
Source code for langchain.tools.jira.tool

"""
This tool allows agents to interact with the atlassian-python-api library
and operate on a Jira instance. For more information on the
atlassian-python-api library, see https://atlassian-python-api.readthedocs.io/jira.html

To use this tool, you must first set the following environment variables:
    JIRA_API_TOKEN
    JIRA_USERNAME
    JIRA_INSTANCE_URL

Below is a sample script that uses the Jira tool:

```python
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.llms import OpenAI
from langchain.utilities.jira import JiraAPIWrapper

llm = OpenAI(temperature=0)
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
```
"""
from typing import Optional

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.jira import JiraAPIWrapper


class JiraAction(BaseTool):
    api_wrapper: JiraAPIWrapper = Field(default_factory=JiraAPIWrapper)
    mode: str
    name = ""
    description = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Atlassian Jira API to run an operation."""
https://api.python.langchain.com/en/latest/_modules/langchain/tools/jira/tool.html
e50ba1e48ae8-1
"""Use the Atlassian Jira API to run an operation.""" return self.api_wrapper.run(self.mode, instructions) async def _arun( self, _: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Atlassian Jira API to run an operation.""" raise NotImplementedError("JiraAction does not support async")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/jira/tool.html
998581c9d35c-0
Source code for langchain.tools.office365.utils

"""O365 tool utils."""
from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from O365 import Account

logger = logging.getLogger(__name__)


def clean_body(body: str) -> str:
    """Clean body of a message or event."""
    try:
        from bs4 import BeautifulSoup

        try:
            # Remove HTML
            soup = BeautifulSoup(str(body), "html.parser")
            body = soup.get_text()

            # Remove return characters
            body = "".join(body.splitlines())

            # Remove extra spaces
            body = " ".join(body.split())

            return str(body)
        except Exception:
            return str(body)
    except ImportError:
        return str(body)


def authenticate() -> Account:
    """Authenticate using the Microsoft Graph API."""
    try:
        from O365 import Account
    except ImportError as e:
        raise ImportError(
            "Cannot import O365. Please install the package with `pip install O365`."
        ) from e

    if "CLIENT_ID" in os.environ and "CLIENT_SECRET" in os.environ:
        client_id = os.environ["CLIENT_ID"]
        client_secret = os.environ["CLIENT_SECRET"]
        credentials = (client_id, client_secret)
    else:
        logger.error(
            "Error: The CLIENT_ID and CLIENT_SECRET environmental variables have not "
            "been set. Visit the following link on how to acquire these authorization "
            "tokens: https://learn.microsoft.com/en-us/graph/auth/"
        )
        return None

    account = Account(credentials)

    if account.is_authenticated is False:
        if not account.authenticate(
            scopes=[
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html
998581c9d35c-1
    if account.is_authenticated is False:
        if not account.authenticate(
            scopes=[
                "https://graph.microsoft.com/Mail.ReadWrite",
                "https://graph.microsoft.com/Mail.Send",
                "https://graph.microsoft.com/Calendars.ReadWrite",
                "https://graph.microsoft.com/MailboxSettings.ReadWrite",
            ]
        ):
            print("Error: Could not authenticate")
            return None
        else:
            return account
    else:
        return account
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html
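A minimal setup sketch for the authenticate() helper above; the CLIENT_ID and CLIENT_SECRET values are placeholders for an Azure app registration:

import os

# Placeholder credentials from your Azure app registration.
os.environ["CLIENT_ID"] = "<azure-app-client-id>"
os.environ["CLIENT_SECRET"] = "<azure-app-client-secret>"

from langchain.tools.office365.utils import authenticate

account = authenticate()  # triggers the O365 console auth flow on first run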
dff4a8d835e4-0
Source code for langchain.tools.office365.create_draft_message

from typing import List, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool


class CreateDraftMessageSchema(BaseModel):
    body: str = Field(..., description="The message body to include in the draft.")
    to: List[str] = Field(..., description="The list of recipients.")
    subject: str = Field(..., description="The subject of the message.")
    cc: Optional[List[str]] = Field(None, description="The list of CC recipients.")
    bcc: Optional[List[str]] = Field(None, description="The list of BCC recipients.")


class O365CreateDraftMessage(O365BaseTool):
    name: str = "create_email_draft"
    description: str = (
        "Use this tool to create a draft email with the provided message fields."
    )
    args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema

    def _run(
        self,
        body: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        # Get mailbox object
        mailbox = self.account.mailbox()
        message = mailbox.new_message()

        # Assign message values
        message.body = body
        message.subject = subject
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html
dff4a8d835e4-1
        # Assign message values
        message.body = body
        message.subject = subject
        message.to.add(to)
        if cc is not None:
            message.cc.add(cc)
        if bcc is not None:
            message.bcc.add(bcc)

        message.save_draft()

        output = "Draft created: " + str(message)
        return output

    async def _arun(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError(f"The tool {self.name} does not support async yet.")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html
0446343462b2-0
Source code for langchain.tools.office365.send_event

"""Util that sends calendar events in Office 365.

Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool


class SendEventSchema(BaseModel):
    """Input for SendEvent Tool."""

    body: str = Field(..., description="The message body to include in the event.")
    attendees: List[str] = Field(..., description="The list of attendees for the event.")
    subject: str = Field(..., description="The subject of the event.")
    start_datetime: str = Field(
        description="The start datetime for the event in the following format: "
        'YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
        "components, and the time zone offset is specified as ±hh:mm. "
        'For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
        "2023, at 10:30 AM in a time zone with a positive offset of 3 "
        "hours from Coordinated Universal Time (UTC).",
    )
    end_datetime: str = Field(
        description="The end datetime for the event in the following format: "
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html
0446343462b2-1
description=" The end datetime for the event in the following format: " ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time ' " components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC).", ) [docs]class O365SendEvent(O365BaseTool): name: str = "send_event" description: str = ( "Use this tool to create and send an event with the provided event fields." ) args_schema: Type[SendEventSchema] = SendEventSchema def _run( self, body: str, attendees: List[str], subject: str, start_datetime: str, end_datetime: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: # Get calendar object schedule = self.account.schedule() calendar = schedule.get_default_calendar() event = calendar.new_event() event.body = body event.subject = subject event.start = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z") event.end = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z") for attendee in attendees: event.attendees.add(attendee) # TO-DO: Look into PytzUsageWarning event.save()
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html
0446343462b2-2
        # TO-DO: Look into PytzUsageWarning
        event.save()

        output = "Event sent: " + str(event)
        return output

    async def _arun(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError(f"The tool {self.name} does not support async yet.")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html
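A usage sketch for the event tool above; recipients and times are placeholders, and authentication follows the CLIENT_ID/CLIENT_SECRET flow shown earlier:

from langchain.tools.office365.send_event import O365SendEvent

tool = O365SendEvent()
print(
    tool.run(
        {
            "subject": "Weekly sync",
            "body": "Agenda: project status.",
            "attendees": ["colleague@example.com"],
            "start_datetime": "2023-06-09T10:30:00+03:00",
            "end_datetime": "2023-06-09T11:00:00+03:00",
        }
    )
)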
52f14fc1be3f-0
Source code for langchain.tools.office365.events_search

"""Util that Searches calendar events in Office 365.

Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Extra, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body


class SearchEventsInput(BaseModel):
    """Input for SearchEvents Tool.

    From https://learn.microsoft.com/en-us/graph/search-query-parameter
    """

    start_datetime: str = Field(
        description=(
            "The start datetime for the search query in the following format: "
            'YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
            "components, and the time zone offset is specified as ±hh:mm. "
            'For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
            "2023, at 10:30 AM in a time zone with a positive offset of 3 "
            "hours from Coordinated Universal Time (UTC)."
        )
    )
    end_datetime: str = Field(
        description=(
            "The end datetime for the search query in the following format: "
            'YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
            "components, and the time zone offset is specified as ±hh:mm. "
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
52f14fc1be3f-1
" components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC)." ) ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) truncate: bool = Field( default=True, description=( "Whether the event's body is trucated to meet token number limits. Set to " "False for searches that will retrieve very few results, otherwise, set to " "True." ), ) [docs]class O365SearchEvents(O365BaseTool): """Class for searching calendar events in Office 365 Free, but setup is required """ name: str = "events_search" args_schema: Type[BaseModel] = SearchEventsInput description: str = ( " Use this tool to search for the user's calendar events." " The input must be the start and end datetimes for the search query." " The output is a JSON list of all the events in the user's calendar" " between the start and end times. You can assume that the user can " " not schedule any meeting over existing meetings, and that the user " "is busy during meetings. Any times without events are free for the user. " ) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _run(
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
52f14fc1be3f-2
        extra = Extra.forbid

    def _run(
        self,
        start_datetime: str,
        end_datetime: str,
        max_results: int = 10,
        truncate: bool = True,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        TRUNCATE_LIMIT = 150

        # Get calendar object
        schedule = self.account.schedule()
        calendar = schedule.get_default_calendar()

        # Process the date range parameters
        start_datetime_query = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z")
        end_datetime_query = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z")

        # Run the query
        q = calendar.new_query("start").greater_equal(start_datetime_query)
        q.chain("and").on_attribute("end").less_equal(end_datetime_query)
        events = calendar.get_events(query=q, include_recurring=True, limit=max_results)

        # Generate output dict
        output_events = []
        for event in events:
            output_event = {}
            output_event["organizer"] = event.organizer
            output_event["subject"] = event.subject

            if truncate:
                output_event["body"] = clean_body(event.body)[:TRUNCATE_LIMIT]
            else:
                output_event["body"] = clean_body(event.body)

            # Get the time zone from the search parameters
            time_zone = start_datetime_query.tzinfo

            # Assign the datetimes in the search time zone
            output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
                "%Y-%m-%dT%H:%M:%S%z"
            )
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
52f14fc1be3f-3
"%Y-%m-%dT%H:%M:%S%z" ) output_event["end_datetime"] = event.end.astimezone(time_zone).strftime( "%Y-%m-%dT%H:%M:%S%z" ) output_event["modified_date"] = event.modified.astimezone( time_zone ).strftime("%Y-%m-%dT%H:%M:%S%z") output_events.append(output_event) return output_events async def _arun( self, query: str, max_results: int = 10, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" raise NotImplementedError
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
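A usage sketch for the events search tool above; the datetimes are placeholders in the ±hh:mm offset format the schema requires:

from langchain.tools.office365.events_search import O365SearchEvents

tool = O365SearchEvents()
events = tool.run(
    {
        "start_datetime": "2023-06-09T00:00:00+00:00",
        "end_datetime": "2023-06-10T00:00:00+00:00",
        "max_results": 5,
    }
)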
708d00a0d1e4-0
Source code for langchain.tools.office365.messages_search

"""Util that Searches email messages in Office 365.

Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Extra, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body


class SearchEmailsInput(BaseModel):
    """Input for SearchEmails Tool.

    From https://learn.microsoft.com/en-us/graph/search-query-parameter
    """

    folder: str = Field(
        default=None,
        description=(
            "If the user wants to search in only one folder, the name of the folder. "
            'Default folders are "inbox", "drafts", "sent items", "deleted items", but '
            "users can search custom folders as well."
        ),
    )
    query: str = Field(
        description=(
            "The Microsoft Graph v1.0 $search query. Example filters include "
            "from:sender, to:recipient, subject:subject, "
            "recipients:list_of_recipients, body:excitement, importance:high, "
            "received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
            "sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
            "cc:samanthab@contoso.com, bcc:samanthab@contoso.com, body:excitement date "
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
708d00a0d1e4-1
"range example: received:2023-06-08..2023-06-09 matching example: " "from:amy OR from:david." ) ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) truncate: bool = Field( default=True, description=( "Whether the email body is trucated to meet token number limits. Set to " "False for searches that will retrieve very few results, otherwise, set to " "True" ), ) [docs]class O365SearchEmails(O365BaseTool): """Class for searching email messages in Office 365 Free, but setup is required """ name: str = "messages_search" args_schema: Type[BaseModel] = SearchEmailsInput description: str = ( "Use this tool to search for email messages." " The input must be a valid Microsoft Graph v1.0 $search query." " The output is a JSON list of the requested resource." ) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _run( self, query: str, folder: str = "", max_results: int = 10, truncate: bool = True, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: # Get mailbox object mailbox = self.account.mailbox() # Pull the folder if the user wants to search in a folder if folder != "":
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
708d00a0d1e4-2
if folder != "": mailbox = mailbox.get_folder(folder_name=folder) # Retrieve messages based on query query = mailbox.q().search(query) messages = mailbox.get_messages(limit=max_results, query=query) # Generate output dict output_messages = [] for message in messages: output_message = {} output_message["from"] = message.sender if truncate: output_message["body"] = message.body_preview else: output_message["body"] = clean_body(message.body) output_message["subject"] = message.subject output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z") output_message["to"] = [] for recipient in message.to._recipients: output_message["to"].append(str(recipient)) output_message["cc"] = [] for recipient in message.cc._recipients: output_message["cc"].append(str(recipient)) output_message["bcc"] = [] for recipient in message.bcc._recipients: output_message["bcc"].append(str(recipient)) output_messages.append(output_message) return output_messages async def _arun( self, query: str, max_results: int = 10, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" raise NotImplementedError
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
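A usage sketch for the email search tool above, using a placeholder $search query of the kind listed in the schema description:

from langchain.tools.office365.messages_search import O365SearchEmails

tool = O365SearchEmails()
results = tool.run({"query": "from:amy subject:report", "max_results": 5})
for msg in results:
    print(msg["subject"], msg["date"])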
8ef45f3cc468-0
Source code for langchain.tools.office365.base

"""Base class for Office 365 tools."""
from __future__ import annotations

from typing import TYPE_CHECKING

from pydantic import Field

from langchain.tools.base import BaseTool
from langchain.tools.office365.utils import authenticate

if TYPE_CHECKING:
    from O365 import Account


class O365BaseTool(BaseTool):
    account: Account = Field(default_factory=authenticate)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/base.html
b2e89f17bfef-0
Source code for langchain.tools.office365.send_message

from typing import List, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool


class SendMessageSchema(BaseModel):
    body: str = Field(..., description="The message body to be sent.")
    to: List[str] = Field(..., description="The list of recipients.")
    subject: str = Field(..., description="The subject of the message.")
    cc: Optional[List[str]] = Field(None, description="The list of CC recipients.")
    bcc: Optional[List[str]] = Field(None, description="The list of BCC recipients.")


class O365SendMessage(O365BaseTool):
    name: str = "send_email"
    description: str = (
        "Use this tool to send an email with the provided message fields."
    )
    args_schema: Type[SendMessageSchema] = SendMessageSchema

    def _run(
        self,
        body: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        # Get mailbox object
        mailbox = self.account.mailbox()
        message = mailbox.new_message()

        # Assign message values
        message.body = body
        message.subject = subject
        message.to.add(to)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html
b2e89f17bfef-1
        message.body = body
        message.subject = subject
        message.to.add(to)
        if cc is not None:
            message.cc.add(cc)
        if bcc is not None:
            message.bcc.add(bcc)

        message.send()

        output = "Message sent: " + str(message)
        return output

    async def _arun(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError(f"The tool {self.name} does not support async yet.")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html
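A usage sketch for the send tool above; the recipient address is a placeholder and the account is authenticated via the env-var flow shown earlier:

from langchain.tools.office365.send_message import O365SendMessage

tool = O365SendMessage()
print(
    tool.run(
        {
            "body": "Hello from LangChain.",
            "to": ["recipient@example.com"],
            "subject": "Test message",
        }
    )
)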
304441ecf07d-0
Source code for langchain.tools.pubmed.tool

"""Tool for the PubMed API."""
from typing import Optional

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.pupmed import PubMedAPIWrapper  # sic: the module path is spelled "pupmed" upstream


class PubmedQueryRun(BaseTool):
    """Tool that adds the capability to search using the PubMed API."""

    name = "PubMed"
    description = (
        "A wrapper around PubMed.org. "
        "Useful for when you need to answer questions about medicine, health, "
        "and other biomedical topics from scientific articles on PubMed.org. "
        "Input should be a search query."
    )
    api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the PubMed tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the PubMed tool asynchronously."""
        raise NotImplementedError("PubMedAPIWrapper does not support async")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/pubmed/tool.html
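A quick usage sketch for the PubMed tool above; the query string is illustrative:

from langchain.tools.pubmed.tool import PubmedQueryRun

tool = PubmedQueryRun()
print(tool.run("mRNA vaccine efficacy"))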
8311156d248b-0
Source code for langchain.tools.steamship_image_generation.utils

"""Steamship Utils."""
from __future__ import annotations

import uuid
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from steamship import Block, Steamship


def make_image_public(client: Steamship, block: Block) -> str:
    """Upload a block to a signed URL and return the public URL."""
    try:
        from steamship.data.workspace import SignedUrl
        from steamship.utils.signed_urls import upload_to_signed_url
    except ImportError:
        raise ValueError(
            "The make_image_public function requires the steamship"
            " package to be installed. Please install steamship"
            " with `pip install --upgrade steamship`"
        )

    filepath = str(uuid.uuid4())
    signed_url = (
        client.get_workspace()
        .create_signed_url(
            SignedUrl.Request(
                bucket=SignedUrl.Bucket.PLUGIN_DATA,
                filepath=filepath,
                operation=SignedUrl.Operation.WRITE,
            )
        )
        .signed_url
    )
    read_signed_url = (
        client.get_workspace()
        .create_signed_url(
            SignedUrl.Request(
                bucket=SignedUrl.Bucket.PLUGIN_DATA,
                filepath=filepath,
                operation=SignedUrl.Operation.READ,
            )
        )
        .signed_url
    )
    upload_to_signed_url(signed_url, block.raw())
    return read_signed_url
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/utils.html
8d501f43f946-0
Source code for langchain.tools.steamship_image_generation.tool

"""This tool allows agents to generate images using Steamship.

Steamship offers access to different third party image generation APIs
using a single API key.

Today the following models are supported:
- Dall-E
- Stable Diffusion

To use this tool, you must first set the following environment variable:
    STEAMSHIP_API_KEY
"""
from __future__ import annotations

from enum import Enum
from typing import TYPE_CHECKING, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools import BaseTool
from langchain.tools.steamship_image_generation.utils import make_image_public
from langchain.utils import get_from_dict_or_env

if TYPE_CHECKING:
    pass


class ModelName(str, Enum):
    """Supported Image Models for generation."""

    DALL_E = "dall-e"
    STABLE_DIFFUSION = "stable-diffusion"


SUPPORTED_IMAGE_SIZES = {
    ModelName.DALL_E: ("256x256", "512x512", "1024x1024"),
    ModelName.STABLE_DIFFUSION: ("512x512", "768x768"),
}


class SteamshipImageGenerationTool(BaseTool):
    """Tool used to generate images from a text-prompt."""

    try:
        from steamship import Steamship
    except ImportError:
        pass

    model_name: ModelName
    size: Optional[str] = "512x512"
    steamship: Steamship
    return_urls: Optional[bool] = False

    name = "GenerateImage"
    description = (
        "Useful for when you need to generate an image."
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
8d501f43f946-1
    description = (
        "Useful for when you need to generate an image. "
        "Input: A detailed text-2-image prompt describing an image. "
        "Output: the UUID of a generated image."
    )

    @root_validator(pre=True)
    def validate_size(cls, values: Dict) -> Dict:
        if "size" in values:
            size = values["size"]
            model_name = values["model_name"]
            if size not in SUPPORTED_IMAGE_SIZES[model_name]:
                raise RuntimeError(f"size {size} is not supported by {model_name}")

        return values

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        steamship_api_key = get_from_dict_or_env(
            values, "steamship_api_key", "STEAMSHIP_API_KEY"
        )
        try:
            from steamship import Steamship
        except ImportError:
            raise ImportError(
                "steamship is not installed. "
                "Please install it with `pip install steamship`"
            )
        steamship = Steamship(api_key=steamship_api_key)
        values["steamship"] = steamship
        if "steamship_api_key" in values:
            del values["steamship_api_key"]

        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        image_generator = self.steamship.use_plugin(
            plugin_handle=self.model_name.value, config={"n": 1, "size": self.size}
        )
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
8d501f43f946-2
        )
        task = image_generator.generate(text=query, append_output_to_file=True)
        task.wait()
        blocks = task.output.blocks
        if len(blocks) > 0:
            if self.return_urls:
                return make_image_public(self.steamship, blocks[0])
            else:
                return blocks[0].id

        raise RuntimeError(f"[{self.name}] Tool unable to generate image!")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GenerateImageTool does not support async")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
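A usage sketch for the image tool above; the API key is a placeholder and must be set before construction, since validate_environment reads STEAMSHIP_API_KEY:

import os

os.environ["STEAMSHIP_API_KEY"] = "<steamship-api-key>"  # placeholder

from langchain.tools.steamship_image_generation.tool import (
    ModelName,
    SteamshipImageGenerationTool,
)

tool = SteamshipImageGenerationTool(model_name=ModelName.DALL_E, size="512x512")
image_id = tool.run("A watercolor lighthouse at dusk")  # returns a block UUID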
248ef5718c2b-0
Source code for langchain.tools.powerbi.tool

"""Tools for interacting with a Power BI dataset."""
import logging
from time import perf_counter
from typing import Any, Dict, Optional, Tuple

from pydantic import Field, validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.tools.base import BaseTool
from langchain.tools.powerbi.prompt import (
    BAD_REQUEST_RESPONSE,
    DEFAULT_FEWSHOT_EXAMPLES,
    QUESTION_TO_QUERY,
    RETRY_RESPONSE,
)
from langchain.utilities.powerbi import PowerBIDataset, json_to_md

logger = logging.getLogger(__name__)


class QueryPowerBITool(BaseTool):
    """Tool for querying a Power BI Dataset."""

    name = "query_powerbi"
    description = """
    Input to this tool is a detailed question about the dataset, output is a result from the dataset. It will try to answer the question using the dataset, and if it cannot, it will ask for clarification.

    Example Input: "How many rows are in table1?"
    """  # noqa: E501
    llm_chain: LLMChain
    powerbi: PowerBIDataset = Field(exclude=True)
    template: Optional[str] = QUESTION_TO_QUERY
    examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
    session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)
    max_iterations: int = 5

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @validator("llm_chain")
    def validate_llm_chain_input_variables(  # pylint: disable=E0213
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-1
    def validate_llm_chain_input_variables(  # pylint: disable=E0213
        cls, llm_chain: LLMChain
    ) -> LLMChain:
        """Make sure the LLM chain has the correct input variables."""
        if llm_chain.prompt.input_variables != [
            "tool_input",
            "tables",
            "schemas",
            "examples",
        ]:
            raise ValueError(
                "LLM chain for QueryPowerBITool must have input variables "
                "['tool_input', 'tables', 'schemas', 'examples'], found %s",  # noqa: C0301 E501 # pylint: disable=C0301
                llm_chain.prompt.input_variables,
            )
        return llm_chain

    def _check_cache(self, tool_input: str) -> Optional[str]:
        """Check if the input is present in the cache.

        If the value is a bad request, overwrite with the escalated version;
        if not present, return None."""
        if tool_input not in self.session_cache:
            return None
        return self.session_cache[tool_input]

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Execute the query, return the results or an error message."""
        if cache := self._check_cache(tool_input):
            logger.debug("Found cached result for %s: %s", tool_input, cache)
            return cache

        try:
            logger.info("Running PBI Query Tool with input: %s", tool_input)
            query = self.llm_chain.predict(
                tool_input=tool_input,
                tables=self.powerbi.get_table_names(),
                schemas=self.powerbi.get_schemas(),
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-2
                tables=self.powerbi.get_table_names(),
                schemas=self.powerbi.get_schemas(),
                examples=self.examples,
            )
        except Exception as exc:  # pylint: disable=broad-except
            self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
            return self.session_cache[tool_input]
        if query == "I cannot answer this":
            self.session_cache[tool_input] = query
            return self.session_cache[tool_input]
        logger.info("PBI Query: %s", query)
        start_time = perf_counter()
        pbi_result = self.powerbi.run(command=query)
        end_time = perf_counter()
        logger.debug("PBI Result: %s", pbi_result)
        logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
        result, error = self._parse_output(pbi_result)
        if error is not None and "TokenExpired" in error:
            self.session_cache[
                tool_input
            ] = "Authentication token expired or invalid, please try to reauthenticate."
            return self.session_cache[tool_input]

        iterations = kwargs.get("iterations", 0)
        if error and iterations < self.max_iterations:
            return self._run(
                tool_input=RETRY_RESPONSE.format(
                    tool_input=tool_input, query=query, error=error
                ),
                run_manager=run_manager,
                iterations=iterations + 1,
            )
        self.session_cache[tool_input] = (
            result if result else BAD_REQUEST_RESPONSE.format(error=error)
        )
        return self.session_cache[tool_input]

    async def _arun(
        self,
        tool_input: str,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-3
    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Execute the query, return the results or an error message."""
        if cache := self._check_cache(tool_input):
            logger.debug("Found cached result for %s: %s", tool_input, cache)
            return cache
        try:
            logger.info("Running PBI Query Tool with input: %s", tool_input)
            query = await self.llm_chain.apredict(
                tool_input=tool_input,
                tables=self.powerbi.get_table_names(),
                schemas=self.powerbi.get_schemas(),
                examples=self.examples,
            )
        except Exception as exc:  # pylint: disable=broad-except
            self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
            return self.session_cache[tool_input]
        if query == "I cannot answer this":
            self.session_cache[tool_input] = query
            return self.session_cache[tool_input]
        logger.info("PBI Query: %s", query)
        start_time = perf_counter()
        pbi_result = await self.powerbi.arun(command=query)
        end_time = perf_counter()
        logger.debug("PBI Result: %s", pbi_result)
        logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
        result, error = self._parse_output(pbi_result)
        if error is not None and "TokenExpired" in error:
            self.session_cache[
                tool_input
            ] = "Authentication token expired or invalid, please try to reauthenticate."
            return self.session_cache[tool_input]
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-4
            return self.session_cache[tool_input]

        iterations = kwargs.get("iterations", 0)
        if error and iterations < self.max_iterations:
            return await self._arun(
                tool_input=RETRY_RESPONSE.format(
                    tool_input=tool_input, query=query, error=error
                ),
                run_manager=run_manager,
                iterations=iterations + 1,
            )
        self.session_cache[tool_input] = (
            result if result else BAD_REQUEST_RESPONSE.format(error=error)
        )
        return self.session_cache[tool_input]

    def _parse_output(
        self, pbi_result: Dict[str, Any]
    ) -> Tuple[Optional[str], Optional[str]]:
        """Parse the output of the query to a markdown table."""
        if "results" in pbi_result:
            return json_to_md(pbi_result["results"][0]["tables"][0]["rows"]), None
        if "error" in pbi_result:
            if (
                "pbi.error" in pbi_result["error"]
                and "details" in pbi_result["error"]["pbi.error"]
            ):
                return None, pbi_result["error"]["pbi.error"]["details"][0]["detail"]
            return None, pbi_result["error"]
        return None, "Unknown error"


class InfoPowerBITool(BaseTool):
    """Tool for getting metadata about a PowerBI Dataset."""

    name = "schema_powerbi"
    description = """
    Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.
    Be sure that the tables actually exist by calling list_tables_powerbi first!

    Example Input: "table1, table2, table3"
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-5
Example Input: "table1, table2, table3" """ # noqa: E501 powerbi: PowerBIDataset = Field(exclude=True) [docs] class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _run( self, tool_input: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Get the schema for tables in a comma-separated list.""" return self.powerbi.get_table_info(tool_input.split(", ")) async def _arun( self, tool_input: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: return await self.powerbi.aget_table_info(tool_input.split(", ")) [docs]class ListPowerBITool(BaseTool): """Tool for getting tables names.""" name = "list_tables_powerbi" description = "Input is an empty string, output is a comma separated list of tables in the database." # noqa: E501 # pylint: disable=C0301 powerbi: PowerBIDataset = Field(exclude=True) [docs] class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _run( self, tool_input: Optional[str] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Get the names of the tables.""" return ", ".join(self.powerbi.get_table_names()) async def _arun( self, tool_input: Optional[str] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
248ef5718c2b-6
        self,
        tool_input: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Get the names of the tables."""
        return ", ".join(self.powerbi.get_table_names())
https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html
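A minimal sketch of the two metadata tools above against a dataset; the dataset id and table names are placeholders, and PowerBIDataset is assumed to accept an Azure TokenCredential as in this version of langchain.utilities.powerbi:

from azure.identity import DefaultAzureCredential

from langchain.tools.powerbi.tool import InfoPowerBITool, ListPowerBITool
from langchain.utilities.powerbi import PowerBIDataset

powerbi = PowerBIDataset(
    dataset_id="<dataset-id>",         # placeholder
    table_names=["table1", "table2"],  # placeholder
    credential=DefaultAzureCredential(),
)
print(ListPowerBITool(powerbi=powerbi).run(""))
print(InfoPowerBITool(powerbi=powerbi).run("table1"))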
16d0d2a3d814-0
langchain.chat_models.vertexai.ChatVertexAI

class langchain.chat_models.vertexai.ChatVertexAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: _LanguageModel = None, model_name: str = 'chat-bison', temperature: float = 0.0, max_output_tokens: int = 128, top_p: float = 0.95, top_k: int = 40, stop: Optional[List[str]] = None, project: Optional[str] = None, location: str = 'us-central1', credentials: Any = None, request_parallelism: int = 5)[source]

Bases: _VertexAICommon, BaseChatModel

Wrapper around Vertex AI large language models.

Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.

param cache: Optional[bool] = None
param callback_manager: Optional[BaseCallbackManager] = None
param callbacks: Callbacks = None
param credentials: Any = None
    The default custom credentials (google.auth.credentials.Credentials) to use.
param location: str = 'us-central1'
    The default location to use when making API calls.
param max_output_tokens: int = 128
    Token limit determines the maximum amount of text output from one prompt.
param model_name: str = 'chat-bison'
    Model name to use.
param project: Optional[str] = None
    The default GCP project to use when making Vertex API calls.
param request_parallelism: int = 5
    The amount of parallelism allowed for requests issued to VertexAI models.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html
16d0d2a3d814-1
    The amount of parallelism allowed for requests issued to VertexAI models.
param stop: Optional[List[str]] = None
    Optional list of stop words to use when generating.
param tags: Optional[List[str]] = None
    Tags to add to the run trace.
param temperature: float = 0.0
    Sampling temperature, it controls the degree of randomness in token selection.
param top_k: int = 40
    How the model selects tokens for output: the next token is selected from
    among the top_k most probable tokens.
param top_p: float = 0.95
    Tokens are selected from most probable to least until the sum of their
    probabilities equals the top_p value.
param verbose: bool [Optional]
    Whether to print out response text.

__call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage
    Call self as a function.

async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Top level call.

async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod all_required_field_names() → Set

async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html
16d0d2a3d814-2
    Predict text from text.

async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
    Predict message from messages.

call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Top level call.

generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[BaseMessage]) → int
    Get the number of tokens in the messages.

get_token_ids(text: str) → List[int]
    Get the token IDs present in the text.

predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.

predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
    Predict message from messages.

validator raise_deprecation » all fields
    Raise deprecation warning if callback_manager is used.

to_json() → Union[SerializedConstructor, SerializedNotImplemented]

to_json_not_implemented() → SerializedNotImplemented
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html
16d0d2a3d814-3
to_json_not_implemented() → SerializedNotImplemented

validator validate_environment » all fields [source]
    Validate that the python package exists in environment.

property is_codey_model: bool

property lc_attributes: Dict
    Return a list of attribute names that should be included in the
    serialized kwargs. These attributes must be accepted by the constructor.

property lc_namespace: List[str]
    Return the namespace of the langchain object.
    eg. ["langchain", "llms", "openai"]

property lc_secrets: Dict[str, str]
    Return a map of constructor argument names to secret ids.
    eg. {"openai_api_key": "OPENAI_API_KEY"}

property lc_serializable: bool
    Return whether or not the class is serializable.

task_executor: ClassVar[Optional[Executor]] = None

model Config
    Bases: object
    Configuration for this pydantic object.
    arbitrary_types_allowed = True
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html
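A minimal usage sketch for ChatVertexAI, assuming GCP application-default credentials and the google-cloud-aiplatform package are already configured:

from langchain.chat_models import ChatVertexAI
from langchain.schema import HumanMessage

chat = ChatVertexAI(model_name="chat-bison", temperature=0.0, max_output_tokens=128)
reply = chat([HumanMessage(content="Say hello in French.")])
print(reply.content)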
40d7032c2060-0
langchain.chat_models.fake.FakeListChatModel

class langchain.chat_models.fake.FakeListChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, responses: List, i: int = 0)[source]

Bases: SimpleChatModel

Fake ChatModel for testing purposes.

Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.

param cache: Optional[bool] = None
param callback_manager: Optional[BaseCallbackManager] = None
param callbacks: Callbacks = None
param i: int = 0
param responses: List [Required]
param tags: Optional[List[str]] = None
    Tags to add to the run trace.
param verbose: bool [Optional]
    Whether to print out response text.

__call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage
    Call self as a function.

async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Top level call.

async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html
40d7032c2060-1
    Take in a list of prompt values and return an LLMResult.

classmethod all_required_field_names() → Set

async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.

async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
    Predict message from messages.

call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Top level call.

generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[BaseMessage]) → int
    Get the number of tokens in the messages.

get_token_ids(text: str) → List[int]
    Get the token IDs present in the text.

predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.

predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html
40d7032c2060-2
    Predict message from messages.

validator raise_deprecation » all fields
    Raise deprecation warning if callback_manager is used.

to_json() → Union[SerializedConstructor, SerializedNotImplemented]

to_json_not_implemented() → SerializedNotImplemented

property lc_attributes: Dict
    Return a list of attribute names that should be included in the
    serialized kwargs. These attributes must be accepted by the constructor.

property lc_namespace: List[str]
    Return the namespace of the langchain object.
    eg. ["langchain", "llms", "openai"]

property lc_secrets: Dict[str, str]
    Return a map of constructor argument names to secret ids.
    eg. {"openai_api_key": "OPENAI_API_KEY"}

property lc_serializable: bool
    Return whether or not the class is serializable.

model Config
    Bases: object
    Configuration for this pydantic object.
    arbitrary_types_allowed = True
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html
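A short sketch of the fake model in a test, cycling through its canned responses:

from langchain.chat_models.fake import FakeListChatModel
from langchain.schema import HumanMessage

fake = FakeListChatModel(responses=["first reply", "second reply"])
print(fake([HumanMessage(content="hi")]).content)  # "first reply"
print(fake([HumanMessage(content="hi")]).content)  # "second reply"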
38d3b4101d5d-0
langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI

class langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: Any = None, model: str = 'gpt-3.5-turbo', temperature: float = 0.7, model_kwargs: Dict[str, Any] = None, openai_api_key: Optional[str] = None, openai_api_base: Optional[str] = None, openai_organization: Optional[str] = None, openai_proxy: Optional[str] = None, request_timeout: Optional[Union[float, Tuple[float, float]]] = None, max_retries: int = 6, streaming: bool = False, n: int = 1, max_tokens: Optional[int] = None, tiktoken_model_name: Optional[str] = None, pl_tags: Optional[List[str]] = None, return_pl_id: Optional[bool] = False)[source]

Bases: ChatOpenAI

Wrapper around OpenAI Chat large language models and PromptLayer.

To use, you should have the openai and promptlayer python packages installed,
and the environment variables OPENAI_API_KEY and PROMPTLAYER_API_KEY set with
your OpenAI API key and PromptLayer key, respectively.

All parameters that can be passed to the OpenAI LLM can also be passed here.
PromptLayerChatOpenAI adds two optional parameters:

pl_tags – List of strings to tag the request with.
return_pl_id – If True, the PromptLayer request ID will be
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html
38d3b4101d5d-1
returned in the generation_info field of the Generation object.

Example:
    from langchain.chat_models import PromptLayerChatOpenAI
    openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")

Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.

param cache: Optional[bool] = None
param callback_manager: Optional[BaseCallbackManager] = None
param callbacks: Callbacks = None
param max_retries: int = 6
    Maximum number of retries to make when generating.
param max_tokens: Optional[int] = None
    Maximum number of tokens to generate.
param model_kwargs: Dict[str, Any] [Optional]
    Holds any model parameters valid for create call not explicitly specified.
param model_name: str = 'gpt-3.5-turbo' (alias 'model')
    Model name to use.
param n: int = 1
    Number of chat completions to generate for each prompt.
param openai_api_base: Optional[str] = None
    Base URL path for API requests, leave blank if not using a proxy or
    service emulator.
param openai_api_key: Optional[str] = None
param openai_organization: Optional[str] = None
param openai_proxy: Optional[str] = None
param pl_tags: Optional[List[str]] = None
param request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    Timeout for requests to OpenAI completion API. Default is 600 seconds.
param return_pl_id: Optional[bool] = False
param streaming: bool = False
    Whether to stream the results or not.
param tags: Optional[List[str]] = None
    Tags to add to the run trace.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html
38d3b4101d5d-2
param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: float = 0.7¶ What sampling temperature to use. param tiktoken_model_name: Optional[str] = None¶ The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html
38d3b4101d5d-3
Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator build_extra  »  all fields¶ Build extra kwargs from additional params that were passed in. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶ completion_with_retry(**kwargs: Any) → Any¶ Use tenacity to retry the completion call. dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html
38d3b4101d5d-4
Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb get_token_ids(text: str) → List[int]¶ Get the tokens present in the text with tiktoken package. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields¶ Validate that api key and python package exists in environment. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. allow_population_by_field_name = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html
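A minimal usage sketch for the two PromptLayer-specific parameters above. It assumes the openai and promptlayer packages are installed and OPENAI_API_KEY / PROMPTLAYER_API_KEY are set; the "pl_request_id" key named in the comment is an assumption about how the request ID is stored inside generation_info.

from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["docs-example"], return_pl_id=True)
result = chat.generate([[HumanMessage(content="What does PromptLayer do?")]])
generation = result.generations[0][0]
# The class docs above say the PromptLayer request ID lands in generation_info.
print(generation.text)
print(generation.generation_info)  # expected to contain e.g. {"pl_request_id": ...}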
ec65df64c072-0
langchain.chat_models.azure_openai.AzureChatOpenAI¶ class langchain.chat_models.azure_openai.AzureChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: Any = None, model: str = 'gpt-3.5-turbo', temperature: float = 0.7, model_kwargs: Dict[str, Any] = None, openai_api_key: str = '', openai_api_base: str = '', openai_organization: str = '', openai_proxy: str = '', request_timeout: Optional[Union[float, Tuple[float, float]]] = None, max_retries: int = 6, streaming: bool = False, n: int = 1, max_tokens: Optional[int] = None, tiktoken_model_name: Optional[str] = None, deployment_name: str = '', openai_api_type: str = 'azure', openai_api_version: str = '')[source]¶ Bases: ChatOpenAI Wrapper around Azure OpenAI Chat Completion API. To use this class you must have a deployed model on Azure OpenAI. Use deployment_name in the constructor to refer to the “Model deployment name” in the Azure portal. In addition, you should have the openai python package installed, and the following environment variables set or passed to the constructor in lower case: - OPENAI_API_TYPE (default: azure) - OPENAI_API_KEY - OPENAI_API_BASE - OPENAI_API_VERSION - OPENAI_PROXY For example, if you have gpt-35-turbo deployed, with the deployment name 35-turbo-dev, the constructor should look like: AzureChatOpenAI(
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
ec65df64c072-1
35-turbo-dev, the constructor should look like: AzureChatOpenAI( deployment_name="35-turbo-dev", openai_api_version="2023-03-15-preview", ) Be aware the API version may change. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param deployment_name: str = ''¶ param max_retries: int = 6¶ Maximum number of retries to make when generating. param max_tokens: Optional[int] = None¶ Maximum number of tokens to generate. param model_kwargs: Dict[str, Any] [Optional]¶ Holds any model parameters valid for create call not explicitly specified. param model_name: str = 'gpt-3.5-turbo' (alias 'model')¶ Model name to use. param n: int = 1¶ Number of chat completions to generate for each prompt. param openai_api_base: str = ''¶ Base URL path for API requests; leave blank if not using a proxy or service emulator. param openai_api_key: str = ''¶ param openai_api_type: str = 'azure'¶ param openai_api_version: str = ''¶ param openai_organization: str = ''¶ param openai_proxy: str = ''¶ param request_timeout: Optional[Union[float, Tuple[float, float]]] = None¶ Timeout for requests to OpenAI completion API. Default is 600 seconds.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
ec65df64c072-2
Timeout for requests to OpenAI completion API. Default is 600 seconds. param streaming: bool = False¶ Whether to stream the results or not. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: float = 0.7¶ What sampling temperature to use. param tiktoken_model_name: Optional[str] = None¶ The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
ec65df64c072-3
Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator build_extra  »  all fields¶ Build extra kwargs from additional params that were passed in. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶ completion_with_retry(**kwargs: Any) → Any¶ Use tenacity to retry the completion call. dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
ec65df64c072-4
Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb get_token_ids(text: str) → List[int]¶ Get the tokens present in the text with tiktoken package. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields[source]¶ Validate that api key and python package exists in environment. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
ec65df64c072-5
model Config¶ Bases: object Configuration for this pydantic object. allow_population_by_field_name = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html
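A hedged sketch expanding the constructor example above; the endpoint, key, and API version are placeholders, and the version string may change.

import os
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage

# Environment variables can be used instead of constructor arguments.
os.environ["OPENAI_API_KEY"] = "..."  # placeholder Azure OpenAI key
os.environ["OPENAI_API_BASE"] = "https://<your-resource>.openai.azure.com/"  # placeholder

chat = AzureChatOpenAI(
    deployment_name="35-turbo-dev",  # the "Model deployment name" in the Azure portal
    openai_api_version="2023-03-15-preview",
)
print(chat([HumanMessage(content="Hello")]).content)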
7f29db8b29d6-0
langchain.chat_models.anthropic.ChatAnthropic¶ class langchain.chat_models.anthropic.ChatAnthropic(*, client: Any = None, model: str = 'claude-v1', max_tokens_to_sample: int = 256, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, streaming: bool = False, default_request_timeout: Optional[Union[float, Tuple[float, float]]] = None, anthropic_api_url: Optional[str] = None, anthropic_api_key: Optional[str] = None, HUMAN_PROMPT: Optional[str] = None, AI_PROMPT: Optional[str] = None, count_tokens: Optional[Callable[[str], int]] = None, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None)[source]¶ Bases: BaseChatModel, _AnthropicCommon Wrapper around Anthropic’s large language model. To use, you should have the anthropic python package installed, and the environment variable ANTHROPIC_API_KEY set with your API key, or pass it as a named parameter to the constructor. Example from langchain.chat_models import ChatAnthropic model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key") Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param AI_PROMPT: Optional[str] = None¶ param HUMAN_PROMPT: Optional[str] = None¶ param anthropic_api_key: Optional[str] = None¶ param anthropic_api_url: Optional[str] = None¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html
7f29db8b29d6-1
param anthropic_api_url: Optional[str] = None¶ param cache: Optional[bool] = None¶ param callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None¶ param callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None¶ param count_tokens: Optional[Callable[[str], int]] = None¶ param default_request_timeout: Optional[Union[float, Tuple[float, float]]] = None¶ Timeout for requests to Anthropic Completion API. Default is 600 seconds. param max_tokens_to_sample: int = 256¶ Denotes the number of tokens to predict per generation. param model: str = 'claude-v1'¶ Model name to use. param streaming: bool = False¶ Whether to stream the results. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: Optional[float] = None¶ A non-negative float that tunes the degree of randomness in generation. param top_k: Optional[int] = None¶ Number of most likely tokens to consider at each step. param top_p: Optional[float] = None¶ Total probability mass of tokens to consider at each step. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html
7f29db8b29d6-2
Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶ dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int[source]¶ Calculate number of tokens. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html
7f29db8b29d6-3
Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields¶ Validate that api key and python package exists in environment. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html
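A minimal usage sketch, assuming the anthropic package is installed and ANTHROPIC_API_KEY is set (or passed as anthropic_api_key).

from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic(model="claude-v1", temperature=0.3, max_tokens_to_sample=256)
reply = chat([HumanMessage(content="Explain chat models in one sentence.")])
print(reply.content)  # __call__ returns a BaseMessage, per the signature above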
13a3376ec55c-0
langchain.chat_models.base.BaseChatModel¶ class langchain.chat_models.base.BaseChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None)[source]¶ Bases: BaseLanguageModel, ABC Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None¶ param callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None¶ param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage[source]¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult[source]¶ Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult[source]¶ Take in a list of prompt values and return an LLMResult.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html
13a3376ec55c-1
Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str[source]¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage[source]¶ Predict message from messages. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str[source]¶ dict(**kwargs: Any) → Dict[source]¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult[source]¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult[source]¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str[source]¶ Predict text from text.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html
13a3376ec55c-2
Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage[source]¶ Predict message from messages. validator raise_deprecation  »  all fields[source]¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config[source]¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html
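A hedged sketch of a custom subclass: concrete chat models implement a private _generate hook (and usually an async twin) that the public generate/predict methods above route through. The private signatures below are assumptions about this version of the base class, not documented API.

from typing import Any, List, Optional
from langchain.chat_models.base import BaseChatModel
from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult

class EchoChatModel(BaseChatModel):
    """Toy chat model that echoes the last message back."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat"

    def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Any = None, **kwargs: Any) -> ChatResult:
        # Wrap the reply in the ChatGeneration/ChatResult containers that
        # generate()/predict_messages() unpack.
        message = AIMessage(content=messages[-1].content)
        return ChatResult(generations=[ChatGeneration(message=message)])

    async def _agenerate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Any = None, **kwargs: Any) -> ChatResult:
        return self._generate(messages, stop=stop)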
363a4d1a6b23-0
langchain.chat_models.google_palm.ChatGooglePalmError¶ class langchain.chat_models.google_palm.ChatGooglePalmError[source]¶ Bases: Exception Error raised when there is an issue with the Google PaLM API. add_note()¶ Exception.add_note(note) – add a note to the exception with_traceback()¶ Exception.with_traceback(tb) – set self.__traceback__ to tb and return self. args¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalmError.html
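A short hedged sketch of catching this exception around a PaLM chat call (assumes GOOGLE_API_KEY is set).

from langchain.chat_models import ChatGooglePalm
from langchain.chat_models.google_palm import ChatGooglePalmError
from langchain.schema import HumanMessage

try:
    chat = ChatGooglePalm()
    print(chat([HumanMessage(content="Hello")]).content)
except ChatGooglePalmError as err:
    # Raised for problems talking to the Google PaLM API, per the docstring.
    print(f"PaLM API error: {err}")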
816072f55a9f-0
langchain.chat_models.openai.ChatOpenAI¶ class langchain.chat_models.openai.ChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: Any = None, model: str = 'gpt-3.5-turbo', temperature: float = 0.7, model_kwargs: Dict[str, Any] = None, openai_api_key: Optional[str] = None, openai_api_base: Optional[str] = None, openai_organization: Optional[str] = None, openai_proxy: Optional[str] = None, request_timeout: Optional[Union[float, Tuple[float, float]]] = None, max_retries: int = 6, streaming: bool = False, n: int = 1, max_tokens: Optional[int] = None, tiktoken_model_name: Optional[str] = None)[source]¶ Bases: BaseChatModel Wrapper around OpenAI Chat large language models. To use, you should have the openai python package installed, and the environment variable OPENAI_API_KEY set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example from langchain.chat_models import ChatOpenAI openai = ChatOpenAI(model_name="gpt-3.5-turbo") Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html
816072f55a9f-1
param callbacks: Callbacks = None¶ param max_retries: int = 6¶ Maximum number of retries to make when generating. param max_tokens: Optional[int] = None¶ Maximum number of tokens to generate. param model_kwargs: Dict[str, Any] [Optional]¶ Holds any model parameters valid for create call not explicitly specified. param model_name: str = 'gpt-3.5-turbo' (alias 'model')¶ Model name to use. param n: int = 1¶ Number of chat completions to generate for each prompt. param openai_api_base: Optional[str] = None¶ Base URL path for API requests; leave blank if not using a proxy or service emulator. param openai_api_key: Optional[str] = None¶ param openai_organization: Optional[str] = None¶ param openai_proxy: Optional[str] = None¶ param request_timeout: Optional[Union[float, Tuple[float, float]]] = None¶ Timeout for requests to OpenAI completion API. Default is 600 seconds. param streaming: bool = False¶ Whether to stream the results or not. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: float = 0.7¶ What sampling temperature to use. param tiktoken_model_name: Optional[str] = None¶ The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html
816072f55a9f-2
supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator build_extra  »  all fields[source]¶ Build extra kwargs from additional params that were passed in. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html
816072f55a9f-3
completion_with_retry(**kwargs: Any) → Any[source]¶ Use tenacity to retry the completion call. dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int[source]¶ Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb get_token_ids(text: str) → List[int][source]¶ Get the tokens present in the text with tiktoken package. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html
816072f55a9f-4
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields[source]¶ Validate that api key and python package exists in environment. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config[source]¶ Bases: object Configuration for this pydantic object. allow_population_by_field_name = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html
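A minimal usage sketch combining generation with local token counting; it assumes the openai and tiktoken packages are installed and OPENAI_API_KEY is set.

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Name three prime numbers."),
]
# Token counting uses tiktoken locally, so no API call is made here.
print("prompt tokens:", chat.get_num_tokens_from_messages(messages))
print(chat(messages).content)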
96f64fcf452a-0
langchain.chat_models.google_palm.chat_with_retry¶ langchain.chat_models.google_palm.chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) → Any[source]¶ Use tenacity to retry the completion call.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.chat_with_retry.html
32b03e159cd7-0
langchain.chat_models.google_palm.ChatGooglePalm¶ class langchain.chat_models.google_palm.ChatGooglePalm(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: Any = None, model_name: str = 'models/chat-bison-001', google_api_key: Optional[str] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, top_k: Optional[int] = None, n: int = 1)[source]¶ Bases: BaseChatModel, BaseModel Wrapper around Google’s PaLM Chat API. To use, you must have the google.generativeai Python package installed and either: The GOOGLE_API_KEY environment variable set with your API key, or Pass your API key using the google_api_key kwarg to the ChatGooglePalm constructor. Example from langchain.chat_models import ChatGooglePalm chat = ChatGooglePalm() Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param google_api_key: Optional[str] = None¶ param model_name: str = 'models/chat-bison-001'¶ Model name to use. param n: int = 1¶ Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated. param tags: Optional[List[str]] = None¶ Tags to add to the run trace.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html
32b03e159cd7-1
param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: Optional[float] = None¶ Run inference with this temperature. Must be in the closed interval [0.0, 1.0]. param top_k: Optional[int] = None¶ Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive. param top_p: Optional[float] = None¶ Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html
32b03e159cd7-2
Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶ dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html
32b03e159cd7-3
to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields[source]¶ Validate api key, python package exists, temperature, top_p, and top_k. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html
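A minimal usage sketch; note the validators above reject temperature/top_p outside [0.0, 1.0] and non-positive top_k. Assumes google.generativeai is installed.

from langchain.chat_models import ChatGooglePalm
from langchain.schema import HumanMessage

chat = ChatGooglePalm(google_api_key="...", temperature=0.2, top_k=40)  # placeholder key
print(chat([HumanMessage(content="Hi")]).content)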
f0231b42cecb-0
langchain.chat_models.base.SimpleChatModel¶ class langchain.chat_models.base.SimpleChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None)[source]¶ Bases: BaseChatModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None¶ param callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None¶ param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param verbose: bool [Optional]¶ Whether to print out response text. __call__(messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → BaseMessage¶ Call self as a function. async agenerate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult.
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html
f0231b42cecb-1
Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶ dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Top Level call generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html
f0231b42cecb-2
Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html
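A hedged subclassing sketch: SimpleChatModel lets a subclass return a plain string from a private _call hook instead of assembling a ChatResult. The _call signature is an assumption about this version; if _agenerate is abstract in your version, an async override is needed too.

from typing import Any, List, Optional
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import BaseMessage

class YellingChatModel(SimpleChatModel):
    """Toy model that upper-cases the last message."""

    @property
    def _llm_type(self) -> str:
        return "yelling-chat"

    def _call(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Any = None, **kwargs: Any) -> str:
        return messages[-1].content.upper()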
fd7e90e3fd56-0
langchain.utils.raise_for_status_with_text¶ langchain.utils.raise_for_status_with_text(response: Response) → None[source]¶ Raise an error with the response text.
https://api.python.langchain.com/en/latest/utils/langchain.utils.raise_for_status_with_text.html
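A minimal sketch: this behaves like response.raise_for_status(), except the raised error carries the response body, which helps with APIs that return JSON error details.

import requests
from langchain.utils import raise_for_status_with_text

resp = requests.get("https://example.com/nonexistent")  # placeholder URL
try:
    raise_for_status_with_text(resp)
except Exception as err:
    print(err)  # on a 4xx/5xx status, the message includes the body text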
3af79e427c87-0
langchain.utils.stringify_value¶ langchain.utils.stringify_value(val: Any) → str[source]¶ Stringify a value. Parameters val – The value to stringify. Returns The stringified value. Return type str
https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_value.html
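A small sketch; the exact layout of the output is an implementation detail, so treat the rendering as illustrative.

from langchain.utils import stringify_value

# Nested containers are converted to a readable string form.
print(stringify_value({"tool": "search", "args": ["query one", "query two"]}))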
b5ac27728bb0-0
langchain.utils.guard_import¶ langchain.utils.guard_import(module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None) → Any[source]¶ Dynamically imports a module and raises a helpful exception if the module is not installed.
https://api.python.langchain.com/en/latest/utils/langchain.utils.guard_import.html
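A minimal sketch using the signature above: the module object is returned if importable; otherwise an exception with an install hint is raised.

from langchain.utils import guard_import

# pip_name covers packages whose install name differs from the import name.
yaml = guard_import("yaml", pip_name="pyyaml")
print(yaml.safe_load("a: 1"))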
d9b2f9df49f1-0
langchain.utils.comma_list¶ langchain.utils.comma_list(items: List[Any]) → str[source]¶
https://api.python.langchain.com/en/latest/utils/langchain.utils.comma_list.html
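No docstring is published for this helper; presumably it renders a list as comma-separated text, as sketched below.

from langchain.utils import comma_list

print(comma_list([1, 2, 3]))  # expected: "1, 2, 3"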
d766e747edad-0
langchain.utils.stringify_dict¶ langchain.utils.stringify_dict(data: dict) → str[source]¶ Stringify a dictionary. Parameters data – The dictionary to stringify. Returns The stringified dictionary. Return type str
https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_dict.html
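A small sketch; the exact "key: value" layout is an implementation detail.

from langchain.utils import stringify_dict

record = {"title": "Intro", "page": 1}
print(stringify_dict(record))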
45098d1643d1-0
langchain.utils.get_from_dict_or_env¶ langchain.utils.get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None) → str[source]¶ Get a value from a dictionary or an environment variable.
https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_dict_or_env.html
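A minimal sketch: an explicit dictionary entry wins, otherwise the named environment variable is consulted (with default as a last resort).

import os
from langchain.utils import get_from_dict_or_env

os.environ["MY_API_KEY"] = "from-env"
value = get_from_dict_or_env({}, "my_api_key", "MY_API_KEY")
print(value)  # "from-env", since the dict has no "my_api_key" entry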
b3873011e843-0
langchain.utils.get_from_env¶ langchain.utils.get_from_env(key: str, env_key: str, default: Optional[str] = None) → str[source]¶ Get a value from an environment variable.
https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_env.html
c0b04f25fdd4-0
langchain.utils.mock_now¶ langchain.utils.mock_now(dt_value)[source]¶ Context manager for mocking out datetime.now() in unit tests. Example: with mock_now(datetime.datetime(2011, 2, 3, 10, 11)): assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
https://api.python.langchain.com/en/latest/utils/langchain.utils.mock_now.html
bb116e03c8b6-0
langchain.utils.xor_args¶ langchain.utils.xor_args(*arg_groups: Tuple[str, ...]) → Callable[source]¶ Validate specified keyword args are mutually exclusive.
https://api.python.langchain.com/en/latest/utils/langchain.utils.xor_args.html
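A hedged sketch of the decorator: presumably it raises when anything other than exactly one argument from each group is supplied.

from langchain.utils import xor_args

@xor_args(("query", "embedding"))
def search(query=None, embedding=None):
    return query if query is not None else embedding

print(search(query="hello"))  # ok: exactly one argument of the group is set
# search(query="hi", embedding=[1.0])  # expected to raise: mutually exclusive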
08af877cbb86-0
langchain.llms.azureml_endpoint.DollyContentFormatter¶ class langchain.llms.azureml_endpoint.DollyContentFormatter[source]¶ Bases: ContentFormatterBase Content handler for the Dolly-v2-12b model. Methods __init__() format_request_payload(prompt, model_kwargs) Formats the request body according to the input schema of the model. format_response_payload(output) Formats the response body according to the output schema of the model. Attributes accepts The MIME type of the response data returned from the endpoint content_type The MIME type of the input data passed to the endpoint format_request_payload(prompt: str, model_kwargs: Dict) → bytes[source]¶ Formats the request body according to the input schema of the model. Returns bytes or a seekable file-like object in the format specified in the content_type request header. format_response_payload(output: bytes) → str[source]¶ Formats the response body according to the output schema of the model. Returns the data type that is received from the response. accepts: Optional[str] = 'application/json'¶ The MIME type of the response data returned from the endpoint content_type: Optional[str] = 'application/json'¶ The MIME type of the input data passed to the endpoint
https://api.python.langchain.com/en/latest/llms/langchain.llms.azureml_endpoint.DollyContentFormatter.html
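A minimal sketch of building a request body; the JSON schema is specific to the Dolly deployment, so inspect the returned bytes rather than assuming a layout. Sending the payload to a real endpoint is out of scope here.

from langchain.llms.azureml_endpoint import DollyContentFormatter

formatter = DollyContentFormatter()
payload = formatter.format_request_payload(
    prompt="Tell me a joke.",
    model_kwargs={"temperature": 0.5},  # illustrative kwargs
)
print(payload)  # bytes, content_type "application/json" per the attributes above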
821ea6ff17fb-0
langchain.llms.google_palm.GooglePalm¶ class langchain.llms.google_palm.GooglePalm(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: Any = None, google_api_key: Optional[str] = None, model_name: str = 'models/text-bison-001', temperature: float = 0.7, top_p: Optional[float] = None, top_k: Optional[int] = None, max_output_tokens: Optional[int] = None, n: int = 1)[source]¶ Bases: BaseLLM, BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param google_api_key: Optional[str] = None¶ param max_output_tokens: Optional[int] = None¶ Maximum number of tokens to include in a candidate. Must be greater than zero. If unset, will default to 64. param model_name: str = 'models/text-bison-001'¶ Model name to use. param n: int = 1¶ Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param temperature: float = 0.7¶ Run inference with this temperature. Must be in the closed interval
https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html
821ea6ff17fb-1
[0.0, 1.0]. param top_k: Optional[int] = None¶ Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive. param top_p: Optional[float] = None¶ Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]. param verbose: bool [Optional]¶ Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. dict(**kwargs: Any) → Dict¶
https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html
821ea6ff17fb-2
Predict message from messages. dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. save(file_path: Union[Path, str]) → None¶ Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) validator set_verbose  »  verbose¶ If verbose is None, set it.
https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html
821ea6ff17fb-3
validator set_verbose  »  verbose¶ If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields[source]¶ Validate api key, python package exists. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html
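A minimal usage sketch, assuming google.generativeai is installed and GOOGLE_API_KEY is set (or pass google_api_key=...). max_output_tokens defaults to 64 when unset, per the parameter docs above.

from langchain.llms import GooglePalm

llm = GooglePalm(temperature=0.2, max_output_tokens=128)
print(llm("Write a haiku about graphs."))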
d281e79cc2b9-0
langchain.llms.deepinfra.DeepInfra¶ class langchain.llms.deepinfra.DeepInfra(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, model_id: str = 'google/flan-t5-xl', model_kwargs: Optional[dict] = None, deepinfra_api_token: Optional[str] = None)[source]¶ Bases: LLM Wrapper around DeepInfra deployed models. To use, you should have the requests python package installed, and the environment variable DEEPINFRA_API_TOKEN set with your API token, or pass it as a named parameter to the constructor. Only supports text-generation and text2text-generation for now. Example from langchain.llms import DeepInfra di = DeepInfra(model_id="google/flan-t5-xl", deepinfra_api_token="my-api-key") Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param deepinfra_api_token: Optional[str] = None¶ param model_id: str = 'google/flan-t5-xl'¶ param model_kwargs: Optional[dict] = None¶ param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param verbose: bool [Optional]¶ Whether to print out response text.
https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html
d281e79cc2b9-1
param verbose: bool [Optional]¶ Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. classmethod all_required_field_names() → Set¶ async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input.
https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html
d281e79cc2b9-2
Run the LLM on the given prompt and input. generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the message. get_token_ids(text: str) → List[int]¶ Get the token present in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Predict text from text. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Predict message from messages. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. save(file_path: Union[Path, str]) → None¶ Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) validator set_verbose  »  verbose¶ If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_environment  »  all fields[source]¶ Validate that api key and python package exists in environment. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the
property lc_attributes: Dict
    Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor.

property lc_namespace: List[str]
    Return the namespace of the langchain object, e.g. ["langchain", "llms", "openai"].

property lc_secrets: Dict[str, str]
    Return a map of constructor argument names to secret ids, e.g. {"openai_api_key": "OPENAI_API_KEY"}.

property lc_serializable: bool
    Return whether or not the class is serializable.

model Config[source]
    Bases: object

    Configuration for this pydantic object.

    extra = 'forbid'
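The save method above can be paired with langchain's LLM loader for a configuration round trip. A hedged sketch, assuming the load_llm helper is available at langchain.llms.loading in your installed version:

from langchain.llms import DeepInfra
from langchain.llms.loading import load_llm

llm = DeepInfra(model_id="google/flan-t5-xl")

# Writes the constructor configuration (model_id, model_kwargs, ...) to YAML.
llm.save(file_path="llm.yaml")

# Rebuilds an equivalent LLM instance from the saved file.
restored = load_llm("llm.yaml")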
https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html
langchain.llms.azureml_endpoint.OSSContentFormatter

class langchain.llms.azureml_endpoint.OSSContentFormatter[source]

Bases: ContentFormatterBase

Content handler for LLMs from the OSS catalog.

Methods

__init__()
format_request_payload(prompt, model_kwargs)
    Formats the request body according to the input schema of the model.
format_response_payload(output)
    Formats the response body according to the output schema of the model.

Attributes

accepts
    The MIME type of the response data returned from the endpoint.
content_type
    The MIME type of the input data passed to the endpoint.

format_request_payload(prompt: str, model_kwargs: Dict) → bytes[source]
    Formats the request body according to the input schema of the model. Returns bytes or a seekable file-like object in the format specified in the content_type request header.

format_response_payload(output: bytes) → str[source]
    Formats the response body according to the output schema of the model. Returns the data type that is received from the response.

accepts: Optional[str] = 'application/json'
    The MIME type of the response data returned from the endpoint.

content_type: Optional[str] = 'application/json'
    The MIME type of the input data passed to the endpoint.
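For orientation, a custom formatter implements the same two methods. The sketch below is illustrative only: the JSON shape (the "inputs"/"parameters" request keys and the "generated_text" response key) is a hypothetical schema, not the one the OSS catalog actually uses, so check your endpoint's documentation before adapting it:

import json
from typing import Dict

from langchain.llms.azureml_endpoint import ContentFormatterBase


class MyContentFormatter(ContentFormatterBase):
    """Hypothetical formatter for an endpoint with a simple JSON schema."""

    def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
        # Wrap the prompt and sampling parameters in the assumed input schema.
        body = {"inputs": [prompt], "parameters": model_kwargs}
        return json.dumps(body).encode("utf-8")

    def format_response_payload(self, output: bytes) -> str:
        # Pull the generated text out of the assumed response schema.
        return json.loads(output)[0]["generated_text"]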
https://api.python.langchain.com/en/latest/llms/langchain.llms.azureml_endpoint.OSSContentFormatter.html
langchain.llms.anyscale.Anyscale

class langchain.llms.anyscale.Anyscale(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, model_kwargs: Optional[dict] = None, anyscale_service_url: Optional[str] = None, anyscale_service_route: Optional[str] = None, anyscale_service_token: Optional[str] = None)[source]

Bases: LLM

Wrapper around Anyscale Services.

To use, you should have the environment variables ANYSCALE_SERVICE_URL, ANYSCALE_SERVICE_ROUTE and ANYSCALE_SERVICE_TOKEN set with your Anyscale Service, or pass them as named parameters to the constructor.

Example

from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
                    anyscale_service_route="SERVICE_ROUTE",
                    anyscale_service_token="SERVICE_TOKEN")

# Use Ray for distributed processing
import ray

prompt_list = []

@ray.remote
def send_query(llm, prompt):
    resp = llm(prompt)
    return resp

futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)

Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model.

param anyscale_service_route: Optional[str] = None
param anyscale_service_token: Optional[str] = None
param anyscale_service_url: Optional[str] = None
param cache: Optional[bool] = None
param callback_manager: Optional[BaseCallbackManager] = None
param callbacks: Callbacks = None
param model_kwargs: Optional[dict] = None
    Keyword arguments to pass to the model. Reserved for future use.
param tags: Optional[List[str]] = None
    Tags to add to the run trace.
param verbose: bool [Optional]
    Whether to print out response text.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str
    Check the cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Run the LLM on the given prompts and input.

async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod all_required_field_names() → Set

async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.

async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
    Predict a message from messages.

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult
    Run the LLM on the given prompts and input.

generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[BaseMessage]) → int
    Get the number of tokens in the messages.

get_token_ids(text: str) → List[int]
    Get the token IDs present in the text.

predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str
    Predict text from text.

predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage
    Predict a message from messages.

validator raise_deprecation  »  all fields
    Raise a deprecation warning if callback_manager is used.

save(file_path: Union[Path, str]) → None
    Save the LLM.

    Parameters
        file_path – Path to file to save the LLM to.

    Example:
        llm.save(file_path="path/llm.yaml")

validator set_verbose  »  verbose
    If verbose is None, set it. This allows users to pass in None as verbose to access the global setting.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]

to_json_not_implemented() → SerializedNotImplemented

validator validate_environment  »  all fields[source]
    Validate that the API key and python package exist in the environment.

property lc_attributes: Dict
    Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor.

property lc_namespace: List[str]
    Return the namespace of the langchain object, e.g. ["langchain", "llms", "openai"].

property lc_secrets: Dict[str, str]
    Return a map of constructor argument names to secret ids, e.g. {"openai_api_key": "OPENAI_API_KEY"}.

property lc_serializable: bool
    Return whether or not the class is serializable.

model Config[source]
    Bases: object

    Configuration for this pydantic object.

    extra = 'forbid'
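To complement the Ray example above, the plain batched entry point can be used without any distributed setup. A minimal sketch, illustrative only, assuming the three ANYSCALE_SERVICE_* environment variables are set:

from langchain.llms import Anyscale

llm = Anyscale()  # credentials read from ANYSCALE_SERVICE_URL / _ROUTE / _TOKEN

prompts = ["Tell me a joke.", "Name a prime number."]
# generate runs the batch and returns an LLMResult; generations[i] holds
# the candidate completions for prompts[i].
result = llm.generate(prompts)
for prompt, gens in zip(prompts, result.generations):
    print(prompt, "->", gens[0].text)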
https://api.python.langchain.com/en/latest/llms/langchain.llms.anyscale.Anyscale.html