id
stringlengths
14
15
text
stringlengths
49
2.47k
source
stringlengths
61
166
6a6d48ea842e-1
" What is the nearest airport to {location}? Please respond with the " " airport's International Air Transport Association (IATA) Location " ' Identifier in the following JSON format. JSON: "iataCode": "IATA ' ' Location Identifier" ' ) llm = ChatOpenAI(temperature=0) llm_chain = LLMChain.from_string(llm=llm, template=template) output = llm_chain.run(location=location) return output
https://api.python.langchain.com/en/latest/_modules/langchain/tools/amadeus/closest_airport.html
9a26b9c99710-0
Source code for langchain.tools.amadeus.utils """O365 tool utils.""" from __future__ import annotations import logging import os from typing import TYPE_CHECKING if TYPE_CHECKING: from amadeus import Client logger = logging.getLogger(__name__) [docs]def authenticate() -> Client: """Authenticate using the Amadeus API""" try: from amadeus import Client except ImportError as e: raise ImportError( "Cannot import amadeus. Please install the package with " "`pip install amadeus`." ) from e if "AMADEUS_CLIENT_ID" in os.environ and "AMADEUS_CLIENT_SECRET" in os.environ: client_id = os.environ["AMADEUS_CLIENT_ID"] client_secret = os.environ["AMADEUS_CLIENT_SECRET"] else: logger.error( "Error: The AMADEUS_CLIENT_ID and AMADEUS_CLIENT_SECRET environmental " "variables have not been set. Visit the following link on how to " "acquire these authorization tokens: " "https://developers.amadeus.com/register" ) return None client = Client(client_id=client_id, client_secret=client_secret) return client
https://api.python.langchain.com/en/latest/_modules/langchain/tools/amadeus/utils.html
f29f59d3783e-0
Source code for langchain.tools.office365.events_search """Util that Searches calendar events in Office 365. Free, but setup is required. See link below. https://learn.microsoft.com/en-us/graph/auth/ """ from datetime import datetime as dt from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Extra, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.office365.base import O365BaseTool from langchain.tools.office365.utils import clean_body [docs]class SearchEventsInput(BaseModel): """Input for SearchEmails Tool.""" """From https://learn.microsoft.com/en-us/graph/search-query-parameter""" start_datetime: str = Field( description=( " The start datetime for the search query in the following format: " ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time ' " components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC)." ) ) end_datetime: str = Field( description=( " The end datetime for the search query in the following format: " ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time ' " components, and the time zone offset is specified as ±hh:mm. "
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
f29f59d3783e-1
" components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC)." ) ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) truncate: bool = Field( default=True, description=( "Whether the event's body is truncated to meet token number limits. Set to " "False for searches that will retrieve very few results, otherwise, set to " "True." ), ) [docs]class O365SearchEvents(O365BaseTool): """Class for searching calendar events in Office 365 Free, but setup is required """ name: str = "events_search" args_schema: Type[BaseModel] = SearchEventsInput description: str = ( " Use this tool to search for the user's calendar events." " The input must be the start and end datetimes for the search query." " The output is a JSON list of all the events in the user's calendar" " between the start and end times. You can assume that the user can " " not schedule any meeting over existing meetings, and that the user " "is busy during meetings. Any times without events are free for the user. " ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _run( self,
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
f29f59d3783e-2
extra = Extra.forbid def _run( self, start_datetime: str, end_datetime: str, max_results: int = 10, truncate: bool = True, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: TRUNCATE_LIMIT = 150 # Get calendar object schedule = self.account.schedule() calendar = schedule.get_default_calendar() # Process the date range parameters start_datetime_query = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z") end_datetime_query = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z") # Run the query q = calendar.new_query("start").greater_equal(start_datetime_query) q.chain("and").on_attribute("end").less_equal(end_datetime_query) events = calendar.get_events(query=q, include_recurring=True, limit=max_results) # Generate output dict output_events = [] for event in events: output_event = {} output_event["organizer"] = event.organizer output_event["subject"] = event.subject if truncate: output_event["body"] = clean_body(event.body)[:TRUNCATE_LIMIT] else: output_event["body"] = clean_body(event.body) # Get the time zone from the search parameters time_zone = start_datetime_query.tzinfo # Assign the datetimes in the search time zone output_event["start_datetime"] = event.start.astimezone(time_zone).strftime( "%Y-%m-%dT%H:%M:%S%z" )
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
f29f59d3783e-3
"%Y-%m-%dT%H:%M:%S%z" ) output_event["end_datetime"] = event.end.astimezone(time_zone).strftime( "%Y-%m-%dT%H:%M:%S%z" ) output_event["modified_date"] = event.modified.astimezone( time_zone ).strftime("%Y-%m-%dT%H:%M:%S%z") output_events.append(output_event) return output_events
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html
96f1531b3279-0
Source code for langchain.tools.office365.base """Base class for Office 365 tools.""" from __future__ import annotations from typing import TYPE_CHECKING from pydantic import Field from langchain.tools.base import BaseTool from langchain.tools.office365.utils import authenticate if TYPE_CHECKING: from O365 import Account [docs]class O365BaseTool(BaseTool): """Base class for the Office 365 tools.""" account: Account = Field(default_factory=authenticate) """The account object for the Office 365 account."""
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/base.html
fc213c5eedb6-0
Source code for langchain.tools.office365.send_message from typing import List, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.office365.base import O365BaseTool [docs]class SendMessageSchema(BaseModel): """Input for SendMessageTool.""" body: str = Field( ..., description="The message body to be sent.", ) to: List[str] = Field( ..., description="The list of recipients.", ) subject: str = Field( ..., description="The subject of the message.", ) cc: Optional[List[str]] = Field( None, description="The list of CC recipients.", ) bcc: Optional[List[str]] = Field( None, description="The list of BCC recipients.", ) [docs]class O365SendMessage(O365BaseTool): """Tool for sending an email in Office 365.""" name: str = "send_email" description: str = ( "Use this tool to send an email with the provided message fields." ) args_schema: Type[SendMessageSchema] = SendMessageSchema def _run( self, body: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: # Get mailbox object mailbox = self.account.mailbox() message = mailbox.new_message() # Assign message values message.body = body message.subject = subject message.to.add(to)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html
fc213c5eedb6-1
message.body = body message.subject = subject message.to.add(to) if cc is not None: message.cc.add(cc) if bcc is not None: message.bcc.add(cc) message.send() output = "Message sent: " + str(message) return output
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html
ee3220847fbe-0
Source code for langchain.tools.office365.messages_search """Util that Searches email messages in Office 365. Free, but setup is required. See link below. https://learn.microsoft.com/en-us/graph/auth/ """ from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Extra, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.office365.base import O365BaseTool from langchain.tools.office365.utils import clean_body [docs]class SearchEmailsInput(BaseModel): """Input for SearchEmails Tool.""" """From https://learn.microsoft.com/en-us/graph/search-query-parameter""" folder: str = Field( default=None, description=( " If the user wants to search in only one folder, the name of the folder. " 'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but ' "users can search custom folders as well." ), ) query: str = Field( description=( "The Microsoift Graph v1.0 $search query. Example filters include " "from:sender, from:sender, to:recipient, subject:subject, " "recipients:list_of_recipients, body:excitement, importance:high, " "received>2022-12-01, received<2021-12-01, sent>2022-12-01, " "sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, " "cc:samanthab@contoso.com, bcc:samanthab@contoso.com, body:excitement date "
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
ee3220847fbe-1
"range example: received:2023-06-08..2023-06-09 matching example: " "from:amy OR from:david." ) ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) truncate: bool = Field( default=True, description=( "Whether the email body is truncated to meet token number limits. Set to " "False for searches that will retrieve very few results, otherwise, set to " "True" ), ) [docs]class O365SearchEmails(O365BaseTool): """Class for searching email messages in Office 365 Free, but setup is required """ name: str = "messages_search" args_schema: Type[BaseModel] = SearchEmailsInput description: str = ( "Use this tool to search for email messages." " The input must be a valid Microsoft Graph v1.0 $search query." " The output is a JSON list of the requested resource." ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _run( self, query: str, folder: str = "", max_results: int = 10, truncate: bool = True, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: # Get mailbox object mailbox = self.account.mailbox() # Pull the folder if the user wants to search in a folder if folder != "": mailbox = mailbox.get_folder(folder_name=folder)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
ee3220847fbe-2
if folder != "": mailbox = mailbox.get_folder(folder_name=folder) # Retrieve messages based on query query = mailbox.q().search(query) messages = mailbox.get_messages(limit=max_results, query=query) # Generate output dict output_messages = [] for message in messages: output_message = {} output_message["from"] = message.sender if truncate: output_message["body"] = message.body_preview else: output_message["body"] = clean_body(message.body) output_message["subject"] = message.subject output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z") output_message["to"] = [] for recipient in message.to._recipients: output_message["to"].append(str(recipient)) output_message["cc"] = [] for recipient in message.cc._recipients: output_message["cc"].append(str(recipient)) output_message["bcc"] = [] for recipient in message.bcc._recipients: output_message["bcc"].append(str(recipient)) output_messages.append(output_message) return output_messages
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html
8dd5341eb80c-0
Source code for langchain.tools.office365.create_draft_message from typing import List, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.office365.base import O365BaseTool [docs]class CreateDraftMessageSchema(BaseModel): """Input for SendMessageTool.""" body: str = Field( ..., description="The message body to include in the draft.", ) to: List[str] = Field( ..., description="The list of recipients.", ) subject: str = Field( ..., description="The subject of the message.", ) cc: Optional[List[str]] = Field( None, description="The list of CC recipients.", ) bcc: Optional[List[str]] = Field( None, description="The list of BCC recipients.", ) [docs]class O365CreateDraftMessage(O365BaseTool): """Tool for creating a draft email in Office 365.""" name: str = "create_email_draft" description: str = ( "Use this tool to create a draft email with the provided message fields." ) args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema def _run( self, body: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: # Get mailbox object mailbox = self.account.mailbox() message = mailbox.new_message() # Assign message values
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html
8dd5341eb80c-1
message = mailbox.new_message() # Assign message values message.body = body message.subject = subject message.to.add(to) if cc is not None: message.cc.add(cc) if bcc is not None: message.bcc.add(cc) message.save_draft() output = "Draft created: " + str(message) return output
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html
7254177581f8-0
Source code for langchain.tools.office365.send_event """Util that sends calendar events in Office 365. Free, but setup is required. See link below. https://learn.microsoft.com/en-us/graph/auth/ """ from datetime import datetime as dt from typing import List, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.office365.base import O365BaseTool [docs]class SendEventSchema(BaseModel): """Input for CreateEvent Tool.""" body: str = Field( ..., description="The message body to include in the event.", ) attendees: List[str] = Field( ..., description="The list of attendees for the event.", ) subject: str = Field( ..., description="The subject of the event.", ) start_datetime: str = Field( description=" The start datetime for the event in the following format: " ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time ' " components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC).", ) end_datetime: str = Field( description=" The end datetime for the event in the following format: " ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html
7254177581f8-1
" components, and the time zone offset is specified as ±hh:mm. " ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, ' " 2023, at 10:30 AM in a time zone with a positive offset of 3 " " hours from Coordinated Universal Time (UTC).", ) [docs]class O365SendEvent(O365BaseTool): """Tool for sending calendar events in Office 365.""" name: str = "send_event" description: str = ( "Use this tool to create and send an event with the provided event fields." ) args_schema: Type[SendEventSchema] = SendEventSchema def _run( self, body: str, attendees: List[str], subject: str, start_datetime: str, end_datetime: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: # Get calendar object schedule = self.account.schedule() calendar = schedule.get_default_calendar() event = calendar.new_event() event.body = body event.subject = subject event.start = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z") event.end = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z") for attendee in attendees: event.attendees.add(attendee) # TO-DO: Look into PytzUsageWarning event.save() output = "Event sent: " + str(event) return output
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html
68ac0df7d7bc-0
Source code for langchain.tools.office365.utils """O365 tool utils.""" from __future__ import annotations import logging import os from typing import TYPE_CHECKING if TYPE_CHECKING: from O365 import Account logger = logging.getLogger(__name__) [docs]def clean_body(body: str) -> str: """Clean body of a message or event.""" try: from bs4 import BeautifulSoup try: # Remove HTML soup = BeautifulSoup(str(body), "html.parser") body = soup.get_text() # Remove return characters body = "".join(body.splitlines()) # Remove extra spaces body = " ".join(body.split()) return str(body) except Exception: return str(body) except ImportError: return str(body) [docs]def authenticate() -> Account: """Authenticate using the Microsoft Grah API""" try: from O365 import Account except ImportError as e: raise ImportError( "Cannot import 0365. Please install the package with `pip install O365`." ) from e if "CLIENT_ID" in os.environ and "CLIENT_SECRET" in os.environ: client_id = os.environ["CLIENT_ID"] client_secret = os.environ["CLIENT_SECRET"] credentials = (client_id, client_secret) else: logger.error( "Error: The CLIENT_ID and CLIENT_SECRET environmental variables have not " "been set. Visit the following link on how to acquire these authorization " "tokens: https://learn.microsoft.com/en-us/graph/auth/" ) return None account = Account(credentials) if account.is_authenticated is False: if not account.authenticate( scopes=[
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html
68ac0df7d7bc-1
if account.is_authenticated is False: if not account.authenticate( scopes=[ "https://graph.microsoft.com/Mail.ReadWrite", "https://graph.microsoft.com/Mail.Send", "https://graph.microsoft.com/Calendars.ReadWrite", "https://graph.microsoft.com/MailboxSettings.ReadWrite", ] ): print("Error: Could not authenticate") return None else: return account else: return account
https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html
709f50f744ef-0
Source code for langchain.tools.bing_search.tool """Tool for the Bing search API.""" from typing import Optional from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.base import BaseTool from langchain.utilities.bing_search import BingSearchAPIWrapper [docs]class BingSearchRun(BaseTool): """Tool that queries the Bing search API.""" name = "bing_search" description = ( "A wrapper around Bing Search. " "Useful for when you need to answer questions about current events. " "Input should be a search query." ) api_wrapper: BingSearchAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return self.api_wrapper.run(query) [docs]class BingSearchResults(BaseTool): """Tool that queries the Bing Search API and gets back json.""" name = "Bing Search Results JSON" description = ( "A wrapper around Bing Search. " "Useful for when you need to answer questions about current events. " "Input should be a search query. Output is a JSON array of the query results" ) num_results: int = 4 api_wrapper: BingSearchAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.results(query, self.num_results))
https://api.python.langchain.com/en/latest/_modules/langchain/tools/bing_search/tool.html
c56883ef635e-0
Source code for langchain.tools.nuclia.tool """Tool for the Nuclia Understanding API. Installation: ```bash pip install --upgrade protobuf pip install nucliadb-protos ``` """ import asyncio import base64 import logging import mimetypes import os from typing import Any, Dict, Optional, Type, Union import requests from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool logger = logging.getLogger(__name__) [docs]class NUASchema(BaseModel): action: str = Field( ..., description="Action to perform. Either `push` or `pull`.", ) id: str = Field( ..., description="ID of the file to push or pull.", ) path: Optional[str] = Field( ..., description="Path to the file to push (needed only for `push` action).", ) text: Optional[str] = Field( ..., description="Text content to process (needed only for `push` action).", ) [docs]class NucliaUnderstandingAPI(BaseTool): """Tool to process files with the Nuclia Understanding API.""" name = "nuclia_understanding_api" description = ( "A wrapper around Nuclia Understanding API endpoints. " "Useful for when you need to extract text from any kind of files. " ) args_schema: Type[BaseModel] = NUASchema _results: Dict[str, Any] = {} _config: Dict[str, Any] = {}
https://api.python.langchain.com/en/latest/_modules/langchain/tools/nuclia/tool.html
c56883ef635e-1
_config: Dict[str, Any] = {} def __init__(self, enable_ml: bool = False) -> None: zone = os.environ.get("NUCLIA_ZONE", "europe-1") self._config["BACKEND"] = f"https://{zone}.nuclia.cloud/api/v1" key = os.environ.get("NUCLIA_NUA_KEY") if not key: raise ValueError("NUCLIA_NUA_KEY environment variable not set") else: self._config["NUA_KEY"] = key self._config["enable_ml"] = enable_ml super().__init__() def _run( self, action: str, id: str, path: Optional[str], text: Optional[str], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if action == "push": self._check_params(path, text) if path: return self._pushFile(id, path) if text: return self._pushText(id, text) elif action == "pull": return self._pull(id) return "" async def _arun( self, action: str, id: str, path: Optional[str] = None, text: Optional[str] = None, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" self._check_params(path, text) if path: self._pushFile(id, path) if text: self._pushText(id, text) data = None
https://api.python.langchain.com/en/latest/_modules/langchain/tools/nuclia/tool.html
c56883ef635e-2
if text: self._pushText(id, text) data = None while True: data = self._pull(id) if data: break await asyncio.sleep(15) return data def _pushText(self, id: str, text: str) -> str: field = { "textfield": {"text": {"body": text, "format": 0}}, "processing_options": {"ml_text": self._config["enable_ml"]}, } return self._pushField(id, field) def _pushFile(self, id: str, content_path: str) -> str: with open(content_path, "rb") as source_file: response = requests.post( self._config["BACKEND"] + "/processing/upload", headers={ "content-type": mimetypes.guess_type(content_path)[0] or "application/octet-stream", "x-stf-nuakey": "Bearer " + self._config["NUA_KEY"], }, data=source_file.read(), ) if response.status_code != 200: logger.info( f"Error uploading {content_path}: " f"{response.status_code} {response.text}" ) return "" else: field = { "filefield": {"file": f"{response.text}"}, "processing_options": {"ml_text": self._config["enable_ml"]}, } return self._pushField(id, field) def _pushField(self, id: str, field: Any) -> str: logger.info(f"Pushing {id} in queue") response = requests.post(
https://api.python.langchain.com/en/latest/_modules/langchain/tools/nuclia/tool.html
c56883ef635e-3
response = requests.post( self._config["BACKEND"] + "/processing/push", headers={ "content-type": "application/json", "x-stf-nuakey": "Bearer " + self._config["NUA_KEY"], }, json=field, ) if response.status_code != 200: logger.info( f"Error pushing field {id}:" f"{response.status_code} {response.text}" ) raise ValueError("Error pushing field") else: uuid = response.json()["uuid"] logger.info(f"Field {id} pushed in queue, uuid: {uuid}") self._results[id] = {"uuid": uuid, "status": "pending"} return uuid def _pull(self, id: str) -> str: self._pull_queue() result = self._results.get(id, None) if not result: logger.info(f"{id} not in queue") return "" elif result["status"] == "pending": logger.info(f'Waiting for {result["uuid"]} to be processed') return "" else: return result["data"] def _pull_queue(self) -> None: try: from nucliadb_protos.writer_pb2 import BrokerMessage except ImportError as e: raise ImportError( "nucliadb-protos is not installed. " "Run `pip install nucliadb-protos` to install." ) from e try: from google.protobuf.json_format import MessageToJson except ImportError as e: raise ImportError( "Unable to import google.protobuf, please install with " "`pip install protobuf`."
https://api.python.langchain.com/en/latest/_modules/langchain/tools/nuclia/tool.html
c56883ef635e-4
"`pip install protobuf`." ) from e res = requests.get( self._config["BACKEND"] + "/processing/pull", headers={ "x-stf-nuakey": "Bearer " + self._config["NUA_KEY"], }, ).json() if res["status"] == "empty": logger.info("Queue empty") elif res["status"] == "ok": payload = res["payload"] pb = BrokerMessage() pb.ParseFromString(base64.b64decode(payload)) uuid = pb.uuid logger.info(f"Pulled {uuid} from queue") matching_id = self._find_matching_id(uuid) if not matching_id: logger.info(f"No matching id for {uuid}") else: self._results[matching_id]["status"] = "done" data = MessageToJson( pb, preserving_proto_field_name=True, including_default_value_fields=True, ) self._results[matching_id]["data"] = data def _find_matching_id(self, uuid: str) -> Union[str, None]: for id, result in self._results.items(): if result["uuid"] == uuid: return id return None def _check_params(self, path: Optional[str], text: Optional[str]) -> None: if not path and not text: raise ValueError("File path or text is required") if path and text: raise ValueError("Cannot process both file and text on a single run")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/nuclia/tool.html
899454e1e4af-0
Source code for langchain.tools.google_places.tool """Tool for the Google search API.""" from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.base import BaseTool from langchain.utilities.google_places_api import GooglePlacesAPIWrapper [docs]class GooglePlacesSchema(BaseModel): """Input for GooglePlacesTool.""" query: str = Field(..., description="Query for google maps") [docs]class GooglePlacesTool(BaseTool): """Tool that queries the Google places API.""" name = "google_places" description = ( "A wrapper around Google Places. " "Useful for when you need to validate or " "discover addressed from ambiguous text. " "Input should be a search query." ) api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper) args_schema: Type[BaseModel] = GooglePlacesSchema def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return self.api_wrapper.run(query)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/google_places/tool.html
8b0529e1af76-0
Source code for langchain.tools.golden_query.tool """Tool for the Golden API.""" from typing import Optional from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.base import BaseTool from langchain.utilities.golden_query import GoldenQueryAPIWrapper [docs]class GoldenQueryRun(BaseTool): """Tool that adds the capability to query using the Golden API and get back JSON.""" name = "Golden Query" description = ( "A wrapper around Golden Query API." " Useful for getting entities that match" " a natural language query from Golden's Knowledge Base." "\nExample queries:" "\n- companies in nanotech" "\n- list of cloud providers starting in 2019" "\nInput should be the natural language query." "\nOutput is a paginated list of results or an error object" " in JSON format." ) api_wrapper: GoldenQueryAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Golden tool.""" return self.api_wrapper.run(query)
https://api.python.langchain.com/en/latest/_modules/langchain/tools/golden_query/tool.html
5319e164b73f-0
Source code for langchain.tools.human.tool """Tool for asking human input.""" from typing import Callable, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.base import BaseTool def _print_func(text: str) -> None: print("\n") print(text) [docs]class HumanInputRun(BaseTool): """Tool that asks user for input.""" name = "human" description = ( "You can ask a human for guidance when you think you " "got stuck or you are not sure what to do next. " "The input should be a question for the human." ) prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func) input_func: Callable = Field(default_factory=lambda: input) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Human input tool.""" self.prompt_func(query) return self.input_func()
https://api.python.langchain.com/en/latest/_modules/langchain/tools/human/tool.html
b820cc1e143f-0
# Source: langchain/tools/steamship_image_generation/tool.py
"""This tool allows agents to generate images using Steamship.

Steamship offers access to different third party image generation APIs
using a single API key.

Today the following models are supported:
- Dall-E
- Stable Diffusion

To use this tool, you must first set as environment variables:
    STEAMSHIP_API_KEY
"""
from __future__ import annotations

from enum import Enum
from typing import TYPE_CHECKING, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools import BaseTool
from langchain.tools.steamship_image_generation.utils import make_image_public
from langchain.utils import get_from_dict_or_env

if TYPE_CHECKING:
    from steamship import Steamship


class ModelName(str, Enum):
    """Supported Image Models for generation."""

    DALL_E = "dall-e"
    STABLE_DIFFUSION = "stable-diffusion"


# Output resolutions each backend accepts; enforced by
# SteamshipImageGenerationTool.validate_size below.
SUPPORTED_IMAGE_SIZES = {
    ModelName.DALL_E: ("256x256", "512x512", "1024x1024"),
    ModelName.STABLE_DIFFUSION: ("512x512", "768x768"),
}


class SteamshipImageGenerationTool(BaseTool):
    """Tool used to generate images from a text-prompt."""

    # Which image-generation backend (plugin handle) to call.
    model_name: ModelName
    # Requested output resolution; must appear in SUPPORTED_IMAGE_SIZES[model_name].
    size: Optional[str] = "512x512"
    # Authenticated Steamship client; injected by validate_environment.
    steamship: Steamship
    # When True, _run returns a public URL instead of the raw block UUID.
    return_urls: Optional[bool] = False

    name = "GenerateImage"
    description = (
        "Useful for when you need to generate an image."
        "Input: A detailed text-2-image prompt describing an image"
        "Output: the UUID of a generated image"
    )

    @root_validator(pre=True)
    def validate_size(cls, values: Dict) -> Dict:
        # Reject sizes the selected model cannot produce. Runs on the raw
        # input dict (pre=True), so only explicitly passed sizes are checked;
        # the "512x512" default is valid for both supported models.
        if "size" in values:
            size = values["size"]
            model_name = values["model_name"]
            if size not in SUPPORTED_IMAGE_SIZES[model_name]:
                raise RuntimeError(f"size {size} is not supported by {model_name}")

        return values

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        # Key may come from the constructor kwarg or the STEAMSHIP_API_KEY env var.
        steamship_api_key = get_from_dict_or_env(
            values, "steamship_api_key", "STEAMSHIP_API_KEY"
        )
        try:
            from steamship import Steamship
        except ImportError:
            raise ImportError(
                "steamship is not installed. "
                "Please install it with `pip install steamship`"
            )

        steamship = Steamship(
            api_key=steamship_api_key,
        )
        values["steamship"] = steamship
        # Drop the raw key so it is not retained as a model field.
        if "steamship_api_key" in values:
            del values["steamship_api_key"]

        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Generates one image for ``query`` and returns either its public URL
        (when ``return_urls`` is set) or the generated block's UUID.
        Raises RuntimeError when the plugin produces no output blocks.
        """
        image_generator = self.steamship.use_plugin(
            plugin_handle=self.model_name.value, config={"n": 1, "size": self.size}
        )

        task = image_generator.generate(text=query, append_output_to_file=True)
        # Generation runs asynchronously on Steamship; block until complete.
        task.wait()
        blocks = task.output.blocks
        if len(blocks) > 0:
            if self.return_urls:
                return make_image_public(self.steamship, blocks[0])
            else:
                return blocks[0].id

        raise RuntimeError(f"[{self.name}] Tool unable to generate image!")
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html
2f61e129b892-0
# Source: langchain/tools/steamship_image_generation/utils.py
"""Steamship Utils."""
from __future__ import annotations

import uuid
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from steamship import Block, Steamship


def make_image_public(client: Steamship, block: Block) -> str:
    """Upload a block to a signed URL and return the public URL."""
    try:
        from steamship.data.workspace import SignedUrl
        from steamship.utils.signed_urls import upload_to_signed_url
    except ImportError:
        raise ImportError(
            "The make_image_public function requires the steamship"
            " package to be installed. Please install steamship"
            " with `pip install --upgrade steamship`"
        )

    # Random object name in the workspace's plugin-data bucket.
    object_path = str(uuid.uuid4())

    def _signed_url_for(operation: "SignedUrl.Operation") -> str:
        # Ask the workspace for a presigned URL on the same object path.
        request = SignedUrl.Request(
            bucket=SignedUrl.Bucket.PLUGIN_DATA,
            filepath=object_path,
            operation=operation,
        )
        return client.get_workspace().create_signed_url(request).signed_url

    write_url = _signed_url_for(SignedUrl.Operation.WRITE)
    read_url = _signed_url_for(SignedUrl.Operation.READ)

    # Push the block's raw bytes to the write URL; readers use the read URL.
    upload_to_signed_url(write_url, block.raw())
    return read_url
https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/utils.html
a85f1d5d9cef-0
# Source: langchain/graphs/nebula_graph.py
import logging
from string import Template
from typing import Any, Dict

# For a given edge type: grab one concrete edge, resolve the tags of its
# source/destination vertices, and render the relationship pattern as
# "(:SrcTag)-[:edge_type]->(:DstTag)".
rel_query = Template(
    """
MATCH ()-[e:`$edge_type`]->()
  WITH e limit 1
MATCH (m)-[:`$edge_type`]->(n) WHERE id(m) == src(e) AND id(n) == dst(e)
RETURN "(:" + tags(m)[0] + ")-[:$edge_type]->(:" + tags(n)[0] + ")" AS rels
"""
)

# Maximum retries for transient query/connection failures in execute().
RETRY_TIMES = 3


class NebulaGraph:
    """NebulaGraph wrapper for graph operations.

    Maintains a session pool bound to a single graph space and exposes a
    text summary of the space's schema (tags, edge types, relationship
    patterns) via ``get_schema``.
    """

    def __init__(
        self,
        space: str,
        username: str = "root",
        password: str = "nebula",
        address: str = "127.0.0.1",
        port: int = 9669,
        session_pool_size: int = 30,
    ) -> None:
        """Create a new NebulaGraph wrapper instance.

        Raises ValueError when required packages are missing, the session
        pool cannot be created, or the initial schema refresh fails.
        """
        try:
            import nebula3  # noqa: F401
            import pandas  # noqa: F401
        except ImportError:
            raise ValueError(
                "Please install NebulaGraph Python client and pandas first: "
                "`pip install nebula3-python pandas`"
            )

        self.username = username
        self.password = password
        self.address = address
        self.port = port
        self.space = space
        self.session_pool_size = session_pool_size

        self.session_pool = self._get_session_pool()
        self.schema = ""
        # Set schema
        try:
            self.refresh_schema()
        except Exception as e:
            raise ValueError(f"Could not refresh schema. Error: {e}")

    def _get_session_pool(self) -> Any:
        # Build a fresh nebula3 SessionPool from the stored credentials,
        # converting client-specific failures into ValueError.
        assert all(
            [self.username, self.password, self.address, self.port, self.space]
        ), (
            "Please provide all of the following parameters: "
            "username, password, address, port, space"
        )

        from nebula3.Config import SessionPoolConfig
        from nebula3.Exception import AuthFailedException, InValidHostname
        from nebula3.gclient.net.SessionPool import SessionPool

        config = SessionPoolConfig()
        config.max_size = self.session_pool_size

        try:
            session_pool = SessionPool(
                self.username,
                self.password,
                self.space,
                [(self.address, self.port)],
            )
        except InValidHostname:
            raise ValueError(
                "Could not connect to NebulaGraph database. "
                "Please ensure that the address and port are correct"
            )

        try:
            session_pool.init(config)
        except AuthFailedException:
            raise ValueError(
                "Could not connect to NebulaGraph database. "
                "Please ensure that the username and password are correct"
            )
        except RuntimeError as e:
            raise ValueError(f"Error initializing session pool. Error: {e}")

        return session_pool

    def __del__(self) -> None:
        # Best-effort cleanup; never let destructor errors propagate.
        try:
            self.session_pool.close()
        except Exception as e:
            logging.warning(f"Could not close session pool. Error: {e}")

    @property
    def get_schema(self) -> str:
        """Returns the schema of the NebulaGraph database"""
        return self.schema

    def execute(self, query: str, params: dict = {}, retry: int = 0) -> Any:
        """Query NebulaGraph database.

        Retries up to RETRY_TIMES on RuntimeError and, for transport-level
        failures, recreates the session pool before retrying.
        """
        from nebula3.Exception import IOErrorException, NoValidSessionException
        from nebula3.fbthrift.transport.TTransport import TTransportException

        try:
            result = self.session_pool.execute_parameter(query, params)
            if not result.is_succeeded():
                # Failed statements are logged but the result set is still
                # returned to the caller.
                logging.warning(
                    f"Error executing query to NebulaGraph. "
                    f"Error: {result.error_msg()}\n"
                    f"Query: {query} \n"
                )
            return result
        except NoValidSessionException:
            logging.warning(
                f"No valid session found in session pool. "
                f"Please consider increasing the session pool size. "
                f"Current size: {self.session_pool_size}"
            )
            raise ValueError(
                f"No valid session found in session pool. "
                f"Please consider increasing the session pool size. "
                f"Current size: {self.session_pool_size}"
            )
        except RuntimeError as e:
            if retry < RETRY_TIMES:
                retry += 1
                logging.warning(
                    f"Error executing query to NebulaGraph. "
                    f"Retrying ({retry}/{RETRY_TIMES})...\n"
                    f"query: {query} \n"
                    f"Error: {e}"
                )
                return self.execute(query, params, retry)
            else:
                raise ValueError(f"Error executing query to NebulaGraph. Error: {e}")
        except (TTransportException, IOErrorException):
            # connection issue, try to recreate session pool
            if retry < RETRY_TIMES:
                retry += 1
                logging.warning(
                    f"Connection issue with NebulaGraph. "
                    f"Retrying ({retry}/{RETRY_TIMES})...\n to recreate session pool"
                )
                self.session_pool = self._get_session_pool()
                return self.execute(query, params, retry)
            # NOTE(review): when transport retries are exhausted, control falls
            # through and the method implicitly returns None — confirm whether
            # raising was intended here, as in the RuntimeError branch.

    def refresh_schema(self) -> None:
        """
        Refreshes the NebulaGraph schema information.
        """
        tags_schema, edge_types_schema, relationships = [], [], []
        # Collect properties for every tag (node label).
        for tag in self.execute("SHOW TAGS").column_values("Name"):
            tag_name = tag.cast()
            tag_schema = {"tag": tag_name, "properties": []}
            r = self.execute(f"DESCRIBE TAG `{tag_name}`")
            props, types = r.column_values("Field"), r.column_values("Type")
            for i in range(r.row_size()):
                tag_schema["properties"].append((props[i].cast(), types[i].cast()))
            tags_schema.append(tag_schema)
        # Collect properties for every edge type, plus a rendered
        # "(:Src)-[:edge]->(:Dst)" pattern when an instance edge exists.
        for edge_type in self.execute("SHOW EDGES").column_values("Name"):
            edge_type_name = edge_type.cast()
            edge_schema = {"edge": edge_type_name, "properties": []}
            r = self.execute(f"DESCRIBE EDGE `{edge_type_name}`")
            props, types = r.column_values("Field"), r.column_values("Type")
            for i in range(r.row_size()):
                edge_schema["properties"].append((props[i].cast(), types[i].cast()))
            edge_types_schema.append(edge_schema)

            # build relationships types
            r = self.execute(
                rel_query.substitute(edge_type=edge_type_name)
            ).column_values("rels")
            if len(r) > 0:
                relationships.append(r[0].cast())

        self.schema = (
            f"Node properties: {tags_schema}\n"
            f"Edge properties: {edge_types_schema}\n"
            f"Relationships: {relationships}\n"
        )

    def query(self, query: str, retry: int = 0) -> Dict[str, Any]:
        """Run a query and return it as {column name: list of Python values}."""
        result = self.execute(query, retry=retry)
        columns = result.keys()
        d: Dict[str, list] = {}
        for col_num in range(result.col_size()):
            col_name = columns[col_num]
            col_list = result.column_values(col_name)
            # .cast() converts each nebula3 value wrapper to a Python value.
            d[col_name] = [x.cast() for x in col_list]
        return d
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/nebula_graph.html
811334fadcec-0
# Source: langchain/graphs/neptune_graph.py
import json
from typing import Any, Dict, List, Tuple, Union

import requests


class NeptuneQueryException(Exception):
    """A class to handle queries that fail to execute"""

    def __init__(self, exception: Union[str, Dict]):
        # Accept either a plain message string or a {"message", "details"}
        # dict; missing keys default to "unknown".
        if isinstance(exception, dict):
            self.message = exception["message"] if "message" in exception else "unknown"
            self.details = exception["details"] if "details" in exception else "unknown"
        else:
            self.message = exception
            self.details = "unknown"

    def get_message(self) -> str:
        return self.message

    def get_details(self) -> Any:
        return self.details


class NeptuneGraph:
    """Neptune wrapper for graph operations.

    This version does not support Sigv4 signing of requests.

    Example:
        .. code-block:: python

        graph = NeptuneGraph(
            host='<my-cluster>',
            port=8182
        )
    """

    def __init__(self, host: str, port: int = 8182, use_https: bool = True) -> None:
        """Create a new Neptune graph wrapper instance.

        Builds the openCypher and statistics-summary endpoint URLs and
        eagerly loads the schema; raises ValueError when the schema cannot
        be fetched.
        """
        if use_https:
            self.summary_url = (
                f"https://{host}:{port}/pg/statistics/summary?mode=detailed"
            )
            self.query_url = f"https://{host}:{port}/openCypher"
        else:
            self.summary_url = (
                f"http://{host}:{port}/pg/statistics/summary?mode=detailed"
            )
            self.query_url = f"http://{host}:{port}/openCypher"

        # Set schema
        try:
            self._refresh_schema()
        except NeptuneQueryException:
            raise ValueError("Could not get schema for Neptune database")

    @property
    def get_schema(self) -> str:
        """Returns the schema of the Neptune database"""
        return self.schema

    def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
        """Query Neptune database."""
        # NOTE(review): ``params`` is accepted but never forwarded to the
        # endpoint — confirm whether parameterized queries were intended.
        response = requests.post(url=self.query_url, data={"query": query})

        if response.ok:
            results = json.loads(response.content.decode())
            return results
        else:
            raise NeptuneQueryException(
                {
                    "message": "The generated query failed to execute",
                    "details": response.content.decode(),
                }
            )

    def _get_summary(self) -> Dict:
        # The detailed statistics summary requires a sufficiently recent
        # Neptune engine (see error message below).
        response = requests.get(url=self.summary_url)
        if not response.ok:
            raise NeptuneQueryException(
                {
                    "message": (
                        "Summary API is not available for this instance of Neptune,"
                        "ensure the engine version is >=1.2.1.0"
                    ),
                    "details": response.content.decode(),
                }
            )
        try:
            summary = response.json()["payload"]["graphSummary"]
        except Exception:
            raise NeptuneQueryException(
                {
                    "message": "Summary API did not return a valid response.",
                    "details": response.content.decode(),
                }
            )
        else:
            return summary

    def _get_labels(self) -> Tuple[List[str], List[str]]:
        """Get node and edge labels from the Neptune statistics summary"""
        summary = self._get_summary()
        n_labels = summary["nodeLabels"]
        e_labels = summary["edgeLabels"]
        return n_labels, e_labels

    def _get_triples(self, e_labels: List[str]) -> List[str]:
        # For each edge label, sample the graph and render distinct
        # "(:from)-[:edge]->(:to)" patterns.
        triple_query = """
        MATCH (a)-[e:{e_label}]->(b)
        WITH a,e,b LIMIT 3000
        RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to
        LIMIT 10
        """

        triple_template = "(:{a})-[:{e}]->(:{b})"
        triple_schema = []
        for label in e_labels:
            q = triple_query.format(e_label=label)
            data = self.query(q)
            for d in data["results"]:
                triple = triple_template.format(
                    a=d["from"][0], e=d["edge"], b=d["to"][0]
                )
                triple_schema.append(triple)

        return triple_schema

    def _get_node_properties(self, n_labels: List[str], types: Dict) -> List:
        # Sample up to 100 nodes per label and record each property name with
        # its type, mapped through ``types`` from the Python type name.
        node_properties_query = """
        MATCH (a:{n_label})
        RETURN properties(a) AS props
        LIMIT 100
        """
        node_properties = []
        for label in n_labels:
            q = node_properties_query.format(n_label=label)
            data = {"label": label, "properties": self.query(q)["results"]}
            s = set({})
            for p in data["properties"]:
                for k, v in p["props"].items():
                    s.add((k, types[type(v).__name__]))

            np = {
                "properties": [{"property": k, "type": v} for k, v in s],
                "labels": label,
            }
            node_properties.append(np)

        return node_properties

    def _get_edge_properties(self, e_labels: List[str], types: Dict[str, Any]) -> List:
        # Same sampling strategy as _get_node_properties, but for edges.
        edge_properties_query = """
        MATCH ()-[e:{e_label}]->()
        RETURN properties(e) AS props
        LIMIT 100
        """
        edge_properties = []
        for label in e_labels:
            q = edge_properties_query.format(e_label=label)
            data = {"label": label, "properties": self.query(q)["results"]}
            s = set({})
            for p in data["properties"]:
                for k, v in p["props"].items():
                    s.add((k, types[type(v).__name__]))

            ep = {
                "type": label,
                "properties": [{"property": k, "type": v} for k, v in s],
            }
            edge_properties.append(ep)

        return edge_properties

    def _refresh_schema(self) -> None:
        """
        Refreshes the Neptune graph schema information.
        """
        types = {
            "str": "STRING",
            "float": "DOUBLE",
            "int": "INTEGER",
            "list": "LIST",
            "dict": "MAP",
        }
        n_labels, e_labels = self._get_labels()
        triple_schema = self._get_triples(e_labels)
        node_properties = self._get_node_properties(n_labels, types)
        edge_properties = self._get_edge_properties(e_labels, types)

        self.schema = f"""
        Node properties are the following:
        {node_properties}
        Relationship properties are the following:
        {edge_properties}
        The relationships are the following:
        {triple_schema}
        """
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/neptune_graph.html
ca3f570f726c-0
# Source: langchain/graphs/rdf_graph.py
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    List,
    Optional,
)

if TYPE_CHECKING:
    import rdflib

# SPARQL PREFIX declarations shared by the schema-discovery queries below.
prefixes = {
    "owl": """PREFIX owl: <http://www.w3.org/2002/07/owl#>\n""",
    "rdf": """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n""",
    "rdfs": """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n""",
    "xsd": """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n""",
}

# Classes in plain-RDF mode: any resource used as a type.
cls_query_rdf = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a ?cls . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

# Classes in RDFS mode: include superclasses via rdfs:subClassOf*.
cls_query_rdfs = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a/rdfs:subClassOf* ?cls . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

# Classes in OWL mode: as RDFS, restricted to IRI-identified classes.
cls_query_owl = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a/rdfs:subClassOf* ?cls . \n"""
    """    FILTER (isIRI(?cls)) . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

# Relations in plain-RDF mode: any predicate in use.
# NOTE(review): the OPTIONAL clause binds ?cls, which is unbound in this
# query (likely intended to be ?rel) — the comment lookup probably never
# matches. Same pattern appears in the rdfs/owl relation queries below;
# confirm against upstream before changing the query strings.
rel_query_rdf = prefixes["rdfs"] + (
    """SELECT DISTINCT ?rel ?com\n"""
    """WHERE { \n"""
    """    ?subj ?rel ?obj . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

# Relations in RDFS mode: declared rdf:Property instances (and subproperties).
rel_query_rdfs = (
    prefixes["rdf"]
    + prefixes["rdfs"]
    + (
        """SELECT DISTINCT ?rel ?com\n"""
        """WHERE { \n"""
        """    ?rel a/rdfs:subPropertyOf* rdf:Property . \n"""
        """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
        """}"""
    )
)

# Object properties in OWL mode.
op_query_owl = (
    prefixes["rdfs"]
    + prefixes["owl"]
    + (
        """SELECT DISTINCT ?op ?com\n"""
        """WHERE { \n"""
        """    ?op a/rdfs:subPropertyOf* owl:ObjectProperty . \n"""
        """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
        """}"""
    )
)

# Datatype properties in OWL mode.
dp_query_owl = (
    prefixes["rdfs"]
    + prefixes["owl"]
    + (
        """SELECT DISTINCT ?dp ?com\n"""
        """WHERE { \n"""
        """    ?dp a/rdfs:subPropertyOf* owl:DatatypeProperty . \n"""
        """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
        """}"""
    )
)


class RdfGraph:
    """
    RDFlib wrapper for graph operations.
    Modes:
    * local: Local file - can be queried and changed
    * online: Online file - can only be queried, changes can be stored locally
    * store: Triple store - can be queried and changed if update_endpoint available
    Together with a source file, the serialization should be specified.
    """

    def __init__(
        self,
        source_file: Optional[str] = None,
        serialization: Optional[str] = "ttl",
        query_endpoint: Optional[str] = None,
        update_endpoint: Optional[str] = None,
        standard: Optional[str] = "rdf",
        local_copy: Optional[str] = None,
    ) -> None:
        """
        Set up the RDFlib graph

        :param source_file: either a path for a local file or a URL
        :param serialization: serialization of the input
        :param query_endpoint: SPARQL endpoint for queries, read access
        :param update_endpoint: SPARQL endpoint for UPDATE queries, write access
        :param standard: RDF, RDFS, or OWL
        :param local_copy: new local copy for storing changes
        """
        self.source_file = source_file
        self.serialization = serialization
        self.query_endpoint = query_endpoint
        self.update_endpoint = update_endpoint
        self.standard = standard
        self.local_copy = local_copy

        try:
            import rdflib
            from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default
            from rdflib.plugins.stores import sparqlstore
        except ImportError:
            raise ValueError(
                "Could not import rdflib python package. "
                "Please install it with `pip install rdflib`."
            )

        if self.standard not in (supported_standards := ("rdf", "rdfs", "owl")):
            raise ValueError(
                f"Invalid standard. Supported standards are: {supported_standards}."
            )

        # Exactly one source must be given: a file (local/online) OR endpoints,
        # never both and never neither.
        if (
            not source_file
            and not query_endpoint
            or source_file
            and (query_endpoint or update_endpoint)
        ):
            raise ValueError(
                "Could not unambiguously initialize the graph wrapper. "
                "Specify either a file (local or online) via the source_file "
                "or a triple store via the endpoints."
            )

        if source_file:
            if source_file.startswith("http"):
                self.mode = "online"
            else:
                self.mode = "local"
                # A local file is its own writable copy unless one is given.
                if self.local_copy is None:
                    self.local_copy = self.source_file
            self.graph = rdflib.Graph()
            self.graph.parse(source_file, format=self.serialization)

        if query_endpoint:
            self.mode = "store"
            # Without an update endpoint the store is read-only.
            if not update_endpoint:
                self._store = sparqlstore.SPARQLStore()
                self._store.open(query_endpoint)
            else:
                self._store = sparqlstore.SPARQLUpdateStore()
                self._store.open((query_endpoint, update_endpoint))
            self.graph = rdflib.Graph(self._store, identifier=default)

        # Verify that the graph was loaded
        if not len(self.graph):
            raise AssertionError("The graph is empty.")

        # Set schema
        self.schema = ""
        self.load_schema()

    @property
    def get_schema(self) -> str:
        """
        Returns the schema of the graph database.
        """
        return self.schema

    def query(
        self,
        query: str,
    ) -> List[rdflib.query.ResultRow]:
        """
        Query the graph.
        """
        from rdflib.exceptions import ParserError
        from rdflib.query import ResultRow

        try:
            res = self.graph.query(query)
        except ParserError as e:
            raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
        return [r for r in res if isinstance(r, ResultRow)]

    def update(
        self,
        query: str,
    ) -> None:
        """
        Update the graph.
        """
        from rdflib.exceptions import ParserError

        try:
            self.graph.update(query)
        except ParserError as e:
            raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
        if self.local_copy:
            # Serialization format is inferred from the file extension.
            self.graph.serialize(
                destination=self.local_copy, format=self.local_copy.split(".")[-1]
            )
        else:
            raise ValueError("No target file specified for saving the updated file.")

    @staticmethod
    def _get_local_name(iri: str) -> str:
        # Local name = fragment after '#', else last path segment after '/'.
        if "#" in iri:
            local_name = iri.split("#")[-1]
        elif "/" in iri:
            local_name = iri.split("/")[-1]
        else:
            raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")
        return local_name

    def _res_to_str(self, res: rdflib.query.ResultRow, var: str) -> str:
        # Render one result row as "<IRI> (localName, comment)".
        return (
            "<"
            + str(res[var])
            + "> ("
            + self._get_local_name(res[var])
            + ", "
            + str(res["com"])
            + ")"
        )

    def load_schema(self) -> None:
        """
        Load the graph schema information.
        """

        def _rdf_s_schema(
            classes: List[rdflib.query.ResultRow],
            relationships: List[rdflib.query.ResultRow],
        ) -> str:
            return (
                f"In the following, each IRI is followed by the local name and "
                f"optionally its description in parentheses. \n"
                f"The RDF graph supports the following node types:\n"
                f'{", ".join([self._res_to_str(r, "cls") for r in classes])}\n'
                f"The RDF graph supports the following relationships:\n"
                f'{", ".join([self._res_to_str(r, "rel") for r in relationships])}\n'
            )

        # Which discovery queries run depends on the configured standard.
        if self.standard == "rdf":
            clss = self.query(cls_query_rdf)
            rels = self.query(rel_query_rdf)
            self.schema = _rdf_s_schema(clss, rels)
        elif self.standard == "rdfs":
            clss = self.query(cls_query_rdfs)
            rels = self.query(rel_query_rdfs)
            self.schema = _rdf_s_schema(clss, rels)
        elif self.standard == "owl":
            clss = self.query(cls_query_owl)
            ops = self.query(op_query_owl)
            dps = self.query(dp_query_owl)
            self.schema = (
                f"In the following, each IRI is followed by the local name and "
                f"optionally its description in parentheses. \n"
                f"The OWL graph supports the following node types:\n"
                f'{", ".join([self._res_to_str(r, "cls") for r in clss])}\n'
                f"The OWL graph supports the following object properties, "
                f"i.e., relationships between objects:\n"
                f'{", ".join([self._res_to_str(r, "op") for r in ops])}\n'
                f"The OWL graph supports the following data properties, "
                f"i.e., relationships between objects and literals:\n"
                f'{", ".join([self._res_to_str(r, "dp") for r in dps])}\n'
            )
        else:
            raise ValueError(f"Mode '{self.standard}' is currently not supported.")
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/rdf_graph.html
bdcc4038b00a-0
# Source: langchain/graphs/memgraph_graph.py
from langchain.graphs.neo4j_graph import Neo4jGraph

# MAGE llm_util procedure that returns a prompt-ready schema description.
SCHEMA_QUERY = """
CALL llm_util.schema("prompt_ready")
YIELD *
RETURN *
"""


class MemgraphGraph(Neo4jGraph):
    """Memgraph wrapper for graph operations."""

    def __init__(
        self, url: str, username: str, password: str, *, database: str = "memgraph"
    ) -> None:
        """Create a new Memgraph graph wrapper instance.

        Connection handling and querying are inherited from Neo4jGraph
        (Memgraph speaks the Bolt protocol).
        """
        super().__init__(url, username, password, database=database)

    def refresh_schema(self) -> None:
        """Refreshes the Memgraph graph schema information.

        Raises:
            ValueError: if llm_util.schema returns no usable schema, e.g.
                when the MAGE llm_util module is not available.
        """
        result = self.query(SCHEMA_QUERY)
        # The original used `assert`, which is silently stripped under
        # `python -O`; validate explicitly so a missing schema always fails
        # loudly. An empty result set is folded into the same error.
        db_schema = result[0].get("schema") if result else None
        if db_schema is None:
            raise ValueError(
                "Could not refresh the Memgraph schema: llm_util.schema "
                "returned no 'schema' field."
            )
        self.schema = db_schema
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/memgraph_graph.html
0f0405ace6dd-0
# Source: langchain/graphs/arangodb_graph.py
import os
from math import ceil
from typing import Any, Dict, List, Optional


class ArangoGraph:
    """ArangoDB wrapper for graph operations."""

    def __init__(self, db: Any) -> None:
        """Create a new ArangoDB graph wrapper instance."""
        self.set_db(db)
        self.set_schema()

    @property
    def db(self) -> Any:
        return self.__db

    @property
    def schema(self) -> Dict[str, Any]:
        return self.__schema

    def set_db(self, db: Any) -> None:
        # Validate the handle before storing it; a wrong type here would
        # otherwise fail much later during schema generation.
        from arango.database import Database

        if not isinstance(db, Database):
            msg = "**db** parameter must inherit from arango.database.Database"
            raise TypeError(msg)

        self.__db: Database = db
        # A new database invalidates any previously generated schema.
        self.set_schema()

    def set_schema(self, schema: Optional[Dict[str, Any]] = None) -> None:
        """
        Set the schema of the ArangoDB Database.
        Auto-generates Schema if **schema** is None.
        """
        self.__schema = self.generate_schema() if schema is None else schema

    def generate_schema(
        self, sample_ratio: float = 0
    ) -> Dict[str, List[Dict[str, Any]]]:
        """
        Generates the schema of the ArangoDB Database and returns it
        User can specify a **sample_ratio** (0 to 1) to determine the
        ratio of documents/edges used (in relation to the Collection size)
        to render each Collection Schema.
        """
        if not 0 <= sample_ratio <= 1:
            raise ValueError("**sample_ratio** value must be in between 0 to 1")

        # Stores the Edge Relationships between each ArangoDB Document Collection
        graph_schema: List[Dict[str, Any]] = [
            {"graph_name": g["name"], "edge_definitions": g["edge_definitions"]}
            for g in self.db.graphs()
        ]

        # Stores the schema of every ArangoDB Document/Edge collection
        collection_schema: List[Dict[str, Any]] = []
        for collection in self.db.collections():
            # System collections are internal to ArangoDB; skip them.
            if collection["system"]:
                continue

            # Extract collection name, type, and size
            col_name: str = collection["name"]
            col_type: str = collection["type"]
            col_size: int = self.db.collection(col_name).count()

            # Set number of ArangoDB documents/edges to retrieve
            # (`or 1` guarantees at least one sample even at sample_ratio=0).
            limit_amount = ceil(sample_ratio * col_size) or 1

            aql = f"""
                FOR doc in {col_name}
                    LIMIT {limit_amount}
                    RETURN doc
            """

            doc: Dict[str, Any]
            properties: List[Dict[str, str]] = []
            # NOTE(review): if the collection is empty, this loop never runs
            # and ``doc`` stays unbound, so the f-string key below raises
            # NameError — confirm whether empty collections can occur here.
            for doc in self.__db.aql.execute(aql):
                for key, value in doc.items():
                    properties.append({"name": key, "type": type(value).__name__})

            collection_schema.append(
                {
                    "collection_name": col_name,
                    "collection_type": col_type,
                    f"{col_type}_properties": properties,
                    # The last sampled document serves as an example.
                    f"example_{col_type}": doc,
                }
            )

        return {"Graph Schema": graph_schema, "Collection Schema": collection_schema}

    def query(
        self, query: str, top_k: Optional[int] = None, **kwargs: Any
    ) -> List[Dict[str, Any]]:
        """Query the ArangoDB database."""
        import itertools

        cursor = self.__db.aql.execute(query, **kwargs)
        # islice(cursor, None) consumes the whole cursor when top_k is None.
        return [doc for doc in itertools.islice(cursor, top_k)]

    @classmethod
    def from_db_credentials(
        cls,
        url: Optional[str] = None,
        dbname: Optional[str] = None,
        username: Optional[str] = None,
        password: Optional[str] = None,
    ) -> Any:
        """Convenience constructor that builds Arango DB from credentials.

        Args:
            url: Arango DB url. Can be passed in as named arg or set as
                environment var ``ARANGODB_URL``.
                Defaults to "http://localhost:8529".
            dbname: Arango DB name. Can be passed in as named arg or set as
                environment var ``ARANGODB_DBNAME``. Defaults to "_system".
            username: Can be passed in as named arg or set as environment var
                ``ARANGODB_USERNAME``. Defaults to "root".
            password: Can be passed in as named arg or set as environment var
                ``ARANGODB_PASSWORD``. Defaults to "".

        Returns:
            An arango.database.StandardDatabase.
        """
        db = get_arangodb_client(
            url=url, dbname=dbname, username=username, password=password
        )
        return cls(db)


def get_arangodb_client(
    url: Optional[str] = None,
    dbname: Optional[str] = None,
    username: Optional[str] = None,
    password: Optional[str] = None,
) -> Any:
    """Get the Arango DB client from credentials.

    Args:
        url: Arango DB url. Can be passed in as named arg or set as
            environment var ``ARANGODB_URL``.
            Defaults to "http://localhost:8529".
        dbname: Arango DB name. Can be passed in as named arg or set as
            environment var ``ARANGODB_DBNAME``. Defaults to "_system".
        username: Can be passed in as named arg or set as environment var
            ``ARANGODB_USERNAME``. Defaults to "root".
        password: Can be passed in as named arg or set as environment var
            ``ARANGODB_PASSWORD``. Defaults to "".

    Returns:
        An arango.database.StandardDatabase.
    """
    try:
        from arango import ArangoClient
    except ImportError as e:
        raise ImportError(
            "Unable to import arango, please install with `pip install python-arango`."
        ) from e

    # Named arguments take precedence over environment variables.
    _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529")  # type: ignore[assignment] # noqa: E501
    _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system")  # type: ignore[assignment] # noqa: E501
    _username: str = username or os.environ.get("ARANGODB_USERNAME", "root")  # type: ignore[assignment] # noqa: E501
    _password: str = password or os.environ.get("ARANGODB_PASSWORD", "")  # type: ignore[assignment] # noqa: E501

    return ArangoClient(_url).db(_dbname, _username, _password, verify=True)
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/arangodb_graph.html
7df30e563a8a-0
# Source: langchain/graphs/kuzu_graph.py
from typing import Any, Dict, List


class KuzuGraph:
    """Kùzu wrapper for graph operations."""

    def __init__(self, db: Any, database: str = "kuzu") -> None:
        """Open a connection to the given Kùzu database handle and eagerly
        load the schema summary."""
        try:
            import kuzu
        except ImportError:
            raise ImportError(
                "Could not import Kùzu python package."
                "Please install Kùzu with `pip install kuzu`."
            )
        self.db = db
        self.conn = kuzu.Connection(self.db)
        self.database = database
        self.refresh_schema()

    @property
    def get_schema(self) -> str:
        """Returns the schema of the Kùzu database"""
        return self.schema

    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        """Query Kùzu database"""
        # The connection expects parameters as [name, value] pairs, not a dict.
        params_list = []
        for param_name in params:
            params_list.append([param_name, params[param_name]])
        result = self.conn.execute(query, params_list)
        column_names = result.get_column_names()
        return_list = []
        # Convert each result row into a {column: value} dict.
        while result.has_next():
            row = result.get_next()
            return_list.append(dict(zip(column_names, row)))
        return return_list

    def refresh_schema(self) -> None:
        """Refreshes the Kùzu graph schema information"""
        # Node tables: collect (property, type) pairs per table.
        node_properties = []
        node_table_names = self.conn._get_node_table_names()
        for table_name in node_table_names:
            current_table_schema = {"properties": [], "label": table_name}
            properties = self.conn._get_node_property_names(table_name)
            for property_name in properties:
                property_type = properties[property_name]["type"]
                list_type_flag = ""
                # Render fixed-shape dimensions as "[n]" and unknown-shape
                # dimensions as "[]", appended to the base type name.
                if properties[property_name]["dimension"] > 0:
                    if "shape" in properties[property_name]:
                        for s in properties[property_name]["shape"]:
                            list_type_flag += "[%s]" % s
                    else:
                        for i in range(properties[property_name]["dimension"]):
                            list_type_flag += "[]"
                property_type += list_type_flag
                current_table_schema["properties"].append(
                    (property_name, property_type)
                )
            node_properties.append(current_table_schema)

        # Relationship tables rendered as "(:src)-[:name]->(:dst)".
        relationships = []
        rel_tables = self.conn._get_rel_table_names()
        for table in rel_tables:
            relationships.append(
                "(:%s)-[:%s]->(:%s)" % (table["src"], table["name"], table["dst"])
            )

        # Relationship properties come back as a newline-separated text dump.
        rel_properties = []
        for table in rel_tables:
            current_table_schema = {"properties": [], "label": table["name"]}
            properties_text = self.conn._connection.get_rel_property_names(
                table["name"]
            ).split("\n")
            for i, line in enumerate(properties_text):
                # The first 3 lines defines src, dst and name, so we skip them
                if i < 3:
                    continue
                if not line:
                    continue
                property_name, property_type = line.strip().split(" ")
                current_table_schema["properties"].append(
                    (property_name, property_type)
                )
            rel_properties.append(current_table_schema)

        self.schema = (
            f"Node properties: {node_properties}\n"
            f"Relationships properties: {rel_properties}\n"
            f"Relationships: {relationships}\n"
        )
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/kuzu_graph.html
4c8d9235bb9b-0
# Source: langchain/graphs/neo4j_graph.py
from typing import Any, Dict, List

# APOC metadata query: property names/types for every node label.
node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {labels: nodeLabels, properties: properties} AS output

"""

# APOC metadata query: property names/types for every relationship type.
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
"""

# APOC metadata query: "(:A)-[:REL]->(:B)" patterns between node labels.
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
UNWIND other AS other_node
RETURN "(:" + label + ")-[:" + property + "]->(:" + toString(other_node) + ")" AS output
"""


class Neo4jGraph:
    """Neo4j wrapper for graph operations."""

    def __init__(
        self, url: str, username: str, password: str, database: str = "neo4j"
    ) -> None:
        """Create a new Neo4j graph wrapper instance.

        Verifies connectivity and eagerly loads the schema; raises
        ValueError for missing driver, bad credentials/url, or an
        unavailable APOC plugin.
        """
        try:
            import neo4j
        except ImportError:
            raise ValueError(
                "Could not import neo4j python package. "
                "Please install it with `pip install neo4j`."
            )
        self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
        self._database = database
        self.schema = ""
        # Verify connection
        try:
            self._driver.verify_connectivity()
        except neo4j.exceptions.ServiceUnavailable:
            raise ValueError(
                "Could not connect to Neo4j database. "
                "Please ensure that the url is correct"
            )
        except neo4j.exceptions.AuthError:
            raise ValueError(
                "Could not connect to Neo4j database. "
                "Please ensure that the username and password are correct"
            )
        # Set schema
        try:
            self.refresh_schema()
        except neo4j.exceptions.ClientError:
            # apoc.meta.data() is a ClientError when APOC is absent/blocked.
            raise ValueError(
                "Could not use APOC procedures. "
                "Please ensure the APOC plugin is installed in Neo4j and that "
                "'apoc.meta.data()' is allowed in Neo4j configuration "
            )

    @property
    def get_schema(self) -> str:
        """Returns the schema of the Neo4j database"""
        return self.schema

    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        """Query Neo4j database."""
        from neo4j.exceptions import CypherSyntaxError

        # One short-lived session per call; `with` guarantees it is closed.
        with self._driver.session(database=self._database) as session:
            try:
                data = session.run(query, params)
                return [r.data() for r in data]
            except CypherSyntaxError as e:
                raise ValueError(f"Generated Cypher Statement is not valid\n{e}")

    def refresh_schema(self) -> None:
        """
        Refreshes the Neo4j graph schema information.
        """
        node_properties = self.query(node_properties_query)
        relationships_properties = self.query(rel_properties_query)
        relationships = self.query(rel_query)

        self.schema = f"""
        Node properties are the following:
        {[el['output'] for el in node_properties]}
        Relationship properties are the following:
        {[el['output'] for el in relationships_properties]}
        The relationships are the following:
        {[el['output'] for el in relationships]}
        """
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/neo4j_graph.html
00322734d04d-0
Source code for langchain.graphs.hugegraph from typing import Any, Dict, List [docs]class HugeGraph: """HugeGraph wrapper for graph operations""" [docs] def __init__( self, username: str = "default", password: str = "default", address: str = "127.0.0.1", port: int = 8081, graph: str = "hugegraph", ) -> None: """Create a new HugeGraph wrapper instance.""" try: from hugegraph.connection import PyHugeGraph except ImportError: raise ValueError( "Please install HugeGraph Python client first: " "`pip3 install hugegraph-python`" ) self.username = username self.password = password self.address = address self.port = port self.graph = graph self.client = PyHugeGraph( address, port, user=username, pwd=password, graph=graph ) self.schema = "" # Set schema try: self.refresh_schema() except Exception as e: raise ValueError(f"Could not refresh schema. Error: {e}") @property def get_schema(self) -> str: """Returns the schema of the HugeGraph database""" return self.schema [docs] def refresh_schema(self) -> None: """ Refreshes the HugeGraph schema information. """ schema = self.client.schema() vertex_schema = schema.getVertexLabels() edge_schema = schema.getEdgeLabels() relationships = schema.getRelations() self.schema = ( f"Node properties: {vertex_schema}\n"
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/hugegraph.html
00322734d04d-1
self.schema = ( f"Node properties: {vertex_schema}\n" f"Edge properties: {edge_schema}\n" f"Relationships: {relationships}\n" ) [docs] def query(self, query: str) -> List[Dict[str, Any]]: g = self.client.gremlin() res = g.exec(query) return res["data"]
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/hugegraph.html
fe158f4d019a-0
Source code for langchain.graphs.networkx_graph """Networkx wrapper for graph operations.""" from __future__ import annotations from typing import Any, List, NamedTuple, Optional, Tuple KG_TRIPLE_DELIMITER = "<|>" [docs]class KnowledgeTriple(NamedTuple): """A triple in the graph.""" subject: str predicate: str object_: str [docs] @classmethod def from_string(cls, triple_string: str) -> "KnowledgeTriple": """Create a KnowledgeTriple from a string.""" subject, predicate, object_ = triple_string.strip().split(", ") subject = subject[1:] object_ = object_[:-1] return cls(subject, predicate, object_) [docs]def parse_triples(knowledge_str: str) -> List[KnowledgeTriple]: """Parse knowledge triples from the knowledge string.""" knowledge_str = knowledge_str.strip() if not knowledge_str or knowledge_str == "NONE": return [] triple_strs = knowledge_str.split(KG_TRIPLE_DELIMITER) results = [] for triple_str in triple_strs: try: kg_triple = KnowledgeTriple.from_string(triple_str) except ValueError: continue results.append(kg_triple) return results [docs]def get_entities(entity_str: str) -> List[str]: """Extract entities from entity string.""" if entity_str.strip() == "NONE": return [] else: return [w.strip() for w in entity_str.split(",")] [docs]class NetworkxEntityGraph: """Networkx wrapper for entity graph operations.""" [docs] def __init__(self, graph: Optional[Any] = None) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
fe158f4d019a-1
"""Create a new graph.""" try: import networkx as nx except ImportError: raise ImportError( "Could not import networkx python package. " "Please install it with `pip install networkx`." ) if graph is not None: if not isinstance(graph, nx.DiGraph): raise ValueError("Passed in graph is not of correct shape") self._graph = graph else: self._graph = nx.DiGraph() [docs] @classmethod def from_gml(cls, gml_path: str) -> NetworkxEntityGraph: try: import networkx as nx except ImportError: raise ImportError( "Could not import networkx python package. " "Please install it with `pip install networkx`." ) graph = nx.read_gml(gml_path) return cls(graph) [docs] def add_triple(self, knowledge_triple: KnowledgeTriple) -> None: """Add a triple to the graph.""" # Creates nodes if they don't exist # Overwrites existing edges if not self._graph.has_node(knowledge_triple.subject): self._graph.add_node(knowledge_triple.subject) if not self._graph.has_node(knowledge_triple.object_): self._graph.add_node(knowledge_triple.object_) self._graph.add_edge( knowledge_triple.subject, knowledge_triple.object_, relation=knowledge_triple.predicate, ) [docs] def delete_triple(self, knowledge_triple: KnowledgeTriple) -> None: """Delete a triple from the graph.""" if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
fe158f4d019a-2
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_): self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_) [docs] def get_triples(self) -> List[Tuple[str, str, str]]: """Get all triples in the graph.""" return [(u, v, d["relation"]) for u, v, d in self._graph.edges(data=True)] [docs] def get_entity_knowledge(self, entity: str, depth: int = 1) -> List[str]: """Get information about an entity.""" import networkx as nx # TODO: Have more information-specific retrieval methods if not self._graph.has_node(entity): return [] results = [] for src, sink in nx.dfs_edges(self._graph, entity, depth_limit=depth): relation = self._graph[src][sink]["relation"] results.append(f"{src} {relation} {sink}") return results [docs] def write_to_gml(self, path: str) -> None: import networkx as nx nx.write_gml(self._graph, path) [docs] def clear(self) -> None: """Clear the graph.""" self._graph.clear() [docs] def get_topological_sort(self) -> List[str]: """Get a list of entity names in the graph sorted by causal dependence.""" import networkx as nx return list(nx.topological_sort(self._graph)) [docs] def draw_graphviz(self, **kwargs: Any) -> None: """ Provides better drawing Usage in a jupyter notebook: >>> from IPython.display import SVG
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
fe158f4d019a-3
Usage in a jupyter notebook: >>> from IPython.display import SVG >>> self.draw_graphviz(prog="dot", path="web.svg") >>> SVG('web.svg') """ from networkx.drawing.nx_agraph import to_agraph try: import pygraphviz # noqa: F401 except ImportError as e: if e.name == "_graphviz": """ >>> e.msg # pygraphviz throws this error ImportError: libcgraph.so.6: cannot open shared object file """ raise ImportError( "Could not import graphviz debian package. " "Please install it with:" "`sudo apt-get update`" "`sudo apt-get install graphviz graphviz-dev`" ) else: raise ImportError( "Could not import pygraphviz python package. " "Please install it with:" "`pip install pygraphviz`." ) graph = to_agraph(self._graph) # --> pygraphviz.agraph.AGraph # pygraphviz.github.io/documentation/stable/tutorial.html#layout-and-drawing graph.layout(prog=kwargs.get("prog", "dot")) graph.draw(kwargs.get("path", "graph.svg"))
https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html
988618d66ab1-0
Source code for langchain.prompts.base """BasePrompt schema definition.""" from __future__ import annotations import warnings from abc import ABC from typing import Any, Callable, Dict, List, Set from langchain.formatting import formatter from langchain.schema.messages import BaseMessage, HumanMessage from langchain.schema.prompt import PromptValue from langchain.schema.prompt_template import BasePromptTemplate [docs]def jinja2_formatter(template: str, **kwargs: Any) -> str: """Format a template using jinja2.""" try: from jinja2 import Template except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) return Template(template).render(**kwargs) [docs]def validate_jinja2(template: str, input_variables: List[str]) -> None: """ Validate that the input variables are valid for the template. Issues a warning if missing or extra variables are found. Args: template: The template string. input_variables: The input variables. """ input_variables_set = set(input_variables) valid_variables = _get_jinja2_variables_from_template(template) missing_variables = valid_variables - input_variables_set extra_variables = input_variables_set - valid_variables warning_message = "" if missing_variables: warning_message += f"Missing variables: {missing_variables} " if extra_variables: warning_message += f"Extra variables: {extra_variables}" if warning_message: warnings.warn(warning_message.strip()) def _get_jinja2_variables_from_template(template: str) -> Set[str]: try:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/base.html
988618d66ab1-1
try: from jinja2 import Environment, meta except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) return variables DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = { "f-string": formatter.format, "jinja2": jinja2_formatter, } DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = { "f-string": formatter.validate_input_variables, "jinja2": validate_jinja2, } [docs]def check_valid_template( template: str, template_format: str, input_variables: List[str] ) -> None: """Check that template string is valid.""" if template_format not in DEFAULT_FORMATTER_MAPPING: valid_formats = list(DEFAULT_FORMATTER_MAPPING) raise ValueError( f"Invalid template format. Got `{template_format}`;" f" should be one of {valid_formats}" ) try: validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] validator_func(template, input_variables) except KeyError as e: raise ValueError( "Invalid prompt schema; check for mismatched or missing input parameters. " + str(e) ) [docs]class StringPromptValue(PromptValue): """String prompt value.""" text: str """Prompt text.""" [docs] def to_string(self) -> str: """Return prompt as string.""" return self.text
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/base.html
988618d66ab1-2
"""Return prompt as string.""" return self.text [docs] def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return [HumanMessage(content=self.text)] [docs]class StringPromptTemplate(BasePromptTemplate, ABC): """String prompt that exposes the format method, returning a prompt.""" [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" return StringPromptValue(text=self.format(**kwargs))
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/base.html
9e057ead08e3-0
Source code for langchain.prompts.pipeline from typing import Any, Dict, List, Tuple from pydantic import root_validator from langchain.prompts.chat import BaseChatPromptTemplate from langchain.schema import BasePromptTemplate, PromptValue def _get_inputs(inputs: dict, input_variables: List[str]) -> dict: return {k: inputs[k] for k in input_variables} [docs]class PipelinePromptTemplate(BasePromptTemplate): """A prompt template for composing multiple prompt templates together. This can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts: - final_prompt: This is the final prompt that is returned - pipeline_prompts: This is a list of tuples, consisting of a string (`name`) and a Prompt Template. Each PromptTemplate will be formatted and then passed to future prompt templates as a variable with the same name as `name` """ final_prompt: BasePromptTemplate """The final prompt that is returned.""" pipeline_prompts: List[Tuple[str, BasePromptTemplate]] """A list of tuples, consisting of a string (`name`) and a Prompt Template.""" @root_validator(pre=True) def get_input_variables(cls, values: Dict) -> Dict: """Get input variables.""" created_variables = set() all_variables = set() for k, prompt in values["pipeline_prompts"]: created_variables.add(k) all_variables.update(prompt.input_variables) values["input_variables"] = list(all_variables.difference(created_variables)) return values [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: for k, prompt in self.pipeline_prompts:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/pipeline.html
9e057ead08e3-1
for k, prompt in self.pipeline_prompts: _inputs = _get_inputs(kwargs, prompt.input_variables) if isinstance(prompt, BaseChatPromptTemplate): kwargs[k] = prompt.format_messages(**_inputs) else: kwargs[k] = prompt.format(**_inputs) _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) return self.final_prompt.format_prompt(**_inputs) [docs] def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string() @property def _prompt_type(self) -> str: raise ValueError
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/pipeline.html
22397d2ea558-0
Source code for langchain.prompts.loading """Load prompts.""" import json import logging from pathlib import Path from typing import Union import yaml from langchain.output_parsers.regex import RegexParser from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseLLMOutputParser, BasePromptTemplate, StrOutputParser from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/" logger = logging.getLogger(__name__) [docs]def load_prompt_from_config(config: dict) -> BasePromptTemplate: """Load prompt from Config Dict.""" if "_type" not in config: logger.warning("No `_type` key found, defaulting to `prompt`.") config_type = config.pop("_type", "prompt") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} prompt not supported") prompt_loader = type_to_loader_dict[config_type] return prompt_loader(config) def _load_template(var_name: str, config: dict) -> dict: """Load template from the path if applicable.""" # Check if template_path exists in config. if f"{var_name}_path" in config: # If it does, make sure template variable doesn't also exist. if var_name in config: raise ValueError( f"Both `{var_name}_path` and `{var_name}` cannot be provided." ) # Pop the template path from the config. template_path = Path(config.pop(f"{var_name}_path")) # Load the template. if template_path.suffix == ".txt":
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/loading.html
22397d2ea558-1
# Load the template. if template_path.suffix == ".txt": with open(template_path) as f: template = f.read() else: raise ValueError # Set the template variable to the extracted variable. config[var_name] = template return config def _load_examples(config: dict) -> dict: """Load examples if necessary.""" if isinstance(config["examples"], list): pass elif isinstance(config["examples"], str): with open(config["examples"]) as f: if config["examples"].endswith(".json"): examples = json.load(f) elif config["examples"].endswith((".yaml", ".yml")): examples = yaml.safe_load(f) else: raise ValueError( "Invalid file format. Only json or yaml formats are supported." ) config["examples"] = examples else: raise ValueError("Invalid examples format. Only list or string are supported.") return config def _load_output_parser(config: dict) -> dict: """Load output parser.""" if "output_parser" in config and config["output_parser"]: _config = config.pop("output_parser") output_parser_type = _config.pop("_type") if output_parser_type == "regex_parser": output_parser: BaseLLMOutputParser = RegexParser(**_config) elif output_parser_type == "default": output_parser = StrOutputParser(**_config) else: raise ValueError(f"Unsupported output parser {output_parser_type}") config["output_parser"] = output_parser return config def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate: """Load the "few shot" prompt from the config."""
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/loading.html
22397d2ea558-2
"""Load the "few shot" prompt from the config.""" # Load the suffix and prefix templates. config = _load_template("suffix", config) config = _load_template("prefix", config) # Load the example prompt. if "example_prompt_path" in config: if "example_prompt" in config: raise ValueError( "Only one of example_prompt and example_prompt_path should " "be specified." ) config["example_prompt"] = load_prompt(config.pop("example_prompt_path")) else: config["example_prompt"] = load_prompt_from_config(config["example_prompt"]) # Load the examples. config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config) def _load_prompt(config: dict) -> PromptTemplate: """Load the prompt template from config.""" # Load the template from disk if necessary. config = _load_template("template", config) config = _load_output_parser(config) return PromptTemplate(**config) [docs]def load_prompt(path: Union[str, Path]) -> BasePromptTemplate: """Unified method for loading a prompt from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"} ): return hub_result else: return _load_prompt_from_file(path) def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate: """Load prompt from file.""" # Convert file to a Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/loading.html
22397d2ea558-3
file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError(f"Got unsupported file type {file_path.suffix}") # Load the prompt from the config now. return load_prompt_from_config(config) type_to_loader_dict = { "prompt": _load_prompt, "few_shot": _load_few_shot_prompt, # "few_shot_with_templates": _load_few_shot_with_templates_prompt, }
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/loading.html
cc03e0eae319-0
Source code for langchain.prompts.chat """Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union from pydantic import Field, root_validator from langchain.load.serializable import Serializable from langchain.prompts.base import StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BasePromptTemplate, PromptValue, ) from langchain.schema.messages import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, SystemMessage, get_buffer_string, ) [docs]class BaseMessagePromptTemplate(Serializable, ABC): """Base class for message prompt templates.""" @property def lc_serializable(self) -> bool: """Whether this object should be serialized. Returns: Whether this object should be serialized. """ return True [docs] @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs. Should return a list of BaseMessages. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessages. """ @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template. Returns: List of input variables. """ def __add__(self, other: Any) -> ChatPromptTemplate: """Combine two prompt templates. Args: other: Another prompt template. Returns: Combined prompt template. """ prompt = ChatPromptTemplate(messages=[self])
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-1
Combined prompt template. """ prompt = ChatPromptTemplate(messages=[self]) return prompt + other [docs]class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str """Name of variable to use as messages.""" [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessage. """ value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template. Returns: List of input variable names. """ return [self.variable_name] MessagePromptTemplateT = TypeVar( "MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate" ) """Type variable for message prompt templates.""" [docs]class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): """Base class for message prompt templates that use a string prompt template.""" prompt: StringPromptTemplate """String prompt template.""" additional_kwargs: dict = Field(default_factory=dict) """Additional keyword arguments to pass to the prompt template.""" [docs] @classmethod
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-2
"""Additional keyword arguments to pass to the prompt template.""" [docs] @classmethod def from_template( cls: Type[MessagePromptTemplateT], template: str, template_format: str = "f-string", **kwargs: Any, ) -> MessagePromptTemplateT: """Create a class from a string template. Args: template: a template. template_format: format of the template. **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. """ prompt = PromptTemplate.from_template(template, template_format=template_format) return cls(prompt=prompt, **kwargs) [docs] @classmethod def from_template_file( cls: Type[MessagePromptTemplateT], template_file: Union[str, Path], input_variables: List[str], **kwargs: Any, ) -> MessagePromptTemplateT: """Create a class from a template file. Args: template_file: path to a template file. String or Path. input_variables: list of input variables. **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. """ prompt = PromptTemplate.from_file(template_file, input_variables) return cls(prompt=prompt, **kwargs) [docs] @abstractmethod def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs.
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-3
"""Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessages. """ return [self.format(**kwargs)] @property def input_variables(self) -> List[str]: """ Input variables for this prompt template. Returns: List of input variable names. """ return self.prompt.input_variables [docs]class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): """Chat message prompt template.""" role: str """Role of the message.""" [docs] def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return ChatMessage( content=text, role=self.role, additional_kwargs=self.additional_kwargs ) [docs]class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate): """Human message prompt template. This is a message that is sent to the user.""" [docs] def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return HumanMessage(content=text, additional_kwargs=self.additional_kwargs) [docs]class AIMessagePromptTemplate(BaseStringMessagePromptTemplate): """AI message prompt template. This is a message that is not sent to the user.""" [docs] def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-4
"""Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return AIMessage(content=text, additional_kwargs=self.additional_kwargs) [docs]class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate): """System message prompt template. This is a message that is not sent to the user. """ [docs] def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return SystemMessage(content=text, additional_kwargs=self.additional_kwargs) [docs]class ChatPromptValue(PromptValue): """Chat prompt value. A type of a prompt value that is built from messages. """ messages: List[BaseMessage] """List of messages.""" [docs] def to_string(self) -> str: """Return prompt as string.""" return get_buffer_string(self.messages) [docs] def to_messages(self) -> List[BaseMessage]: """Return prompt as a list of messages.""" return self.messages [docs]class BaseChatPromptTemplate(BasePromptTemplate, ABC): """Base class for chat prompt templates.""" [docs] def format(self, **kwargs: Any) -> str: """Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string()
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-5
formatted string """ return self.format_prompt(**kwargs).to_string() [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: """ Format prompt. Should return a PromptValue. Args: **kwargs: Keyword arguments to use for formatting. Returns: PromptValue. """ messages = self.format_messages(**kwargs) return ChatPromptValue(messages=messages) [docs] @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format kwargs into a list of messages.""" [docs]class ChatPromptTemplate(BaseChatPromptTemplate, ABC): """A prompt template for chat models. Use to create flexible templated prompts for chat models. Examples: .. code-block:: python from langchain.prompts import ChatPromptTemplate template = ChatPromptTemplate.from_messages([ ("system", "You are a helpful AI bot. Your name is {name}."), ("human", "Hello, how are you doing?"), ("ai", "I'm doing well, thanks!"), ("human", "{user_input}"), ]) messages = template.format_messages( name="Bob", user_input="What is your name?" ) """ input_variables: List[str] """List of input variables in template messages. Used for validation.""" messages: List[ Union[BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate] ] """List of messages consisting of either message prompt templates or messages.""" def __add__(self, other: Any) -> ChatPromptTemplate: """Combine two prompt templates. Args: other: Another prompt template.
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-6
"""Combine two prompt templates. Args: other: Another prompt template. Returns: Combined prompt template. """ # Allow for easy combining if isinstance(other, ChatPromptTemplate): return ChatPromptTemplate(messages=self.messages + other.messages) elif isinstance( other, (BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate) ): return ChatPromptTemplate(messages=self.messages + [other]) elif isinstance(other, str): prompt = HumanMessagePromptTemplate.from_template(other) return ChatPromptTemplate(messages=self.messages + [prompt]) else: raise NotImplementedError(f"Unsupported operand type for +: {type(other)}") @root_validator(pre=True) def validate_input_variables(cls, values: dict) -> dict: """Validate input variables. If input_variables is not set, it will be set to the union of all input variables in the messages. Args: values: values to validate. Returns: Validated values. """ messages = values["messages"] input_vars = set() for message in messages: if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): input_vars.update(message.input_variables) if "partial_variables" in values: input_vars = input_vars - set(values["partial_variables"]) if "input_variables" in values: if input_vars != set(values["input_variables"]): raise ValueError( "Got mismatched input_variables. " f"Expected: {input_vars}. " f"Got: {values['input_variables']}" ) else: values["input_variables"] = list(input_vars) return values
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-7
else: values["input_variables"] = list(input_vars) return values [docs] @classmethod def from_template(cls, template: str, **kwargs: Any) -> ChatPromptTemplate: """Create a chat prompt template from a template string. Creates a chat template consisting of a single message assumed to be from the human. Args: template: template string **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. """ prompt_template = PromptTemplate.from_template(template, **kwargs) message = HumanMessagePromptTemplate(prompt=prompt_template) return cls.from_messages([message]) [docs] @classmethod def from_role_strings( cls, string_messages: List[Tuple[str, str]] ) -> ChatPromptTemplate: """Create a chat prompt template from a list of (role, template) tuples. Args: string_messages: list of (role, template) tuples. Returns: a chat prompt template """ return cls( messages=[ ChatMessagePromptTemplate.from_template(template, role=role) for role, template in string_messages ] ) [docs] @classmethod def from_strings( cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]] ) -> ChatPromptTemplate: """Create a chat prompt template from a list of (role class, template) tuples. Args: string_messages: list of (role class, template) tuples. Returns: a chat prompt template """ return cls.from_messages(string_messages) [docs] @classmethod def from_messages( cls,
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-8
[docs] @classmethod def from_messages( cls, messages: Sequence[ Union[ BaseMessagePromptTemplate, BaseChatPromptTemplate, BaseMessage, Tuple[str, str], Tuple[Type, str], str, ] ], ) -> ChatPromptTemplate: """Create a chat prompt template from a variety of message formats. Examples: Instantiation from a list of message templates: .. code-block:: python template = ChatPromptTemplate.from_messages([ ("human", "Hello, how are you?"), ("ai", "I'm doing well, thanks!"), ("human", "That's good to hear."), ]) Instantiation from mixed message formats: .. code-block:: python template = ChatPromptTemplate.from_messages([ SystemMessage(content="hello"), ("human", "Hello, how are you?"), ]) Args: messages: sequence of message representations. A message can be represented using the following formats: (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of (message type, template); e.g., ("human", "{user_input}"), (4) 2-tuple of (message class, template), (4) a string which is shorthand for ("human", template); e.g., "{user_input}" Returns: a chat prompt template """ _messages = [_convert_to_message(message) for message in messages] # Automatically infer input variables from messages input_vars = set() for _message in _messages: if isinstance( _message, (BaseChatPromptTemplate, BaseMessagePromptTemplate)
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-9
_message, (BaseChatPromptTemplate, BaseMessagePromptTemplate) ): input_vars.update(_message.input_variables) return cls(input_variables=sorted(input_vars), messages=_messages) [docs] def format(self, **kwargs: Any) -> str: """Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string() [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format the chat template into a list of finalized messages. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: list of formatted messages """ kwargs = self._merge_partial_and_user_variables(**kwargs) result = [] for message_template in self.messages: if isinstance(message_template, BaseMessage): result.extend([message_template]) elif isinstance( message_template, (BaseMessagePromptTemplate, BaseChatPromptTemplate) ): rel_params = { k: v for k, v in kwargs.items() if k in message_template.input_variables } message = message_template.format_messages(**rel_params) result.extend(message) else: raise ValueError(f"Unexpected input: {message_template}") return result [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> ChatPromptTemplate: """Return a new ChatPromptTemplate with some of the input variables already filled in. Args:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-10
filled in. Args: **kwargs: keyword arguments to use for filling in template variables. Ought to be a subset of the input variables. Returns: A new ChatPromptTemplate. Example: .. code-block:: python from langchain.prompts import ChatPromptTemplate template = ChatPromptTemplate.from_messages( [ ("system", "You are an AI assistant named {name}."), ("human", "Hi I'm {user}"), ("ai", "Hi there, {user}, I'm {name}."), ("human", "{input}"), ] ) template2 = template.partial(user="Lucy", name="R2D2") template2.format_messages(input="hello") """ prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) @property def _prompt_type(self) -> str: """Name of prompt type.""" return "chat" [docs] def save(self, file_path: Union[Path, str]) -> None: """Save prompt to file. Args: file_path: path to file. """ raise NotImplementedError() def _create_template_from_message_type( message_type: str, template: str ) -> BaseMessagePromptTemplate: """Create a message prompt template from a message type and template string. Args: message_type: str the type of the message template (e.g., "human", "ai", etc.) template: str the template string.
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-11
template: str the template string. Returns: a message prompt template of the appropriate type. """ if message_type == "human": message: BaseMessagePromptTemplate = HumanMessagePromptTemplate.from_template( template ) elif message_type == "ai": message = AIMessagePromptTemplate.from_template(template) elif message_type == "system": message = SystemMessagePromptTemplate.from_template(template) else: raise ValueError( f"Unexpected message type: {message_type}. Use one of 'human', 'ai', " f"or 'system'." ) return message def _convert_to_message( message: Union[ BaseMessagePromptTemplate, BaseChatPromptTemplate, BaseMessage, Tuple[str, str], Tuple[Type, str], str, ] ) -> Union[BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate]: """Instantiate a message from a variety of message formats. The message format can be one of the following: - BaseMessagePromptTemplate - BaseMessage - 2-tuple of (role string, template); e.g., ("human", "{user_input}") - 2-tuple of (message class, template) - string: shorthand for ("human", template); e.g., "{user_input}" Args: message: a representation of a message in one of the supported formats Returns: an instance of a message or a message template """ if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): _message: Union[ BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate ] = message
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
cc03e0eae319-12
BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate ] = message elif isinstance(message, BaseMessage): _message = message elif isinstance(message, str): _message = _create_template_from_message_type("human", message) elif isinstance(message, tuple): if len(message) != 2: raise ValueError(f"Expected 2-tuple of (role, template), got {message}") message_type_str, template = message if isinstance(message_type_str, str): _message = _create_template_from_message_type(message_type_str, template) else: _message = message_type_str(prompt=PromptTemplate.from_template(template)) else: raise NotImplementedError(f"Unsupported message type: {type(message)}") return _message
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
4ad9a4dcdce9-0
Source code for langchain.prompts.prompt """Prompt schema definition.""" from __future__ import annotations from pathlib import Path from string import Formatter from typing import Any, Dict, List, Optional, Union from pydantic import root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, _get_jinja2_variables_from_template, check_valid_template, ) [docs]class PromptTemplate(StringPromptTemplate): """A prompt template for a language model. A prompt template consists of a string template. It accepts a set of parameters from the user that can be used to generate a prompt for a language model. The template can be formatted using either f-strings (default) or jinja2 syntax. Example: .. code-block:: python from langchain import PromptTemplate # Instantiation using from_template (recommended) prompt = PromptTemplate.from_template("Say {foo}") prompt.format(foo="bar") # Instantiation using initializer prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ @property def lc_attributes(self) -> Dict[str, Any]: return { "template_format": self.template_format, } input_variables: List[str] """A list of the names of the variables the prompt template expects.""" template: str """The prompt template.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" def __add__(self, other: Any) -> PromptTemplate:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
4ad9a4dcdce9-1
def __add__(self, other: Any) -> PromptTemplate: """Override the + operator to allow for combining prompt templates.""" # Allow for easy combining if isinstance(other, PromptTemplate): if self.template_format != "f-string": raise ValueError( "Adding prompt templates only supported for f-strings." ) if other.template_format != "f-string": raise ValueError( "Adding prompt templates only supported for f-strings." ) input_variables = list( set(self.input_variables) | set(other.input_variables) ) template = self.template + other.template # If any do not want to validate, then don't validate_template = self.validate_template and other.validate_template partial_variables = {k: v for k, v in self.partial_variables.items()} for k, v in other.partial_variables.items(): if k in partial_variables: raise ValueError("Cannot have same variable partialed twice.") else: partial_variables[k] = v return PromptTemplate( template=template, input_variables=input_variables, partial_variables=partial_variables, template_format="f-string", validate_template=validate_template, ) elif isinstance(other, str): prompt = PromptTemplate.from_template(other) return self + prompt else: raise NotImplementedError(f"Unsupported operand type for +: {type(other)}") @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "prompt" [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template.
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
4ad9a4dcdce9-2
Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: all_inputs = values["input_variables"] + list(values["partial_variables"]) check_valid_template( values["template"], values["template_format"], all_inputs ) return values [docs] @classmethod def from_examples( cls, examples: List[str], suffix: str, input_variables: List[str], example_separator: str = "\n\n", prefix: str = "", **kwargs: Any, ) -> PromptTemplate: """Take examples in list format with prefix and suffix to create a prompt. Intended to be used as a way to dynamically create a prompt from examples. Args: examples: List of examples to use in the prompt. suffix: String to go after the list of examples. Should generally set up the user's input. input_variables: A list of variable names the final prompt template will expect. example_separator: The separator to use in between examples. Defaults to two new line characters. prefix: String that should go before any examples. Generally includes examples. Default to an empty string. Returns: The final prompt generated. """
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
4ad9a4dcdce9-3
Returns: The final prompt generated. """ template = example_separator.join([prefix, *examples, suffix]) return cls(input_variables=input_variables, template=template, **kwargs) [docs] @classmethod def from_file( cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any ) -> PromptTemplate: """Load a prompt from a file. Args: template_file: The path to the file containing the prompt template. input_variables: A list of variable names the final prompt template will expect. Returns: The prompt loaded from the file. """ with open(str(template_file), "r") as f: template = f.read() return cls(input_variables=input_variables, template=template, **kwargs) [docs] @classmethod def from_template( cls, template: str, *, template_format: str = "f-string", partial_variables: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> PromptTemplate: """Load a prompt template from a template. Args: template: The template to load. template_format: The format of the template. Use `jinja2` for jinja2, and `f-string` or None for f-strings. partial_variables: A dictionary of variables that can be used to partially fill in the template. For example, if the template is `"{variable1} {variable2}"`, and `partial_variables` is `{"variable1": "foo"}`, then the final prompt will be `"foo {variable2}"`. Returns:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
4ad9a4dcdce9-4
`"foo {variable2}"`. Returns: The prompt template loaded from the template. """ if template_format == "jinja2": # Get the variables for the template input_variables = _get_jinja2_variables_from_template(template) elif template_format == "f-string": input_variables = { v for _, v, _, _ in Formatter().parse(template) if v is not None } else: raise ValueError(f"Unsupported template format: {template_format}") _partial_variables = partial_variables or {} if _partial_variables: input_variables = { var for var in input_variables if var not in _partial_variables } return cls( input_variables=sorted(input_variables), template=template, template_format=template_format, partial_variables=_partial_variables, **kwargs, ) # For backwards compatibility. Prompt = PromptTemplate
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
446f2fdfd839-0
Source code for langchain.prompts.few_shot_with_templates """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptWithTemplates(StringPromptTemplate): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: StringPromptTemplate """A PromptTemplate to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: Optional[StringPromptTemplate] = None """A PromptTemplate to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None)
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
446f2fdfd839-1
examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix, and input variables are consistent.""" if values["validate_template"]: input_variables = values["input_variables"] expected_input_variables = set(values["suffix"].input_variables) expected_input_variables |= set(values["partial_variables"]) if values["prefix"] is not None: expected_input_variables |= set(values["prefix"].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template.
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
446f2fdfd839-2
Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall prefix. if self.prefix is None: prefix = "" else: prefix_kwargs = { k: v for k, v in kwargs.items() if k in self.prefix.input_variables } for k in prefix_kwargs.keys(): kwargs.pop(k) prefix = self.prefix.format(**prefix_kwargs) # Create the overall suffix suffix_kwargs = { k: v for k, v in kwargs.items() if k in self.suffix.input_variables } for k in suffix_kwargs.keys(): kwargs.pop(k) suffix = self.suffix.format( **suffix_kwargs, ) pieces = [prefix, *example_strings, suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot_with_templates" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
446f2fdfd839-3
"""Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
074ec12427ee-0
Source code for langchain.prompts.few_shot """Prompt template that contains few shot examples.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Union from pydantic import BaseModel, Extra, Field, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, ) from langchain.prompts.chat import BaseChatPromptTemplate, BaseMessagePromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate from langchain.schema.messages import BaseMessage, get_buffer_string class _FewShotPromptTemplateMixin(BaseModel): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html
074ec12427ee-1
) return values def _get_examples(self, **kwargs: Any) -> List[dict]: """Get the examples to use for formatting the prompt. Args: **kwargs: Keyword arguments to be passed to the example selector. Returns: List of examples. """ if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) [docs]class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate): """Prompt template that contains few shot examples.""" @property def lc_serializable(self) -> bool: """Return whether the prompt template is lc_serializable. Returns: Boolean indicating whether the prompt template is lc_serializable. """ return False validate_template: bool = True """Whether or not to try validating the template.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: str """A prompt template string to put after the examples.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: str = "" """A prompt template string to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" @root_validator() def template_is_valid(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html
074ec12427ee-2
def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix, and input variables are consistent.""" if values["validate_template"]: check_valid_template( values["prefix"] + values["suffix"], values["template_format"], values["input_variables"] + list(values["partial_variables"]), ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: **kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) examples = [ {k: e[k] for k in self.example_prompt.input_variables} for e in examples ] # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall template. pieces = [self.prefix, *example_strings, self.suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot" [docs] def dict(self, **kwargs: Any) -> Dict:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html
074ec12427ee-3
[docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs) [docs]class FewShotChatMessagePromptTemplate( BaseChatPromptTemplate, _FewShotPromptTemplateMixin ): """Chat prompt template that supports few-shot examples. The high level structure of produced by this prompt template is a list of messages consisting of prefix message(s), example message(s), and suffix message(s). This structure enables creating a conversation with intermediate examples like: System: You are a helpful AI Assistant Human: What is 2+2? AI: 4 Human: What is 2+3? AI: 5 Human: What is 4+4? This prompt template can be used to generate a fixed list of examples or else to dynamically select examples based on the input. Examples: Prompt template with a fixed list of examples (matching the sample conversation above): .. code-block:: python from langchain.prompts import ( FewShotChatMessagePromptTemplate, ChatPromptTemplate ) examples = [ {"input": "2+2", "output": "4"}, {"input": "2+3", "output": "5"}, ] example_prompt = ChatPromptTemplate.from_messages( [('human', '{input}'), ('ai', '{output}')] ) few_shot_prompt = FewShotChatMessagePromptTemplate( examples=examples, # This is a prompt template used to format each individual example. example_prompt=example_prompt, )
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html