Dataset schema: id (string, 14-15 chars), text (string, 49-2.47k chars), source (string, 61-166 chars).
8230019c2354-1
    model_kwargs: Optional[dict] = None
    """Other model keyword args"""
    deepinfra_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        deepinfra_api_token = get_from_dict_or_env(
            values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
        )
        values["deepinfra_api_token"] = deepinfra_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model_id": self.model_id}

    def _embed(self, input: List[str]) -> List[List[float]]:
        _model_kwargs = self.model_kwargs or {}
        # HTTP headers for authorization
        headers = {
            "Authorization": f"bearer {self.deepinfra_api_token}",
            "Content-Type": "application/json",
        }
        # send request
        try:
            res = requests.post(
                f"https://api.deepinfra.com/v1/inference/{self.model_id}",
                headers=headers,
                json={"inputs": input, "normalize": self.normalize, **_model_kwargs},
            )
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")
        if res.status_code != 200:
            raise ValueError(
                "Error raised by inference API HTTP code: %s, %s"
                % (res.status_code, res.text)
            )
        try:
            t = res.json()
            embeddings = t["embeddings"]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
8230019c2354-2
        try:
            t = res.json()
            embeddings = t["embeddings"]
        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {res.text}"
            )
        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a Deep Infra deployed embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [f"{self.query_instruction}{text}" for text in texts]
        embeddings = self._embed(instruction_pairs)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a Deep Infra deployed embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = f"{self.query_instruction}{text}"
        embedding = self._embed([instruction_pair])[0]
        return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
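For context, the private `_embed` helper above is what both public methods delegate to: one POST per batch against the model's inference URL, with the instruction prefix prepended to each text. A minimal usage sketch; the surrounding class name (`DeepInfraEmbeddings`), the `model_id` shown, and the token value are all stand-ins, and the root validator will pull the token from the environment if it is not passed explicitly:

import os

from langchain.embeddings import DeepInfraEmbeddings

# Hypothetical token and model id, purely for illustration.
os.environ["DEEPINFRA_API_TOKEN"] = "your-api-token"

embeddings = DeepInfraEmbeddings(model_id="sentence-transformers/all-MiniLM-L6-v2")
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("what does the first document say?")
print(len(doc_vectors), len(query_vector))  # one vector per document; one query vector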
7d364523f7a8-0
Source code for langchain.embeddings.spacy_embeddings

import importlib.util
from typing import Any, Dict, List

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings


class SpacyEmbeddings(BaseModel, Embeddings):
    """Embeddings by SpaCy models.

    It only supports the 'en_core_web_sm' model.

    Attributes:
        nlp (Any): The Spacy model loaded into memory.

    Methods:
        embed_documents(texts: List[str]) -> List[List[float]]:
            Generates embeddings for a list of documents.
        embed_query(text: str) -> List[float]:
            Generates an embedding for a single piece of text.
    """

    nlp: Any  # The Spacy model loaded into memory

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid  # Forbid extra attributes during model initialization

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """
        Validates that the Spacy package and the 'en_core_web_sm' model are installed.

        Args:
            values (Dict): The values provided to the class constructor.

        Returns:
            The validated values.

        Raises:
            ValueError: If the Spacy package or the 'en_core_web_sm' model
                are not installed.
        """
        # Check if the Spacy package is installed
        if importlib.util.find_spec("spacy") is None:
            raise ValueError(
                "Spacy package not found. "
                "Please install it with `pip install spacy`."
            )
        try:
            # Try to load the 'en_core_web_sm' Spacy model
            import spacy
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/spacy_embeddings.html
7d364523f7a8-1
            import spacy

            values["nlp"] = spacy.load("en_core_web_sm")
        except OSError:
            # If the model is not found, raise a ValueError
            raise ValueError(
                "Spacy model 'en_core_web_sm' not found. "
                "Please install it with"
                " `python -m spacy download en_core_web_sm`."
            )
        return values  # Return the validated values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Generates embeddings for a list of documents.

        Args:
            texts (List[str]): The documents to generate embeddings for.

        Returns:
            A list of embeddings, one for each document.
        """
        return [self.nlp(text).vector.tolist() for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """
        Generates an embedding for a single piece of text.

        Args:
            text (str): The text to generate an embedding for.

        Returns:
            The embedding for the text.
        """
        return self.nlp(text).vector.tolist()

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Asynchronously generates embeddings for a list of documents.

        This method is not implemented and raises a NotImplementedError.

        Args:
            texts (List[str]): The documents to generate embeddings for.

        Raises:
            NotImplementedError: This method is not implemented.
        """
        raise NotImplementedError("Asynchronous embedding generation is not supported.")

    async def aembed_query(self, text: str) -> List[float]:
        """
        Asynchronously generates an embedding for a single piece of text.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/spacy_embeddings.html
7d364523f7a8-2
""" Asynchronously generates an embedding for a single piece of text. This method is not implemented and raises a NotImplementedError. Args: text (str): The text to generate an embedding for. Raises: NotImplementedError: This method is not implemented. """ raise NotImplementedError("Asynchronous embedding generation is not supported.")
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/spacy_embeddings.html
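As a quick illustration of the class above (a sketch; it assumes spacy and the en_core_web_sm model are already installed, exactly as the validator requires):

from langchain.embeddings import SpacyEmbeddings

# Requires `pip install spacy` and `python -m spacy download en_core_web_sm`.
embedder = SpacyEmbeddings()
vectors = embedder.embed_documents(["spaCy vectors are static", "and fast to compute"])
query_vector = embedder.embed_query("static word vectors")
print(len(vectors), len(query_vector))  # 2 document vectors; one query vector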
571ce5748f35-0
Source code for langchain.embeddings.sagemaker_endpoint

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
    """Content handler for the embeddings class."""


class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html

    Example:
        .. code-block:: python

            from langchain.embeddings import SagemakerEndpointEmbeddings

            endpoint_name = "my-endpoint-name"
            region_name = "us-west-2"
            credentials_profile_name = "default"
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """

    client: Any  #: :meta private:

    endpoint_name: str = ""
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
571ce5748f35-1
    client: Any  #: :meta private:

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files,
    which has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: EmbeddingsContentHandler
    """The content handler class that provides input and output
    transform functions to handle formats between the embedding
    model and the endpoint.

    Example:
        .. code-block:: python

            from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

            class ContentHandler(EmbeddingsContentHandler):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompts": prompts, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> List[List[float]]:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json["vectors"]
    """  # noqa: E501

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
571ce5748f35-2
"""Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[List[float]]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
571ce5748f35-3
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}

        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.extend(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
571ce5748f35-4
        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
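Putting the pieces above together: the class never parses payloads itself, so a working setup always pairs it with a concrete EmbeddingsContentHandler. A sketch, in which the endpoint name, region, and the "prompts"/"vectors" JSON schema are hypothetical stand-ins for whatever your deployed model actually expects:

import json
from typing import Dict, List

from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


class JSONContentHandler(EmbeddingsContentHandler):
    """Marshals texts to/from a hypothetical JSON endpoint schema."""

    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        return json.dumps({"prompts": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        # invoke_endpoint returns a StreamingBody, hence the .read()
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json["vectors"]


embeddings = SagemakerEndpointEmbeddings(
    endpoint_name="my-embedding-endpoint",  # hypothetical endpoint name
    region_name="us-west-2",
    content_handler=JSONContentHandler(),
)
vectors = embeddings.embed_documents(["hello", "world"], chunk_size=2)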
71bebf5c4e93-0
Source code for langchain.embeddings.nlpcloud

from typing import Any, Dict, List

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class NLPCloudEmbeddings(BaseModel, Embeddings):
    """NLP Cloud embedding models.

    To use, you should have the nlpcloud python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import NLPCloudEmbeddings

            embeddings = NLPCloudEmbeddings()
    """

    model_name: str  # Define model_name as a class attribute
    gpu: bool  # Define gpu as a class attribute
    client: Any  #: :meta private:

    def __init__(
        self,
        model_name: str = "paraphrase-multilingual-mpnet-base-v2",
        gpu: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(model_name=model_name, gpu=gpu, **kwargs)

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        nlpcloud_api_key = get_from_dict_or_env(
            values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
        )
        try:
            import nlpcloud

            values["client"] = nlpcloud.Client(
                values["model_name"], nlpcloud_api_key, gpu=values["gpu"], lang="en"
            )
        except ImportError:
            raise ImportError(
                "Could not import nlpcloud python package. "
                "Please install it with `pip install nlpcloud`."
            )
        return values
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/nlpcloud.html
71bebf5c4e93-1
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using NLP Cloud.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self.client.embeddings(texts)["embeddings"]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using NLP Cloud.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.client.embeddings([text])["embeddings"][0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/nlpcloud.html
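A short usage sketch for the class above. The validator reads NLPCLOUD_API_KEY from the environment, and the constructor defaults match the source (paraphrase-multilingual-mpnet-base-v2 on CPU):

import os

from langchain.embeddings import NLPCloudEmbeddings

os.environ["NLPCLOUD_API_KEY"] = "your-api-key"  # placeholder value

embeddings = NLPCloudEmbeddings()  # default model, gpu=False
doc_vectors = embeddings.embed_documents(["un texte", "another text"])
query_vector = embeddings.embed_query("multilingual sentence embeddings")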
8b86005e1bb1-0
Source code for langchain.embeddings.aleph_alpha

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """Aleph Alpha's asymmetric semantic embedding.

    AA provides you with an endpoint to embed a document and a query.
    The models were optimized to make the embeddings of documents and
    the query for a document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from aleph_alpha import AlephAlphaAsymmetricSemanticEmbedding

            embeddings = AlephAlphaAsymmetricSemanticEmbedding(
                normalize=True, compress_to_size=128
            )

            document = "This is a content of the document"
            query = "What is the content of the document?"

            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:

    # Embedding params
    model: str = "luminous-base"
    """Model name to use."""
    compress_to_size: Optional[int] = None
    """Should the returned embeddings come back as an original 5120-dim vector,
    or should it be compressed to 128-dim."""
    normalize: Optional[bool] = None
    """Should returned embeddings be normalized"""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
    explicitly been set in the request."""
    control_log_additive: bool = True
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
8b86005e1bb1-1
    explicitly been set in the request."""
    control_log_additive: bool = True
    """Apply controls on prompt items by adding the log(control_factor)
    to attention scores."""

    # Client params
    aleph_alpha_api_key: Optional[str] = None
    """API key for Aleph Alpha API."""
    host: str = "https://api.aleph-alpha.com"
    """The hostname of the API host.
    The default one is "https://api.aleph-alpha.com"."""
    hosting: Optional[str] = None
    """Determines in which datacenters the request may be processed.
    You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).

    Not setting this value, or setting it to None, gives us maximal flexibility
    in processing your request in our own datacenters and on servers
    hosted with other providers. Choose this option for maximal availability.

    Setting it to "aleph-alpha" allows us to only process the request
    in our own datacenters. Choose this option for maximal data privacy."""
    request_timeout_seconds: int = 305
    """Client timeout that will be set for HTTP requests in the
    `requests` library's API calls.
    Server will close all requests after 300 seconds with an internal server error."""
    total_retries: int = 8
    """The number of retries made in case requests fail with certain retryable
    status codes. If the last retry fails a corresponding exception is raised.
    Note, that between retries an exponential backoff is applied, starting with
    0.5 s after the first retry and doubling for each retry made. So with the
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
8b86005e1bb1-2
    retry made. So with the
    default setting of 8 retries a total wait time of 63.5 s is added
    between the retries."""
    nice: bool = False
    """Setting this to True, will signal to the API that you intend to be
    nice to other users by de-prioritizing your request below concurrent ones."""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        aleph_alpha_api_key = get_from_dict_or_env(
            values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
        )
        try:
            from aleph_alpha_client import Client

            values["client"] = Client(
                token=aleph_alpha_api_key,
                host=values["host"],
                hosting=values["hosting"],
                request_timeout_seconds=values["request_timeout_seconds"],
                total_retries=values["total_retries"],
                nice=values["nice"],
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's asymmetric Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
8b86005e1bb1-3
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        document_embeddings = []

        for text in texts:
            document_params = {
                "prompt": Prompt.from_text(text),
                "representation": SemanticRepresentation.Document,
                "compress_to_size": self.compress_to_size,
                "normalize": self.normalize,
                "contextual_control_threshold": self.contextual_control_threshold,
                "control_log_additive": self.control_log_additive,
            }

            document_request = SemanticEmbeddingRequest(**document_params)
            document_response = self.client.semantic_embed(
                request=document_request, model=self.model
            )

            document_embeddings.append(document_response.embedding)

        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        symmetric_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Query,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
8b86005e1bb1-4
"control_log_additive": self.control_log_additive, } symmetric_request = SemanticEmbeddingRequest(**symmetric_params) symmetric_response = self.client.semantic_embed( request=symmetric_request, model=self.model ) return symmetric_response.embedding [docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding): """The symmetric version of the Aleph Alpha's semantic embeddings. The main difference is that here, both the documents and queries are embedded with a SemanticRepresentation.Symmetric Example: .. code-block:: python from aleph_alpha import AlephAlphaSymmetricSemanticEmbedding embeddings = AlephAlphaAsymmetricSemanticEmbedding( normalize=True, compress_to_size=128 ) text = "This is a test text" doc_result = embeddings.embed_documents([text]) query_result = embeddings.embed_query(text) """ def _embed(self, text: str) -> List[float]: try: from aleph_alpha_client import ( Prompt, SemanticEmbeddingRequest, SemanticRepresentation, ) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) query_params = { "prompt": Prompt.from_text(text), "representation": SemanticRepresentation.Symmetric, "compress_to_size": self.compress_to_size, "normalize": self.normalize, "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, } query_request = SemanticEmbeddingRequest(**query_params) query_response = self.client.semantic_embed(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
8b86005e1bb1-5
        query_response = self.client.semantic_embed(
            request=query_request, model=self.model
        )
        return query_response.embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        document_embeddings = []

        for text in texts:
            document_embeddings.append(self._embed(text))
        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's symmetric query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
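To make the asymmetric/symmetric split above concrete, here is a usage sketch. It assumes ALEPH_ALPHA_API_KEY is set in the environment (the validator reads it from there) and that both classes are importable from langchain.embeddings:

from langchain.embeddings import (
    AlephAlphaAsymmetricSemanticEmbedding,
    AlephAlphaSymmetricSemanticEmbedding,
)

asymmetric = AlephAlphaAsymmetricSemanticEmbedding(compress_to_size=128)
symmetric = AlephAlphaSymmetricSemanticEmbedding(compress_to_size=128)

# Asymmetric: documents use SemanticRepresentation.Document, queries use .Query,
# so a short question lands near the document that answers it.
doc_vec = asymmetric.embed_documents(["The capital of France is Paris."])
query_vec = asymmetric.embed_query("What is the capital of France?")

# Symmetric: both sides use SemanticRepresentation.Symmetric, suited to
# comparing texts of the same kind (clustering, deduplication).
sim_vecs = symmetric.embed_documents(["a sentence", "a similar sentence"])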
a05657c30217-0
Source code for langchain.embeddings.openai

from __future__ import annotations

import logging
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import numpy as np
from pydantic import BaseModel, Extra, Field, root_validator
from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-1
    )


def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]):
        import openai

        raise openai.error.APIError("OpenAI API returned an empty embedding")
    return response


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-2
"""Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: response = embeddings.client.create(**kwargs) return _check_response(response) return _embed_with_retry(**kwargs) [docs]async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: response = await embeddings.client.acreate(**kwargs) return _check_response(response) return await _async_embed_with_retry(**kwargs) [docs]class OpenAIEmbeddings(BaseModel, Embeddings): """OpenAI embedding models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure"
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-3
            import os
            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
            os.environ["OPENAI_API_VERSION"] = "2023-05-15"
            os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                openai_api_base="https://your-endpoint.openai.azure.com/",
                openai_api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model  # to support Azure OpenAI Service custom deployment names
    openai_api_version: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_base: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_type: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-4
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the OpenAI request."""
    headers: Any = None
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the embedding model name. However, there are some cases
    where you may want to use this Embedding class with a model name not
    supported by tiktoken. This can include when using Azure embeddings or
    when using one of the many model providers that expose an OpenAI-like
    API but with different models. In those cases, in order to avoid erroring
    when tiktoken is called, you can specify a model name to use here."""
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-5
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values,
            "openai_api_type",
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-6
"OPENAI_API_TYPE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) if values["openai_api_type"] in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01" else: default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _invocation_params(self) -> Dict: openai_args = { "model": self.model, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_type": self.openai_api_type, "api_version": self.openai_api_version, **self.model_kwargs, } if self.openai_api_type in ("azure", "azure_ad", "azuread"):
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-7
        if self.openai_api_type in ("azure", "azure_ad", "azuread"):
            openai_args["engine"] = self.deployment
        if self.openai_proxy:
            try:
                import openai
            except ImportError:
                raise ImportError(
                    "Could not import openai python package. "
                    "Please install it with `pip install openai`."
                )

            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]  # noqa: E501
        return openai_args

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order for OpenAIEmbeddings to work. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-8
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size

        if self.show_progress_bar:
            try:
                import tqdm

                _iter = tqdm.tqdm(range(0, len(tokens), _chunk_size))
            except ImportError:
                _iter = range(0, len(tokens), _chunk_size)
        else:
            _iter = range(0, len(tokens), _chunk_size)

        for i in _iter:
            response = embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-9
        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = embed_with_retry(
                    self,
                    input="",
                    **self._invocation_params,
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    async def _aget_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order for OpenAIEmbeddings to work. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-10
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = (
                    await async_embed_with_retry(
                        self,
                        input="",
                        **self._invocation_params,
                    )
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    def embed_documents(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-11
        return embeddings

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk size
                specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        return self._get_len_safe_embeddings(texts, engine=self.deployment)

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk size
                specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        return await self._aget_len_safe_embeddings(texts, engine=self.deployment)

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
a05657c30217-12
        Returns:
            Embedding for the text.
        """
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
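The subtle part of this module is the length-safe path: each input is tokenized with tiktoken, split into windows of at most embedding_ctx_length tokens, each window is embedded separately, and the window vectors are combined by a token-count-weighted average that is then L2-normalized. A minimal numpy re-creation of that combination step (the vectors and token counts below are made up for illustration):

import numpy as np

# A long text split into two windows of 3 and 1 tokens, each embedded separately.
chunk_vectors = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
tokens_per_chunk = [3, 1]

# Same combination as _get_len_safe_embeddings: weighted average, then renormalize.
average = np.average(chunk_vectors, axis=0, weights=tokens_per_chunk)  # [0.75, 0.25]
unit = average / np.linalg.norm(average)  # the final embedding is unit length
print(unit)  # approx. [0.9487, 0.3162]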
fad74986feb0-0
Source code for langchain.schema.storage

from abc import ABC, abstractmethod
from typing import Generic, Iterator, List, Optional, Sequence, Tuple, TypeVar, Union

K = TypeVar("K")
V = TypeVar("V")


class BaseStore(Generic[K, V], ABC):
    """Abstract interface for a key-value store."""

    @abstractmethod
    def mget(self, keys: Sequence[K]) -> List[Optional[V]]:
        """Get the values associated with the given keys.

        Args:
            keys (Sequence[K]): A sequence of keys.

        Returns:
            A sequence of optional values associated with the keys.
            If a key is not found, the corresponding value will be None.
        """

    @abstractmethod
    def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
        """Set the values for the given keys.

        Args:
            key_value_pairs (Sequence[Tuple[K, V]]): A sequence of key-value pairs.
        """

    @abstractmethod
    def mdelete(self, keys: Sequence[K]) -> None:
        """Delete the given keys and their associated values.

        Args:
            keys (Sequence[K]): A sequence of keys to delete.
        """

    @abstractmethod
    def yield_keys(
        self, *, prefix: Optional[str] = None
    ) -> Union[Iterator[K], Iterator[str]]:
        """Get an iterator over keys that match the given prefix.

        Args:
            prefix (str): The prefix to match.

        Returns:
            Iterator[K | str]: An iterator over keys that match the given prefix.
            This method is allowed to return an iterator over either K or str
https://api.python.langchain.com/en/latest/_modules/langchain/schema/storage.html
fad74986feb0-1
            This method is allowed to return an iterator over either K or str
            depending on what makes more sense for the given store.
        """
https://api.python.langchain.com/en/latest/_modules/langchain/schema/storage.html
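Since BaseStore is a plain ABC over four methods, a dict-backed implementation shows the whole contract. This sketch is illustrative only, not a class shipped by the library:

from typing import Dict, Iterator, List, Optional, Sequence, Tuple

from langchain.schema.storage import BaseStore


class InMemoryByteStore(BaseStore[str, bytes]):
    """The simplest possible BaseStore: a dict of str keys to bytes values."""

    def __init__(self) -> None:
        self._data: Dict[str, bytes] = {}

    def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
        # Missing keys yield None, as the docstring requires.
        return [self._data.get(k) for k in keys]

    def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
        self._data.update(key_value_pairs)

    def mdelete(self, keys: Sequence[str]) -> None:
        for k in keys:
            self._data.pop(k, None)

    def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]:
        for k in self._data:
            if prefix is None or k.startswith(prefix):
                yield k


store = InMemoryByteStore()
store.mset([("doc:1", b"hello"), ("doc:2", b"world")])
assert store.mget(["doc:1", "doc:3"]) == [b"hello", None]
assert sorted(store.yield_keys(prefix="doc:")) == ["doc:1", "doc:2"]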
6cbcc5c8b741-0
Source code for langchain.schema.messages

from __future__ import annotations

from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Sequence

from pydantic import Field

from langchain.load.serializable import Serializable

if TYPE_CHECKING:
    from langchain.prompts.chat import ChatPromptTemplate


def get_buffer_string(
    messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
    """Convert a sequence of Messages to strings and concatenate them into one string.

    Args:
        messages: Messages to be converted to strings.
        human_prefix: The prefix to prepend to contents of HumanMessages.
        ai_prefix: The prefix to prepend to contents of AIMessages.

    Returns:
        A single string concatenation of all input messages.

    Example:
        .. code-block:: python

            from langchain.schema import AIMessage, HumanMessage

            messages = [
                HumanMessage(content="Hi, how are you?"),
                AIMessage(content="Good, how are you?"),
            ]
            get_buffer_string(messages)
            # -> "Human: Hi, how are you?\nAI: Good, how are you?"
    """
    string_messages = []
    for m in messages:
        if isinstance(m, HumanMessage):
            role = human_prefix
        elif isinstance(m, AIMessage):
            role = ai_prefix
        elif isinstance(m, SystemMessage):
            role = "System"
        elif isinstance(m, FunctionMessage):
            role = "Function"
        elif isinstance(m, ChatMessage):
            role = m.role
        else:
            raise ValueError(f"Got unsupported message type: {m}")
        message = f"{role}: {m.content}"
https://api.python.langchain.com/en/latest/_modules/langchain/schema/messages.html
6cbcc5c8b741-1
        message = f"{role}: {m.content}"
        if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
            message += f"{m.additional_kwargs['function_call']}"
        string_messages.append(message)

    return "\n".join(string_messages)


class BaseMessage(Serializable):
    """The base abstract Message class.

    Messages are the inputs and outputs of ChatModels.
    """

    content: str
    """The string contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)
    """Any additional information."""

    @property
    @abstractmethod
    def type(self) -> str:
        """Type of the Message, used for serialization."""

    @property
    def lc_serializable(self) -> bool:
        """Whether this class is LangChain serializable."""
        return True

    def __add__(self, other: Any) -> ChatPromptTemplate:
        from langchain.prompts.chat import ChatPromptTemplate

        prompt = ChatPromptTemplate(messages=[self])
        return prompt + other


class BaseMessageChunk(BaseMessage):
    def _merge_kwargs_dict(
        self, left: Dict[str, Any], right: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Merge additional_kwargs from another BaseMessageChunk into this one."""
        merged = left.copy()
        for k, v in right.items():
            if k not in merged:
                merged[k] = v
            elif type(merged[k]) != type(v):
                raise ValueError(
                    f'additional_kwargs["{k}"] already exists in this message,'
                    " but with a different type."
                )
            elif isinstance(merged[k], str):
                merged[k] += v
https://api.python.langchain.com/en/latest/_modules/langchain/schema/messages.html
6cbcc5c8b741-2
                )
            elif isinstance(merged[k], str):
                merged[k] += v
            elif isinstance(merged[k], dict):
                merged[k] = self._merge_kwargs_dict(merged[k], v)
            else:
                raise ValueError(
                    f"Additional kwargs key {k} already exists in this message."
                )
        return merged

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, BaseMessageChunk):
            # If both are (subclasses of) BaseMessageChunk,
            # concat into a single BaseMessageChunk
            return self.__class__(
                content=self.content + other.content,
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )
        else:
            raise TypeError(
                'unsupported operand type(s) for +: "'
                f"{self.__class__.__name__}"
                f'" and "{other.__class__.__name__}"'
            )


class HumanMessage(BaseMessage):
    """A Message from a human."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
    conversation.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "human"


class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    pass


class AIMessage(BaseMessage):
    """A Message from an AI."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
    conversation.
    """

    @property
    def type(self) -> str:
https://api.python.langchain.com/en/latest/_modules/langchain/schema/messages.html
6cbcc5c8b741-3
    conversation.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "ai"


class AIMessageChunk(AIMessage, BaseMessageChunk):
    pass


class SystemMessage(BaseMessage):
    """A Message for priming AI behavior, usually passed in as the first of a
    sequence of input messages.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "system"


class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    pass


class FunctionMessage(BaseMessage):
    """A Message for passing the result of executing a function back to a model."""

    name: str
    """The name of the function that was executed."""

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "function"


class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    pass


class ChatMessage(BaseMessage):
    """A Message that can be assigned an arbitrary speaker (i.e. role)."""

    role: str
    """The speaker / role of the Message."""

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "chat"


class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    pass


def _message_to_dict(message: BaseMessage) -> dict:
    return {"type": message.type, "data": message.dict()}


def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
    """Convert a sequence of Messages to a list of dictionaries.

    Args:
https://api.python.langchain.com/en/latest/_modules/langchain/schema/messages.html
6cbcc5c8b741-4
"""Convert a sequence of Messages to a list of dictionaries. Args: messages: Sequence of messages (as BaseMessages) to convert. Returns: List of messages as dicts. """ return [_message_to_dict(m) for m in messages] def _message_from_dict(message: dict) -> BaseMessage: _type = message["type"] if _type == "human": return HumanMessage(**message["data"]) elif _type == "ai": return AIMessage(**message["data"]) elif _type == "system": return SystemMessage(**message["data"]) elif _type == "chat": return ChatMessage(**message["data"]) elif _type == "function": return FunctionMessage(**message["data"]) else: raise ValueError(f"Got unexpected message type: {_type}") [docs]def messages_from_dict(messages: List[dict]) -> List[BaseMessage]: """Convert a sequence of messages from dicts to Message objects. Args: messages: Sequence of messages (as dicts) to convert. Returns: List of messages (BaseMessages). """ return [_message_from_dict(m) for m in messages]
https://api.python.langchain.com/en/latest/_modules/langchain/schema/messages.html
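The helpers above compose into a simple persistence round trip; this sketch uses only functions and classes defined in this module:

from langchain.schema.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    get_buffer_string,
    messages_from_dict,
    messages_to_dict,
)

history = [
    SystemMessage(content="You are terse."),
    HumanMessage(content="Hi!"),
    AIMessage(content="Hello."),
]

# Flatten to a prompt-ready transcript.
print(get_buffer_string(history))
# System: You are terse.
# Human: Hi!
# AI: Hello.

# Round-trip through plain dicts, e.g. for JSON persistence of chat history.
as_dicts = messages_to_dict(history)
restored = messages_from_dict(as_dicts)
assert restored == history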
9c674b37a3fa-0
Source code for langchain.schema.agent

from __future__ import annotations

from dataclasses import dataclass
from typing import NamedTuple, Union


@dataclass
class AgentAction:
    """A full description of an action for an ActionAgent to execute."""

    tool: str
    """The name of the Tool to execute."""
    tool_input: Union[str, dict]
    """The input to pass in to the Tool."""
    log: str
    """Additional information to log about the action."""


class AgentFinish(NamedTuple):
    """The final return value of an ActionAgent."""

    return_values: dict
    """Dictionary of return values."""
    log: str
    """Additional information to log about the return value."""
https://api.python.langchain.com/en/latest/_modules/langchain/schema/agent.html
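A small illustration of the two types above; the "search" tool name and the log strings are made up:

from langchain.schema.agent import AgentAction, AgentFinish

# An intermediate agent step: run the (hypothetical) "search" tool.
action = AgentAction(
    tool="search",
    tool_input="weather in Paris",
    log="Thought: I should look up the weather.\nAction: search",
)

# The terminal step: AgentFinish carries the values returned to the caller.
finish = AgentFinish(
    return_values={"output": "It is sunny in Paris."},
    log="Final Answer: It is sunny in Paris.",
)

assert isinstance(action.tool_input, str)
print(finish.return_values["output"])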
d5123ac3075e-0
Source code for langchain.schema.retriever

from __future__ import annotations

import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import TYPE_CHECKING, Any, Dict, List, Optional

from langchain.load.dump import dumpd
from langchain.load.serializable import Serializable
from langchain.schema.document import Document
from langchain.schema.runnable import Runnable, RunnableConfig

if TYPE_CHECKING:
    from langchain.callbacks.manager import (
        AsyncCallbackManagerForRetrieverRun,
        CallbackManagerForRetrieverRun,
        Callbacks,
    )


class BaseRetriever(Serializable, Runnable[str, List[Document]], ABC):
    """Abstract base class for a Document retrieval system.

    A retrieval system is defined as something that can take string queries and
    return the most 'relevant' Documents from some source.

    Example:
        .. code-block:: python

            class TFIDFRetriever(BaseRetriever, BaseModel):
                vectorizer: Any
                docs: List[Document]
                tfidf_array: Any
                k: int = 4

                class Config:
                    arbitrary_types_allowed = True

                def get_relevant_documents(self, query: str) -> List[Document]:
                    from sklearn.metrics.pairwise import cosine_similarity

                    # Ip -- (n_docs, x), Op -- (n_docs, n_Feats)
                    query_vec = self.vectorizer.transform([query])
                    # Op -- (n_docs, 1) -- Cosine Sim with each doc
                    results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
                    return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
    """  # noqa: E501

    class Config:
https://api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html
d5123ac3075e-1
""" # noqa: E501 class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True _new_arg_supported: bool = False _expects_other_args: bool = False tags: Optional[List[str]] = None """Optional list of tags associated with the retriever. Defaults to None These tags will be associated with each call to this retriever, and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a retriever with its use case. """ metadata: Optional[Dict[str, Any]] = None """Optional metadata associated with the retriever. Defaults to None This metadata will be associated with each call to this retriever, and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a retriever with its use case. """ def __init_subclass__(cls, **kwargs: Any) -> None: super().__init_subclass__(**kwargs) # Version upgrade for old retrievers that implemented the public # methods directly. if cls.get_relevant_documents != BaseRetriever.get_relevant_documents: warnings.warn( "Retrievers must implement abstract `_get_relevant_documents` method" " instead of `get_relevant_documents`", DeprecationWarning, ) swap = cls.get_relevant_documents cls.get_relevant_documents = ( # type: ignore[assignment] BaseRetriever.get_relevant_documents ) cls._get_relevant_documents = swap # type: ignore[assignment] if ( hasattr(cls, "aget_relevant_documents")
https://api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html
d5123ac3075e-2
        if (
            hasattr(cls, "aget_relevant_documents")
            and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
        ):
            warnings.warn(
                "Retrievers must implement abstract `_aget_relevant_documents` method"
                " instead of `aget_relevant_documents`",
                DeprecationWarning,
            )
            aswap = cls.aget_relevant_documents
            cls.aget_relevant_documents = (  # type: ignore[assignment]
                BaseRetriever.aget_relevant_documents
            )
            cls._aget_relevant_documents = aswap  # type: ignore[assignment]
        parameters = signature(cls._get_relevant_documents).parameters
        cls._new_arg_supported = parameters.get("run_manager") is not None
        # If a V1 retriever broke the interface and expects additional arguments
        cls._expects_other_args = (
            len(set(parameters.keys()) - {"self", "query", "run_manager"}) > 0
        )

    def invoke(
        self, input: str, config: Optional[RunnableConfig] = None
    ) -> List[Document]:
        return self.get_relevant_documents(input, **(config or {}))

    async def ainvoke(
        self, input: str, config: Optional[RunnableConfig] = None
    ) -> List[Document]:
        if type(self).aget_relevant_documents == BaseRetriever.aget_relevant_documents:
            # If the retriever doesn't implement async, use default implementation
            return await super().ainvoke(input, config)
        return await self.aget_relevant_documents(input, **(config or {}))

    @abstractmethod
    def _get_relevant_documents(
https://api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html
d5123ac3075e-3
    @abstractmethod
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use

        Returns:
            List of relevant documents
        """

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use

        Returns:
            List of relevant documents
        """
        raise NotImplementedError()

    def get_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Retrieve documents relevant to a query.

        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever. Defaults to None.
                These tags will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
            metadata: Optional metadata associated with the retriever. Defaults to None.
                This metadata will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.

        Returns:
            List of relevant documents
        """
        from langchain.callbacks.manager import CallbackManager
https://api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html
d5123ac3075e-4
            List of relevant documents
        """
        from langchain.callbacks.manager import CallbackManager

        callback_manager = CallbackManager.configure(
            callbacks,
            None,
            verbose=kwargs.get("verbose", False),
            inheritable_tags=tags,
            local_tags=self.tags,
            inheritable_metadata=metadata,
            local_metadata=self.metadata,
        )
        run_manager = callback_manager.on_retriever_start(
            dumpd(self),
            query,
            **kwargs,
        )
        try:
            _kwargs = kwargs if self._expects_other_args else {}
            if self._new_arg_supported:
                result = self._get_relevant_documents(
                    query, run_manager=run_manager, **_kwargs
                )
            else:
                result = self._get_relevant_documents(query, **_kwargs)
        except Exception as e:
            run_manager.on_retriever_error(e)
            raise e
        else:
            run_manager.on_retriever_end(
                result,
                **kwargs,
            )
            return result

    async def aget_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.

        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever. Defaults to None.
                These tags will be associated with each call to this retriever,
                and passed as arguments to the handlers defined in `callbacks`.
and passed as arguments to the handlers defined in `callbacks`. metadata: Optional metadata associated with the retriever. Defaults to None This metadata will be associated with each call to this retriever, and passed as arguments to the handlers defined in `callbacks`. Returns: List of relevant documents """ from langchain.callbacks.manager import AsyncCallbackManager callback_manager = AsyncCallbackManager.configure( callbacks, None, verbose=kwargs.get("verbose", False), inheritable_tags=tags, local_tags=self.tags, inheritable_metadata=metadata, local_metadata=self.metadata, ) run_manager = await callback_manager.on_retriever_start( dumpd(self), query, **kwargs, ) try: _kwargs = kwargs if self._expects_other_args else {} if self._new_arg_supported: result = await self._aget_relevant_documents( query, run_manager=run_manager, **_kwargs ) else: result = await self._aget_relevant_documents(query, **_kwargs) except Exception as e: await run_manager.on_retriever_error(e) raise e else: await run_manager.on_retriever_end( result, **kwargs, ) return result
https://api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html
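To make the retriever interface above concrete, here is a minimal sketch of a custom retriever. The class name, the stored `documents` field, and the substring-matching logic are illustrative assumptions, not part of the library; only the `_get_relevant_documents` hook and its `run_manager` keyword follow the contract defined above.

from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document


class SubstringRetriever(BaseRetriever):
    """Hypothetical retriever returning documents that contain the query."""

    documents: List[Document]  # illustrative corpus field, not a library attribute

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Implement only the private hook; callers go through
        # get_relevant_documents(), which wires up callbacks as shown above.
        return [
            doc
            for doc in self.documents
            if query.lower() in doc.page_content.lower()
        ]


retriever = SubstringRetriever(
    documents=[Document(page_content="LangChain retrievers fetch documents")]
)
print(retriever.get_relevant_documents("retrievers"))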
Source code for langchain.schema.document from __future__ import annotations from abc import ABC, abstractmethod from typing import Any, Sequence from pydantic import Field from langchain.load.serializable import Serializable [docs]class Document(Serializable): """Class for storing a piece of text and associated metadata.""" page_content: str """String text.""" metadata: dict = Field(default_factory=dict) """Arbitrary metadata about the page content (e.g., source, relationships to other documents, etc.). """ [docs]class BaseDocumentTransformer(ABC): """Abstract base class for document transformation systems. A document transformation system takes a sequence of Documents and returns a sequence of transformed Documents. Example: .. code-block:: python class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel): embeddings: Embeddings similarity_fn: Callable = cosine_similarity similarity_threshold: float = 0.95 class Config: arbitrary_types_allowed = True def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs( self.embeddings, stateful_documents ) included_idxs = _filter_similar_embeddings( embedded_documents, self.similarity_fn, self.similarity_threshold ) return [stateful_documents[i] for i in sorted(included_idxs)] async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError """ # noqa: E501 [docs] @abstractmethod def transform_documents(
[docs] @abstractmethod def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Transform a list of documents. Args: documents: A sequence of Documents to be transformed. Returns: A list of transformed Documents. """ [docs] @abstractmethod async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Asynchronously transform a list of documents. Args: documents: A sequence of Documents to be transformed. Returns: A list of transformed Documents. """
https://api.python.langchain.com/en/latest/_modules/langchain/schema/document.html
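As a usage sketch of the two classes above (the transformer name and its lowercasing behavior are invented for illustration):

from typing import Any, Sequence

from langchain.schema import BaseDocumentTransformer, Document


class LowercasingTransformer(BaseDocumentTransformer):
    """Hypothetical transformer that lowercases page_content, keeping metadata."""

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return [
            Document(page_content=doc.page_content.lower(), metadata=doc.metadata)
            for doc in documents
        ]

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        # Delegate to the sync path; a real transformer might be truly async.
        return self.transform_documents(documents, **kwargs)


docs = [Document(page_content="Hello WORLD", metadata={"source": "demo"})]
print(LowercasingTransformer().transform_documents(docs)[0].page_content)  # hello world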
Source code for langchain.schema.exceptions [docs]class LangChainException(Exception): """General LangChain exception."""
https://api.python.langchain.com/en/latest/_modules/langchain/schema/exceptions.html
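A brief usage sketch: because LangChainException subclasses Exception, it can act as a catch-all for library-specific failures (the message below is invented):

from langchain.schema.exceptions import LangChainException

try:
    raise LangChainException("chain failed to produce an answer")
except LangChainException as err:
    # Library-specific errors can be handled separately from generic ones.
    print(f"LangChain error: {err}")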
Source code for langchain.schema.prompt from __future__ import annotations from abc import ABC, abstractmethod from typing import List from langchain.load.serializable import Serializable from langchain.schema.messages import BaseMessage [docs]class PromptValue(Serializable, ABC): """Base abstract class for inputs to any language model. PromptValues can be converted to both LLM (pure text-generation) inputs and ChatModel inputs. """ [docs] @abstractmethod def to_string(self) -> str: """Return prompt value as string.""" [docs] @abstractmethod def to_messages(self) -> List[BaseMessage]: """Return prompt as a list of Messages."""
https://api.python.langchain.com/en/latest/_modules/langchain/schema/prompt.html
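A minimal sketch of a concrete PromptValue; the class name and its single text field are assumptions for illustration (the library ships its own implementations):

from typing import List

from langchain.schema import HumanMessage, PromptValue
from langchain.schema.messages import BaseMessage


class PlainPromptValue(PromptValue):
    """Hypothetical PromptValue wrapping a single string."""

    text: str

    def to_string(self) -> str:
        # Pure text-completion models consume the raw string.
        return self.text

    def to_messages(self) -> List[BaseMessage]:
        # Chat models consume the same prompt as a single human message.
        return [HumanMessage(content=self.text)]


pv = PlainPromptValue(text="Tell me a joke")
print(pv.to_string())
print(pv.to_messages())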
Source code for langchain.schema.output_parser from __future__ import annotations import asyncio from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, TypeVar, Union from langchain.load.serializable import Serializable from langchain.schema.messages import BaseMessage from langchain.schema.output import ChatGeneration, Generation from langchain.schema.prompt import PromptValue from langchain.schema.runnable import Runnable, RunnableConfig T = TypeVar("T") [docs]class BaseLLMOutputParser(Serializable, Generic[T], ABC): """Abstract base class for parsing the outputs of a model.""" [docs] @abstractmethod def parse_result(self, result: List[Generation]) -> T: """Parse a list of candidate model Generations into a specific format. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ [docs] async def aparse_result(self, result: List[Generation]) -> T: """Parse a list of candidate model Generations into a specific format. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ return await asyncio.get_running_loop().run_in_executor( None, self.parse_result, result ) [docs]class BaseGenerationOutputParser( BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T] ): [docs] def invoke( self, input: str | BaseMessage, config: RunnableConfig | None = None ) -> T:
) -> T: if isinstance(input, BaseMessage): return self._call_with_config( lambda inner_input: self.parse_result( [ChatGeneration(message=inner_input)] ), input, config, run_type="parser", ) else: return self._call_with_config( lambda inner_input: self.parse_result([Generation(text=inner_input)]), input, config, run_type="parser", ) [docs] async def ainvoke( self, input: str | BaseMessage, config: RunnableConfig | None = None ) -> T: if isinstance(input, BaseMessage): return await self._acall_with_config( lambda inner_input: self.aparse_result( [ChatGeneration(message=inner_input)] ), input, config, run_type="parser", ) else: return await self._acall_with_config( lambda inner_input: self.aparse_result([Generation(text=inner_input)]), input, config, run_type="parser", ) [docs]class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]): """Base class to parse the output of an LLM call. Output parsers help structure language model responses. Example: .. code-block:: python class BooleanOutputParser(BaseOutputParser[bool]): true_val: str = "YES" false_val: str = "NO" def parse(self, text: str) -> bool: cleaned_text = text.strip().upper() if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
if cleaned_text not in (self.true_val.upper(), self.false_val.upper()): raise OutputParserException( f"BooleanOutputParser expected output value to either be " f"{self.true_val} or {self.false_val} (case-insensitive). " f"Received {cleaned_text}." ) return cleaned_text == self.true_val.upper() @property def _type(self) -> str: return "boolean_output_parser" """ # noqa: E501 [docs] def invoke( self, input: str | BaseMessage, config: RunnableConfig | None = None ) -> T: if isinstance(input, BaseMessage): return self._call_with_config( lambda inner_input: self.parse_result( [ChatGeneration(message=inner_input)] ), input, config, run_type="parser", ) else: return self._call_with_config( lambda inner_input: self.parse_result([Generation(text=inner_input)]), input, config, run_type="parser", ) [docs] async def ainvoke( self, input: str | BaseMessage, config: RunnableConfig | None = None ) -> T: if isinstance(input, BaseMessage): return await self._acall_with_config( lambda inner_input: self.aparse_result( [ChatGeneration(message=inner_input)] ), input, config, run_type="parser", ) else: return await self._acall_with_config( lambda inner_input: self.aparse_result([Generation(text=inner_input)]), input, config, run_type="parser",
input, config, run_type="parser", ) [docs] def parse_result(self, result: List[Generation]) -> T: """Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ return self.parse(result[0].text) [docs] @abstractmethod def parse(self, text: str) -> T: """Parse a single string model output into some structure. Args: text: String output of a language model. Returns: Structured output. """ [docs] async def aparse_result(self, result: List[Generation]) -> T: """Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ return await self.aparse(result[0].text) [docs] async def aparse(self, text: str) -> T: """Parse a single string model output into some structure. Args: text: String output of a language model. Returns: Structured output. """
return await asyncio.get_running_loop().run_in_executor(None, self.parse, text)
# TODO: rename 'completion' -> 'text'.
[docs] def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the OutputParser
wants to retry or fix the output in some way, and needs
information from the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input PromptValue.
Returns:
Structured output.
"""
return self.parse(completion)
[docs] def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
raise NotImplementedError(
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
[docs] def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of output parser."""
output_parser_dict = super().dict(**kwargs)
output_parser_dict["_type"] = self._type
return output_parser_dict
[docs]class StrOutputParser(BaseOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@property
def lc_serializable(self) -> bool:
"""Whether the class is LangChain serializable."""
return True
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
[docs] def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
# TODO: Deprecate
NoOpOutputParser = StrOutputParser
[docs]class OutputParserException(ValueError):
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution
errors that also may arise inside the output parser.
OutputParserExceptions will be available to catch and handle in ways
to fix the parsing error, while other errors will be raised.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue.
llm_output: String model output which is erroring.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
"""
def __init__(
self,
error: Any,
observation: Optional[str] = None,
llm_output: Optional[str] = None,
send_to_llm: bool = False,
):
super(OutputParserException, self).__init__(error)
if send_to_llm:
if observation is None or llm_output is None:
raise ValueError( "Arguments 'observation' & 'llm_output'" " are required if 'send_to_llm' is True" ) self.observation = observation self.llm_output = llm_output self.send_to_llm = send_to_llm
https://api.python.langchain.com/en/latest/_modules/langchain/schema/output_parser.html
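Tying the hooks above together, here is a sketch of a small parser that returns a list of strings and raises OutputParserException on empty output. The class and its output format are illustrative (LangChain ships similar list parsers elsewhere); only the `parse`, `get_format_instructions`, and `_type` overrides follow the interface defined above.

from typing import List

from langchain.schema import BaseOutputParser, OutputParserException


class CommaListParser(BaseOutputParser[List[str]]):
    """Hypothetical parser splitting model output on commas."""

    def parse(self, text: str) -> List[str]:
        items = [part.strip() for part in text.split(",") if part.strip()]
        if not items:
            # Signal a recoverable parsing failure, as described above.
            raise OutputParserException(
                "Expected a comma-separated list.", llm_output=text
            )
        return items

    def get_format_instructions(self) -> str:
        return "Answer with a comma-separated list, e.g. red, green, blue."

    @property
    def _type(self) -> str:
        return "comma_list"


print(CommaListParser().parse("red, green, blue"))  # ['red', 'green', 'blue']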
Source code for langchain.schema.runnable from __future__ import annotations import asyncio from abc import ABC, abstractmethod from concurrent.futures import ThreadPoolExecutor from typing import ( Any, AsyncIterator, Awaitable, Callable, Coroutine, Dict, Generic, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypedDict, TypeVar, Union, cast, ) from pydantic import Field from langchain.callbacks.base import BaseCallbackManager, Callbacks from langchain.load.dump import dumpd from langchain.load.serializable import Serializable async def _gated_coro(semaphore: asyncio.Semaphore, coro: Coroutine) -> Any: async with semaphore: return await coro async def _gather_with_concurrency(n: Union[int, None], *coros: Coroutine) -> list: if n is None: return await asyncio.gather(*coros) semaphore = asyncio.Semaphore(n) return await asyncio.gather(*(_gated_coro(semaphore, c) for c in coros)) [docs]class RunnableConfig(TypedDict, total=False): tags: List[str] """ Tags for this call and any sub-calls (eg. a Chain calling an LLM). You can use these to filter calls. """ metadata: Dict[str, Any] """ Metadata for this call and any sub-calls (eg. a Chain calling an LLM). Keys should be strings, values should be JSON-serializable. """ callbacks: Callbacks """
""" callbacks: Callbacks """ Callbacks for this call and any sub-calls (eg. a Chain calling an LLM). Tags are passed to all callbacks, metadata is passed to handle*Start callbacks. """ Input = TypeVar("Input") # Output type should implement __concat__, as eg str, list, dict do Output = TypeVar("Output") Other = TypeVar("Other") [docs]class Runnable(Generic[Input, Output], ABC): def __or__( self, other: Union[ Runnable[Any, Other], Callable[[Any], Other], Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other]]], ], ) -> RunnableSequence[Input, Other]: return RunnableSequence(first=self, last=_coerce_to_runnable(other)) def __ror__( self, other: Union[ Runnable[Other, Any], Callable[[Any], Other], Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any]]], ], ) -> RunnableSequence[Other, Output]: return RunnableSequence(first=_coerce_to_runnable(other), last=self) [docs] @abstractmethod def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output: ... [docs] async def ainvoke( self, input: Input, config: Optional[RunnableConfig] = None ) -> Output: return await asyncio.get_running_loop().run_in_executor( None, self.invoke, input, config ) [docs] def batch( self, inputs: List[Input],
[docs] def batch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: configs = self._get_config_list(config, len(inputs)) # If there's only one input, don't bother with the executor if len(inputs) == 1: return [self.invoke(inputs[0], configs[0])] with ThreadPoolExecutor(max_workers=max_concurrency) as executor: return list(executor.map(self.invoke, inputs, configs)) [docs] async def abatch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: configs = self._get_config_list(config, len(inputs)) coros = map(self.ainvoke, inputs, configs) return await _gather_with_concurrency(max_concurrency, *coros) [docs] def stream( self, input: Input, config: Optional[RunnableConfig] = None ) -> Iterator[Output]: yield self.invoke(input, config) [docs] async def astream( self, input: Input, config: Optional[RunnableConfig] = None ) -> AsyncIterator[Output]: yield await self.ainvoke(input, config) [docs] def bind(self, **kwargs: Any) -> Runnable[Input, Output]: """ Bind arguments to a Runnable, returning a new Runnable. """ return RunnableBinding(bound=self, kwargs=kwargs)
""" return RunnableBinding(bound=self, kwargs=kwargs) def _get_config_list( self, config: Optional[Union[RunnableConfig, List[RunnableConfig]]], length: int ) -> List[RunnableConfig]: if isinstance(config, list) and len(config) != length: raise ValueError( f"config must be a list of the same length as inputs, " f"but got {len(config)} configs for {length} inputs" ) return ( config if isinstance(config, list) else [config.copy() if config is not None else {} for _ in range(length)] ) def _call_with_config( self, func: Callable[[Input], Output], input: Input, config: Optional[RunnableConfig], run_type: Optional[str] = None, ) -> Output: from langchain.callbacks.manager import CallbackManager config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), inheritable_tags=config.get("tags"), inheritable_metadata=config.get("metadata"), ) run_manager = callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input}, run_type=run_type, ) try: output = func(input) except Exception as e: run_manager.on_chain_error(e) raise else: run_manager.on_chain_end( output if isinstance(output, dict) else {"output": output} ) return output async def _acall_with_config( self, func: Callable[[Input], Awaitable[Output]],
self, func: Callable[[Input], Awaitable[Output]], input: Input, config: Optional[RunnableConfig], run_type: Optional[str] = None, ) -> Output: from langchain.callbacks.manager import AsyncCallbackManager config = config or {} callback_manager = AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), inheritable_tags=config.get("tags"), inheritable_metadata=config.get("metadata"), ) run_manager = await callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input}, run_type=run_type, ) try: output = await func(input) except Exception as e: await run_manager.on_chain_error(e) raise else: await run_manager.on_chain_end( output if isinstance(output, dict) else {"output": output} ) return output [docs] def with_fallbacks( self, fallbacks: Sequence[Runnable[Input, Output]], *, exceptions_to_handle: Tuple[Type[BaseException]] = (Exception,), ) -> RunnableWithFallbacks[Input, Output]: return RunnableWithFallbacks( runnable=self, fallbacks=fallbacks, exceptions_to_handle=exceptions_to_handle, ) [docs]class RunnableWithFallbacks(Serializable, Runnable[Input, Output]): runnable: Runnable[Input, Output] fallbacks: Sequence[Runnable[Input, Output]] exceptions_to_handle: Tuple[Type[BaseException]] = (Exception,) class Config: arbitrary_types_allowed = True @property
class Config: arbitrary_types_allowed = True @property def runnables(self) -> Iterator[Runnable[Input, Output]]: yield self.runnable yield from self.fallbacks [docs] def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output: from langchain.callbacks.manager import CallbackManager # setup callbacks config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) first_error = None for runnable in self.runnables: try: output = runnable.invoke( input, _patch_config(config, run_manager.get_child()), ) except self.exceptions_to_handle as e: if first_error is None: first_error = e except BaseException as e: run_manager.on_chain_error(e) raise e else: run_manager.on_chain_end( output if isinstance(output, dict) else {"output": output} ) return output if first_error is None: raise ValueError("No error stored at end of fallbacks.") run_manager.on_chain_error(first_error) raise first_error [docs] async def ainvoke( self, input: Input, config: Optional[RunnableConfig] = None ) -> Output:
) -> Output: from langchain.callbacks.manager import AsyncCallbackManager # setup callbacks config = config or {} callback_manager = AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = await callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) first_error = None for runnable in self.runnables: try: output = await runnable.ainvoke( input, _patch_config(config, run_manager.get_child()), ) except self.exceptions_to_handle as e: if first_error is None: first_error = e except BaseException as e: await run_manager.on_chain_error(e) raise e else: await run_manager.on_chain_end( output if isinstance(output, dict) else {"output": output} ) return output if first_error is None: raise ValueError("No error stored at end of fallbacks.") await run_manager.on_chain_error(first_error) raise first_error [docs] def batch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: from langchain.callbacks.manager import CallbackManager # setup callbacks configs = self._get_config_list(config, len(inputs))
# setup callbacks configs = self._get_config_list(config, len(inputs)) callback_managers = [ CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) for config in configs ] # start the root runs, one per input run_managers = [ cm.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) for cm, input in zip(callback_managers, inputs) ] first_error = None for runnable in self.runnables: try: outputs = runnable.batch( inputs, [ # each step a child run of the corresponding root run _patch_config(config, rm.get_child()) for rm, config in zip(run_managers, configs) ], max_concurrency=max_concurrency, ) except self.exceptions_to_handle as e: if first_error is None: first_error = e except BaseException as e: for rm in run_managers: rm.on_chain_error(e) raise e else: for rm, output in zip(run_managers, outputs): rm.on_chain_end( output if isinstance(output, dict) else {"output": output} ) return outputs if first_error is None: raise ValueError("No error stored at end of fallbacks.") for rm in run_managers: rm.on_chain_error(first_error) raise first_error
rm.on_chain_error(first_error) raise first_error [docs] async def abatch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, ) # setup callbacks configs = self._get_config_list(config, len(inputs)) callback_managers = [ AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) for config in configs ] # start the root runs, one per input run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather( *( cm.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) for cm, input in zip(callback_managers, inputs) ) ) first_error = None for runnable in self.runnables: try: outputs = await runnable.abatch( inputs, [ # each step a child run of the corresponding root run _patch_config(config, rm.get_child()) for rm, config in zip(run_managers, configs) ], max_concurrency=max_concurrency, ) except self.exceptions_to_handle as e: if first_error is None: first_error = e
if first_error is None: first_error = e except BaseException as e: await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers)) else: await asyncio.gather( *( rm.on_chain_end( output if isinstance(output, dict) else {"output": output} ) for rm, output in zip(run_managers, outputs) ) ) return outputs if first_error is None: raise ValueError("No error stored at end of fallbacks.") await asyncio.gather(*(rm.on_chain_error(first_error) for rm in run_managers)) raise first_error [docs]class RunnableSequence(Serializable, Runnable[Input, Output]): first: Runnable[Input, Any] middle: List[Runnable[Any, Any]] = Field(default_factory=list) last: Runnable[Any, Output] @property def steps(self) -> List[Runnable[Any, Any]]: return [self.first] + self.middle + [self.last] @property def lc_serializable(self) -> bool: return True class Config: arbitrary_types_allowed = True def __or__( self, other: Union[ Runnable[Any, Other], Callable[[Any], Other], Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other]]], ], ) -> RunnableSequence[Input, Other]: if isinstance(other, RunnableSequence): return RunnableSequence( first=self.first, middle=self.middle + [self.last] + [other.first] + other.middle, last=other.last, ) else: return RunnableSequence(
last=other.last, ) else: return RunnableSequence( first=self.first, middle=self.middle + [self.last], last=_coerce_to_runnable(other), ) def __ror__( self, other: Union[ Runnable[Other, Any], Callable[[Any], Other], Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any]]], ], ) -> RunnableSequence[Other, Output]: if isinstance(other, RunnableSequence): return RunnableSequence( first=other.first, middle=other.middle + [other.last] + [self.first] + self.middle, last=self.last, ) else: return RunnableSequence( first=_coerce_to_runnable(other), middle=[self.first] + self.middle, last=self.last, ) [docs] def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output: from langchain.callbacks.manager import CallbackManager # setup callbacks config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) # invoke all steps in sequence try: for step in self.steps: input = step.invoke( input,
for step in self.steps: input = step.invoke( input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) # finish the root run except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise else: run_manager.on_chain_end( input if isinstance(input, dict) else {"output": input} ) return cast(Output, input) [docs] async def ainvoke( self, input: Input, config: Optional[RunnableConfig] = None ) -> Output: from langchain.callbacks.manager import AsyncCallbackManager # setup callbacks config = config or {} callback_manager = AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = await callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) # invoke all steps in sequence try: for step in self.steps: input = await step.ainvoke( input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) # finish the root run except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise else: await run_manager.on_chain_end( input if isinstance(input, dict) else {"output": input} )
input if isinstance(input, dict) else {"output": input} ) return cast(Output, input) [docs] def batch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: from langchain.callbacks.manager import CallbackManager # setup callbacks configs = self._get_config_list(config, len(inputs)) callback_managers = [ CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) for config in configs ] # start the root runs, one per input run_managers = [ cm.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) for cm, input in zip(callback_managers, inputs) ] # invoke try: for step in self.steps: inputs = step.batch( inputs, [ # each step a child run of the corresponding root run _patch_config(config, rm.get_child()) for rm, config in zip(run_managers, configs) ], max_concurrency=max_concurrency, ) # finish the root runs except (KeyboardInterrupt, Exception) as e: for rm in run_managers: rm.on_chain_error(e) raise else: for rm, input in zip(run_managers, inputs):
else: for rm, input in zip(run_managers, inputs): rm.on_chain_end(input if isinstance(input, dict) else {"output": input}) return cast(List[Output], inputs) [docs] async def abatch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, ) # setup callbacks configs = self._get_config_list(config, len(inputs)) callback_managers = [ AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) for config in configs ] # start the root runs, one per input run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather( *( cm.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) for cm, input in zip(callback_managers, inputs) ) ) # invoke .batch() on each step # this uses batching optimizations in Runnable subclasses, like LLM try: for step in self.steps: inputs = await step.abatch( inputs, [ # each step a child run of the corresponding root run _patch_config(config, rm.get_child())
_patch_config(config, rm.get_child()) for rm, config in zip(run_managers, configs) ], max_concurrency=max_concurrency, ) # finish the root runs except (KeyboardInterrupt, Exception) as e: await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers)) raise else: await asyncio.gather( *( rm.on_chain_end( input if isinstance(input, dict) else {"output": input} ) for rm, input in zip(run_managers, inputs) ) ) return cast(List[Output], inputs) [docs] def stream( self, input: Input, config: Optional[RunnableConfig] = None ) -> Iterator[Output]: from langchain.callbacks.manager import CallbackManager # setup callbacks config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) # invoke the first steps try: for step in [self.first] + self.middle: input = step.invoke( input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise
run_manager.on_chain_error(e) raise # stream the last step final: Union[Output, None] = None final_supported = True try: for output in self.last.stream( input, # mark the last step as a child run _patch_config(config, run_manager.get_child()), ): yield output # Accumulate output if possible, otherwise disable accumulation if final_supported: if final is None: final = output else: try: final += output # type: ignore[operator] except TypeError: final = None final_supported = False pass # finish the root run except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise else: run_manager.on_chain_end( final if isinstance(final, dict) else {"output": final} ) [docs] async def astream( self, input: Input, config: Optional[RunnableConfig] = None ) -> AsyncIterator[Output]: from langchain.callbacks.manager import AsyncCallbackManager # setup callbacks config = config or {} callback_manager = AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = await callback_manager.on_chain_start( dumpd(self), input if isinstance(input, dict) else {"input": input} ) # invoke the first steps try:
) # invoke the first steps try: for step in [self.first] + self.middle: input = await step.ainvoke( input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise # stream the last step final: Union[Output, None] = None final_supported = True try: async for output in self.last.astream( input, # mark the last step as a child run _patch_config(config, run_manager.get_child()), ): yield output # Accumulate output if possible, otherwise disable accumulation if final_supported: if final is None: final = output else: try: final += output # type: ignore[operator] except TypeError: final = None final_supported = False pass # finish the root run except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise else: await run_manager.on_chain_end( final if isinstance(final, dict) else {"output": final} ) [docs]class RunnableMap(Serializable, Runnable[Input, Dict[str, Any]]): steps: Mapping[str, Runnable[Input, Any]] def __init__( self, steps: Mapping[ str, Union[ Runnable[Input, Any], Callable[[Input], Any], Mapping[str, Union[Runnable[Input, Any], Callable[[Input], Any]]], ], ],
], ], ) -> None: super().__init__( steps={key: _coerce_to_runnable(r) for key, r in steps.items()} ) @property def lc_serializable(self) -> bool: return True class Config: arbitrary_types_allowed = True [docs] def invoke( self, input: Input, config: Optional[RunnableConfig] = None ) -> Dict[str, Any]: from langchain.callbacks.manager import CallbackManager # setup callbacks config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = callback_manager.on_chain_start(dumpd(self), {"input": input}) # gather results from all steps try: # copy to avoid issues from the caller mutating the steps during invoke() steps = dict(self.steps) with ThreadPoolExecutor() as executor: futures = [ executor.submit( step.invoke, input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) for step in steps.values() ] output = {key: future.result() for key, future in zip(steps, futures)} # finish the root run except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise else: run_manager.on_chain_end(output) return output [docs] async def ainvoke(
return output [docs] async def ainvoke( self, input: Input, config: Optional[RunnableConfig] = None ) -> Dict[str, Any]: from langchain.callbacks.manager import AsyncCallbackManager # setup callbacks config = config or {} callback_manager = AsyncCallbackManager.configure( inheritable_callbacks=config.get("callbacks"), local_callbacks=None, verbose=False, inheritable_tags=config.get("tags"), local_tags=None, inheritable_metadata=config.get("metadata"), local_metadata=None, ) # start the root run run_manager = await callback_manager.on_chain_start( dumpd(self), {"input": input} ) # gather results from all steps try: # copy to avoid issues from the caller mutating the steps during invoke() steps = dict(self.steps) results = await asyncio.gather( *( step.ainvoke( input, # mark each step as a child run _patch_config(config, run_manager.get_child()), ) for step in steps.values() ) ) output = {key: value for key, value in zip(steps, results)} # finish the root run except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise else: await run_manager.on_chain_end(output) return output [docs]class RunnableLambda(Runnable[Input, Output]): [docs] def __init__(self, func: Callable[[Input], Output]) -> None: if callable(func): self.func = func else: raise TypeError( "Expected a callable type for `func`."
f" Instead got an unsupported type: {type(func)}"
)
def __eq__(self, other: Any) -> bool:
if isinstance(other, RunnableLambda):
return self.func == other.func
else:
return False
[docs] def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
return self._call_with_config(self.func, input, config)
[docs]class RunnablePassthrough(Serializable, Runnable[Input, Input]):
@property
def lc_serializable(self) -> bool:
return True
[docs] def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Input:
return self._call_with_config(lambda x: x, input, config)
[docs]class RunnableBinding(Serializable, Runnable[Input, Output]):
bound: Runnable[Input, Output]
kwargs: Mapping[str, Any]
class Config:
arbitrary_types_allowed = True
@property
def lc_serializable(self) -> bool:
return True
[docs] def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
return self.__class__(bound=self.bound, kwargs={**self.kwargs, **kwargs})
[docs] def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
return self.bound.invoke(input, config, **self.kwargs)
[docs] async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None
) -> Output:
return await self.bound.ainvoke(input, config, **self.kwargs) [docs] def batch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: return self.bound.batch( inputs, config, max_concurrency=max_concurrency, **self.kwargs ) [docs] async def abatch( self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: return await self.bound.abatch( inputs, config, max_concurrency=max_concurrency, **self.kwargs ) [docs] def stream( self, input: Input, config: Optional[RunnableConfig] = None ) -> Iterator[Output]: yield from self.bound.stream(input, config, **self.kwargs) [docs] async def astream( self, input: Input, config: Optional[RunnableConfig] = None ) -> AsyncIterator[Output]: async for item in self.bound.astream(input, config, **self.kwargs): yield item [docs]class RouterInput(TypedDict): key: str input: Any [docs]class RouterRunnable( Serializable, Generic[Input, Output], Runnable[RouterInput, Output] ): runnables: Mapping[str, Runnable[Input, Output]] def __init__(self, runnables: Mapping[str, Runnable[Input, Output]]) -> None:
super().__init__(runnables=runnables) class Config: arbitrary_types_allowed = True @property def lc_serializable(self) -> bool: return True def __or__( self, other: Union[ Runnable[Any, Other], Callable[[Any], Other], Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other]]], Mapping[str, Any], ], ) -> RunnableSequence[RouterInput, Other]: return RunnableSequence(first=self, last=_coerce_to_runnable(other)) def __ror__( self, other: Union[ Runnable[Other, Any], Callable[[Any], Other], Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any]]], Mapping[str, Any], ], ) -> RunnableSequence[Other, Output]: return RunnableSequence(first=_coerce_to_runnable(other), last=self) [docs] def invoke( self, input: RouterInput, config: Optional[RunnableConfig] = None ) -> Output: key = input["key"] actual_input = input["input"] if key not in self.runnables: raise ValueError(f"No runnable associated with key '{key}'") runnable = self.runnables[key] return runnable.invoke(actual_input, config) [docs] async def ainvoke( self, input: RouterInput, config: Optional[RunnableConfig] = None ) -> Output: key = input["key"] actual_input = input["input"] if key not in self.runnables: raise ValueError(f"No runnable associated with key '{key}'")
raise ValueError(f"No runnable associated with key '{key}'") runnable = self.runnables[key] return await runnable.ainvoke(actual_input, config) [docs] def batch( self, inputs: List[RouterInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: keys = [input["key"] for input in inputs] actual_inputs = [input["input"] for input in inputs] if any(key not in self.runnables for key in keys): raise ValueError("One or more keys do not have a corresponding runnable") runnables = [self.runnables[key] for key in keys] configs = self._get_config_list(config, len(inputs)) with ThreadPoolExecutor(max_workers=max_concurrency) as executor: return list( executor.map( lambda runnable, input, config: runnable.invoke(input, config), runnables, actual_inputs, configs, ) ) [docs] async def abatch( self, inputs: List[RouterInput], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, max_concurrency: Optional[int] = None, ) -> List[Output]: keys = [input["key"] for input in inputs] actual_inputs = [input["input"] for input in inputs] if any(key not in self.runnables for key in keys): raise ValueError("One or more keys do not have a corresponding runnable") runnables = [self.runnables[key] for key in keys]
runnables = [self.runnables[key] for key in keys] configs = self._get_config_list(config, len(inputs)) return await _gather_with_concurrency( max_concurrency, *( runnable.ainvoke(input, config) for runnable, input, config in zip(runnables, actual_inputs, configs) ), ) [docs] def stream( self, input: RouterInput, config: Optional[RunnableConfig] = None ) -> Iterator[Output]: key = input["key"] actual_input = input["input"] if key not in self.runnables: raise ValueError(f"No runnable associated with key '{key}'") runnable = self.runnables[key] yield from runnable.stream(actual_input, config) [docs] async def astream( self, input: RouterInput, config: Optional[RunnableConfig] = None ) -> AsyncIterator[Output]: key = input["key"] actual_input = input["input"] if key not in self.runnables: raise ValueError(f"No runnable associated with key '{key}'") runnable = self.runnables[key] async for output in runnable.astream(actual_input, config): yield output def _patch_config( config: RunnableConfig, callback_manager: BaseCallbackManager ) -> RunnableConfig: config = config.copy() config["callbacks"] = callback_manager return config def _coerce_to_runnable( thing: Union[ Runnable[Input, Output], Callable[[Input], Output], Mapping[str, Union[Runnable[Input, Output], Callable[[Input], Output]]], ] ) -> Runnable[Input, Output]:
) -> Runnable[Input, Output]:
if isinstance(thing, Runnable):
return thing
elif callable(thing):
return RunnableLambda(thing)
elif isinstance(thing, dict):
runnables = {key: _coerce_to_runnable(r) for key, r in thing.items()}
return cast(Runnable[Input, Output], RunnableMap(steps=runnables))
else:
raise TypeError(
f"Expected a Runnable, callable or dict."
f" Instead got an unsupported type: {type(thing)}"
)
https://api.python.langchain.com/en/latest/_modules/langchain/schema/runnable.html
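A short usage sketch of the composition primitives defined above; every lambda and value is invented for illustration:

from langchain.schema.runnable import (
    RouterRunnable,
    RunnableLambda,
    RunnableMap,
    RunnablePassthrough,
)

# `|` composes Runnables into a RunnableSequence.
add_then_double = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
assert add_then_double.invoke(3) == 8
assert add_then_double.batch([1, 2, 3]) == [4, 6, 8]

# A mapping of steps runs in parallel and returns a dict of results.
fan_out = RunnableMap(
    steps={
        "doubled": RunnableLambda(lambda x: x * 2),
        "original": RunnablePassthrough(),
    }
)
assert fan_out.invoke(5) == {"doubled": 10, "original": 5}

# RouterRunnable dispatches on the "key" field of its input.
router = RouterRunnable(
    runnables={
        "upper": RunnableLambda(lambda s: s.upper()),
        "lower": RunnableLambda(lambda s: s.lower()),
    }
)
assert router.invoke({"key": "upper", "input": "hello"}) == "HELLO"

# with_fallbacks() tries each fallback in turn when the primary raises.
def flaky(_: int) -> int:
    raise ValueError("primary failed")

resilient = RunnableLambda(flaky).with_fallbacks([RunnableLambda(lambda x: x * 10)])
assert resilient.invoke(4) == 40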
Source code for langchain.schema.prompt_template from __future__ import annotations import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Mapping, Optional, Union import yaml from pydantic import Field, root_validator from langchain.load.serializable import Serializable from langchain.schema.document import Document from langchain.schema.output_parser import BaseOutputParser from langchain.schema.prompt import PromptValue from langchain.schema.runnable import Runnable, RunnableConfig [docs]class BasePromptTemplate(Serializable, Runnable[Dict, PromptValue], ABC): """Base class for all prompt templates, returning a prompt.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" output_parser: Optional[BaseOutputParser] = None """How to parse the output of calling an LLM on this formatted prompt.""" partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field( default_factory=dict ) @property def lc_serializable(self) -> bool: return True class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def invoke(self, input: Dict, config: RunnableConfig | None = None) -> PromptValue: return self._call_with_config( lambda inner_input: self.format_prompt(**inner_input), input, config, run_type="prompt", ) [docs] @abstractmethod def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" @root_validator() def validate_variable_names(cls, values: Dict) -> Dict: """Validate variable names do not include restricted names."""
"""Validate variable names do not include restricted names.""" if "stop" in values["input_variables"]: raise ValueError( "Cannot have an input variable named 'stop', as it is used internally," " please rename." ) if "stop" in values["partial_variables"]: raise ValueError( "Cannot have an partial variable named 'stop', as it is used " "internally, please rename." ) overall = set(values["input_variables"]).intersection( values["partial_variables"] ) if overall: raise ValueError( f"Found overlapping input and partial variables: {overall}" ) return values [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: """Return a partial of the prompt template.""" prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]: # Get partial params: partial_kwargs = { k: v if isinstance(v, str) else v() for k, v in self.partial_variables.items() } return {**partial_kwargs, **kwargs} [docs] @abstractmethod def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python
A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ @property def _prompt_type(self) -> str: """Return the prompt type key.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) prompt_dict["_type"] = self._prompt_type return prompt_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the prompt. Args: file_path: Path to directory to save prompt to. Example: .. code-block:: python prompt.save(file_path="path/prompt.yaml") """ if self.partial_variables: raise ValueError("Cannot save prompt with partial variables.") # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs]def format_document(doc: Document, prompt: BasePromptTemplate) -> str:
"""Format a document into a string based on a prompt template.
First, this pulls information from the document from two sources:
1. `page_content`: This takes the information from the `document.page_content` and assigns it to a variable named `page_content`.
2. metadata: This takes information from `document.metadata` and assigns it to variables of the same name.
Those variables are then passed into the `prompt` to produce a formatted string.
Args:
doc: Document, the page_content and metadata will be used to create the final string.
prompt: BasePromptTemplate, will be used to format the page_content and metadata into the final string.
Returns:
The formatted document as a string.
Example:
.. code-block:: python
from langchain.schema import Document
from langchain.prompts import PromptTemplate
doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
format_document(doc, prompt)
>>> "Page 1: This is a joke"
"""
base_info = {"page_content": doc.page_content, **doc.metadata}
missing_metadata = set(prompt.input_variables).difference(base_info)
if len(missing_metadata) > 0:
required_metadata = [
iv for iv in prompt.input_variables if iv != "page_content"
]
raise ValueError(
f"Document prompt requires documents to have metadata variables: "
f"{required_metadata}. Received document with missing metadata: "
f"{list(missing_metadata)}." ) document_info = {k: base_info[k] for k in prompt.input_variables} return prompt.format(**document_info)
https://api.python.langchain.com/en/latest/_modules/langchain/schema/prompt_template.html
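A brief usage sketch of partial variables and format_document; the template strings and document contents are invented:

from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.schema.prompt_template import format_document

# partial() pins a subset of the input variables ahead of time.
template = PromptTemplate.from_template("{greeting}, {name}!")
hello = template.partial(greeting="Hello")
assert hello.input_variables == ["name"]
assert hello.format(name="World") == "Hello, World!"

# format_document() merges page_content and metadata into one string.
doc = Document(page_content="LangChain schema notes", metadata={"source": "notes.md"})
doc_prompt = PromptTemplate.from_template("From {source}: {page_content}")
assert format_document(doc, doc_prompt) == "From notes.md: LangChain schema notes"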
Source code for langchain.schema.output from __future__ import annotations from copy import deepcopy from typing import Any, Dict, List, Optional from uuid import UUID from pydantic import BaseModel, root_validator from langchain.load.serializable import Serializable from langchain.schema.messages import BaseMessage, BaseMessageChunk [docs]class Generation(Serializable): """A single text generation output.""" text: str """Generated text output.""" generation_info: Optional[Dict[str, Any]] = None """Raw response from the provider. May include things like the reason for finishing or token log probabilities. """ # TODO: add log probs as separate attribute @property def lc_serializable(self) -> bool: """Whether this class is LangChain serializable.""" return True [docs]class GenerationChunk(Generation): def __add__(self, other: GenerationChunk) -> GenerationChunk: if isinstance(other, GenerationChunk): generation_info = ( {**(self.generation_info or {}), **(other.generation_info or {})} if self.generation_info is not None or other.generation_info is not None else None ) return GenerationChunk( text=self.text + other.text, generation_info=generation_info, ) else: raise TypeError( f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" ) [docs]class ChatGeneration(Generation): """A single chat generation output.""" text: str = "" """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message.""" message: BaseMessage """The message output by the chat model.""" @root_validator
"""The message output by the chat model.""" @root_validator def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Set the text attribute to be the contents of the message.""" values["text"] = values["message"].content return values [docs]class ChatGenerationChunk(ChatGeneration): message: BaseMessageChunk def __add__(self, other: ChatGenerationChunk) -> ChatGenerationChunk: if isinstance(other, ChatGenerationChunk): generation_info = ( {**(self.generation_info or {}), **(other.generation_info or {})} if self.generation_info is not None or other.generation_info is not None else None ) return ChatGenerationChunk( message=self.message + other.message, generation_info=generation_info, ) else: raise TypeError( f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" ) [docs]class RunInfo(BaseModel): """Class that contains metadata for a single execution of a Chain or model.""" run_id: UUID """A unique identifier for the model or chain run.""" [docs]class ChatResult(BaseModel): """Class that contains all results for a single chat model call.""" generations: List[ChatGeneration] """List of the chat generations. This is a List because an input can have multiple candidate generations. """ llm_output: Optional[dict] = None """For arbitrary LLM provider specific output.""" [docs]class LLMResult(BaseModel): """Class that contains all results for a batched LLM call.""" generations: List[List[Generation]]
    generations: List[List[Generation]]
    """List of generated outputs.

    This is a List[List[]] because each input could have multiple
    candidate generations.
    """
    llm_output: Optional[dict] = None
    """Arbitrary LLM provider-specific output."""
    run: Optional[List[RunInfo]] = None
    """List of metadata info for model call for each input."""

[docs]    def flatten(self) -> List[LLMResult]:
        """Flatten generations into a single list.

        Unpack List[List[Generation]] -> List[LLMResult] where each returned
        LLMResult contains only a single Generation. If token usage information
        is available, it is kept only for the LLMResult corresponding to the
        top-choice Generation, to avoid over-counting of token usage downstream.

        Returns:
            List of LLMResults where each returned LLMResult contains a single
            Generation.
        """
        llm_results = []
        for i, gen_list in enumerate(self.generations):
            # Avoid double counting tokens in OpenAICallback
            if i == 0:
                llm_results.append(
                    LLMResult(
                        generations=[gen_list],
                        llm_output=self.llm_output,
                    )
                )
            else:
                if self.llm_output is not None:
                    llm_output = deepcopy(self.llm_output)
                    llm_output["token_usage"] = dict()
                else:
                    llm_output = None
                llm_results.append(
                    LLMResult(
                        generations=[gen_list],
                        llm_output=llm_output,
                    )
                )
        return llm_results
    def __eq__(self, other: object) -> bool:
        """Check for LLMResult equality by ignoring any metadata related to runs."""
        if not isinstance(other, LLMResult):
            return NotImplemented
        return (
            self.generations == other.generations
            and self.llm_output == other.llm_output
        )
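A brief illustration of how these containers compose — a minimal sketch assuming the classes above are importable from langchain.schema.output; the sample texts and generation_info keys are invented:

    from langchain.schema.output import Generation, GenerationChunk, LLMResult

    # Streaming providers emit GenerationChunk objects that can be merged with "+".
    chunk = GenerationChunk(text="Hello, ", generation_info={"model": "demo"})
    chunk += GenerationChunk(text="world!", generation_info={"finish_reason": "stop"})
    assert chunk.text == "Hello, world!"
    # generation_info dicts are merged key-wise.
    assert chunk.generation_info == {"model": "demo", "finish_reason": "stop"}

    # flatten() keeps token usage only on the first result, so callbacks that
    # sum usage across the flattened results do not double count.
    result = LLMResult(
        generations=[[Generation(text="a")], [Generation(text="b")]],
        llm_output={"token_usage": {"total_tokens": 7}},
    )
    first, second = result.flatten()
    assert first.llm_output == {"token_usage": {"total_tokens": 7}}
    assert second.llm_output == {"token_usage": {}}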
https://api.python.langchain.com/en/latest/_modules/langchain/schema/output.html
Source code for langchain.schema.language_model
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import (
    TYPE_CHECKING,
    Any,
    List,
    Optional,
    Sequence,
    Set,
    TypeVar,
    Union,
)

from langchain.load.serializable import Serializable
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import LLMResult
from langchain.schema.prompt import PromptValue
from langchain.schema.runnable import Runnable
from langchain.utils import get_pydantic_field_names

if TYPE_CHECKING:
    from langchain.callbacks.manager import Callbacks


def _get_token_ids_default_method(text: str) -> List[int]:
    """Encode the text into token IDs."""
    # TODO: this method may not be exact.
    # TODO: this method may differ based on model (eg codex).
    try:
        from transformers import GPT2TokenizerFast
    except ImportError:
        raise ImportError(
            "Could not import transformers python package. "
            "This is needed in order to calculate get_token_ids. "
            "Please install it with `pip install transformers`."
        )
    # create a GPT-2 tokenizer instance
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    # tokenize the text using the GPT-2 tokenizer
    return tokenizer.encode(text)


LanguageModelInput = Union[PromptValue, str, List[BaseMessage]]
LanguageModelOutput = TypeVar("LanguageModelOutput")


[docs]class BaseLanguageModel(
    Serializable, Runnable[LanguageModelInput, LanguageModelOutput], ABC
):
    """Abstract base class for interfacing with language models.

    All language model wrappers inherit from BaseLanguageModel.

    Exposes three main methods:
    - generate_prompt: generate language model outputs for a sequence of prompt
        values. A prompt value is a model input that can be converted to any language
        model input format (string or messages).
    - predict: pass in a single string to a language model and return a string
        prediction.
    - predict_messages: pass in a sequence of BaseMessages (corresponding to a single
        model call) to a language model and return a BaseMessage prediction.

    Each of these has an equivalent asynchronous method.
    """

[docs]    @abstractmethod
    def generate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Pass a sequence of prompts to the model and return model generations.

        This method should make use of batched calls for models that expose a batched
        API.

        Use this method when you:
            1. want to take advantage of batched calls,
            2. need more output from the model than just the top generated value,
            3. are building chains that are agnostic to the underlying language model
                type (e.g., pure text completion models vs chat models).

        Args:
            prompts: List of PromptValues. A PromptValue is an object that can be
                converted to match the format of any language model (string for pure
                text generation models and BaseMessages for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
                functionality, such as logging or streaming, throughout generation.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each input
            prompt and additional model provider-specific output.
        """

[docs]    @abstractmethod
    async def agenerate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Asynchronously pass a sequence of prompts and return model generations.

        This method should make use of batched calls for models that expose a batched
        API.

        Use this method when you:
            1. want to take advantage of batched calls,
            2. need more output from the model than just the top generated value,
            3. are building chains that are agnostic to the underlying language model
                type (e.g., pure text completion models vs chat models).

        Args:
            prompts: List of PromptValues. A PromptValue is an object that can be
                converted to match the format of any language model (string for pure
                text generation models and BaseMessages for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
                functionality, such as logging or streaming, throughout generation.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each input
            prompt and additional model provider-specific output.
        """

[docs]    @abstractmethod
    def predict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
        """Pass a single string input to the model and return a string prediction.

        Use this method when passing in raw text. If you want to pass in specific
        types of chat messages, use predict_messages.

        Args:
            text: String input to pass to the model.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a string.
        """

[docs]    @abstractmethod
    def predict_messages(
        self,
        messages: List[BaseMessage],
        *,
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
        """Pass a message sequence to the model and return a message prediction.

        Use this method when passing in chat messages. If you want to pass in raw
        text, use predict.

        Args:
            messages: A sequence of chat messages corresponding to a single model
                input.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.
        Returns:
            Top model prediction as a message.
        """

[docs]    @abstractmethod
    async def apredict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
        """Asynchronously pass a string to the model and return a string prediction.

        Use this method when calling pure text generation models and only the top
        candidate generation is needed.

        Args:
            text: String input to pass to the model.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a string.
        """

[docs]    @abstractmethod
    async def apredict_messages(
        self,
        messages: List[BaseMessage],
        *,
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
        """Asynchronously pass messages to the model and return a message prediction.

        Use this method when calling chat models and only the top candidate generation
        is needed.

        Args:
            messages: A sequence of chat messages corresponding to a single model
                input.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a message.
        """

[docs]    def get_token_ids(self, text: str) -> List[int]:
"""Return the ordered ids of the tokens in a text. Args: text: The string input to tokenize. Returns: A list of ids corresponding to the tokens in the text, in order they occur in the text. """ return _get_token_ids_default_method(text) [docs] def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. """ return len(self.get_token_ids(text)) [docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: """Get the number of tokens in the messages. Useful for checking if an input will fit in a model's context window. Args: messages: The message inputs to tokenize. Returns: The sum of the number of tokens across the messages. """ return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages]) @classmethod def _all_required_field_names(cls) -> Set: """DEPRECATED: Kept for backwards compatibility. Use get_pydantic_field_names. """ return get_pydantic_field_names(cls)
https://api.python.langchain.com/en/latest/_modules/langchain/schema/language_model.html
Source code for langchain.schema.memory
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, List

from langchain.load.serializable import Serializable
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage


[docs]class BaseMemory(Serializable, ABC):
    """Abstract base class for memory in Chains.

    Memory refers to state in Chains. Memory can be used to store information about
    past executions of a Chain and inject that information into the inputs of
    future executions of the Chain. For example, for conversational Chains Memory
    can be used to store conversations and automatically add them to future model
    prompts so that the model has the necessary context to respond coherently to
    the latest input.

    Example:
        .. code-block:: python

            class SimpleMemory(BaseMemory):
                memories: Dict[str, Any] = dict()

                @property
                def memory_variables(self) -> List[str]:
                    return list(self.memories.keys())

                def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
                    return self.memories

                def save_context(
                    self, inputs: Dict[str, Any], outputs: Dict[str, str]
                ) -> None:
                    pass

                def clear(self) -> None:
                    pass
    """  # noqa: E501

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @property
    @abstractmethod
    def memory_variables(self) -> List[str]:
        """The string keys this memory class will add to chain inputs."""

[docs]    @abstractmethod
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return key-value pairs given the text input to the chain."""
[docs]    @abstractmethod
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save the context of this chain run to memory."""

[docs]    @abstractmethod
    def clear(self) -> None:
        """Clear memory contents."""


[docs]class BaseChatMessageHistory(ABC):
    """Abstract base class for storing chat message history.

    See `ChatMessageHistory` for default implementation.

    Example:
        .. code-block:: python

            class FileChatMessageHistory(BaseChatMessageHistory):
                storage_path: str
                session_id: str

                @property
                def messages(self):
                    with open(
                        os.path.join(self.storage_path, self.session_id),
                        encoding="utf-8",
                    ) as f:
                        messages = json.loads(f.read())
                    return messages_from_dict(messages)

                def add_message(self, message: BaseMessage) -> None:
                    messages = [_message_to_dict(m) for m in self.messages]
                    messages.append(_message_to_dict(message))
                    with open(
                        os.path.join(self.storage_path, self.session_id), "w"
                    ) as f:
                        json.dump(messages, f)

                def clear(self):
                    with open(
                        os.path.join(self.storage_path, self.session_id), "w"
                    ) as f:
                        f.write("[]")
    """

    messages: List[BaseMessage]
    """A list of Messages stored in-memory."""

[docs]    def add_user_message(self, message: str) -> None:
        """Convenience method for adding a human message string to the store.

        Args:
            message: The string contents of a human message.
        """
        self.add_message(HumanMessage(content=message))
[docs]    def add_ai_message(self, message: str) -> None:
        """Convenience method for adding an AI message string to the store.

        Args:
            message: The string contents of an AI message.
        """
        self.add_message(AIMessage(content=message))

[docs]    @abstractmethod
    def add_message(self, message: BaseMessage) -> None:
        """Add a Message object to the store.

        Args:
            message: A BaseMessage object to store.
        """
        raise NotImplementedError()

[docs]    @abstractmethod
    def clear(self) -> None:
        """Remove all messages from the store."""
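For reference, a minimal in-memory subclass of BaseChatMessageHistory — a sketch only; the class name is invented, and the library's own ChatMessageHistory provides equivalent behavior:

    from typing import List

    from langchain.schema.memory import BaseChatMessageHistory
    from langchain.schema.messages import BaseMessage


    class InMemoryChatMessageHistory(BaseChatMessageHistory):
        """Keeps messages in a plain Python list for the process lifetime."""

        def __init__(self) -> None:
            self.messages: List[BaseMessage] = []

        def add_message(self, message: BaseMessage) -> None:
            self.messages.append(message)

        def clear(self) -> None:
            self.messages = []


    history = InMemoryChatMessageHistory()
    history.add_user_message("hi!")          # wraps the string in a HumanMessage
    history.add_ai_message("hello, human")   # wraps the string in an AIMessage
    print([m.content for m in history.messages])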
https://api.python.langchain.com/en/latest/_modules/langchain/schema/memory.html
Source code for langchain.document_transformers.nuclia_text_transform
import asyncio
import json
import uuid
from typing import Any, Sequence

from langchain.schema.document import BaseDocumentTransformer, Document
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI


[docs]class NucliaTextTransformer(BaseDocumentTransformer):
    """Runs documents through the Nuclia Understanding API.

    The Nuclia Understanding API splits text into paragraphs and sentences,
    identifies entities, provides a summary of the text, and generates
    embeddings for all sentences.
    """

[docs]    def __init__(self, nua: NucliaUnderstandingAPI):
        self.nua = nua

[docs]    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        # Only the asynchronous variant is implemented; use atransform_documents.
        raise NotImplementedError

[docs]    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        # Push every document to the Nuclia Understanding API concurrently.
        tasks = [
            self.nua.arun(
                {
                    "action": "push",
                    "id": str(uuid.uuid4()),
                    "text": doc.page_content,
                    "path": None,
                }
            )
            for doc in documents
        ]
        results = await asyncio.gather(*tasks)
        # Attach the extracted file data and field metadata to each document.
        for doc, result in zip(documents, results):
            obj = json.loads(result)
            metadata = {
                "file": obj["file_extracted_data"][0],
                "metadata": obj["field_metadata"][0],
            }
            doc.metadata["nuclia"] = metadata
        return documents
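A usage sketch, assuming valid Nuclia credentials are configured; the `enable_ml` flag and the sample text are assumptions for illustration, so consult the NucliaUnderstandingAPI documentation for the exact setup:

    import asyncio

    from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer
    from langchain.schema.document import Document
    from langchain.tools.nuclia.tool import NucliaUnderstandingAPI

    # Assumed configuration: the tool reads its API key from the environment.
    nua = NucliaUnderstandingAPI(enable_ml=True)
    transformer = NucliaTextTransformer(nua)

    docs = [Document(page_content="LangChain integrates many document transformers.")]
    # Only the async path is implemented, so drive it with asyncio.run.
    processed = asyncio.run(transformer.atransform_documents(docs))
    print(processed[0].metadata["nuclia"])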
https://api.python.langchain.com/en/latest/_modules/langchain/document_transformers/nuclia_text_transform.html