"""Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` and ``InstructorEmbedding`` python packages installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceInstructEmbeddings model_name = "hkunlp/instructor-large" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': True} hf = HuggingFaceInstructEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR self.client = INSTRUCTOR( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) except ImportError as e:
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
        return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
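The two methods above differ only in which instruction string is paired with the text before calling `encode`. A minimal usage sketch, assuming the `InstructorEmbedding` and `sentence_transformers` packages are installed and the model weights can be downloaded:

from langchain.embeddings import HuggingFaceInstructEmbeddings

hf = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large")

doc_vectors = hf.embed_documents(["LangChain wraps many embedding providers."])
query_vector = hf.embed_query("What does LangChain wrap?")
# The same string embeds differently via the two methods, because documents
# and queries are paired with different instructions.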
Source code for langchain.embeddings.sagemaker_endpoint

"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
    """Content handler for embedding endpoints."""


class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html

    Example:
        .. code-block:: python

            from langchain.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = "my-endpoint-name"
            region_name = "us-west-2"
            credentials_profile_name = "default"
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """
    client: Any  #: :meta private:

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, e.g. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files,
    which has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: EmbeddingsContentHandler
    """The content handler class that provides the input and output
    transform functions to handle formats between the LLM and the endpoint.

    Example:
        .. code-block:: python

            from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

            class ContentHandler(EmbeddingsContentHandler):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompts": prompts, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> List[List[float]]:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json["vectors"]
    """  # noqa: E501
    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""

    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See `boto3`_ docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and the boto3 package exist in environment."""
        try:
            import boto3

            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()
                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e
        except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        return values

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}

        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as a request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.extend(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
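Because the request and response formats are whatever your SageMaker container defines, the content handler is the only provider-specific piece. A minimal end-to-end sketch; the endpoint name and the "text_inputs"/"embedding" JSON keys are hypothetical and must match your own deployment:

import json
from typing import Dict, List

from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


class JSONContentHandler(EmbeddingsContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        # serialize the batch the way the (hypothetical) container expects
        return json.dumps({"text_inputs": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        # the endpoint body is a stream; read and parse it back into vectors
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json["embedding"]


se = SagemakerEndpointEmbeddings(
    endpoint_name="my-embedding-endpoint",  # placeholder endpoint name
    region_name="us-west-2",
    content_handler=JSONContentHandler(),
)
vectors = se.embed_documents(["hello", "world"], chunk_size=2)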
Source code for langchain.embeddings.tensorflow_hub

"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"


class TensorflowHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around tensorflow_hub embedding models.

    To use, you should have the ``tensorflow_text`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import TensorflowHubEmbeddings
            url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
            tf = TensorflowHubEmbeddings(model_url=url)
    """

    embed: Any  #: :meta private:
    model_url: str = DEFAULT_MODEL_URL
    """Model url to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the tensorflow_hub and tensorflow_text."""
        super().__init__(**kwargs)
        try:
            import tensorflow_hub
        except ImportError:
            raise ImportError(
                "Could not import tensorflow-hub python package. "
                "Please install it with `pip install tensorflow-hub`."
            )
        try:
            import tensorflow_text  # noqa
        except ImportError:
            raise ImportError(
                "Could not import tensorflow_text python package. "
                "Please install it with `pip install tensorflow_text`."
            )

        self.embed = tensorflow_hub.load(self.model_url)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a TensorflowHub embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.embed(texts).numpy()
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a TensorflowHub embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.embed([text]).numpy()[0]
        return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
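A short usage sketch; the model is downloaded from TF Hub on first load, and the default multilingual universal-sentence-encoder model yields 512-dimensional vectors:

from langchain.embeddings import TensorflowHubEmbeddings

tf_embed = TensorflowHubEmbeddings()  # uses DEFAULT_MODEL_URL
vectors = tf_embed.embed_documents(["first text", "second text"])
print(len(vectors), len(vectors[0]))  # 2 vectors; 512 dims for this model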
Source code for langchain.embeddings.huggingface_hub

"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)


class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around HuggingFaceHub embedding models.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceHubEmbeddings
            repo_id = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceHubEmbeddings(
                repo_id=repo_id,
                task="feature-extraction",
                huggingfacehub_api_token="my-api-key",
            )
    """

    client: Any  #: :meta private:
    repo_id: str = DEFAULT_REPO_ID
    """Model name to use."""
    task: Optional[str] = "feature-extraction"
    """Task to call the model with."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.inference_api import InferenceApi

            repo_id = values["repo_id"]
            if not repo_id.startswith("sentence-transformers"):
                raise ValueError(
                    "Currently only 'sentence-transformers' embedding models "
                    f"are supported. Got invalid 'repo_id' {repo_id}."
                )
            client = InferenceApi(
                repo_id=repo_id,
                token=huggingfacehub_api_token,
                task=values.get("task"),
            )
            if client.task not in VALID_TASKS:
                raise ValueError(
                    f"Got invalid task {client.task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
            values["client"] = client
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        responses = self.client(inputs=texts, params=_model_kwargs)
        return responses

    def embed_query(self, text: str) -> List[float]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        response = self.embed_documents([text])[0]
        return response
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
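A usage sketch; note that the validator above restricts `repo_id` to the sentence-transformers namespace, and a Hub API token must be available (the token value below is a placeholder):

import os

from langchain.embeddings import HuggingFaceHubEmbeddings

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<your-token>"
hub = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-mpnet-base-v2")
vectors = hub.embed_documents(["inference runs remotely", "no local weights"])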
Source code for langchain.embeddings.cohere

"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class CohereEmbeddings(BaseModel, Embeddings):
    """Wrapper around Cohere embedding models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import CohereEmbeddings
            cohere = CohereEmbeddings(
                model="embed-english-light-v2.0", cohere_api_key="my-api-key"
            )
    """

    client: Any  #: :meta private:
    model: str = "embed-english-v2.0"
    """Model name to use."""

    truncate: Optional[str] = None
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""

    cohere_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        cohere_api_key = get_from_dict_or_env(
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        try:
            import cohere

            values["client"] = cohere.Client(cohere_api_key)
        except ImportError:
            raise ValueError(
                "Could not import cohere python package. "
                "Please install it with `pip install cohere`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Cohere's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = self.client.embed(
            model=self.model, texts=texts, truncate=self.truncate
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(
            model=self.model, texts=[text], truncate=self.truncate
        ).embeddings[0]
        return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
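A usage sketch showing the `truncate` option; with the default `None` the API's own default applies, while "START"/"END" clip over-long inputs from the corresponding side:

from langchain.embeddings import CohereEmbeddings

cohere_embed = CohereEmbeddings(
    cohere_api_key="my-api-key",  # or set COHERE_API_KEY in the environment
    truncate="END",               # clip over-long texts instead of erroring
)
query_vector = cohere_embed.embed_query("How are long inputs handled?")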
Source code for langchain.embeddings.bedrock

import json
import os
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings


class BedrockEmbeddings(BaseModel, Embeddings):
    """Embeddings provider to invoke Bedrock embedding models.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Bedrock service.

    Example:
        .. code-block:: python

            from langchain.embeddings import BedrockEmbeddings

            region_name = "us-east-1"
            credentials_profile_name = "default"
            model_id = "amazon.titan-e1t-medium"

            be = BedrockEmbeddings(
                credentials_profile_name=credentials_profile_name,
                region_name=region_name,
                model_id=model_id
            )
    """

    client: Any  #: :meta private:

    region_name: Optional[str] = None
    """The aws region, e.g., `us-west-2`. Falls back to the AWS_DEFAULT_REGION
    env variable or the region specified in ~/.aws/config in case it is
    not provided here.
    """

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files,
    which has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    model_id: str = "amazon.titan-e1t-medium"
    """Id of the model to call, e.g., amazon.titan-e1t-medium; this is
    equivalent to the modelId property in the list-foundation-models api"""

    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and the boto3 package exist in environment."""
        if values["client"] is not None:
            return values

        try:
            import boto3

            if values["credentials_profile_name"] is not None:
                session = boto3.Session(profile_name=values["credentials_profile_name"])
            else:
                # use default credentials
                session = boto3.Session()

            client_params = {}
            if values["region_name"]:
                client_params["region_name"] = values["region_name"]

            values["client"] = session.client("bedrock", **client_params)
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e

        return values

    def _embedding_func(self, text: str) -> List[float]:
        """Call out to Bedrock embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        text = text.replace(os.linesep, " ")
        _model_kwargs = self.model_kwargs or {}

        input_body = {**_model_kwargs}
        input_body["inputText"] = text
        body = json.dumps(input_body)
        content_type = "application/json"
        accepts = "application/json"

        embeddings = []
        try:
            response = self.client.invoke_model(
                body=body,
                modelId=self.model_id,
                accept=accepts,
                contentType=content_type,
            )
            response_body = json.loads(response.get("body").read())
            embeddings = response_body.get("embedding")
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        return embeddings

    def embed_documents(
        self, texts: List[str], chunk_size: int = 1
    ) -> List[List[float]]:
        """Compute doc embeddings using a Bedrock model.

        Args:
            texts: The list of texts to embed.
            chunk_size: Bedrock currently only allows single string
                inputs, so chunk size is always 1. This input is here
                only for compatibility with the embeddings interface.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        for text in texts:
            response = self._embedding_func(text)
            results.append(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a Bedrock model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func(text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
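A usage sketch, assuming boto3 can resolve credentials and the account has Bedrock access; as `embed_documents` above shows, each text becomes its own InvokeModel call:

from langchain.embeddings import BedrockEmbeddings

bedrock = BedrockEmbeddings(
    credentials_profile_name="default",
    region_name="us-east-1",
)
vectors = bedrock.embed_documents(["one request", "per text"])  # two API calls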
Source code for langchain.embeddings.self_hosted_hugging_face

"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional

from langchain.embeddings.self_hosted import SelfHostedEmbeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)

logger = logging.getLogger(__name__)


def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return client.encode(*args, **kwargs)


def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
    """Load the embedding model."""
    if not instruct:
        import sentence_transformers

        client = sentence_transformers.SentenceTransformer(model_id)
    else:
        from InstructorEmbedding import INSTRUCTOR

        client = INSTRUCTOR(model_id)

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )
        client = client.to(device)
    return client


class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
    """Runs sentence_transformers embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_name = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
    """

    client: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable = load_embedding_model
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model load function."""
    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)


class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    """Runs InstructorEmbedding embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """
    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
    """Requirements to install on hardware to inference the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get(
            "model_id", DEFAULT_INSTRUCT_MODEL
        )
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = []
        for text in texts:
            instruction_pairs.append([self.embed_instruction, text])
        embeddings = self.client(self.pipeline_ref, instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
        return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
Source code for langchain.embeddings.mosaicml

"""Wrapper around MosaicML APIs."""
from typing import Any, Dict, List, Mapping, Optional, Tuple

import requests
from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class MosaicMLInstructorEmbeddings(BaseModel, Embeddings):
    """Wrapper around MosaicML's embedding inference service.

    To use, you should have the environment variable ``MOSAICML_API_TOKEN``
    set with your API token, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import MosaicMLInstructorEmbeddings
            endpoint_url = (
                "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
            )
            mosaic_llm = MosaicMLInstructorEmbeddings(
                endpoint_url=endpoint_url,
                mosaicml_api_token="my-api-key"
            )
    """

    endpoint_url: str = (
        "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict"
    )
    """Endpoint URL to use."""
    embed_instruction: str = "Represent the document for retrieval: "
    """Instruction used to embed documents."""
    query_instruction: str = (
        "Represent the question for retrieving supporting documents: "
    )
    """Instruction used to embed the query."""
    retry_sleep: float = 1.0
    """How long to try sleeping for if a rate limit is encountered"""

    mosaicml_api_token: Optional[str] = None
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key exists in environment."""
        mosaicml_api_token = get_from_dict_or_env(
            values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
        )
        values["mosaicml_api_token"] = mosaicml_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"endpoint_url": self.endpoint_url}

    def _embed(
        self, input: List[Tuple[str, str]], is_retry: bool = False
    ) -> List[List[float]]:
        payload = {"input_strings": input}

        # HTTP headers for authorization
        headers = {
            "Authorization": f"{self.mosaicml_api_token}",
            "Content-Type": "application/json",
        }

        # send request
        try:
            response = requests.post(self.endpoint_url, headers=headers, json=payload)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        try:
            parsed_response = response.json()

            if "error" in parsed_response:
                # if we get rate limited, try sleeping for 1 second
                if (
                    not is_retry
                    and "rate limit exceeded" in parsed_response["error"].lower()
                ):
                    import time

                    time.sleep(self.retry_sleep)

                    return self._embed(input, is_retry=True)
                raise ValueError(
                    f"Error raised by inference API: {parsed_response['error']}"
                )

            # The inference API has changed a couple of times, so we add some handling
            # to be robust to multiple response formats.
            if isinstance(parsed_response, dict):
                if "data" in parsed_response:
                    output_item = parsed_response["data"]
                elif "output" in parsed_response:
                    output_item = parsed_response["output"]
                else:
                    raise ValueError(
                        f"No key data or output in response: {parsed_response}"
                    )

                if isinstance(output_item, list) and isinstance(output_item[0], list):
                    embeddings = output_item
                else:
                    embeddings = [output_item]
            elif isinstance(parsed_response, list):
                first_item = parsed_response[0]
                if isinstance(first_item, list):
                    embeddings = parsed_response
                elif isinstance(first_item, dict):
                    if "output" in first_item:
                        embeddings = [item["output"] for item in parsed_response]
                    else:
                        raise ValueError(
                            f"No key data or output in response: {parsed_response}"
                        )
                else:
                    raise ValueError(f"Unexpected response format: {parsed_response}")
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {response.text}"
            )

        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a MosaicML deployed instructor embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [(self.embed_instruction, text) for text in texts]
        embeddings = self._embed(instruction_pairs)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a MosaicML deployed instructor embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = (self.query_instruction, text)
        embedding = self._embed([instruction_pair])[0]
        return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
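A usage sketch; the token is sent verbatim in the Authorization header, and documents vs. queries differ only in the instruction half of the (instruction, text) pair:

from langchain.embeddings import MosaicMLInstructorEmbeddings

mosaic = MosaicMLInstructorEmbeddings(mosaicml_api_token="my-api-key")
doc_vectors = mosaic.embed_documents(["an instructor-style document"])
query_vector = mosaic.embed_query("an instructor-style query")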
Source code for langchain.embeddings.modelscope_hub

"""Wrapper around ModelScopeHub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings


class ModelScopeEmbeddings(BaseModel, Embeddings):
    """Wrapper around modelscope_hub embedding models.

    To use, you should have the ``modelscope`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import ModelScopeEmbeddings
            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id)
    """

    embed: Any
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope pipeline."""
        super().__init__(**kwargs)
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks

            self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
        except ImportError as e:
            raise ImportError(
                "Could not import the modelscope python package. "
                "Please install it with `pip install modelscope`."
            ) from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a modelscope embedding model.

        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        inputs = {"source_sentence": texts}
        embeddings = self.embed(input=inputs)["text_embedding"]
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a modelscope embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        inputs = {"source_sentence": [text]}
        embedding = self.embed(input=inputs)["text_embedding"][0]
        return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/modelscope_hub.html
Source code for langchain.embeddings.vertexai

"""Wrapper around Google VertexAI embedding models."""
from typing import Dict, List

from pydantic import root_validator

from langchain.embeddings.base import Embeddings
from langchain.llms.vertexai import _VertexAICommon
from langchain.utilities.vertexai import raise_vertex_import_error


class VertexAIEmbeddings(_VertexAICommon, Embeddings):
    model_name: str = "textembedding-gecko"

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validates that the python package exists in environment."""
        cls._try_init_vertexai(values)
        try:
            from vertexai.preview.language_models import TextEmbeddingModel
        except ImportError:
            raise_vertex_import_error()
        values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
        return values

    def embed_documents(
        self, texts: List[str], batch_size: int = 5
    ) -> List[List[float]]:
        """Embed a list of strings. Vertex AI currently
        sets a max batch size of 5 strings.

        Args:
            texts: List[str] The list of strings to embed.
            batch_size: [int] The batch size of embeddings to send to the model.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = []
        for batch in range(0, len(texts), batch_size):
            text_batch = texts[batch : batch + batch_size]
            embeddings_batch = self.client.get_embeddings(text_batch)
            embeddings.extend([el.values for el in embeddings_batch])
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embeddings = self.client.get_embeddings([text])
        return embeddings[0].values
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/vertexai.html
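The loop in `embed_documents` simply strides through the list `batch_size` texts at a time, so 11 texts with the default batch size of 5 produce requests of 5, 5, and 1 strings. A standalone sketch of that slicing:

texts = [f"doc {i}" for i in range(11)]
batch_size = 5
batches = [texts[i : i + batch_size] for i in range(0, len(texts), batch_size)]
print([len(b) for b in batches])  # [5, 5, 1]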
Source code for langchain.embeddings.base

"""Interface for embedding models."""
from abc import ABC, abstractmethod
from typing import List


class Embeddings(ABC):
    """Interface for embedding models."""

    @abstractmethod
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search docs."""

    @abstractmethod
    def embed_query(self, text: str) -> List[float]:
        """Embed query text."""

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search docs."""
        raise NotImplementedError

    async def aembed_query(self, text: str) -> List[float]:
        """Embed query text."""
        raise NotImplementedError
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/base.html
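Every provider wrapper in this collection implements this interface, so a custom backend only needs the two synchronous methods. A toy subclass sketch; the hashing scheme is purely illustrative, not a meaningful embedding:

import hashlib
from typing import List

from langchain.embeddings.base import Embeddings


class HashEmbeddings(Embeddings):
    """Deterministic pseudo-embeddings derived from a SHA-256 digest."""

    def __init__(self, size: int = 16):
        self.size = size

    def _vector(self, text: str) -> List[float]:
        digest = hashlib.sha256(text.encode("utf-8")).digest()
        # map the first `size` digest bytes to floats in [0, 1]
        return [b / 255 for b in digest[: self.size]]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._vector(t) for t in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._vector(text)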
Source code for langchain.embeddings.fake

from typing import List

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings


class FakeEmbeddings(Embeddings, BaseModel):
    size: int

    def _get_embedding(self) -> List[float]:
        return list(np.random.normal(size=self.size))

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding() for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html
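FakeEmbeddings is handy in tests that need correctly shaped vectors without network calls. Note the vectors are freshly random on every call, so identical inputs do not embed identically:

from langchain.embeddings import FakeEmbeddings

fake = FakeEmbeddings(size=768)
vectors = fake.embed_documents(["a", "b"])
assert len(vectors) == 2 and len(vectors[0]) == 768
assert fake.embed_query("a") != fake.embed_query("a")  # random each call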
Source code for langchain.embeddings.minimax

"""Wrapper around MiniMax APIs."""
from __future__ import annotations

import logging
from typing import Any, Callable, Dict, List, Optional

import requests
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator() -> Callable[[Any], Any]:
    """Returns a tenacity retry decorator."""
    multiplier = 1
    min_seconds = 1
    max_seconds = 4
    max_retries = 6

    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""
    retry_decorator = _create_retry_decorator()

    @retry_decorator
    def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
        return embeddings.embed(*args, **kwargs)

    return _embed_with_retry(*args, **kwargs)


class MiniMaxEmbeddings(BaseModel, Embeddings):
    """Wrapper around MiniMax's embedding inference service.

    To use, you should have the environment variables ``MINIMAX_GROUP_ID`` and
    ``MINIMAX_API_KEY`` set with your API token, or pass them as named parameters to
    the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import MiniMaxEmbeddings
            embeddings = MiniMaxEmbeddings()

            query_text = "This is a test query."
            query_result = embeddings.embed_query(query_text)

            document_text = "This is a test document."
            document_result = embeddings.embed_documents([document_text])
    """

    endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
    """Endpoint URL to use."""
    model: str = "embo-01"
    """Embeddings model name to use."""
    embed_type_db: str = "db"
    """For embed_documents"""
    embed_type_query: str = "query"
    """For embed_query"""

    minimax_group_id: Optional[str] = None
    """Group ID for MiniMax API."""
    minimax_api_key: Optional[str] = None
    """API Key for MiniMax API."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that group id and api key exist in environment."""
        minimax_group_id = get_from_dict_or_env(
            values, "minimax_group_id", "MINIMAX_GROUP_ID"
        )
        minimax_api_key = get_from_dict_or_env(
            values, "minimax_api_key", "MINIMAX_API_KEY"
        )
        values["minimax_group_id"] = minimax_group_id
        values["minimax_api_key"] = minimax_api_key
        return values
    def embed(
        self,
        texts: List[str],
        embed_type: str,
    ) -> List[List[float]]:
        payload = {
            "model": self.model,
            "type": embed_type,
            "texts": texts,
        }

        # HTTP headers for authorization
        headers = {
            "Authorization": f"Bearer {self.minimax_api_key}",
            "Content-Type": "application/json",
        }

        params = {
            "GroupId": self.minimax_group_id,
        }

        # send request
        response = requests.post(
            self.endpoint_url, params=params, headers=headers, json=payload
        )
        parsed_response = response.json()

        # check for errors
        if parsed_response["base_resp"]["status_code"] != 0:
            raise ValueError(
                f"MiniMax API returned an error: {parsed_response['base_resp']}"
            )

        embeddings = parsed_response["vectors"]

        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a MiniMax embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a MiniMax embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embeddings = embed_with_retry(
            self, texts=[text], embed_type=self.embed_type_query
        )
        return embeddings[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
Source code for langchain.embeddings.openai

"""Wrapper around OpenAI embedding models."""
from __future__ import annotations

import logging
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
    import openai
    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        return embeddings.client.create(**kwargs)

    return _embed_with_retry(**kwargs)


async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        return await embeddings.client.acreate(**kwargs)

    return await _async_embed_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
    """Wrapper around OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
    The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
    the properties of your endpoint.
    In addition, the deployment name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os
            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
            os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
            os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                openai_api_base="https://your-endpoint.openai.azure.com/",
                openai_api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model  # to support Azure OpenAI Service custom deployment names
    openai_api_version: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_base: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_type: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the OpenAPI request."""
    headers: Any = None
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the embedding model name. However, there are some cases
    where you may want to use this Embedding class with a model name not
    supported by tiktoken. This can include when using Azure embeddings or
    when using one of the many model providers that expose an OpenAI-like
    API but with different models. In those cases, in order to avoid erroring
    when tiktoken is called, you can specify a model name to use here."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
            default_api_version = "2022-12-01"
        else:
            default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            values["client"] = openai.Embedding
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def _invocation_params(self) -> Dict:
        openai_args = {
            "engine": self.deployment,
            "request_timeout": self.request_timeout,
            "headers": self.headers,
            "api_key": self.openai_api_key,
            "organization": self.openai_organization,
            "api_base": self.openai_api_base,
            "api_type": self.openai_api_type,
            "api_version": self.openai_api_version,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]  # noqa: E501
        return openai_args

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            # split long inputs into windows of at most embedding_ctx_length
            # tokens, remembering which original text each window came from
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]

        batched_embeddings = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = embed_with_retry(
                    self,
                    input="",
                    **self._invocation_params,
                )["data"][0]["embedding"]
            else:
                # token-count-weighted average of the per-window embeddings
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    async def _aget_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]

        batched_embeddings = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
f950f8838f41-9
results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = ( await async_embed_with_retry( self, input="", **self._invocation_params, ) )["data"][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings def _embedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return self._get_len_safe_embeddings([text], engine=engine)[0] else: if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return embed_with_retry( self, input=[text], **self._invocation_params, )[ "data" ][0]["embedding"] async def _aembedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return (await self._aget_len_safe_embeddings([text], engine=engine))[0] else:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
f950f8838f41-10
if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return ( await async_embed_with_retry( self, input=[text], **self._invocation_params, ) )["data"][0]["embedding"] [docs] def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use the length-safe embedding function. return self._get_len_safe_embeddings(texts, engine=self.deployment) [docs] async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
f950f8838f41-11
# NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use the length-safe embedding function. return await self._aget_len_safe_embeddings(texts, engine=self.deployment) [docs] def embed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding [docs] async def aembed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = await self._aembedding_func(text, engine=self.deployment) return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
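The chunking logic above splits over-length inputs into embedding_ctx_length-sized token windows, embeds each window separately, and then combines the window embeddings with a token-count-weighted average followed by L2 normalization. A minimal, self-contained sketch of that combination step, using toy vectors and made-up token counts:

.. code-block:: python

    import numpy as np

    # Two window embeddings for one long document, plus their token counts.
    window_embeddings = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
    num_tokens = [300, 100]  # the first window carries 3x the weight

    # Token-weighted mean, then L2-normalize, mirroring _get_len_safe_embeddings.
    average = np.average(window_embeddings, axis=0, weights=num_tokens)
    embedding = (average / np.linalg.norm(average)).tolist()
    print(embedding)  # approximately [0.949, 0.316]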
7b00ff31d114-0
Source code for langchain.embeddings.deepinfra from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32" [docs]class DeepInfraEmbeddings(BaseModel, Embeddings): """Wrapper around Deep Infra's embedding inference service. To use, you should have the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. There are multiple embeddings models available, see https://deepinfra.com/models?type=embeddings. Example: .. code-block:: python from langchain.embeddings import DeepInfraEmbeddings deepinfra_emb = DeepInfraEmbeddings( model_id="sentence-transformers/clip-ViT-B-32", deepinfra_api_token="my-api-key" ) r1 = deepinfra_emb.embed_documents( [ "Alpha is the first letter of Greek alphabet", "Beta is the second letter of Greek alphabet", ] ) r2 = deepinfra_emb.embed_query( "What is the second letter of Greek alphabet" ) """ model_id: str = DEFAULT_MODEL_ID """Embeddings model to use.""" normalize: bool = False """whether to normalize the computed embeddings""" embed_instruction: str = "passage: " """Instruction used to embed documents.""" query_instruction: str = "query: " """Instruction used to embed the query.""" model_kwargs: Optional[dict] = None
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
7b00ff31d114-1
"""Other model keyword args""" deepinfra_api_token: Optional[str] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the api key and python package exist in the environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"model_id": self.model_id} def _embed(self, input: List[str]) -> List[List[float]]: _model_kwargs = self.model_kwargs or {} # HTTP headers for authorization headers = { "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", } # send request try: res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers=headers, json={"inputs": input, "normalize": self.normalize, **_model_kwargs}, ) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") if res.status_code != 200: raise ValueError( "Error raised by inference API HTTP code: %s, %s" % (res.status_code, res.text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
7b00ff31d114-2
) try: t = res.json() embeddings = t["embeddings"] except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {res.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a Deep Infra deployed embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a Deep Infra deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = f"{self.query_instruction}{text}" embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
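A short usage sketch for the wrapper above, assuming a valid DEEPINFRA_API_TOKEN and network access (the token and texts are placeholders); documents get the "passage: " prefix and queries the "query: " prefix before being sent to the inference endpoint:

.. code-block:: python

    from langchain.embeddings import DeepInfraEmbeddings

    emb = DeepInfraEmbeddings(deepinfra_api_token="my-api-key")
    doc_vectors = emb.embed_documents(["Alpha is the first letter."])
    query_vector = emb.embed_query("What is the first letter?")
    print(len(doc_vectors), len(query_vector))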
bf4beff084a7-0
Source code for langchain.memory.token_buffer from typing import Any, Dict, List from langchain.base_language import BaseLanguageModel from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationTokenBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 2000 @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer: Any = self.buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: final_buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/token_buffer.html
bf4beff084a7-1
pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/token_buffer.html
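A usage sketch for ConversationTokenBufferMemory, assuming an LLM with a valid API key; the llm is only consulted to count tokens via get_num_tokens_from_messages, and the oldest messages are dropped once the budget is exceeded:

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.memory import ConversationTokenBufferMemory

    memory = ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=50)
    memory.save_context({"input": "Hi there"}, {"output": "Hello!"})
    memory.save_context({"input": "Tell me a story"}, {"output": "Once upon a time..."})
    # Anything beyond the 50-token budget has been pruned from the front.
    print(memory.load_memory_variables({}))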
7e95e5dfa03d-0
Source code for langchain.memory.utils from typing import Any, Dict, List from langchain.schema import get_buffer_string # noqa: F401 [docs]def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str: """ Get the prompt input key. Args: inputs: Dict[str, Any] memory_variables: List[str] Returns: A prompt input key. """ # "stop" is a special key that can be passed as input but is not used to # format the prompt. prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"])) if len(prompt_input_keys) != 1: raise ValueError(f"One input key expected, got {prompt_input_keys}") return prompt_input_keys[0]
https://api.python.langchain.com/en/latest/_modules/langchain/memory/utils.html
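get_prompt_input_key infers which input maps to the prompt by elimination, raising when the answer is ambiguous. A runnable example:

.. code-block:: python

    from langchain.memory.utils import get_prompt_input_key

    inputs = {"input": "What is the capital of France?", "history": "", "stop": ["\n"]}
    # "history" is a memory variable and "stop" is always ignored, so "input" is left.
    print(get_prompt_input_key(inputs, memory_variables=["history"]))  # -> "input"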
fbf5eaaf7946-0
Source code for langchain.memory.kg from typing import Any, Dict, List, Type, Union from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.graphs import NetworkxEntityGraph from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseMessage, SystemMessage, get_buffer_string, ) [docs]class ConversationKGMemory(BaseChatMemory): """Knowledge graph memory for storing conversation memory. Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation. """ k: int = 2 """Number of previous utterances to include in the context.""" human_prefix: str = "Human" ai_prefix: str = "AI" kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph) knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT llm: BaseLanguageModel summary_message_cls: Type[BaseMessage] = SystemMessage memory_key: str = "history" #: :meta private: [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer."""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
fbf5eaaf7946-1
entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else "" elif self.return_messages: context = [ self.summary_message_cls(content=text) for text in summary_strings ] else: context = "\n".join(summary_strings) return {self.memory_key: context} @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str: """Get the output key for the prompt.""" if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") return list(outputs.keys())[0] return self.output_key [docs] def get_current_entities(self, input_string: str) -> List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix,
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
fbf5eaaf7946-2
ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, ) return get_entities(output) def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]: """Get the current entities in the conversation.""" prompt_input_key = self._get_prompt_input_key(inputs) return self.get_current_entities(inputs[prompt_input_key]) [docs] def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, verbose=True, ) knowledge = parse_triples(output) return knowledge def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None: """Get and update knowledge graph from the conversation history.""" prompt_input_key = self._get_prompt_input_key(inputs) knowledge = self.get_knowledge_triplets(inputs[prompt_input_key]) for triple in knowledge: self.kg.add_triple(triple) [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self._get_and_update_kg(inputs)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
fbf5eaaf7946-3
[docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.kg.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
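A usage sketch for ConversationKGMemory, assuming an OpenAI API key; the LLM extracts entities and (subject, predicate, object) triples from each turn, and later turns retrieve per-entity summaries:

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.memory import ConversationKGMemory

    memory = ConversationKGMemory(llm=OpenAI(temperature=0))
    memory.save_context({"input": "Sam lives in Berlin"}, {"output": "Good to know!"})
    # Returns something like "On Sam: Sam lives in Berlin." for matched entities.
    print(memory.load_memory_variables({"input": "Where does Sam live?"}))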
619ef2503ecd-0
Source code for langchain.memory.vectorstore """Class for a VectorStore-backed memory object.""" from typing import Any, Dict, List, Optional, Union from pydantic import Field from langchain.memory.chat_memory import BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import Document from langchain.vectorstores.base import VectorStoreRetriever [docs]class VectorStoreRetrieverMemory(BaseMemory): """Class for a VectorStore-backed memory object.""" retriever: VectorStoreRetriever = Field(exclude=True) """VectorStoreRetriever object to connect to.""" memory_key: str = "history" #: :meta private: """Key name to locate the memories in the result of load_memory_variables.""" input_key: Optional[str] = None """Key name to index the inputs to load_memory_variables.""" return_docs: bool = False """Whether or not to return the result of querying the database directly.""" @property def memory_variables(self) -> List[str]: """The list of keys emitted from the load_memory_variables method.""" return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key [docs] def load_memory_variables( self, inputs: Dict[str, Any] ) -> Dict[str, Union[List[Document], str]]: """Return history buffer.""" input_key = self._get_prompt_input_key(inputs) query = inputs[input_key] docs = self.retriever.get_relevant_documents(query)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/vectorstore.html
619ef2503ecd-1
result: Union[List[Document], str] if not self.return_docs: result = "\n".join([doc.page_content for doc in docs]) else: result = docs return {self.memory_key: result} def _form_documents( self, inputs: Dict[str, Any], outputs: Dict[str, str] ) -> List[Document]: """Format context from this conversation to buffer.""" # Each document should only include the current turn, not the chat history filtered_inputs = {k: v for k, v in inputs.items() if k != self.memory_key} texts = [ f"{k}: {v}" for k, v in list(filtered_inputs.items()) + list(outputs.items()) ] page_content = "\n".join(texts) return [Document(page_content=page_content)] [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" documents = self._form_documents(inputs, outputs) self.retriever.add_documents(documents) [docs] def clear(self) -> None: """Nothing to clear."""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/vectorstore.html
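A sketch of wiring VectorStoreRetrieverMemory to a FAISS store, assuming faiss is installed and an OpenAI API key is set; any VectorStoreRetriever works, and retrieval is similarity-based rather than recency-based:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.memory import VectorStoreRetrieverMemory
    from langchain.vectorstores import FAISS

    store = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
    retriever = store.as_retriever(search_kwargs={"k": 2})
    memory = VectorStoreRetrieverMemory(retriever=retriever)
    memory.save_context({"input": "My favorite color is teal"}, {"output": "Noted."})
    print(memory.load_memory_variables({"input": "What is my favorite color?"}))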
8ff0dcd8ac33-0
Source code for langchain.memory.buffer_window from typing import Any, Dict, List from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationBufferWindowMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: k: int = 5 @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else [] if not self.return_messages: buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: buffer}
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer_window.html
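Because only the last k exchanges are returned (k * 2 messages), ConversationBufferWindowMemory keeps prompts bounded without needing an LLM. A runnable example:

.. code-block:: python

    from langchain.memory import ConversationBufferWindowMemory

    memory = ConversationBufferWindowMemory(k=1)  # keep only the last exchange
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context({"input": "how are you?"}, {"output": "fine, thanks"})
    print(memory.load_memory_variables({}))
    # {'history': 'Human: how are you?\nAI: fine, thanks'}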
95126a7ffd6c-0
Source code for langchain.memory.readonly from typing import Any, Dict, List from langchain.schema import BaseMemory [docs]class ReadOnlySharedMemory(BaseMemory): """A memory wrapper that is read-only and cannot be changed.""" memory: BaseMemory @property def memory_variables(self) -> List[str]: """Return memory variables.""" return self.memory.memory_variables [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Load memory variables from memory.""" return self.memory.load_memory_variables(inputs) [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Nothing should be saved or changed""" pass [docs] def clear(self) -> None: """Nothing to clear, got a memory like a vault.""" pass
https://api.python.langchain.com/en/latest/_modules/langchain/memory/readonly.html
3342730a0381-0
Source code for langchain.memory.summary from __future__ import annotations from typing import Any, Dict, List, Type from pydantic import BaseModel, root_validator from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import SUMMARY_PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseChatMessageHistory, BaseMessage, SystemMessage, get_buffer_string, ) [docs]class SummarizerMixin(BaseModel): human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel prompt: BasePromptTemplate = SUMMARY_PROMPT summary_message_cls: Type[BaseMessage] = SystemMessage [docs] def predict_new_summary( self, messages: List[BaseMessage], existing_summary: str ) -> str: new_lines = get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) chain = LLMChain(llm=self.llm, prompt=self.prompt) return chain.predict(summary=existing_summary, new_lines=new_lines) [docs]class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): """Conversation summarizer to memory.""" buffer: str = "" memory_key: str = "history" #: :meta private: [docs] @classmethod def from_messages( cls, llm: BaseLanguageModel, chat_memory: BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any, ) -> ConversationSummaryMemory:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary.html
3342730a0381-1
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) for i in range(0, len(obj.chat_memory.messages), summarize_step): obj.buffer = obj.predict_new_summary( obj.chat_memory.messages[i : i + summarize_step], obj.buffer ) return obj @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" if self.return_messages: buffer: Any = [self.summary_message_cls(content=self.buffer)] else: buffer = self.buffer return {self.memory_key: buffer} [docs] @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but it should have {expected_keys}." ) return values [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.buffer = self.predict_new_summary( self.chat_memory.messages[-2:], self.buffer
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary.html
3342730a0381-2
) [docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.buffer = ""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary.html
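A usage sketch for ConversationSummaryMemory, assuming an OpenAI API key; each save_context folds the newest exchange into a running summary via the SUMMARY_PROMPT:

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.memory import ConversationSummaryMemory

    memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
    memory.save_context({"input": "I'm planning a trip to Kyoto"}, {"output": "Sounds fun!"})
    print(memory.load_memory_variables({}))  # {'history': '<LLM-written summary>'}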
3fc3e29f076b-0
Source code for langchain.memory.entity import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) [docs]class BaseEntityStore(BaseModel, ABC): [docs] @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass [docs] @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass [docs] @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass [docs] @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass [docs] @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass [docs]class InMemoryEntityStore(BaseEntityStore): """Basic in-memory entity store.""" store: Dict[str, Optional[str]] = {} [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-1
[docs] def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value [docs] def delete(self, key: str) -> None: del self.store[key] [docs] def exists(self, key: str) -> bool: return key in self.store [docs] def clear(self) -> None: return self.store.clear() [docs]class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) super().__init__(*args, **kwargs) try:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-2
self.redis_client = redis.Redis.from_url(url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) [docs] def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") [docs] def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 [docs] def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-3
while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) [docs]class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import the sqlite3 module. " "It ships with the Python standard library, so your Python " "build may have been compiled without SQLite support." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-4
query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) [docs] def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) [docs] def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None [docs] def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) [docs]class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. Extracts named entities from the recent chat history and generates summaries.
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-5
With a swappable entity store, entities can be persisted across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity names may be detected by this call before their summaries have been generated, so the corresponding cache values can still be empty. """
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-6
# Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string?
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-7
if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. """ super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string,
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
3fc3e29f076b-8
input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) [docs] def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
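A sketch combining ConversationEntityMemory with the SQLite store above, assuming an OpenAI API key; note that load_memory_variables must run first so that entity_cache is populated before save_context writes summaries:

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.memory import ConversationEntityMemory
    from langchain.memory.entity import SQLiteEntityStore

    store = SQLiteEntityStore(session_id="demo", db_file="entities.db")
    memory = ConversationEntityMemory(llm=OpenAI(temperature=0), entity_store=store)
    inputs = {"input": "Ada works at Acme"}
    _ = memory.load_memory_variables(inputs)  # populates entity_cache
    memory.save_context(inputs, {"output": "Got it."})
    print(store.get("Ada"))  # summary written by the summarization prompt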
7d21cac494e6-0
Source code for langchain.memory.combined import warnings from typing import Any, Dict, List, Set from pydantic import validator from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMemory [docs]class CombinedMemory(BaseMemory): """Class for combining multiple memories' data together.""" memories: List[BaseMemory] """For tracking all the memories that should be accessed.""" [docs] @validator("memories") def check_repeated_memory_variable( cls, value: List[BaseMemory] ) -> List[BaseMemory]: all_variables: Set[str] = set() for val in value: overlap = all_variables.intersection(val.memory_variables) if overlap: raise ValueError( f"The same variables {overlap} are found in multiple " "memory objects, which is not allowed by CombinedMemory." ) all_variables |= set(val.memory_variables) return value [docs] @validator("memories") def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]: """Check that if memories are of type BaseChatMemory that input keys exist.""" for val in value: if isinstance(val, BaseChatMemory): if val.input_key is None: warnings.warn( "When using CombinedMemory, " "input keys should be set so the input is known. " f"input_key was not set on {val}" ) return value @property def memory_variables(self) -> List[str]: """All the memory variables that this instance provides, collected from all the linked memories."""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/combined.html
7d21cac494e6-1
memory_variables = [] for memory in self.memories: memory_variables.extend(memory.memory_variables) return memory_variables [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Load all vars from sub-memories.""" memory_data: Dict[str, Any] = {} # Collect vars from all sub-memories for memory in self.memories: data = memory.load_memory_variables(inputs) memory_data = { **memory_data, **data, } return memory_data [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this session for every memory.""" # Save context for all sub-memories for memory in self.memories: memory.save_context(inputs, outputs) [docs] def clear(self) -> None: """Clear context from this session for every memory.""" for memory in self.memories: memory.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/combined.html
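A runnable sketch of CombinedMemory with two buffer memories; the memory keys must be distinct, and setting input_key avoids the warning raised by check_input_key:

.. code-block:: python

    from langchain.memory import (
        CombinedMemory,
        ConversationBufferMemory,
        ConversationBufferWindowMemory,
    )

    combined = CombinedMemory(
        memories=[
            ConversationBufferMemory(memory_key="full_history", input_key="input"),
            ConversationBufferWindowMemory(k=1, memory_key="recent", input_key="input"),
        ]
    )
    combined.save_context({"input": "hi"}, {"output": "hello"})
    print(combined.load_memory_variables({}))  # both keys are present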
8298e24ffa52-0
Source code for langchain.memory.chat_memory from abc import ABC from typing import Any, Dict, Optional, Tuple from pydantic import Field from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory from langchain.memory.utils import get_prompt_input_key from langchain.schema import BaseChatMessageHistory, BaseMemory [docs]class BaseChatMemory(BaseMemory, ABC): chat_memory: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory) output_key: Optional[str] = None input_key: Optional[str] = None return_messages: bool = False def _get_input_output( self, inputs: Dict[str, Any], outputs: Dict[str, str] ) -> Tuple[str, str]: if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = list(outputs.keys())[0] else: output_key = self.output_key return inputs[prompt_input_key], outputs[output_key] [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" input_str, output_str = self._get_input_output(inputs, outputs) self.chat_memory.add_user_message(input_str) self.chat_memory.add_ai_message(output_str) [docs] def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_memory.html
81daa7ac24e5-0
Source code for langchain.memory.buffer from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import get_buffer_string [docs]class ConversationBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: @property def buffer(self) -> Any: """String buffer of memory.""" if self.return_messages: return self.chat_memory.messages else: return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} [docs]class ConversationStringBufferMemory(BaseMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" """Prefix to use for AI generated responses.""" buffer: str = "" output_key: Optional[str] = None input_key: Optional[str] = None memory_key: str = "history" #: :meta private: [docs] @root_validator() def validate_chains(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer.html
81daa7ac24e5-1
"""Validate that return messages is not True.""" if values.get("return_messages", False): raise ValueError( "return_messages must be False for ConversationStringBufferMemory" ) return values @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" return {self.memory_key: self.buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = list(outputs.keys())[0] else: output_key = self.output_key human = f"{self.human_prefix}: " + inputs[prompt_input_key] ai = f"{self.ai_prefix}: " + outputs[output_key] self.buffer += "\n" + "\n".join([human, ai]) [docs] def clear(self) -> None: """Clear memory contents.""" self.buffer = ""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer.html
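A runnable round trip with ConversationBufferMemory; return_messages=True yields message objects instead of one formatted string:

.. code-block:: python

    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory()
    memory.save_context({"input": "hi"}, {"output": "hello"})
    print(memory.load_memory_variables({}))  # {'history': 'Human: hi\nAI: hello'}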
3da74169c441-0
Source code for langchain.memory.simple from typing import Any, Dict, List from langchain.schema import BaseMemory [docs]class SimpleMemory(BaseMemory): """Simple memory for storing context or other bits of information that shouldn't ever change between prompts. """ memories: Dict[str, Any] = dict() @property def memory_variables(self) -> List[str]: return list(self.memories.keys()) [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: return self.memories [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Nothing should be saved or changed, my memory is set in stone.""" pass [docs] def clear(self) -> None: """Nothing to clear, got a memory like a vault.""" pass
https://api.python.langchain.com/en/latest/_modules/langchain/memory/simple.html
302bc32b3cf8-0
Source code for langchain.memory.motorhead_memory from typing import Any, Dict, List, Optional import requests from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import get_buffer_string MANAGED_URL = "https://api.getmetal.io/v1/motorhead" # LOCAL_URL = "http://localhost:8080" [docs]class MotorheadMemory(BaseChatMemory): url: str = MANAGED_URL timeout = 3000 memory_key = "history" session_id: str context: Optional[str] = None # Managed Params api_key: Optional[str] = None client_id: Optional[str] = None def __get_headers(self) -> Dict[str, str]: is_managed = self.url == MANAGED_URL headers = { "Content-Type": "application/json", } if is_managed and not (self.api_key and self.client_id): raise ValueError( """ You must provide both an API key and a client ID to use the managed version of Motorhead. Visit https://getmetal.io for more information. """ ) if is_managed and self.api_key and self.client_id: headers["x-metal-api-key"] = self.api_key headers["x-metal-client-id"] = self.client_id return headers [docs] async def init(self) -> None: res = requests.get( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, headers=self.__get_headers(), ) res_data = res.json() res_data = res_data.get("data", res_data) # Handle Managed Version
https://api.python.langchain.com/en/latest/_modules/langchain/memory/motorhead_memory.html
302bc32b3cf8-1
messages = res_data.get("messages", []) context = res_data.get("context", "NONE") for message in reversed(messages): if message["role"] == "AI": self.chat_memory.add_ai_message(message["content"]) else: self.chat_memory.add_user_message(message["content"]) if context and context != "NONE": self.context = context [docs] def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]: if self.return_messages: return {self.memory_key: self.chat_memory.messages} else: return {self.memory_key: get_buffer_string(self.chat_memory.messages)} @property def memory_variables(self) -> List[str]: return [self.memory_key] [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: input_str, output_str = self._get_input_output(inputs, outputs) requests.post( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, json={ "messages": [ {"role": "Human", "content": f"{input_str}"}, {"role": "AI", "content": f"{output_str}"}, ] }, headers=self.__get_headers(), ) super().save_context(inputs, outputs) [docs] def delete_session(self) -> None: """Delete a session""" requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
https://api.python.langchain.com/en/latest/_modules/langchain/memory/motorhead_memory.html
ee985eb740d1-0
Source code for langchain.memory.summary_buffer from typing import Any, Dict, List from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.summary import SummarizerMixin from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): """Buffer with summarizer for storing conversation memory.""" max_token_limit: int = 2000 moving_summary_buffer: str = "" memory_key: str = "history" @property def buffer(self) -> List[BaseMessage]: return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer = self.buffer if self.moving_summary_buffer != "": first_messages: List[BaseMessage] = [ self.summary_message_cls(content=self.moving_summary_buffer) ] buffer = first_messages + buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix ) return {self.memory_key: final_buffer} [docs] @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables):
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary_buffer.html
ee985eb740d1-1
raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but it should have {expected_keys}." ) return values [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.prune() [docs] def prune(self) -> None: """Prune buffer if it exceeds max token limit""" buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) self.moving_summary_buffer = self.predict_new_summary( pruned_memory, self.moving_summary_buffer ) [docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.moving_summary_buffer = ""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary_buffer.html
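A usage sketch for ConversationSummaryBufferMemory, assuming an OpenAI API key; messages pruned from the front of the buffer are folded into moving_summary_buffer rather than discarded:

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.memory import ConversationSummaryBufferMemory

    memory = ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40)
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context({"input": "tell me about yourself"}, {"output": "I am an AI..."})
    print(memory.load_memory_variables({}))  # summary prefix plus recent raw turns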
1cfdff909e36-0
Source code for langchain.memory.chat_message_histories.file import json import logging from pathlib import Path from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) [docs]class FileChatMessageHistory(BaseChatMessageHistory): """ Chat message history that stores history in a local file. Args: file_path: path of the local file to store the messages. """ def __init__(self, file_path: str): self.file_path = Path(file_path) if not self.file_path.exists(): self.file_path.touch() self.file_path.write_text(json.dumps([])) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from the local file""" items = json.loads(self.file_path.read_text()) messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in the local file""" messages = messages_to_dict(self.messages) messages.append(messages_to_dict([message])[0]) self.file_path.write_text(json.dumps(messages)) [docs] def clear(self) -> None: """Clear session memory from the local file""" self.file_path.write_text(json.dumps([]))
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/file.html
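A runnable example of FileChatMessageHistory; the file path here is arbitrary, and the history is re-read from disk on every messages access:

.. code-block:: python

    from langchain.memory.chat_message_histories.file import FileChatMessageHistory

    history = FileChatMessageHistory("chat_history.json")
    history.add_user_message("hi")
    history.add_ai_message("hello")
    print(history.messages)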
d4bef4283b4a-0
Source code for langchain.memory.chat_message_histories.firestore """Firestore Chat Message History.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, List, Optional from langchain.schema import ( BaseChatMessageHistory, BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) if TYPE_CHECKING: from google.cloud.firestore import DocumentReference [docs]class FirestoreChatMessageHistory(BaseChatMessageHistory): """Chat history backed by Google Firestore.""" def __init__( self, collection_name: str, session_id: str, user_id: str, ): """ Initialize a new instance of the FirestoreChatMessageHistory class. :param collection_name: The name of the collection to use. :param session_id: The session ID for the chat. :param user_id: The user ID for the chat. """ self.collection_name = collection_name self.session_id = session_id self.user_id = user_id self._document: Optional[DocumentReference] = None self.messages: List[BaseMessage] = [] self.prepare_firestore() [docs] def prepare_firestore(self) -> None: """Prepare the Firestore client. Use this function to make sure your database is ready. """ try: import firebase_admin from firebase_admin import firestore except ImportError: raise ImportError( "Could not import firebase-admin python package. " "Please install it with `pip install firebase-admin`." ) # For multiple instances, only initialize the app once.
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/firestore.html
d4bef4283b4a-1
try: firebase_admin.get_app() except ValueError as e: logger.debug("Initializing Firebase app: %s", e) firebase_admin.initialize_app() self.firestore_client = firestore.client() self._document = self.firestore_client.collection( self.collection_name ).document(self.session_id) self.load_messages() [docs] def load_messages(self) -> None: """Retrieve the messages from Firestore""" if not self._document: raise ValueError("Document not initialized") doc = self._document.get() if doc.exists: data = doc.to_dict() if "messages" in data and len(data["messages"]) > 0: self.messages = messages_from_dict(data["messages"]) [docs] def add_message(self, message: BaseMessage) -> None: self.messages.append(message) self.upsert_messages() [docs] def upsert_messages(self, new_message: Optional[BaseMessage] = None) -> None: """Update the Firestore document.""" if not self._document: raise ValueError("Document not initialized") self._document.set( { "id": self.session_id, "user_id": self.user_id, "messages": messages_to_dict(self.messages), } ) [docs] def clear(self) -> None: """Clear session memory from this memory and Firestore.""" self.messages = [] if self._document: self._document.delete()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/firestore.html
2b3dd4a37190-0
Source code for langchain.memory.chat_message_histories.cosmos_db """Azure CosmosDB Memory History.""" from __future__ import annotations import logging from types import TracebackType from typing import TYPE_CHECKING, Any, List, Optional, Type from langchain.schema import ( BaseChatMessageHistory, BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) if TYPE_CHECKING: from azure.cosmos import ContainerProxy [docs]class CosmosDBChatMessageHistory(BaseChatMessageHistory): """Chat history backed by Azure CosmosDB.""" def __init__( self, cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None, ): """ Initializes a new instance of the CosmosDBChatMessageHistory class. Make sure to call prepare_cosmos or use the context manager to make sure your database is ready. Either a credential or a connection string must be provided. :param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account. :param cosmos_database: The name of the database to use. :param cosmos_container: The name of the container to use. :param session_id: The session ID to use, can be overwritten while loading. :param user_id: The user ID to use, can be overwritten while loading. :param credential: The credential to use to authenticate to Azure Cosmos DB. :param connection_string: The connection string to use to authenticate.
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
2b3dd4a37190-1
:param ttl: The time to live (in seconds) to use for documents in the container. :param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient. """ self.cosmos_endpoint = cosmos_endpoint self.cosmos_database = cosmos_database self.cosmos_container = cosmos_container self.credential = credential self.conn_string = connection_string self.session_id = session_id self.user_id = user_id self.ttl = ttl self.messages: List[BaseMessage] = [] try: from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501 CosmosClient, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501 ) from exc if self.credential: self._client = CosmosClient( url=self.cosmos_endpoint, credential=self.credential, **cosmos_client_kwargs or {}, ) elif self.conn_string: self._client = CosmosClient.from_connection_string( conn_str=self.conn_string, **cosmos_client_kwargs or {}, ) else: raise ValueError("Either a connection string or a credential must be set.") self._container: Optional[ContainerProxy] = None [docs] def prepare_cosmos(self) -> None: """Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. """ try: from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
2b3dd4a37190-2
PartitionKey, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501 ) from exc database = self._client.create_database_if_not_exists(self.cosmos_database) self._container = database.create_container_if_not_exists( self.cosmos_container, partition_key=PartitionKey("/user_id"), default_ttl=self.ttl, ) self.load_messages() def __enter__(self) -> "CosmosDBChatMessageHistory": """Context manager entry point.""" self._client.__enter__() self.prepare_cosmos() return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: """Context manager exit""" self.upsert_messages() self._client.__exit__(exc_type, exc_val, traceback) [docs] def load_messages(self) -> None: """Retrieve the messages from Cosmos""" if not self._container: raise ValueError("Container not initialized") try: from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501 CosmosHttpResponseError, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501 ) from exc try: item = self._container.read_item( item=self.session_id, partition_key=self.user_id ) except CosmosHttpResponseError:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
2b3dd4a37190-3
) except CosmosHttpResponseError: logger.info("no session found") return if "messages" in item and len(item["messages"]) > 0: self.messages = messages_from_dict(item["messages"]) [docs] def add_message(self, message: BaseMessage) -> None: """Add a self-created message to the store""" self.messages.append(message) self.upsert_messages() [docs] def upsert_messages(self) -> None: """Update the cosmosdb item.""" if not self._container: raise ValueError("Container not initialized") self._container.upsert_item( body={ "id": self.session_id, "user_id": self.user_id, "messages": messages_to_dict(self.messages), } ) [docs] def clear(self) -> None: """Clear session memory from this memory and cosmos.""" self.messages = [] if self._container: self._container.delete_item( item=self.session_id, partition_key=self.user_id )
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
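For orientation, a minimal usage sketch follows. It is not part of the module source: the endpoint, database, container, and ID values are placeholders you would replace with your own, and it assumes the azure-cosmos package is installed and the connection string grants access to the account. The context manager calls prepare_cosmos() on entry and upsert_messages() on exit, as defined above.

.. code-block:: python

    from langchain.memory.chat_message_histories.cosmos_db import (
        CosmosDBChatMessageHistory,
    )
    from langchain.schema import HumanMessage

    # Placeholder values -- substitute your own Cosmos DB account details.
    history = CosmosDBChatMessageHistory(
        cosmos_endpoint="https://<your-account>.documents.azure.com:443/",
        cosmos_database="chat_db",
        cosmos_container="chat_histories",
        session_id="session-1",
        user_id="user-1",
        connection_string="<your-connection-string>",
    )

    # Entering the context prepares the database and container and loads any
    # previously stored messages; exiting upserts the session back to Cosmos.
    with history:
        history.add_message(HumanMessage(content="Hello!"))
        print(history.messages)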
Source code for langchain.memory.chat_message_histories.in_memory

from typing import List

from pydantic import BaseModel

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
)


[docs]class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
    """In-memory chat message history backed by a plain Python list."""

    messages: List[BaseMessage] = []

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Add a self-created message to the store."""
        self.messages.append(message)

[docs]    def clear(self) -> None:
        """Clear the in-memory message store."""
        self.messages = []
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/in_memory.html
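Because this class has no external dependencies, a short, self-contained usage sketch is enough to show the whole API:

.. code-block:: python

    from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
    from langchain.schema import AIMessage, HumanMessage

    history = ChatMessageHistory()
    history.add_message(HumanMessage(content="Hi there!"))
    history.add_message(AIMessage(content="Hello! How can I help?"))

    print(history.messages)  # [HumanMessage(...), AIMessage(...)]

    history.clear()
    assert history.messages == []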
Source code for langchain.memory.chat_message_histories.sql

import json
import logging
from typing import List

from sqlalchemy import Column, Integer, Text, create_engine

try:
    from sqlalchemy.orm import declarative_base
except ImportError:
    from sqlalchemy.ext.declarative import declarative_base

from sqlalchemy.orm import sessionmaker

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)


[docs]def create_message_model(table_name, DynamicBase):  # type: ignore
    """
    Create a message model for a given table name.

    Args:
        table_name: The name of the table to use.
        DynamicBase: The base class to use for the model.

    Returns:
        The model class.
    """

    # Model declared inside a function to allow a dynamic table name.
    class Message(DynamicBase):
        __tablename__ = table_name
        id = Column(Integer, primary_key=True)
        session_id = Column(Text)
        message = Column(Text)

    return Message


[docs]class SQLChatMessageHistory(BaseChatMessageHistory):
    """Chat message history stored in an SQL database."""

    def __init__(
        self,
        session_id: str,
        connection_string: str,
        table_name: str = "message_store",
    ):
        self.table_name = table_name
        self.connection_string = connection_string
        self.engine = create_engine(connection_string, echo=False)
        self._create_table_if_not_exists()
        self.session_id = session_id
        self.Session = sessionmaker(self.engine)

    def _create_table_if_not_exists(self) -> None:
        DynamicBase = declarative_base()
        self.Message = create_message_model(self.table_name, DynamicBase)
        # create_all checks for existing tables first, so this is safe to
        # call even if the table was created on a previous run.
        DynamicBase.metadata.create_all(self.engine)

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve all messages from db"""
        with self.Session() as session:
            result = session.query(self.Message).where(
                self.Message.session_id == self.session_id
            )
            items = [json.loads(record.message) for record in result]
            messages = messages_from_dict(items)
            return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in db"""
        with self.Session() as session:
            jsonstr = json.dumps(_message_to_dict(message))
            session.add(self.Message(session_id=self.session_id, message=jsonstr))
            session.commit()

[docs]    def clear(self) -> None:
        """Clear session memory from db"""
        with self.Session() as session:
            session.query(self.Message).filter(
                self.Message.session_id == self.session_id
            ).delete()
            session.commit()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/sql.html
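A minimal usage sketch, assuming SQLAlchemy is installed. The SQLite connection string and file name are illustrative choices that keep the example self-contained; any SQLAlchemy-supported URL (PostgreSQL, MySQL, ...) works the same way:

.. code-block:: python

    from langchain.memory.chat_message_histories.sql import SQLChatMessageHistory
    from langchain.schema import HumanMessage

    # "sqlite:///chat_history.db" creates a local file-backed database; the
    # message_store table is created on first use if it does not exist.
    history = SQLChatMessageHistory(
        session_id="session-1",
        connection_string="sqlite:///chat_history.db",
    )

    history.add_message(HumanMessage(content="Hello!"))
    print(history.messages)  # only rows whose session_id == "session-1"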
Source code for langchain.memory.chat_message_histories.dynamodb

import logging
from typing import List, Optional

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)


[docs]class DynamoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in AWS DynamoDB.

    This class expects that a DynamoDB table with name `table_name`
    and a partition key of `SessionId` is present.

    Args:
        table_name: name of the DynamoDB table
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        endpoint_url: URL of the AWS endpoint to connect to. This argument
            is optional and useful for test purposes, like using Localstack.
            If you plan to use AWS cloud service, you normally don't have to
            worry about setting the endpoint_url.
    """

    def __init__(
        self, table_name: str, session_id: str, endpoint_url: Optional[str] = None
    ):
        import boto3

        if endpoint_url:
            client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
        else:
            client = boto3.resource("dynamodb")
        self.table = client.Table(table_name)
        self.session_id = session_id

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from DynamoDB"""
        from botocore.exceptions import ClientError

        response = None
        try:
            response = self.table.get_item(Key={"SessionId": self.session_id})
        except ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                logger.warning("No record found with session id: %s", self.session_id)
            else:
                logger.error(error)

        if response and "Item" in response:
            items = response["Item"]["History"]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in DynamoDB"""
        from botocore.exceptions import ClientError

        messages = messages_to_dict(self.messages)
        _message = _message_to_dict(message)
        messages.append(_message)

        try:
            self.table.put_item(
                Item={"SessionId": self.session_id, "History": messages}
            )
        except ClientError as err:
            logger.error(err)

[docs]    def clear(self) -> None:
        """Clear session memory from DynamoDB"""
        from botocore.exceptions import ClientError

        try:
            self.table.delete_item(Key={"SessionId": self.session_id})
        except ClientError as err:
            logger.error(err)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/dynamodb.html
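Since the class expects the table to exist already, the sketch below first creates one. The table name and the Localstack endpoint are illustrative assumptions, and the snippet presumes AWS credentials and a region are configured (for example via environment variables):

.. code-block:: python

    import boto3

    from langchain.memory.chat_message_histories.dynamodb import (
        DynamoDBChatMessageHistory,
    )
    from langchain.schema import HumanMessage

    # Create the required table: partition key "SessionId" (string type).
    dynamodb = boto3.resource("dynamodb", endpoint_url="http://localhost:4566")
    dynamodb.create_table(
        TableName="SessionTable",
        KeySchema=[{"AttributeName": "SessionId", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "SessionId", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )

    history = DynamoDBChatMessageHistory(
        table_name="SessionTable",
        session_id="session-1",
        endpoint_url="http://localhost:4566",
    )
    history.add_message(HumanMessage(content="Hello!"))
    print(history.messages)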
Source code for langchain.memory.chat_message_histories.zep

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Dict, List, Optional

from langchain.schema import (
    AIMessage,
    BaseChatMessageHistory,
    BaseMessage,
    HumanMessage,
)

if TYPE_CHECKING:
    from zep_python import Memory, MemorySearchResult, Message, NotFoundError

logger = logging.getLogger(__name__)


[docs]class ZepChatMessageHistory(BaseChatMessageHistory):
    """A ChatMessageHistory implementation that uses Zep as a backend.

    Recommended usage::

        # Set up Zep Chat History
        zep_chat_history = ZepChatMessageHistory(
            session_id=session_id,
            url=ZEP_API_URL,
            api_key=<your_api_key>,
        )

        # Use a standard ConversationBufferMemory to encapsulate the Zep chat history
        memory = ConversationBufferMemory(
            memory_key="chat_history", chat_memory=zep_chat_history
        )

    Zep provides long-term conversation storage for LLM apps. The server stores,
    summarizes, embeds, indexes, and enriches conversational AI chat histories,
    and exposes them via simple, low-latency APIs.

    For server installation instructions and more, see:
    https://docs.getzep.com/deployment/quickstart/

    This class is a thin wrapper around the zep-python package. Additional
    Zep functionality is exposed via the `zep_summary` and `zep_messages`
    properties.

    For more information on the zep-python package, see:
    https://github.com/getzep/zep-python
    """

    def __init__(
        self,
        session_id: str,
        url: str = "http://localhost:8000",
        api_key: Optional[str] = None,
    ) -> None:
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ValueError(
                "Could not import zep-python package. "
                "Please install it with `pip install zep-python`."
            )

        self.zep_client = ZepClient(base_url=url, api_key=api_key)
        self.session_id = session_id

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve messages from Zep memory"""
        zep_memory: Optional[Memory] = self._get_memory()
        if not zep_memory:
            return []

        messages: List[BaseMessage] = []
        # Extract summary, if present, and messages
        if zep_memory.summary:
            if len(zep_memory.summary.content) > 0:
                messages.append(HumanMessage(content=zep_memory.summary.content))
        if zep_memory.messages:
            msg: Message
            for msg in zep_memory.messages:
                if msg.role == "ai":
                    messages.append(AIMessage(content=msg.content))
                else:
                    messages.append(HumanMessage(content=msg.content))

        return messages

    @property
    def zep_messages(self) -> List[Message]:
        """Retrieve the raw message list from Zep memory"""
        zep_memory: Optional[Memory] = self._get_memory()
        if not zep_memory:
            return []

        return zep_memory.messages

    @property
    def zep_summary(self) -> Optional[str]:
        """Retrieve the summary from Zep memory"""
        zep_memory: Optional[Memory] = self._get_memory()
        if not zep_memory or not zep_memory.summary:
            return None

        return zep_memory.summary.content

    def _get_memory(self) -> Optional[Memory]:
        """Retrieve memory from Zep"""
        from zep_python import NotFoundError

        try:
            zep_memory: Memory = self.zep_client.get_memory(self.session_id)
        except NotFoundError:
            logger.warning(
                f"Session {self.session_id} not found in Zep. Returning None"
            )
            return None
        return zep_memory

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the Zep memory history"""
        from zep_python import Memory, Message

        zep_message: Message
        if isinstance(message, HumanMessage):
            zep_message = Message(content=message.content, role="human")
        else:
            zep_message = Message(content=message.content, role="ai")

        zep_memory = Memory(messages=[zep_message])
        self.zep_client.add_memory(self.session_id, zep_memory)

[docs]    def search(
        self, query: str, metadata: Optional[Dict] = None, limit: Optional[int] = None
    ) -> List[MemorySearchResult]:
        """Search Zep memory for messages matching the query"""
        from zep_python import MemorySearchPayload

        payload: MemorySearchPayload = MemorySearchPayload(
            text=query, metadata=metadata
        )

        return self.zep_client.search_memory(self.session_id, payload, limit=limit)

[docs]    def clear(self) -> None:
        """Clear session memory from Zep. Note that Zep is long-term storage for memory
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/zep.html
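The class docstring above already shows basic setup; as a complement, here is a sketch of the search method. It assumes a Zep server reachable at the default URL and a session that already contains messages; the result fields shown (message, dist) follow the zep-python MemorySearchResult model and should be checked against the installed zep-python version:

.. code-block:: python

    from langchain.memory.chat_message_histories.zep import ZepChatMessageHistory

    # Assumes a Zep server at http://localhost:8000 and an existing session.
    zep_history = ZepChatMessageHistory(session_id="session-1")

    # Vector search over the session's stored messages.
    results = zep_history.search("What did we decide about the deadline?", limit=3)
    for result in results:
        print(result.message, result.dist)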