| id | text | source |
|---|---|---|
7a73cff8e807-0
|
Source code for langchain.retrievers.chaindesk
from typing import Any, List, Optional
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
class ChaindeskRetriever(BaseRetriever):
"""Retriever for the Chaindesk API."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def __init__(
self,
datastore_url: str,
top_k: Optional[int] = None,
api_key: Optional[str] = None,
):
super().__init__(
datastore_url=datastore_url,
api_key=api_key,
top_k=top_k,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/chaindesk.html
|
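A minimal usage sketch for the retriever above. The datastore URL, API key, and query are placeholders; check your own Chaindesk datastore for the real endpoint:

```python
from langchain.retrievers.chaindesk import ChaindeskRetriever

# Placeholder endpoint and credentials, for illustration only.
retriever = ChaindeskRetriever(
    datastore_url="https://app.chaindesk.ai/api/datastores/<datastore_id>/query",
    top_k=3,                   # sent as "topK" in the request body
    api_key="<your_api_key>",  # sent as a Bearer token when provided
)
docs = retriever.get_relevant_documents("What is our refund policy?")
for doc in docs:
    print(doc.metadata["score"], doc.metadata["source"])
```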
ba7b488118f3-0
|
Source code for langchain.retrievers.weaviate_hybrid_search
from __future__ import annotations
from typing import Any, Dict, List, Optional, cast
from uuid import uuid4
from pydantic import root_validator
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class WeaviateHybridSearchRetriever(BaseRetriever):
"""Retriever for Weaviate's hybrid search."""
client: Any
"""The Weaviate client used to execute hybrid search queries."""
index_name: str
"""The name of the index to use."""
text_key: str
"""The name of the text key to use."""
alpha: float = 0.5
"""The weight of the text key in the hybrid search."""
k: int = 4
"""The number of results to return."""
attributes: List[str]
"""The attributes to return in the results."""
create_schema_if_missing: bool = True
"""Whether to create the schema if it doesn't exist."""
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(values["client"], weaviate.Client):
client = values["client"]
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
if values.get("attributes") is None:
values["attributes"] = []
cast(List, values["attributes"]).append(values["text_key"])
if values.get("create_schema_if_missing", True):
class_obj = {
"class": values["index_name"],
"properties": [{"name": values["text_key"], "dataType": ["text"]}],
"vectorizer": "text2vec-openai",
}
if not values["client"].schema.exists(values["index_name"]):
values["client"].schema.create_class(class_obj)
return values
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# added text_key
def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self.client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self.text_key: doc.page_content, **metadata}
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self.index_name, _id)
ids.append(_id)
return ids
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
where_filter: Optional[Dict[str, object]] = None,
score: bool = False,
) -> List[Document]:
"""Look up similar documents in Weaviate."""
query_obj = self.client.query.get(self.index_name, self.attributes)
if where_filter:
query_obj = query_obj.with_where(where_filter)
if score:
query_obj = query_obj.with_additional(["score", "explainScore"])
result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self.index_name]:
text = res.pop(self.text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
|
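A usage sketch for the hybrid retriever, assuming a Weaviate instance reachable at the given URL; the index name and text key are illustrative. `alpha` blends BM25 keyword scores (0) with vector scores (1):

```python
import weaviate
from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
from langchain.schema import Document

# Assumes a local Weaviate with the text2vec-openai vectorizer available.
client = weaviate.Client(url="http://localhost:8080")
retriever = WeaviateHybridSearchRetriever(
    client=client,
    index_name="LangChain",  # illustrative class name
    text_key="text",
    alpha=0.5,               # 0 = pure keyword search, 1 = pure vector search
    k=4,
)
retriever.add_documents([Document(page_content="Hybrid search mixes BM25 and vectors.")])
docs = retriever.get_relevant_documents("how does hybrid search work?")
```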
1a8cb305da3e-0
|
Source code for langchain.retrievers.document_compressors.base
"""Interface for retrieved document compressors."""
from abc import ABC, abstractmethod
from inspect import signature
from typing import List, Optional, Sequence, Union
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.schema import BaseDocumentTransformer, Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base abstraction interface for document compression."""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
@abstractmethod
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of transformers."""
transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/base.html
|
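Transformers in the pipeline run in order: plain `BaseDocumentTransformer`s get no query, while `BaseDocumentCompressor`s do. A sketch, assuming an OpenAI key for the embeddings; the splitter settings and threshold are illustrative:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain.text_splitter import CharacterTextSplitter

# Split first (a transformer), then drop chunks unrelated to the query (a compressor).
splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
relevance_filter = EmbeddingsFilter(
    embeddings=OpenAIEmbeddings(),  # requires OPENAI_API_KEY
    similarity_threshold=0.75,
)
pipeline = DocumentCompressorPipeline(transformers=[splitter, relevance_filter])
# compressed = pipeline.compress_documents(docs, query="...")
```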
fff5f05887b0-0
|
Source code for langchain.retrievers.document_compressors.chain_filter
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.manager import Callbacks
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
from langchain.schema import BasePromptTemplate, Document
from langchain.schema.language_model import BaseLanguageModel
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(
**_input, callbacks=callbacks
)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError()
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
**kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_filter.html
|
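A sketch of the filter built via `from_llm`, assuming an OpenAI key is set; the documents are toy inputs. The default prompt asks the LLM for a YES/NO relevance verdict, which `BooleanOutputParser` turns into the `include_doc` flag:

```python
from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain.schema import Document

doc_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))  # requires OPENAI_API_KEY
docs = [
    Document(page_content="The sky appears blue due to Rayleigh scattering."),
    Document(page_content="Bananas are a good source of potassium."),
]
kept = doc_filter.compress_documents(docs, query="Why is the sky blue?")
# Only documents the LLM judged relevant remain, with content unchanged.
```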
5d928532a55f-0
|
Source code for langchain.retrievers.document_compressors.cohere_rerank
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from pydantic import Extra, root_validator
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do this to avoid pydantic annotation issues when actually instantiating,
# while keeping this import optional.
try:
from cohere import Client
except ImportError:
pass
class CohereRerank(BaseDocumentCompressor):
"""DocumentCompressor that uses Cohere's rerank API to compress documents."""
client: Client
"""Cohere client to use for compressing documents."""
top_n: int = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
raise NotImplementedError()
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/cohere_rerank.html
|
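A sketch, assuming `pip install cohere` and a `COHERE_API_KEY` in the environment (the root validator constructs the client):

```python
from langchain.retrievers.document_compressors import CohereRerank
from langchain.schema import Document

compressor = CohereRerank(top_n=2)  # client is built by validate_environment
docs = [
    Document(page_content="Paris is the capital of France."),
    Document(page_content="The Nile is a river in Africa."),
    Document(page_content="The Eiffel Tower is in France's capital."),
]
reranked = compressor.compress_documents(docs, query="What is the capital of France?")
for doc in reranked:
    print(doc.metadata["relevance_score"], doc.page_content)
```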
4496f26b1bf1-0
|
Source code for langchain.retrievers.document_compressors.chain_extract
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
import asyncio
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseOutputParser, Document
from langchain.schema.language_model import BaseLanguageModel
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs, treating the sentinel "no output" string as empty."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""DocumentCompressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input, callbacks=callbacks)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
outputs = await asyncio.gather(
*[
self.llm_chain.apredict_and_parse(
**self.get_input(query, doc), callbacks=callbacks
)
for doc in documents
]
)
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
|
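A sketch of the extractor, assuming an OpenAI key; the toy document mixes one relevant and one irrelevant sentence:

```python
from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document

extractor = LLMChainExtractor.from_llm(OpenAI(temperature=0))  # requires OPENAI_API_KEY
docs = [Document(page_content="Foxes are small mammals. The Eiffel Tower is in Paris.")]
compressed = extractor.compress_documents(docs, query="Where is the Eiffel Tower?")
# Each surviving Document keeps only the extracted passages; documents for which
# the LLM returns the NO_OUTPUT sentinel are dropped entirely.
```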
06f5610bf15d-0
|
Source code for langchain.retrievers.document_compressors.embeddings_filter
from typing import Callable, Dict, Optional, Sequence
import numpy as np
from pydantic import root_validator
from langchain.callbacks.manager import Callbacks
from langchain.document_transformers.embeddings_redundant_filter import (
_get_embeddings_from_stateful_docs,
get_stateful_documents,
)
from langchain.embeddings.base import Embeddings
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import Document
from langchain.utils.math import cosine_similarity
class EmbeddingsFilter(BaseDocumentCompressor):
"""Document compressor that uses embeddings to drop documents
unrelated to the query."""
embeddings: Embeddings
"""Embeddings to use for embedding document contents and queries."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
k: Optional[int] = 20
"""The number of relevant documents to return. Can be set to None, in which case
`similarity_threshold` must be specified. Defaults to 20."""
similarity_threshold: Optional[float]
"""Threshold for determining when a document is similar enough to the query
to be returned. Defaults to None; must be specified if `k` is set to None."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate similarity parameters."""
if values["k"] is None and values["similarity_threshold"] is None:
raise ValueError("Must specify one of `k` or `similarity_threshold`.")
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter documents based on similarity of their embeddings to the query."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
embedded_query = self.embeddings.embed_query(query)
similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
included_idxs = np.arange(len(embedded_documents))
if self.k is not None:
included_idxs = np.argsort(similarity)[::-1][: self.k]
if self.similarity_threshold is not None:
similar_enough = np.where(
similarity[included_idxs] > self.similarity_threshold
)
included_idxs = included_idxs[similar_enough]
return [stateful_documents[i] for i in included_idxs]
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError()
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/embeddings_filter.html
|
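A sketch, assuming an OpenAI key; with `k=None` the threshold alone decides what survives:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document

embeddings_filter = EmbeddingsFilter(
    embeddings=OpenAIEmbeddings(),  # requires OPENAI_API_KEY
    k=None,                         # then similarity_threshold must be set
    similarity_threshold=0.76,      # illustrative value
)
docs = [
    Document(page_content="LangChain provides retrievers and compressors."),
    Document(page_content="How to cook pasta al dente."),
]
relevant = embeddings_filter.compress_documents(docs, query="document retrieval")
```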
fe9124fab753-0
|
Source code for langchain.retrievers.self_query.base
"""Retriever that generates and executes structured queries over its own data source."""
from typing import Any, Dict, List, Optional, Type, cast
from pydantic import BaseModel, Field, root_validator
from langchain import LLMChain
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.chains.query_constructor.base import load_query_constructor_chain
from langchain.chains.query_constructor.ir import StructuredQuery, Visitor
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.chroma import ChromaTranslator
from langchain.retrievers.self_query.deeplake import DeepLakeTranslator
from langchain.retrievers.self_query.myscale import MyScaleTranslator
from langchain.retrievers.self_query.pinecone import PineconeTranslator
from langchain.retrievers.self_query.qdrant import QdrantTranslator
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
from langchain.schema import BaseRetriever, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores import (
Chroma,
DeepLake,
MyScale,
Pinecone,
Qdrant,
VectorStore,
Weaviate,
)
def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
"""Get the translator class corresponding to the vector store class."""
vectorstore_cls = vectorstore.__class__
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {
Pinecone: PineconeTranslator,
Chroma: ChromaTranslator,
Weaviate: WeaviateTranslator,
Qdrant: QdrantTranslator,
MyScale: MyScaleTranslator,
DeepLake: DeepLakeTranslator,
}
if vectorstore_cls not in BUILTIN_TRANSLATORS:
raise ValueError(
f"Self query retriever with Vector Store type {vectorstore_cls}"
f" not supported."
)
if isinstance(vectorstore, Qdrant):
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
elif isinstance(vectorstore, MyScale):
return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
return BUILTIN_TRANSLATORS[vectorstore_cls]()
class SelfQueryRetriever(BaseRetriever, BaseModel):
"""Retriever that uses a vector store and an LLM to generate
the vector store queries."""
vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved."""
llm_chain: LLMChain
"""The LLMChain for generating the vector store queries."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass in to the vector store search."""
structured_query_translator: Visitor
"""Translator for turning internal query language into vectorstore search params."""
verbose: bool = False
use_original_query: bool = False
"""Use the original query instead of the query revised by the LLM."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_translator(cls, values: Dict) -> Dict:
"""Validate translator."""
if "structured_query_translator" not in values:
values["structured_query_translator"] = _get_builtin_translator(
values["vectorstore"]
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
inputs = self.llm_chain.prep_inputs({"query": query})
structured_query = cast(
StructuredQuery,
self.llm_chain.predict_and_parse(
callbacks=run_manager.get_child(), **inputs
),
)
if self.verbose:
print(structured_query)
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
structured_query
)
if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit
if self.use_original_query:
new_query = query
search_kwargs = {**self.search_kwargs, **new_kwargs}
docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs)
return docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
document_contents: str,
metadata_field_info: List[AttributeInfo],
structured_query_translator: Optional[Visitor] = None,
chain_kwargs: Optional[Dict] = None,
enable_limit: bool = False,
use_original_query: bool = False,
**kwargs: Any,
) -> "SelfQueryRetriever":
if structured_query_translator is None:
structured_query_translator = _get_builtin_translator(vectorstore)
chain_kwargs = chain_kwargs or {}
if "allowed_comparators" not in chain_kwargs:
chain_kwargs[
"allowed_comparators"
] = structured_query_translator.allowed_comparators
if "allowed_operators" not in chain_kwargs:
chain_kwargs[
"allowed_operators"
] = structured_query_translator.allowed_operators
llm_chain = load_query_constructor_chain(
llm,
document_contents,
metadata_field_info,
enable_limit=enable_limit,
**chain_kwargs,
)
return cls(
llm_chain=llm_chain,
vectorstore=vectorstore,
use_original_query=use_original_query,
structured_query_translator=structured_query_translator,
**kwargs,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
|
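An end-to-end sketch with a Chroma store (needs `chromadb`, plus an OpenAI key for embeddings and the query-constructor LLM); the movie schema is illustrative:

```python
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma

docs = [
    Document(page_content="A goofy buddy comedy", metadata={"year": 1995, "genre": "comedy"}),
    Document(page_content="A bleak space opera", metadata={"year": 2019, "genre": "scifi"}),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
metadata_field_info = [
    AttributeInfo(name="year", description="Year the movie was released", type="integer"),
    AttributeInfo(name="genre", description="Genre of the movie", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,  # ChromaTranslator is selected automatically
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
    enable_limit=True,        # lets the LLM set a result limit, mapped to k
)
results = retriever.get_relevant_documents("comedies released after 1990")
```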
0fdf9b0588a4-0
|
Source code for langchain.retrievers.self_query.pinecone
from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class PineconeTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_comparators = (
Comparator.EQ,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
)
"""Subset of allowed logical comparators."""
allowed_operators = (Operator.AND, Operator.OR)
"""Subset of allowed logical operators."""
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
return f"${func.value}"
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args}
def visit_comparison(self, comparison: Comparison) -> Dict:
return {
comparison.attribute: {
self._format_func(comparison.comparator): comparison.value
}
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/pinecone.html
|
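The translator can be exercised directly, with no Pinecone index involved; `ChromaTranslator` further down emits the same `$`-prefixed shape. A sketch:

```python
from langchain.chains.query_constructor.ir import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)
from langchain.retrievers.self_query.pinecone import PineconeTranslator

filter_ = Operation(
    operator=Operator.AND,
    arguments=[
        Comparison(comparator=Comparator.EQ, attribute="genre", value="comedy"),
        Comparison(comparator=Comparator.GT, attribute="year", value=1990),
    ],
)
query, kwargs = PineconeTranslator().visit_structured_query(
    StructuredQuery(query="funny movies", filter=filter_, limit=None)
)
assert kwargs == {
    "filter": {"$and": [{"genre": {"$eq": "comedy"}}, {"year": {"$gt": 1990}}]}
}
```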
de3e1b246ae2-0
|
Source code for langchain.retrievers.self_query.weaviate
from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class WeaviateTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR]
"""Subset of allowed logical operators."""
allowed_comparators = [Comparator.EQ]
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
# https://weaviate.io/developers/weaviate/api/graphql/filters
map_dict = {Operator.AND: "And", Operator.OR: "Or", Comparator.EQ: "Equal"}
return map_dict[func]
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {"operator": self._format_func(operation.operator), "operands": args}
def visit_comparison(self, comparison: Comparison) -> Dict:
return {
"path": [comparison.attribute],
"operator": self._format_func(comparison.comparator),
"valueText": comparison.value,
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"where_filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/weaviate.html
|
83b1161ced17-0
|
Source code for langchain.retrievers.self_query.qdrant
from __future__ import annotations
from typing import TYPE_CHECKING, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from qdrant_client.http import models as rest
class QdrantTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_comparators = (
Comparator.EQ,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
)
"""Subset of allowed logical comparators."""
def __init__(self, metadata_key: str):
self.metadata_key = metadata_key
def visit_operation(self, operation: Operation) -> rest.Filter:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
operator = {
Operator.AND: "must",
Operator.OR: "should",
Operator.NOT: "must_not",
}[operation.operator]
return rest.Filter(**{operator: args})
def visit_comparison(self, comparison: Comparison) -> rest.FieldCondition:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
self._validate_func(comparison.comparator)
attribute = self.metadata_key + "." + comparison.attribute
if comparison.comparator == Comparator.EQ:
return rest.FieldCondition(
key=attribute, match=rest.MatchValue(value=comparison.value)
)
kwargs = {comparison.comparator.value: comparison.value}
return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
if structured_query.filter is None:
kwargs = {}
else:
filter = structured_query.filter.accept(self)
if isinstance(filter, rest.FieldCondition):
filter = rest.Filter(must=[filter])
kwargs = {"filter": filter}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/qdrant.html
|
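Since the output is built from `qdrant_client` REST models, this sketch needs `pip install qdrant-client`; `"metadata"` matches the payload key LangChain's Qdrant store uses by default:

```python
from langchain.chains.query_constructor.ir import Comparator, Comparison, StructuredQuery
from langchain.retrievers.self_query.qdrant import QdrantTranslator

translator = QdrantTranslator(metadata_key="metadata")
query, kwargs = translator.visit_structured_query(
    StructuredQuery(
        query="recent sci-fi",
        filter=Comparison(comparator=Comparator.GT, attribute="year", value=2015),
        limit=None,
    )
)
# A bare FieldCondition is wrapped into a Filter:
# Filter(must=[FieldCondition(key="metadata.year", range=Range(gt=2015))])
print(kwargs["filter"])
```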
f368979a9403-0
|
Source code for langchain.retrievers.self_query.chroma
from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class ChromaTranslator(Visitor):
"""Translate internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
"""Subset of allowed logical comparators."""
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
return f"${func.value}"
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {self._format_func(operation.operator): args}
def visit_comparison(self, comparison: Comparison) -> Dict:
return {
comparison.attribute: {
self._format_func(comparison.comparator): comparison.value
}
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/chroma.html
|
c18b240f8f6a-0
|
Source code for langchain.retrievers.self_query.myscale
import datetime
import re
from typing import Any, Callable, Dict, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
def DEFAULT_COMPOSER(op_name: str) -> Callable:
"""
Default composer for logical operators.
Args:
op_name: Name of the operator.
Returns:
Callable that takes a list of arguments and returns a string.
"""
def f(*args: Any) -> str:
args_ = map(str, args)
return f" {op_name} ".join(args_)
return f
def FUNCTION_COMPOSER(op_name: str) -> Callable:
"""
Composer for functions.
Args:
op_name: Name of the function.
Returns:
Callable that takes a list of arguments and returns a string.
"""
def f(*args: Any) -> str:
args_ = map(str, args)
return f"{op_name}({','.join(args_)})"
return f
class MyScaleTranslator(Visitor):
"""Translate internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
Comparator.CONTAIN,
Comparator.LIKE,
]
map_dict = {
Operator.AND: DEFAULT_COMPOSER("AND"),
Operator.OR: DEFAULT_COMPOSER("OR"),
Operator.NOT: DEFAULT_COMPOSER("NOT"),
Comparator.EQ: DEFAULT_COMPOSER("="),
Comparator.GT: DEFAULT_COMPOSER(">"),
Comparator.GTE: DEFAULT_COMPOSER(">="),
Comparator.LT: DEFAULT_COMPOSER("<"),
Comparator.LTE: DEFAULT_COMPOSER("<="),
Comparator.CONTAIN: FUNCTION_COMPOSER("has"),
Comparator.LIKE: DEFAULT_COMPOSER("ILIKE"),
}
def __init__(self, metadata_key: str = "metadata") -> None:
super().__init__()
self.metadata_key = metadata_key
def visit_operation(self, operation: Operation) -> str:
args = [arg.accept(self) for arg in operation.arguments]
func = operation.operator
self._validate_func(func)
return self.map_dict[func](*args)
def visit_comparison(self, comparison: Comparison) -> str:
regex = r"\((.*?)\)"
matched = re.search(r"\(\w+\)", comparison.attribute)
# If arbitrary function is applied to an attribute
if matched:
attr = re.sub(
regex,
f"({self.metadata_key}.{matched.group(0)[1:-1]})",
comparison.attribute,
)
else:
attr = f"{self.metadata_key}.{comparison.attribute}"
value = comparison.value
comp = comparison.comparator
value = f"'{value}'" if type(value) is str else value
# convert timestamp for datetime objects
if type(value) is datetime.date:
attr = f"parseDateTime32BestEffort({attr})"
value = f"parseDateTime32BestEffort('{value.strftime('%Y-%m-%d')}')"
# string pattern match
if comp is Comparator.LIKE:
value = f"'%{value[1:-1]}%'"
return self.map_dict[comp](attr, value)
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"where_str": structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/myscale.html
|
b488df002a3d-0
|
Source code for langchain.retrievers.self_query.deeplake
"""Logic for converting internal query language to a valid Chroma query."""
from typing import Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
COMPARATOR_TO_TQL = {
Comparator.EQ: "==",
Comparator.GT: ">",
Comparator.GTE: ">=",
Comparator.LT: "<",
Comparator.LTE: "<=",
}
OPERATOR_TO_TQL = {
Operator.AND: "and",
Operator.OR: "or",
}
def can_cast_to_float(string: str) -> bool:
"""Check if a string can be cast to a float."""
try:
float(string)
return True
except ValueError:
return False
class DeepLakeTranslator(Visitor):
"""Logic for converting internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
"""Subset of allowed logical comparators."""
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
if isinstance(func, Operator):
value = OPERATOR_TO_TQL[func.value] # type: ignore
elif isinstance(func, Comparator):
value = COMPARATOR_TO_TQL[func.value] # type: ignore
return f"{value}"
def visit_operation(self, operation: Operation) -> str:
args = [arg.accept(self) for arg in operation.arguments]
operator = self._format_func(operation.operator)
return "(" + (" " + operator + " ").join(args) + ")"
def visit_comparison(self, comparison: Comparison) -> str:
comparator = self._format_func(comparison.comparator)
values = comparison.value
if isinstance(values, list):
tql = []
for value in values:
comparison.value = value
tql.append(self.visit_comparison(comparison))
return "(" + (" or ").join(tql) + ")"
if not can_cast_to_float(comparison.value):
values = f"'{values}'"
return f"metadata['{comparison.attribute}'] {comparator} {values}"
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
tqL = f"SELECT * WHERE {structured_query.filter.accept(self)}"
kwargs = {"tql": tqL}
return structured_query.query, kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/deeplake.html
|
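Unlike the dict-based translators, this one emits a TQL string. A sketch exercising it directly:

```python
from langchain.chains.query_constructor.ir import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)
from langchain.retrievers.self_query.deeplake import DeepLakeTranslator

filter_ = Operation(
    operator=Operator.AND,
    arguments=[
        Comparison(comparator=Comparator.EQ, attribute="genre", value="comedy"),
        Comparison(comparator=Comparator.GTE, attribute="year", value=1990),
    ],
)
query, kwargs = DeepLakeTranslator().visit_structured_query(
    StructuredQuery(query="funny movies", filter=filter_, limit=None)
)
print(kwargs["tql"])
# SELECT * WHERE (metadata['genre'] == 'comedy' and metadata['year'] >= 1990)
```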
e4d0dfbdce90-0
|
Source code for langchain.utils.formatting
"""Utilities for formatting strings."""
from string import Formatter
from typing import Any, List, Mapping, Sequence, Union
class StrictFormatter(Formatter):
"""A Formatter subclass that checks for extra keys."""
def check_unused_args(
self,
used_args: Sequence[Union[int, str]],
args: Sequence,
kwargs: Mapping[str, Any],
) -> None:
"""Check to see if extra parameters are passed."""
extra = set(kwargs).difference(used_args)
if extra:
raise KeyError(extra)
def vformat(
self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
) -> str:
"""Check that no positional arguments are provided."""
if len(args) > 0:
raise ValueError(
"No arguments should be provided, "
"everything should be passed as keyword arguments."
)
return super().vformat(format_string, args, kwargs)
def validate_input_variables(
self, format_string: str, input_variables: List[str]
) -> None:
dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
super().format(format_string, **dummy_inputs)
formatter = StrictFormatter()
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/formatting.html
|
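A quick demonstration of the module-level `formatter` instance: unused keyword arguments raise instead of being silently ignored, and positional arguments are rejected outright:

```python
from langchain.utils.formatting import formatter

print(formatter.format("Hello {name}!", name="world"))  # Hello world!

try:
    formatter.format("Hello {name}!", name="world", unused="key")
except KeyError as e:
    print("extra key rejected:", e)

try:
    formatter.format("Hello {0}!", "world")
except ValueError as e:
    print("positional args rejected:", e)
```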
8cf800698030-0
|
Source code for langchain.utils.env
import os
from typing import Any, Dict, Optional
def get_from_dict_or_env(
data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None
) -> str:
"""Get a value from a dictionary or an environment variable."""
if key in data and data[key]:
return data[key]
else:
return get_from_env(key, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from an environment variable or a default."""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
elif default is not None:
return default
else:
raise ValueError(
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/env.html
|
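A short demonstration; `MY_SERVICE_API_KEY` and the dict keys are hypothetical names:

```python
import os
from langchain.utils.env import get_from_dict_or_env

os.environ["MY_SERVICE_API_KEY"] = "secret-from-env"  # hypothetical variable

# An explicit dict value wins over the environment...
print(get_from_dict_or_env({"api_key": "from-dict"}, "api_key", "MY_SERVICE_API_KEY"))
# ...otherwise the environment variable (or the default) is used.
print(get_from_dict_or_env({}, "api_key", "MY_SERVICE_API_KEY"))
```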
cb554fd25d55-0
|
Source code for langchain.utils.math
"""Math utils."""
from typing import List, Optional, Tuple, Union
import numpy as np
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/math.html
|
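A small worked example: with the threshold at 0.5 only three pairs survive, and the top two by score are returned (tie order may vary):

```python
from langchain.utils.math import cosine_similarity, cosine_similarity_top_k

X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.0], [0.6, 0.8], [0.0, 1.0]]

print(cosine_similarity(X, Y))
# [[1.  0.6 0. ]
#  [0.  0.8 1. ]]

idxs, scores = cosine_similarity_top_k(X, Y, top_k=2, score_threshold=0.5)
print(idxs, scores)  # [(0, 0), (1, 2)] [1.0, 1.0]
```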
6adf1b667048-0
|
Source code for langchain.utils.input
"""Handle chained inputs."""
from typing import Dict, List, Optional, TextIO
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",
"pink": "38;5;200",
"green": "32;1",
"red": "31;1",
}
def get_color_mapping(
items: List[str], excluded_colors: Optional[List] = None
) -> Dict[str, str]:
"""Get a mapping from items to a supported color."""
colors = list(_TEXT_COLOR_MAPPING.keys())
if excluded_colors is not None:
colors = [c for c in colors if c not in excluded_colors]
color_mapping = {item: colors[i % len(colors)] for i, item in enumerate(items)}
return color_mapping
def get_colored_text(text: str, color: str) -> str:
"""Get colored text."""
color_str = _TEXT_COLOR_MAPPING[color]
return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
def get_bolded_text(text: str) -> str:
"""Get bolded text."""
return f"\033[1m{text}\033[0m"
def print_text(
text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
"""Print text with highlighting and no end characters."""
text_to_print = get_colored_text(text, color) if color else text
print(text_to_print, end=end, file=file)
if file:
file.flush()  # ensure all printed content is written to the file
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/input.html
|
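A short demonstration of the color helpers; the item names are arbitrary:

```python
from langchain.utils.input import get_color_mapping, print_text

mapping = get_color_mapping(["tool_a", "tool_b"], excluded_colors=["red"])
print(mapping)  # e.g. {'tool_a': 'blue', 'tool_b': 'yellow'}
print_text("running tool_a\n", color=mapping["tool_a"])
```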
6a0aef296a81-0
|
Source code for langchain.utils.strings
from typing import Any, List
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" + stringify_dict(val)
elif isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
else:
return str(val)
def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
def comma_list(items: List[Any]) -> str:
"""Convert a list to a comma-separated string."""
return ", ".join(str(item) for item in items)
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/strings.html
|
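A short demonstration; note how nested lists are joined with newlines:

```python
from langchain.utils.strings import comma_list, stringify_dict

print(stringify_dict({"name": "langchain", "tags": ["llm", "retrieval"]}))
# name: langchain
# tags: llm
# retrieval
print(comma_list([1, 2, 3]))  # 1, 2, 3
```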
4961abdcfc9d-0
|
Source code for langchain.utils.utils
"""Generic utility functions."""
import contextlib
import datetime
import importlib
import warnings
from importlib.metadata import version
from typing import Any, Callable, Dict, Optional, Set, Tuple
from packaging.version import parse
from requests import HTTPError, Response
def xor_args(*arg_groups: Tuple[str, ...]) -> Callable:
"""Validate specified keyword args are mutually exclusive."""
def decorator(func: Callable) -> Callable:
def wrapper(*args: Any, **kwargs: Any) -> Any:
"""Validate exactly one arg in each group is not None."""
counts = [
sum(1 for arg in arg_group if kwargs.get(arg) is not None)
for arg_group in arg_groups
]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
raise ValueError(
"Exactly one argument in each of the following"
" groups must be defined:"
f" {', '.join(invalid_group_names)}"
)
return func(*args, **kwargs)
return wrapper
return decorator
def raise_for_status_with_text(response: Response) -> None:
"""Raise an error with the response text."""
try:
response.raise_for_status()
except HTTPError as e:
raise ValueError(response.text) from e
@contextlib.contextmanager
def mock_now(dt_value): # type: ignore
"""Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
"""
class MockDateTime(datetime.datetime):
"""Mock datetime.datetime.now() with a fixed datetime."""
@classmethod
def now(cls): # type: ignore
# Create a copy of dt_value.
return datetime.datetime(
dt_value.year,
dt_value.month,
dt_value.day,
dt_value.hour,
dt_value.minute,
dt_value.second,
dt_value.microsecond,
dt_value.tzinfo,
)
real_datetime = datetime.datetime
datetime.datetime = MockDateTime
try:
yield datetime.datetime
finally:
datetime.datetime = real_datetime
def guard_import(
module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None
) -> Any:
"""Dynamically imports a module and raises a helpful exception if the module is not
installed."""
try:
module = importlib.import_module(module_name, package)
except ImportError:
raise ImportError(
f"Could not import {module_name} python package. "
f"Please install it with `pip install {pip_name or module_name}`."
)
return module
def check_package_version(
package: str,
lt_version: Optional[str] = None,
lte_version: Optional[str] = None,
gt_version: Optional[str] = None,
gte_version: Optional[str] = None,
) -> None:
"""Check the version of a package."""
imported_version = parse(version(package))
if lt_version is not None and imported_version >= parse(lt_version):
raise ValueError(
f"Expected {package} version to be < {lt_version}. Received "
f"{imported_version}."
)
if lte_version is not None and imported_version > parse(lte_version):
raise ValueError(
f"Expected {package} version to be <= {lte_version}. Received "
f"{imported_version}."
)
if gt_version is not None and imported_version <= parse(gt_version):
raise ValueError(
f"Expected {package} version to be > {gt_version}. Received "
f"{imported_version}."
)
if gte_version is not None and imported_version < parse(gte_version):
raise ValueError(
f"Expected {package} version to be >= {gte_version}. Received "
f"{imported_version}."
)
def get_pydantic_field_names(pydantic_cls: Any) -> Set[str]:
"""Get field names, including aliases, for a pydantic class.
Args:
pydantic_cls: Pydantic class."""
all_required_field_names = set()
for field in pydantic_cls.__fields__.values():
all_required_field_names.add(field.name)
if field.has_alias:
all_required_field_names.add(field.alias)
return all_required_field_names
def build_extra_kwargs(
extra_kwargs: Dict[str, Any],
values: Dict[str, Any],
all_required_field_names: Set[str],
) -> Dict[str, Any]:
""""""
for field_name in list(values):
if field_name in extra_kwargs:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra_kwargs[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
return extra_kwargs
|
https://api.python.langchain.com/en/latest/_modules/langchain/utils/utils.html
|
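Two of the helpers above in action; the decorated function and its argument names are illustrative:

```python
import datetime
from langchain.utils.utils import mock_now, xor_args

@xor_args(("path", "url"))
def load(path=None, url=None):
    return path or url

print(load(path="data.txt"))  # ok: exactly one argument of the group is set
# load(path="a", url="b") would raise ValueError

with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
    print(datetime.datetime.now())  # 2011-02-03 10:11:00
```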
3df1bcd0c29e-0
|
Source code for langchain.memory.motorhead_memory
from typing import Any, Dict, List, Optional
import requests
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import get_buffer_string
MANAGED_URL = "https://api.getmetal.io/v1/motorhead"
# LOCAL_URL = "http://localhost:8080"
class MotorheadMemory(BaseChatMemory):
"""Chat message memory backed by Motorhead service."""
url: str = MANAGED_URL
timeout = 3000
memory_key = "history"
session_id: str
context: Optional[str] = None
# Managed Params
api_key: Optional[str] = None
client_id: Optional[str] = None
def __get_headers(self) -> Dict[str, str]:
is_managed = self.url == MANAGED_URL
headers = {
"Content-Type": "application/json",
}
if is_managed and not (self.api_key and self.client_id):
raise ValueError(
"""
You must provide both an API key and a client ID to use the managed
version of Motorhead. Visit https://getmetal.io for more information.
"""
)
if is_managed and self.api_key and self.client_id:
headers["x-metal-api-key"] = self.api_key
headers["x-metal-client-id"] = self.client_id
return headers
async def init(self) -> None:
res = requests.get(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
headers=self.__get_headers(),
)
res_data = res.json()
res_data = res_data.get("data", res_data) # Handle Managed Version
messages = res_data.get("messages", [])
context = res_data.get("context", "NONE")
for message in reversed(messages):
if message["role"] == "AI":
self.chat_memory.add_ai_message(message["content"])
else:
self.chat_memory.add_user_message(message["content"])
if context and context != "NONE":
self.context = context
def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]:
if self.return_messages:
return {self.memory_key: self.chat_memory.messages}
else:
return {self.memory_key: get_buffer_string(self.chat_memory.messages)}
@property
def memory_variables(self) -> List[str]:
return [self.memory_key]
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
input_str, output_str = self._get_input_output(inputs, outputs)
requests.post(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
json={
"messages": [
{"role": "Human", "content": f"{input_str}"},
{"role": "AI", "content": f"{output_str}"},
]
},
headers=self.__get_headers(),
)
super().save_context(inputs, outputs)
def delete_session(self) -> None:
"""Delete a session"""
requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/motorhead_memory.html
|
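A sketch against the managed service; the session id and credentials are placeholders (a self-hosted server takes `url=...` instead and skips the managed headers):

```python
import asyncio
from langchain.memory.motorhead_memory import MotorheadMemory

memory = MotorheadMemory(
    session_id="user-123",            # placeholder session identifier
    api_key="<metal_api_key>",        # placeholder managed credentials
    client_id="<metal_client_id>",
)
asyncio.run(memory.init())  # pull any existing messages/context from the server
memory.save_context({"input": "hi"}, {"output": "hello!"})
print(memory.load_memory_variables({})["history"])
```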
b9d042637f2e-0
|
Source code for langchain.memory.chat_memory
from abc import ABC
from typing import Any, Dict, Optional, Tuple
from pydantic import Field
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import BaseChatMessageHistory, BaseMemory
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory."""
chat_memory: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> Tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
self.chat_memory.add_ai_message(output_str)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_memory.html
|
38fad0fa410e-0
|
Source code for langchain.memory.buffer_window
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory inside a limited-size window."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
"""Number of conversation exchanges (human/AI message pairs) to keep in the buffer."""
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else []
if not self.return_messages:
buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: buffer}
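A quick sketch of the windowing behavior with k=1 (values illustrative):
memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "hi"}, {"output": "hello"})
memory.save_context({"input": "how are you?"}, {"output": "fine"})
# Only the last k message pairs survive into the returned history:
print(memory.load_memory_variables({})["history"])
# -> "Human: how are you?\nAI: fine"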
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer_window.html
|
56f31371be28-0
|
Source code for langchain.memory.readonly
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
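A sketch of the intended pattern: share one memory across chains while letting only the owner write to it (names illustrative):
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
readonly = ReadOnlySharedMemory(memory=memory)
memory.save_context({"input": "hi"}, {"output": "hello"})
readonly.save_context({"input": "x"}, {"output": "y"})  # no-op by design
print(readonly.load_memory_variables({}))  # reflects the underlying buffer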
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/readonly.html
|
7d7e2b57e127-0
|
Source code for langchain.memory.zep_memory
from __future__ import annotations
from typing import Any, Dict, Optional
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import ZepChatMessageHistory
[docs]class ZepMemory(ConversationBufferMemory):
"""Persist your chain history to the Zep Memory Server.
The number of messages returned by Zep and when the Zep server summarizes chat
histories is configurable. See the Zep documentation for more details.
Documentation: https://docs.getzep.com
Example:
.. code-block:: python
memory = ZepMemory(
session_id=session_id, # Identifies your user or a user's session
url=ZEP_API_URL, # Your Zep server's URL
api_key=<your_api_key>, # Optional
memory_key="history", # Ensure this matches the key used in
# chain's prompt template
return_messages=True, # Does your prompt template expect a string
# or a list of Messages?
)
chain = LLMChain(memory=memory,...) # Configure your chain to use the ZepMemory
instance
Note:
To persist metadata alongside your chat history, you will need to create a
custom Chain class that overrides the `prep_outputs` method to include the metadata
in the call to `self.memory.save_context`.
About Zep
=========
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
chat_memory: ZepChatMessageHistory
def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
output_key: Optional[str] = None,
input_key: Optional[str] = None,
return_messages: bool = False,
human_prefix: str = "Human",
ai_prefix: str = "AI",
memory_key: str = "history",
):
"""Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
url (str, optional): Your Zep server's URL. Defaults to
"http://localhost:8000".
api_key (Optional[str], optional): Your Zep API key. Defaults to None.
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False,
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
"""
chat_message_history = ZepChatMessageHistory(
session_id=session_id,
url=url,
api_key=api_key,
)
super().__init__(
chat_memory=chat_message_history,
output_key=output_key,
input_key=input_key,
return_messages=return_messages,
human_prefix=human_prefix,
ai_prefix=ai_prefix,
memory_key=memory_key,
)
[docs] def save_context(
self,
inputs: Dict[str, Any],
outputs: Dict[str, str],
metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Save context from this conversation to buffer.
Args:
inputs (Dict[str, Any]): The inputs to the chain.
outputs (Dict[str, str]): The outputs from the chain.
metadata (Optional[Dict[str, Any]], optional): Any metadata to save with
the context. Defaults to None
Returns:
None
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
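A hedged sketch of the metadata-aware save_context above (requires a running Zep server; the ids and URL are placeholders):
memory = ZepMemory(session_id="user-123", url="http://localhost:8000")
memory.save_context(
    {"input": "What's the weather?"},
    {"output": "Sunny."},
    metadata={"channel": "web"},  # attached to both the human and AI message
)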
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/zep_memory.html
|
ba7e048d3cbd-0
|
Source code for langchain.memory.entity
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.utilities.redis import get_client
logger = logging.getLogger(__name__)
[docs]class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
[docs] @abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
[docs] @abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
[docs] @abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
[docs] @abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
[docs] @abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
[docs]class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
[docs] def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
[docs] def delete(self, key: str) -> None:
del self.store[key]
[docs] def exists(self, key: str) -> bool:
return key in self.store
[docs] def clear(self) -> None:
return self.store.clear()
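The store contract in action, a quick sketch using the in-memory implementation above:
store = InMemoryEntityStore()
store.set("Alice", "Alice is a software engineer.")
assert store.exists("Alice")
print(store.get("Alice"))              # -> "Alice is a software engineer."
store.delete("Alice")
print(store.get("Alice", "unknown"))   # -> "unknown"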
[docs]class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is reset to 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
[docs] def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
[docs] def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
[docs] def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
[docs] def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
[docs]class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
[docs] def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
[docs] def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
[docs] def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
[docs] def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
[docs]class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
Uses a swappable entity store to persist entities across conversations.
Defaults to an in-memory entity store, which can be swapped out for a Redis,
SQLite, or other backend.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity names may be found when calling this method, before the entity
summaries are generated, so the entity cache values may be empty if no
entity descriptions have been generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
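A hedged usage sketch for ConversationEntityMemory (OpenAI is illustrative; any BaseLanguageModel works, and an API key is assumed to be configured). Note that load_memory_variables must run first, since it populates entity_cache for the subsequent save_context:
from langchain.llms import OpenAI

memory = ConversationEntityMemory(llm=OpenAI(temperature=0))
_ = memory.load_memory_variables({"input": "Deven is working on a hackathon project"})
memory.save_context(
    {"input": "Deven is working on a hackathon project"},
    {"output": "Sounds fun! What is it about?"},
)
# The store now holds an LLM-generated summary for the extracted entity:
print(memory.entity_store.get("Deven", ""))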
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
|
a7430fa3722b-0
|
Source code for langchain.memory.summary_buffer
from typing import Any, Dict, List
from pydantic import root_validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.schema.messages import BaseMessage, get_buffer_string
[docs]class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
"""Buffer with summarizer for storing conversation memory."""
max_token_limit: int = 2000
moving_summary_buffer: str = ""
memory_key: str = "history"
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != "":
first_messages: List[BaseMessage] = [
self.summary_message_cls(content=self.moving_summary_buffer)
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
)
return {self.memory_key: final_buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.prune()
[docs] def prune(self) -> None:
"""Prune buffer if it exceeds max token limit"""
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory, self.moving_summary_buffer
)
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
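A hedged usage sketch (OpenAI is illustrative; a deliberately small max_token_limit forces early pruning):
from langchain.llms import OpenAI

memory = ConversationSummaryBufferMemory(
    llm=OpenAI(temperature=0), max_token_limit=40
)
memory.save_context({"input": "hi"}, {"output": "hello"})
memory.save_context({"input": "tell me a long story"}, {"output": "Once upon a time..."})
# Turns popped by prune() are folded into the rolling summary:
print(memory.moving_summary_buffer)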
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary_buffer.html
|
5e07f50c2238-0
|
Source code for langchain.memory.simple
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other information that shouldn't
ever change between prompts.
"""
memories: Dict[str, Any] = dict()
@property
def memory_variables(self) -> List[str]:
return list(self.memories.keys())
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
return self.memories
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
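A minimal sketch; the stored values are returned verbatim on every load:
memory = SimpleMemory(memories={"company": "Acme Corp"})  # hypothetical constant
print(memory.load_memory_variables({}))  # -> {'company': 'Acme Corp'}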
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/simple.html
|
9fe6dff01326-0
|
Source code for langchain.memory.summary
from __future__ import annotations
from typing import Any, Dict, List, Type
from pydantic import BaseModel, root_validator
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.schema import (
BaseChatMessageHistory,
BasePromptTemplate,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, SystemMessage, get_buffer_string
[docs]class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: Type[BaseMessage] = SystemMessage
[docs] def predict_new_summary(
self, messages: List[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
[docs]class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Conversation summarizer to chat memory."""
buffer: str = ""
memory_key: str = "history" #: :meta private:
[docs] @classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step], obj.buffer
)
return obj
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:], self.buffer
)
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary.html
|
69a714c7656b-0
|
Source code for langchain.memory.vectorstore
"""Class for a VectorStore-backed memory object."""
from typing import Any, Dict, List, Optional, Sequence, Union
from pydantic import Field
from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import Document
from langchain.vectorstores.base import VectorStoreRetriever
[docs]class VectorStoreRetrieverMemory(BaseMemory):
"""VectorStoreRetriever-backed memory."""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> List[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
[docs] def load_memory_variables(
self, inputs: Dict[str, Any]
) -> Dict[str, Union[List[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.get_relevant_documents(query)
result: Union[List[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def _form_documents(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> List[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
[docs] def clear(self) -> None:
"""Nothing to clear."""
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/vectorstore.html
|
d14a36e2290b-0
|
Source code for langchain.memory.buffer
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema.messages import get_buffer_string
[docs]class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
if self.return_messages:
return self.chat_memory.messages
else:
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
[docs]class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
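A short sketch of the return_messages toggle on ConversationBufferMemory:
memory = ConversationBufferMemory(return_messages=True)
memory.save_context({"input": "hi"}, {"output": "hello"})
print(memory.buffer)   # list of HumanMessage/AIMessage objects
memory = ConversationBufferMemory()  # return_messages=False (default)
memory.save_context({"input": "hi"}, {"output": "hello"})
print(memory.buffer)   # "Human: hi\nAI: hello"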
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer.html
|
702da19f2e6e-0
|
Source code for langchain.memory.kg
from typing import Any, Dict, List, Type, Union
from pydantic import Field
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, SystemMessage, get_buffer_string
[docs]class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
"""Number of previous utterances to include in the context."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
[docs] def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
[docs] def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
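A hedged usage sketch (OpenAI is illustrative; any BaseLanguageModel works, and an API key is assumed to be configured):
from langchain.llms import OpenAI

memory = ConversationKGMemory(llm=OpenAI(temperature=0))
memory.save_context({"input": "Sam is my friend"}, {"output": "Who is Sam?"})
# Triples extracted from the turn are now queryable through the graph:
print(memory.get_current_entities("What do you know about Sam?"))
print(memory.load_memory_variables({"input": "What do you know about Sam?"}))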
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
|
f8ff61dff44a-0
|
Source code for langchain.memory.combined
import warnings
from typing import Any, Dict, List, Set
from pydantic import validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMemory
[docs]class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: List[BaseMemory]
"""For tracking all the memories that should be accessed."""
@validator("memories")
def check_repeated_memory_variable(
cls, value: List[BaseMemory]
) -> List[BaseMemory]:
all_variables: Set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
raise ValueError(
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
all_variables |= set(val.memory_variables)
return value
@validator("memories")
def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory):
if val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}"
)
return value
@property
def memory_variables(self) -> List[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: Dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
raise ValueError(
f"The variable {key} is repeated in the CombinedMemory."
)
memory_data[key] = value
return memory_data
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
[docs] def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
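A sketch combining two buffer variants; distinct memory_key values satisfy the overlap validator above, and input_key is set explicitly to avoid the warning:
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory

combined = CombinedMemory(
    memories=[
        ConversationBufferMemory(memory_key="chat_history", input_key="input"),
        ConversationBufferWindowMemory(memory_key="recent_lines", input_key="input", k=1),
    ]
)
combined.save_context({"input": "hi"}, {"output": "hello"})
print(combined.load_memory_variables({"input": ""}))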
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/combined.html
|
82e45ba18243-0
|
Source code for langchain.memory.token_buffer
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, get_buffer_string
[docs]class ConversationTokenBufferMemory(BaseChatMemory):
"""Conversation chat memory with token limit."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 2000
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer: Any = self.buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
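A short sketch contrasting this class with ConversationSummaryBufferMemory above: here overflowing messages are simply dropped, not summarized (OpenAI is illustrative and assumes a configured API key; token counting itself runs locally):
from langchain.llms import OpenAI

memory = ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60)
memory.save_context({"input": "hi"}, {"output": "hello"})
memory.save_context({"input": "tell me more"}, {"output": "sure, here goes..."})
# Oldest messages are popped until the buffer fits within max_token_limit:
print(memory.load_memory_variables({})["history"])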
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/token_buffer.html
|
56fe617953ca-0
|
Source code for langchain.memory.utils
from typing import Any, Dict, List
from langchain.schema.messages import get_buffer_string # noqa: 401
[docs]def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
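For example, with memory variables ["history"], the single remaining non-"stop" input key is returned (a minimal sketch):
inputs = {"input": "hi", "history": "...", "stop": ["\n"]}
print(get_prompt_input_key(inputs, ["history"]))  # -> "input"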
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/utils.html
|
4f870f69dd4d-0
|
Source code for langchain.memory.chat_message_histories.zep
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
if TYPE_CHECKING:
from zep_python import Memory, MemorySearchResult, Message, NotFoundError
logger = logging.getLogger(__name__)
[docs]class ZepChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that uses Zep as a backend.
Recommended usage::
# Set up Zep Chat History
zep_chat_history = ZepChatMessageHistory(
session_id=session_id,
url=ZEP_API_URL,
api_key=<your_api_key>,
)
# Use a standard ConversationBufferMemory to encapsulate the Zep chat history
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=zep_chat_history
)
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
This class is a thin wrapper around the zep-python package. Additional
Zep functionality is exposed via the `zep_summary` and `zep_messages`
properties.
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
[docs] def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
) -> None:
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url, api_key=api_key)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve messages from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
messages: List[BaseMessage] = []
# Extract summary, if present, and messages
if zep_memory.summary:
if len(zep_memory.summary.content) > 0:
messages.append(SystemMessage(content=zep_memory.summary.content))
if zep_memory.messages:
msg: Message
for msg in zep_memory.messages:
metadata: Dict = {
"uuid": msg.uuid,
"created_at": msg.created_at,
"token_count": msg.token_count,
"metadata": msg.metadata,
}
if msg.role == "ai":
messages.append(
AIMessage(content=msg.content, additional_kwargs=metadata)
)
else:
messages.append(
HumanMessage(content=msg.content, additional_kwargs=metadata)
)
return messages
@property
def zep_messages(self) -> List[Message]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
return zep_memory.messages
@property
def zep_summary(self) -> Optional[str]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.summary:
return None
return zep_memory.summary.content
def _get_memory(self) -> Optional[Memory]:
"""Retrieve memory from Zep"""
from zep_python import NotFoundError
try:
zep_memory: Memory = self.zep_client.get_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Returning None"
)
return None
return zep_memory
[docs] def add_user_message(
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(HumanMessage(content=message), metadata=metadata)
[docs] def add_ai_message(
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(AIMessage(content=message), metadata=metadata)
[docs] def add_message(
self, message: BaseMessage, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Append the message to the Zep memory history"""
from zep_python import Memory, Message
zep_message = Message(
content=message.content, role=message.type, metadata=metadata
)
zep_memory = Memory(messages=[zep_message])
self.zep_client.add_memory(self.session_id, zep_memory)
[docs] def search(
self, query: str, metadata: Optional[Dict] = None, limit: Optional[int] = None
) -> List[MemorySearchResult]:
"""Search Zep memory for messages matching the query"""
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
return self.zep_client.search_memory(self.session_id, payload, limit=limit)
[docs] def clear(self) -> None:
"""Clear session memory from Zep. Note that Zep is long-term storage for memory
and this is not advised unless you have specific data retention requirements.
"""
# zep_python is an optional dependency, so import it at call time
# (mirroring _get_memory above).
from zep_python import NotFoundError
try:
self.zep_client.delete_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
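A hedged sketch of the search API above (requires a running Zep server; the result fields follow zep-python's MemorySearchResult and may vary by version):
zep_history = ZepChatMessageHistory(session_id="user-123", url="http://localhost:8000")
results = zep_history.search("favorite food", limit=3)
for result in results:
    print(result.message, result.dist)  # message payload and distance score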
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/zep.html
|
fcd79fd9eed8-0
|
Source code for langchain.memory.chat_message_histories.in_memory
from typing import List
from pydantic import BaseModel
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage
[docs]class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
"""In memory implementation of chat message history.
Stores messages in an in memory list.
"""
messages: List[BaseMessage] = []
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
[docs] def clear(self) -> None:
self.messages = []
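A quick sketch of the in-memory history (add_user_message and add_ai_message are convenience helpers inherited from BaseChatMessageHistory):
history = ChatMessageHistory()
history.add_user_message("hi")
history.add_ai_message("hello")
print(history.messages)  # [HumanMessage(...), AIMessage(...)]
history.clear()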
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/in_memory.html
|
3a15187817a8-0
|
Source code for langchain.memory.chat_message_histories.sql
import json
import logging
from typing import List
from sqlalchemy import Column, Integer, Text, create_engine
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
logger = logging.getLogger(__name__)
[docs]def create_message_model(table_name, DynamicBase): # type: ignore
"""
Create a message model for a given table name.
Args:
table_name: The name of the table to use.
DynamicBase: The base class to use for the model.
Returns:
The model class.
"""
# Model declared inside a function to allow a dynamic table name
class Message(DynamicBase):
__tablename__ = table_name
id = Column(Integer, primary_key=True)
session_id = Column(Text)
message = Column(Text)
return Message
[docs]class SQLChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in an SQL database."""
[docs] def __init__(
self,
session_id: str,
connection_string: str,
table_name: str = "message_store",
):
self.table_name = table_name
self.connection_string = connection_string
self.engine = create_engine(connection_string, echo=False)
self._create_table_if_not_exists()
self.session_id = session_id
self.Session = sessionmaker(self.engine)
def _create_table_if_not_exists(self) -> None:
DynamicBase = declarative_base()
self.Message = create_message_model(self.table_name, DynamicBase)
# Create all does the check for us in case the table exists.
DynamicBase.metadata.create_all(self.engine)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all messages from db"""
with self.Session() as session:
result = session.query(self.Message).where(
self.Message.session_id == self.session_id
)
items = [json.loads(record.message) for record in result]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in db"""
with self.Session() as session:
jsonstr = json.dumps(_message_to_dict(message))
session.add(self.Message(session_id=self.session_id, message=jsonstr))
session.commit()
[docs] def clear(self) -> None:
"""Clear session memory from db"""
with self.Session() as session:
session.query(self.Message).filter(
self.Message.session_id == self.session_id
).delete()
session.commit()
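A minimal sketch backed by a local SQLite file (any SQLAlchemy connection string works; the file name is illustrative):
history = SQLChatMessageHistory(
    session_id="user-123",
    connection_string="sqlite:///chat_history.db",
)
history.add_user_message("hi")
print(history.messages)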
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/sql.html
|
c1ccce97a786-0
|
Source code for langchain.memory.chat_message_histories.firestore
"""Firestore Chat Message History."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, messages_from_dict, messages_to_dict
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from google.cloud.firestore import Client, DocumentReference
def _get_firestore_client() -> Client:
try:
import firebase_admin
from firebase_admin import firestore
except ImportError:
raise ImportError(
"Could not import firebase-admin python package. "
"Please install it with `pip install firebase-admin`."
)
# For multiple instances, only initialize the app once.
try:
firebase_admin.get_app()
except ValueError as e:
logger.debug("Initializing Firebase app: %s", e)
firebase_admin.initialize_app()
return firestore.client()
[docs]class FirestoreChatMessageHistory(BaseChatMessageHistory):
"""Chat message history backed by Google Firestore."""
[docs] def __init__(
self,
collection_name: str,
session_id: str,
user_id: str,
firestore_client: Optional[Client] = None,
):
"""
Initialize a new instance of the FirestoreChatMessageHistory class.
:param collection_name: The name of the collection to use.
:param session_id: The session ID for the chat.
:param user_id: The user ID for the chat.
:param firestore_client: An existing Firestore client to reuse; if omitted,
    one is created via firebase-admin.
"""
self.collection_name = collection_name
self.session_id = session_id
self.user_id = user_id
self._document: Optional[DocumentReference] = None
self.messages: List[BaseMessage] = []
self.firestore_client = firestore_client or _get_firestore_client()
self.prepare_firestore()
[docs] def prepare_firestore(self) -> None:
"""Prepare the Firestore client.
Use this function to make sure your database is ready.
"""
self._document = self.firestore_client.collection(
self.collection_name
).document(self.session_id)
self.load_messages()
[docs] def load_messages(self) -> None:
"""Retrieve the messages from Firestore"""
if not self._document:
raise ValueError("Document not initialized")
doc = self._document.get()
if doc.exists:
data = doc.to_dict()
if "messages" in data and len(data["messages"]) > 0:
self.messages = messages_from_dict(data["messages"])
[docs] def add_message(self, message: BaseMessage) -> None:
self.messages.append(message)
self.upsert_messages()
[docs] def upsert_messages(self, new_message: Optional[BaseMessage] = None) -> None:
"""Update the Firestore document."""
if not self._document:
raise ValueError("Document not initialized")
self._document.set(
{
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
[docs] def clear(self) -> None:
"""Clear session memory from this memory and Firestore."""
self.messages = []
if self._document:
self._document.delete()
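A hedged sketch (assumes Firebase credentials are available, e.g. via GOOGLE_APPLICATION_CREDENTIALS; names are placeholders):
from langchain.schema.messages import HumanMessage

history = FirestoreChatMessageHistory(
    collection_name="chat_history",
    session_id="session-1",
    user_id="user-123",
)
history.add_message(HumanMessage(content="hi"))  # persists via upsert_messages
print(history.messages)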
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/firestore.html
|
b71ecb086f8d-0
|
Source code for langchain.memory.chat_message_histories.dynamodb
import logging
from typing import List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import (
BaseMessage,
_message_to_dict,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
[docs]class DynamoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table with name `table_name`
and a partition key of `SessionId` already exists.
Args:
table_name: name of the DynamoDB table
session_id: arbitrary key that is used to store the messages
of a single chat session.
endpoint_url: URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don't have to
worry about setting the endpoint_url.
"""
[docs] def __init__(
self, table_name: str, session_id: str, endpoint_url: Optional[str] = None
):
import boto3
if endpoint_url:
client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
else:
client = boto3.resource("dynamodb")
self.table = client.Table(table_name)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from DynamoDB"""
from botocore.exceptions import ClientError
response = None
try:
response = self.table.get_item(Key={"SessionId": self.session_id})
except ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
logger.warning("No record found with session id: %s", self.session_id)
else:
logger.error(error)
if response and "Item" in response:
items = response["Item"]["History"]
else:
items = []
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in DynamoDB"""
from botocore.exceptions import ClientError
messages = messages_to_dict(self.messages)
_message = _message_to_dict(message)
messages.append(_message)
try:
self.table.put_item(
Item={"SessionId": self.session_id, "History": messages}
)
except ClientError as err:
logger.error(err)
[docs] def clear(self) -> None:
"""Clear session memory from DynamoDB"""
from botocore.exceptions import ClientError
try:
self.table.delete_item(Key={"SessionId": self.session_id})
except ClientError as err:
logger.error(err)
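A hedged sketch against a local DynamoDB such as Localstack (the endpoint URL is an assumption, and a SessionTable table with partition key SessionId must already exist):
history = DynamoDBChatMessageHistory(
    table_name="SessionTable",
    session_id="user-123",
    endpoint_url="http://localhost:4566",  # assumed Localstack endpoint
)
history.add_user_message("hi")
print(history.messages)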
|
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/dynamodb.html
|
3801fe2aae1f-0
|
Source code for langchain.memory.chat_message_histories.cosmos_db
"""Azure CosmosDB Memory History."""
from __future__ import annotations
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, messages_from_dict, messages_to_dict
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.cosmos import ContainerProxy
[docs]class CosmosDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history backed by Azure CosmosDB."""
[docs] def __init__(
self,
cosmos_endpoint: str,
cosmos_database: str,
cosmos_container: str,
session_id: str,
user_id: str,
credential: Any = None,
connection_string: Optional[str] = None,
ttl: Optional[int] = None,
cosmos_client_kwargs: Optional[dict] = None,
):
"""
Initializes a new instance of the CosmosDBChatMessageHistory class.
Make sure to call prepare_cosmos or use the context manager to make
sure your database is ready.
Either a credential or a connection string must be provided.
:param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
:param cosmos_database: The name of the database to use.
:param cosmos_container: The name of the container to use.
:param session_id: The session ID to use, can be overwritten while loading.
:param user_id: The user ID to use, can be overwritten while loading.
:param credential: The credential to use to authenticate to Azure Cosmos DB.
:param connection_string: The connection string to use to authenticate.
:param ttl: The time to live (in seconds) to use for documents in the container.
:param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
"""
self.cosmos_endpoint = cosmos_endpoint
self.cosmos_database = cosmos_database
self.cosmos_container = cosmos_container
self.credential = credential
self.conn_string = connection_string
self.session_id = session_id
self.user_id = user_id
self.ttl = ttl
self.messages: List[BaseMessage] = []
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosClient,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
if self.credential:
self._client = CosmosClient(
url=self.cosmos_endpoint,
credential=self.credential,
**cosmos_client_kwargs or {},
)
elif self.conn_string:
self._client = CosmosClient.from_connection_string(
conn_str=self.conn_string,
**cosmos_client_kwargs or {},
)
else:
raise ValueError("Either a connection string or a credential must be set.")
self._container: Optional[ContainerProxy] = None
[docs] def prepare_cosmos(self) -> None:
"""Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
"""
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
PartitionKey,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
database = self._client.create_database_if_not_exists(self.cosmos_database)
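# The container is partitioned by user_id; read_item and delete_item
# below use user_id as the partition key.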
self._container = database.create_container_if_not_exists(
self.cosmos_container,
partition_key=PartitionKey("/user_id"),
default_ttl=self.ttl,
)
self.load_messages()
def __enter__(self) -> "CosmosDBChatMessageHistory":
"""Context manager entry point."""
self._client.__enter__()
self.prepare_cosmos()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
[docs] def load_messages(self) -> None:
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError("Container not initialized")
try:
from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosHttpResponseError,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
try:
item = self._container.read_item(
item=self.session_id, partition_key=self.user_id
)
except CosmosHttpResponseError:
logger.info("no session found")
return
if "messages" in item and len(item["messages"]) > 0:
self.messages = messages_from_dict(item["messages"])
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
[docs] def upsert_messages(self) -> None:
"""Update the cosmosdb item."""
if not self._container:
raise ValueError("Container not initialized")
self._container.upsert_item(
body={
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
[docs] def clear(self) -> None:
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(
item=self.session_id, partition_key=self.user_id
)
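A minimal usage sketch (the endpoint, database, container, and ids below are illustrative; the import path follows the module location shown above): the class is designed to be used through prepare_cosmos or as a context manager, which prepares the container on entry and upserts the session on exit.
from langchain.memory.chat_message_histories import CosmosDBChatMessageHistory
from langchain.schema.messages import AIMessage, HumanMessage

history = CosmosDBChatMessageHistory(
    cosmos_endpoint="https://<account>.documents.azure.com:443/",  # illustrative
    cosmos_database="chat_db",
    cosmos_container="messages",
    session_id="session-1",
    user_id="user-1",
    connection_string="<cosmos-connection-string>",  # or pass credential= instead
)

# Entering the context creates the database/container if needed and loads
# any previously stored messages; exiting upserts the session back to Cosmos.
with history:
    history.add_message(HumanMessage(content="hi"))
    history.add_message(AIMessage(content="hello!"))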
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
Source code for langchain.memory.chat_message_histories.cassandra
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import typing
from typing import List
if typing.TYPE_CHECKING:
from cassandra.cluster import Session
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
[docs]class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in Cassandra.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: a Cassandra `Session` object (an open DB connection)
keyspace: name of the keyspace to use.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
"""
[docs] def __init__(
self,
session_id: str,
session: Session,
keyspace: str,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: int | None = DEFAULT_TTL_SECONDS,
) -> None:
try:
from cassio.history import StoredBlobHistory
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
self.blob_history = StoredBlobHistory(session, keyspace, table_name)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
message_blobs = self.blob_history.retrieve(
self.session_id,
)
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table"""
self.blob_history.store(
self.session_id, json.dumps(_message_to_dict(message)), self.ttl_seconds
)
[docs] def clear(self) -> None:
"""Clear session memory from DB"""
self.blob_history.clear_session_id(self.session_id)
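A minimal usage sketch (the contact point and keyspace are illustrative; assumes the cassandra-driver and cassio packages and an existing keyspace; the import path follows the module location shown above):
from cassandra.cluster import Cluster

from langchain.memory.chat_message_histories import CassandraChatMessageHistory
from langchain.schema.messages import HumanMessage

session = Cluster(["127.0.0.1"]).connect()  # illustrative contact point
history = CassandraChatMessageHistory(
    session_id="session-1",
    session=session,
    keyspace="chat_keyspace",  # must already exist
    ttl_seconds=60 * 60 * 24,  # entries expire after one day; None disables expiry
)
history.add_message(HumanMessage(content="hello"))
print(history.messages)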
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html
Source code for langchain.memory.chat_message_histories.momento
from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
from langchain.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
[docs]class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
[docs] def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, i.e. the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClient
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
[docs] @classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
[docs] def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(_message_to_dict(message))
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
[docs] def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html
Source code for langchain.memory.chat_message_histories.redis
import json
import logging
from typing import List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
from langchain.utilities.redis import get_client
logger = logging.getLogger(__name__)
[docs]class RedisChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Redis database."""
[docs] def __init__(
self,
session_id: str,
url: str = "redis://localhost:6379/0",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
self.redis_client = get_client(redis_url=url)
except redis.exceptions.ConnectionError as error:
logger.error(error)
raise
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
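# add_message uses LPUSH, so LRANGE returns newest-first; reverse to
# restore chronological order.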
items = [json.loads(m.decode("utf-8")) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Redis"""
self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
[docs] def clear(self) -> None:
"""Clear session memory from Redis"""
self.redis_client.delete(self.key)
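A minimal usage sketch (the URL and session id are illustrative; assumes a reachable Redis server and the redis package; the import path follows the module location shown above):
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain.schema.messages import AIMessage, HumanMessage

history = RedisChatMessageHistory(
    session_id="session-1",
    url="redis://localhost:6379/0",
    ttl=3600,  # the key expires an hour after the last write; None keeps it forever
)
history.add_message(HumanMessage(content="hi"))
history.add_message(AIMessage(content="hello!"))
print(history.messages)  # oldest message first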
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html