index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py | """Use to load blobs from the local file system."""
import contextlib
import mimetypes
import tempfile
from io import BufferedReader, BytesIO
from pathlib import Path
from typing import (
TYPE_CHECKING,
Callable,
Generator,
Iterable,
Iterator,
Optional,
Sequence,
TypeVar,
Union,
)
from urllib.parse import urlparse
if TYPE_CHECKING:
from cloudpathlib import AnyPath
from langchain_community.document_loaders.blob_loaders.schema import (
Blob,
BlobLoader,
)
T = TypeVar("T")
class _CloudBlob(Blob):
def as_string(self) -> str:
"""Read data as a string."""
from cloudpathlib import AnyPath
if self.data is None and self.path:
return AnyPath(self.path).read_text(encoding=self.encoding) # type: ignore
elif isinstance(self.data, bytes):
return self.data.decode(self.encoding)
elif isinstance(self.data, str):
return self.data
else:
raise ValueError(f"Unable to get string for blob {self}")
def as_bytes(self) -> bytes:
"""Read data as bytes."""
from cloudpathlib import AnyPath
if isinstance(self.data, bytes):
return self.data
elif isinstance(self.data, str):
return self.data.encode(self.encoding)
elif self.data is None and self.path:
return AnyPath(self.path).read_bytes() # type: ignore
else:
raise ValueError(f"Unable to get bytes for blob {self}")
@contextlib.contextmanager
def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
"""Read data as a byte stream."""
from cloudpathlib import AnyPath
if isinstance(self.data, bytes):
yield BytesIO(self.data)
elif self.data is None and self.path:
return AnyPath(self.path).read_bytes() # type: ignore
else:
raise NotImplementedError(f"Unable to convert blob {self}")
def _url_to_filename(url: str) -> str:
"""
Convert file:, s3:, az: or gs: url to localfile.
If the file is not here, download it in a temporary file.
"""
from cloudpathlib import AnyPath
url_parsed = urlparse(url)
suffix = Path(url_parsed.path).suffix
if url_parsed.scheme in ["s3", "az", "gs"]:
with AnyPath(url).open("rb") as f: # type: ignore
temp_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
while True:
buf = f.read()
if not buf:
break
temp_file.write(buf)
temp_file.close()
file_path = temp_file.name
elif url_parsed.scheme in ["file", ""]:
file_path = url_parsed.path
else:
raise ValueError(f"Scheme {url_parsed.scheme} not supported")
return file_path
def _make_iterator(
length_func: Callable[[], int], show_progress: bool = False
) -> Callable[[Iterable[T]], Iterator[T]]:
"""Create a function that optionally wraps an iterable in tqdm."""
if show_progress:
try:
from tqdm.auto import tqdm
except ImportError:
raise ImportError(
"You must install tqdm to use show_progress=True."
"You can install tqdm with `pip install tqdm`."
)
# Make sure to provide `total` here so that tqdm can show
# a progress bar that takes into account the total number of files.
def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]:
"""Wrap an iterable in a tqdm progress bar."""
return tqdm(iterable, total=length_func())
iterator = _with_tqdm
else:
iterator = iter # type: ignore
return iterator
# PUBLIC API
class CloudBlobLoader(BlobLoader):
"""Load blobs from cloud URL or file:.
Example:
.. code-block:: python
loader = CloudBlobLoader("s3://mybucket/id")
for blob in loader.yield_blobs():
print(blob)
""" # noqa: E501
def __init__(
self,
url: Union[str, "AnyPath"],
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
) -> None:
"""Initialize with a url and how to glob over it.
Use [CloudPathLib](https://cloudpathlib.drivendata.org/).
Args:
url: Cloud URL to load from.
Supports s3://, az://, gs://, file:// schemes.
If no scheme is provided, it is assumed to be a local file.
If a path to a file is provided, glob/exclude/suffixes are ignored.
glob: Glob pattern relative to the specified path
by default set to pick up all non-hidden files
exclude: patterns to exclude from results, use glob syntax
suffixes: Provide to keep only files with these suffixes
Useful when wanting to keep files with different suffixes
Suffixes must include the dot, e.g. ".txt"
show_progress: If true, will show a progress bar as the files are loaded.
This forces an iteration through all matching files
to count them prior to loading them.
Examples:
.. code-block:: python
from langchain_community.document_loaders.blob_loaders import CloudBlobLoader
# Load a single file.
loader = CloudBlobLoader("s3://mybucket/id") # az://
# Recursively load all text files in a directory.
loader = CloudBlobLoader("az://mybucket/id", glob="**/*.txt")
# Recursively load all non-hidden files in a directory.
loader = CloudBlobLoader("gs://mybucket/id", glob="**/[!.]*")
# Load all files in a directory without recursion.
loader = CloudBlobLoader("s3://mybucket/id", glob="*")
# Recursively load all files in a directory, except for py or pyc files.
loader = CloudBlobLoader(
"s3://mybucket/id",
glob="**/*.txt",
exclude=["**/*.py", "**/*.pyc"]
)
""" # noqa: E501
from cloudpathlib import AnyPath
url_parsed = urlparse(str(url))
if url_parsed.scheme == "file":
url = url_parsed.path
if isinstance(url, str):
self.path = AnyPath(url)
else:
self.path = url
self.glob = glob
self.suffixes = set(suffixes or [])
self.show_progress = show_progress
self.exclude = exclude
def yield_blobs(
self,
) -> Iterable[Blob]:
"""Yield blobs that match the requested pattern."""
iterator = _make_iterator(
length_func=self.count_matching_files, show_progress=self.show_progress
)
for path in iterator(self._yield_paths()):
# yield Blob.from_path(path)
yield self.from_path(path)
def _yield_paths(self) -> Iterable["AnyPath"]:
"""Yield paths that match the requested pattern."""
if self.path.is_file(): # type: ignore
yield self.path
return
paths = self.path.glob(self.glob) # type: ignore[attr-defined]
for path in paths:
if self.exclude:
if any(path.match(glob) for glob in self.exclude):
continue
if path.is_file():
if self.suffixes and path.suffix not in self.suffixes:
continue # FIXME
yield path
def count_matching_files(self) -> int:
"""Count files that match the pattern without loading them."""
# Carry out a full iteration to count the files without
# materializing anything expensive in memory.
num = 0
for _ in self._yield_paths():
num += 1
return num
@classmethod
def from_path(
cls,
path: "AnyPath",
*,
encoding: str = "utf-8",
mime_type: Optional[str] = None,
guess_type: bool = True,
metadata: Optional[dict] = None,
) -> Blob:
"""Load the blob from a path like object.
Args:
path: path like object to file to be read
Supports s3://, az://, gs://, file:// schemes.
If no scheme is provided, it is assumed to be a local file.
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
guess_type: If True, the mimetype will be guessed from the file extension,
if a mime-type was not provided
metadata: Metadata to associate with the blob
Returns:
Blob instance
"""
if mime_type is None and guess_type:
_mimetype = mimetypes.guess_type(path)[0] if guess_type else None # type: ignore
else:
_mimetype = mime_type
url_parsed = urlparse(str(path))
if url_parsed.scheme in ["file", ""]:
if url_parsed.scheme == "file":
local_path = url_parsed.path
else:
local_path = str(path)
return Blob(
data=None,
mimetype=_mimetype,
encoding=encoding,
path=local_path,
metadata=metadata if metadata is not None else {},
)
return _CloudBlob(
data=None,
mimetype=_mimetype,
encoding=encoding,
path=str(path),
metadata=metadata if metadata is not None else {},
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blob_loaders/schema.py | from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike
__all__ = [
"Blob",
"BlobLoader",
"PathLike",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blob_loaders/__init__.py | import importlib
from typing import TYPE_CHECKING, Any
from langchain_core.document_loaders import Blob, BlobLoader
if TYPE_CHECKING:
from langchain_community.document_loaders.blob_loaders.cloud_blob_loader import (
CloudBlobLoader,
)
from langchain_community.document_loaders.blob_loaders.file_system import (
FileSystemBlobLoader,
)
from langchain_community.document_loaders.blob_loaders.youtube_audio import (
YoutubeAudioLoader,
)
_module_lookup = {
"CloudBlobLoader": (
"langchain_community.document_loaders.blob_loaders.cloud_blob_loader"
),
"FileSystemBlobLoader": (
"langchain_community.document_loaders.blob_loaders.file_system"
),
"YoutubeAudioLoader": (
"langchain_community.document_loaders.blob_loaders.youtube_audio"
),
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"BlobLoader",
"Blob",
"CloudBlobLoader",
"FileSystemBlobLoader",
"YoutubeAudioLoader",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py | from typing import Iterable, List
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain_community.document_loaders.blob_loaders.schema import Blob, BlobLoader
class YoutubeAudioLoader(BlobLoader):
"""Load YouTube urls as audio file(s)."""
def __init__(self, urls: List[str], save_dir: str):
if not isinstance(urls, list):
raise TypeError("urls must be a list")
self.urls = urls
self.save_dir = save_dir
def yield_blobs(self) -> Iterable[Blob]:
"""Yield audio blobs for each url."""
try:
import yt_dlp
except ImportError:
raise ImportError(
"yt_dlp package not found, please install it with "
"`pip install yt_dlp`"
)
# Use yt_dlp to download audio given a YouTube url
ydl_opts = {
"format": "m4a/bestaudio/best",
"noplaylist": True,
"outtmpl": self.save_dir + "/%(title)s.%(ext)s",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "m4a",
}
],
}
for url in self.urls:
# Download file
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download(url)
# Yield the written blobs
loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a")
for blob in loader.yield_blobs():
yield blob
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/registry.py | """Module includes a registry of default parser configurations."""
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain_community.document_loaders.parsers.msword import MsWordParser
from langchain_community.document_loaders.parsers.pdf import PyMuPDFParser
from langchain_community.document_loaders.parsers.txt import TextParser
def _get_default_parser() -> BaseBlobParser:
"""Get default mime-type based parser."""
return MimeTypeBasedParser(
handlers={
"application/pdf": PyMuPDFParser(),
"text/plain": TextParser(),
"application/msword": MsWordParser(),
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": (
MsWordParser()
),
},
fallback_parser=None,
)
_REGISTRY = {
"default": _get_default_parser,
}
# PUBLIC API
def get_parser(parser_name: str) -> BaseBlobParser:
"""Get a parser by parser name."""
if parser_name not in _REGISTRY:
raise ValueError(f"Unknown parser combination: {parser_name}")
return _REGISTRY[parser_name]()
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/txt.py | """Module for parsing text files.."""
from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class TextParser(BaseBlobParser):
"""Parser for text blobs."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={"source": blob.source}) # type: ignore[attr-defined]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/generic.py | """Code for generic / auxiliary parsers.
This module contains some logic to help assemble more sophisticated parsers.
"""
from typing import Iterator, Mapping, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders.schema import Blob
class MimeTypeBasedParser(BaseBlobParser):
"""Parser that uses `mime`-types to parse a blob.
This parser is useful for simple pipelines where the mime-type is sufficient
to determine how to parse a blob.
To use, configure handlers based on mime-types and pass them to the initializer.
Example:
.. code-block:: python
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
parser = MimeTypeBasedParser(
handlers={
"application/pdf": ...,
},
fallback_parser=...,
)
""" # noqa: E501
def __init__(
self,
handlers: Mapping[str, BaseBlobParser],
*,
fallback_parser: Optional[BaseBlobParser] = None,
) -> None:
"""Define a parser that uses mime-types to determine how to parse a blob.
Args:
handlers: A mapping from mime-types to functions that take a blob, parse it
and return a document.
fallback_parser: A fallback_parser parser to use if the mime-type is not
found in the handlers. If provided, this parser will be
used to parse blobs with all mime-types not found in
the handlers.
If not provided, a ValueError will be raised if the
mime-type is not found in the handlers.
"""
self.handlers = handlers
self.fallback_parser = fallback_parser
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load documents from a blob."""
mimetype = blob.mimetype
if mimetype is None:
raise ValueError(f"{blob} does not have a mimetype.")
if mimetype in self.handlers:
handler = self.handlers[mimetype]
yield from handler.lazy_parse(blob)
else:
if self.fallback_parser is not None:
yield from self.fallback_parser.lazy_parse(blob)
else:
raise ValueError(f"Unsupported mime type: {mimetype}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/doc_intelligence.py | import logging
from typing import Any, Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class AzureAIDocumentIntelligenceParser(BaseBlobParser):
"""Loads a PDF with Azure Document Intelligence
(formerly Forms Recognizer)."""
def __init__(
self,
api_endpoint: str,
api_key: str,
api_version: Optional[str] = None,
api_model: str = "prebuilt-layout",
mode: str = "markdown",
analysis_features: Optional[List[str]] = None,
):
from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import DocumentAnalysisFeature
from azure.core.credentials import AzureKeyCredential
kwargs = {}
if api_version is not None:
kwargs["api_version"] = api_version
if analysis_features is not None:
_SUPPORTED_FEATURES = [
DocumentAnalysisFeature.OCR_HIGH_RESOLUTION,
]
analysis_features = [
DocumentAnalysisFeature(feature) for feature in analysis_features
]
if any(
[feature not in _SUPPORTED_FEATURES for feature in analysis_features]
):
logger.warning(
f"The current supported features are: "
f"{[f.value for f in _SUPPORTED_FEATURES]}. "
"Using other features may result in unexpected behavior."
)
self.client = DocumentIntelligenceClient(
endpoint=api_endpoint,
credential=AzureKeyCredential(api_key),
headers={"x-ms-useragent": "langchain-parser/1.0.0"},
features=analysis_features,
**kwargs,
)
self.api_model = api_model
self.mode = mode
assert self.mode in ["single", "page", "markdown"]
def _generate_docs_page(self, result: Any) -> Iterator[Document]:
for p in result.pages:
content = " ".join([line.content for line in p.lines])
d = Document(
page_content=content,
metadata={
"page": p.page_number,
},
)
yield d
def _generate_docs_single(self, result: Any) -> Iterator[Document]:
yield Document(page_content=result.content, metadata={})
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
with blob.as_bytes_io() as file_obj:
poller = self.client.begin_analyze_document(
self.api_model,
file_obj,
content_type="application/octet-stream",
output_content_format="markdown" if self.mode == "markdown" else "text",
)
result = poller.result()
if self.mode in ["single", "markdown"]:
yield from self._generate_docs_single(result)
elif self.mode in ["page"]:
yield from self._generate_docs_page(result)
else:
raise ValueError(f"Invalid mode: {self.mode}")
def parse_url(self, url: str) -> Iterator[Document]:
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
poller = self.client.begin_analyze_document(
self.api_model,
AnalyzeDocumentRequest(url_source=url),
# content_type="application/octet-stream",
output_content_format="markdown" if self.mode == "markdown" else "text",
)
result = poller.result()
if self.mode in ["single", "markdown"]:
yield from self._generate_docs_single(result)
elif self.mode in ["page"]:
yield from self._generate_docs_page(result)
else:
raise ValueError(f"Invalid mode: {self.mode}")
def parse_bytes(self, bytes_source: bytes) -> Iterator[Document]:
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
poller = self.client.begin_analyze_document(
self.api_model,
analyze_request=AnalyzeDocumentRequest(bytes_source=bytes_source),
# content_type="application/octet-stream",
output_content_format="markdown" if self.mode == "markdown" else "text",
)
result = poller.result()
if self.mode in ["single", "markdown"]:
yield from self._generate_docs_single(result)
elif self.mode in ["page"]:
yield from self._generate_docs_page(result)
else:
raise ValueError(f"Invalid mode: {self.mode}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/vsdx.py | import json
import re
import zipfile
from abc import ABC
from pathlib import Path
from typing import Iterator, List, Set, Tuple
from langchain_community.docstore.document import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class VsdxParser(BaseBlobParser, ABC):
"""Parser for vsdx files."""
def parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[override]
"""Parse a vsdx file."""
return self.lazy_parse(blob)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Retrieve the contents of pages from a .vsdx file
and insert them into documents, one document per page."""
with blob.as_bytes_io() as pdf_file_obj:
with zipfile.ZipFile(pdf_file_obj, "r") as zfile:
pages = self.get_pages_content(zfile, blob.source) # type: ignore[arg-type]
yield from [
Document(
page_content=page_content,
metadata={
"source": blob.source,
"page": page_number,
"page_name": page_name,
},
)
for page_number, page_name, page_content in pages
]
def get_pages_content(
self, zfile: zipfile.ZipFile, source: str
) -> List[Tuple[int, str, str]]:
"""Get the content of the pages of a vsdx file.
Attributes:
zfile (zipfile.ZipFile): The vsdx file under zip format.
source (str): The path of the vsdx file.
Returns:
list[tuple[int, str, str]]: A list of tuples containing the page number,
the name of the page and the content of the page
for each page of the vsdx file.
"""
try:
import xmltodict
except ImportError:
raise ImportError(
"The xmltodict library is required to parse vsdx files. "
"Please install it with `pip install xmltodict`."
)
if "visio/pages/pages.xml" not in zfile.namelist():
print("WARNING - No pages.xml file found in {}".format(source)) # noqa: T201
return # type: ignore[return-value]
if "visio/pages/_rels/pages.xml.rels" not in zfile.namelist():
print("WARNING - No pages.xml.rels file found in {}".format(source)) # noqa: T201
return # type: ignore[return-value]
if "docProps/app.xml" not in zfile.namelist():
print("WARNING - No app.xml file found in {}".format(source)) # noqa: T201
return # type: ignore[return-value]
pagesxml_content: dict = xmltodict.parse(zfile.read("visio/pages/pages.xml"))
appxml_content: dict = xmltodict.parse(zfile.read("docProps/app.xml"))
pagesxmlrels_content: dict = xmltodict.parse(
zfile.read("visio/pages/_rels/pages.xml.rels")
)
if isinstance(pagesxml_content["Pages"]["Page"], list):
disordered_names: List[str] = [
rel["@Name"].strip() for rel in pagesxml_content["Pages"]["Page"]
]
else:
disordered_names: List[str] = [ # type: ignore[no-redef]
pagesxml_content["Pages"]["Page"]["@Name"].strip()
]
if isinstance(pagesxmlrels_content["Relationships"]["Relationship"], list):
disordered_paths: List[str] = [
"visio/pages/" + rel["@Target"]
for rel in pagesxmlrels_content["Relationships"]["Relationship"]
]
else:
disordered_paths: List[str] = [ # type: ignore[no-redef]
"visio/pages/"
+ pagesxmlrels_content["Relationships"]["Relationship"]["@Target"]
]
ordered_names: List[str] = appxml_content["Properties"]["TitlesOfParts"][
"vt:vector"
]["vt:lpstr"][: len(disordered_names)]
ordered_names = [name.strip() for name in ordered_names]
ordered_paths = [
disordered_paths[disordered_names.index(name.strip())]
for name in ordered_names
]
# Pages out of order and without content of their relationships
disordered_pages = []
for path in ordered_paths:
content = zfile.read(path)
string_content = json.dumps(xmltodict.parse(content))
samples = re.findall(
r'"#text"\s*:\s*"([^\\"]*(?:\\.[^\\"]*)*)"', string_content
)
if len(samples) > 0:
page_content = "\n".join(samples)
map_symboles = {
"\\n": "\n",
"\\t": "\t",
"\\u2013": "-",
"\\u2019": "'",
"\\u00e9r": "é",
"\\u00f4me": "ô",
}
for key, value in map_symboles.items():
page_content = page_content.replace(key, value)
disordered_pages.append({"page": path, "page_content": page_content})
# Direct relationships of each page in a dict format
pagexml_rels = [
{
"path": page_path,
"content": xmltodict.parse(
zfile.read(f"visio/pages/_rels/{Path(page_path).stem}.xml.rels")
),
}
for page_path in ordered_paths
if f"visio/pages/_rels/{Path(page_path).stem}.xml.rels" in zfile.namelist()
]
# Pages in order and with content of their relationships (direct and indirect)
ordered_pages: List[Tuple[int, str, str]] = []
for page_number, (path, page_name) in enumerate(
zip(ordered_paths, ordered_names)
):
relationships = self.get_relationships(
path, zfile, ordered_paths, pagexml_rels
)
page_content = "\n".join(
[
page_["page_content"]
for page_ in disordered_pages
if page_["page"] in relationships
]
+ [
page_["page_content"]
for page_ in disordered_pages
if page_["page"] == path
]
)
ordered_pages.append((page_number, page_name, page_content))
return ordered_pages
def get_relationships(
self,
page: str,
zfile: zipfile.ZipFile,
filelist: List[str],
pagexml_rels: List[dict],
) -> Set[str]:
"""Get the relationships of a page and the relationships of its relationships,
etc... recursively.
Pages are based on other pages (ex: background page),
so we need to get all the relationships to get all the content of a single page.
"""
name_path = Path(page).name
parent_path = Path(page).parent
rels_path = parent_path / f"_rels/{name_path}.rels"
if str(rels_path) not in zfile.namelist():
return set()
pagexml_rels_content = next(
page_["content"] for page_ in pagexml_rels if page_["path"] == page
)
if isinstance(pagexml_rels_content["Relationships"]["Relationship"], list):
targets = [
rel["@Target"]
for rel in pagexml_rels_content["Relationships"]["Relationship"]
]
else:
targets = [pagexml_rels_content["Relationships"]["Relationship"]["@Target"]]
relationships = set(
[str(parent_path / target) for target in targets]
).intersection(filelist)
for rel in relationships:
relationships = relationships | self.get_relationships(
rel, zfile, filelist, pagexml_rels
)
return relationships
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/audio.py | import io
import logging
import os
import time
from typing import Any, Callable, Dict, Iterator, Literal, Optional, Tuple, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
class AzureOpenAIWhisperParser(BaseBlobParser):
"""
Transcribe and parse audio files using Azure OpenAI Whisper.
This parser integrates with the Azure OpenAI Whisper model to transcribe
audio files. It differs from the standard OpenAI Whisper parser, requiring
an Azure endpoint and credentials. The parser is limited to files under 25 MB.
**Note**:
This parser uses the Azure OpenAI API, providing integration with the Azure
ecosystem, and making it suitable for workflows involving other Azure services.
For files larger than 25 MB, consider using Azure AI Speech batch transcription:
https://learn.microsoft.com/azure/ai-services/speech-service/batch-transcription-create?pivots=rest-api#use-a-whisper-model
Setup:
1. Follow the instructions here to deploy Azure Whisper:
https://learn.microsoft.com/azure/ai-services/openai/whisper-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python
2. Install ``langchain`` and set the following environment variables:
.. code-block:: bash
pip install -U langchain langchain-community
export AZURE_OPENAI_API_KEY="your-api-key"
export AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/"
export OPENAI_API_VERSION="your-api-version"
Example Usage:
.. code-block:: python
from langchain.community import AzureOpenAIWhisperParser
whisper_parser = AzureOpenAIWhisperParser(
deployment_name="your-whisper-deployment",
api_version="2024-06-01",
api_key="your-api-key",
# other params...
)
audio_blob = Blob(path="your-audio-file-path")
response = whisper_parser.lazy_parse(audio_blob)
for document in response:
print(document.page_content)
Integration with Other Loaders:
The AzureOpenAIWhisperParser can be used with video/audio loaders and
`GenericLoader` to automate retrieval and parsing.
YoutubeAudioLoader Example:
.. code-block:: python
from langchain_community.document_loaders.blob_loaders import (
YoutubeAudioLoader
)
from langchain_community.document_loaders.generic import GenericLoader
# Must be a list
youtube_url = ["https://your-youtube-url"]
save_dir = "directory-to-download-videos"
loader = GenericLoader(
YoutubeAudioLoader(youtube_url, save_dir),
AzureOpenAIWhisperParser(deployment_name="your-deployment-name")
)
docs = loader.load()
"""
def __init__(
self,
*,
api_key: Optional[str] = None,
azure_endpoint: Optional[str] = None,
api_version: Optional[str] = None,
azure_ad_token_provider: Union[Callable[[], str], None] = None,
language: Optional[str] = None,
prompt: Optional[str] = None,
response_format: Union[
Literal["json", "text", "srt", "verbose_json", "vtt"], None
] = None,
temperature: Optional[float] = None,
deployment_name: str,
max_retries: int = 3,
):
"""
Initialize the AzureOpenAIWhisperParser.
Args:
api_key (Optional[str]):
Azure OpenAI API key. If not provided, defaults to the
`AZURE_OPENAI_API_KEY` environment variable.
azure_endpoint (Optional[str]):
Azure OpenAI service endpoint. Defaults to `AZURE_OPENAI_ENDPOINT`
environment variable if not set.
api_version (Optional[str]):
API version to use,
defaults to the `OPENAI_API_VERSION` environment variable.
azure_ad_token_provider (Union[Callable[[], str], None]):
Azure Active Directory token for authentication (if applicable).
language (Optional[str]):
Language in which the request should be processed.
prompt (Optional[str]):
Custom instructions or prompt for the Whisper model.
response_format (Union[str, None]):
The desired output format. Options: "json", "text", "srt",
"verbose_json", "vtt".
temperature (Optional[float]):
Controls the randomness of the model's output.
deployment_name (str):
The deployment name of the Whisper model.
max_retries (int):
Maximum number of retries for failed API requests.
Raises:
ImportError:
If the required package `openai` is not installed.
"""
self.api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
self.azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
self.api_version = api_version or os.environ.get("OPENAI_API_VERSION")
self.azure_ad_token_provider = azure_ad_token_provider
self.language = language
self.prompt = prompt
self.response_format = response_format
self.temperature = temperature
self.deployment_name = deployment_name
self.max_retries = max_retries
try:
import openai
except ImportError:
raise ImportError(
"openai package not found, please install it with "
"`pip install openai`"
)
if is_openai_v1():
self._client = openai.AzureOpenAI(
api_key=self.api_key,
azure_endpoint=self.azure_endpoint,
api_version=self.api_version,
max_retries=self.max_retries,
azure_ad_token=self.azure_ad_token_provider,
)
else:
if self.api_key:
openai.api_key = self.api_key
if self.azure_endpoint:
openai.api_base = self.azure_endpoint
if self.api_version:
openai.api_version = self.api_version
openai.api_type = "azure"
self._client = openai
@property
def _create_params(self) -> Dict[str, Any]:
params = {
"language": self.language,
"prompt": self.prompt,
"response_format": self.response_format,
"temperature": self.temperature,
}
return {k: v for k, v in params.items() if v is not None}
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""
Lazily parse the provided audio blob for transcription.
Args:
blob (Blob):
The audio file in Blob format to be transcribed.
Yields:
Document:
Parsed transcription from the audio file.
Raises:
Exception:
If an error occurs during transcription.
"""
file_obj = open(str(blob.path), "rb")
# Transcribe
try:
if is_openai_v1():
transcript = self._client.audio.transcriptions.create(
model=self.deployment_name,
file=file_obj,
**self._create_params,
)
else:
transcript = self._client.Audio.transcribe(
model=self.deployment_name,
deployment_id=self.deployment_name,
file=file_obj,
**self._create_params,
)
except Exception:
raise
yield Document(
page_content=transcript.text
if not isinstance(transcript, str)
else transcript,
metadata={"source": blob.source},
)
class OpenAIWhisperParser(BaseBlobParser):
"""Transcribe and parse audio files.
Audio transcription is with OpenAI Whisper model.
Args:
api_key: OpenAI API key
chunk_duration_threshold: Minimum duration of a chunk in seconds
NOTE: According to the OpenAI API, the chunk duration should be at least 0.1
seconds. If the chunk duration is less or equal than the threshold,
it will be skipped.
"""
def __init__(
self,
api_key: Optional[str] = None,
*,
chunk_duration_threshold: float = 0.1,
base_url: Optional[str] = None,
language: Union[str, None] = None,
prompt: Union[str, None] = None,
response_format: Union[
Literal["json", "text", "srt", "verbose_json", "vtt"], None
] = None,
temperature: Union[float, None] = None,
):
self.api_key = api_key
self.chunk_duration_threshold = chunk_duration_threshold
self.base_url = (
base_url if base_url is not None else os.environ.get("OPENAI_API_BASE")
)
self.language = language
self.prompt = prompt
self.response_format = response_format
self.temperature = temperature
@property
def _create_params(self) -> Dict[str, Any]:
params = {
"language": self.language,
"prompt": self.prompt,
"response_format": self.response_format,
"temperature": self.temperature,
}
return {k: v for k, v in params.items() if v is not None}
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
import openai
except ImportError:
raise ImportError(
"openai package not found, please install it with "
"`pip install openai`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
if is_openai_v1():
# api_key optional, defaults to `os.environ['OPENAI_API_KEY']`
client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
else:
# Set the API key if provided
if self.api_key:
openai.api_key = self.api_key
if self.base_url:
openai.api_base = self.base_url
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
# Define the duration of each chunk in minutes
# Need to meet 25MB size limit for Whisper API
chunk_duration = 20
chunk_duration_ms = chunk_duration * 60 * 1000
# Split the audio into chunk_duration_ms chunks
for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
# Audio chunk
chunk = audio[i : i + chunk_duration_ms]
# Skip chunks that are too short to transcribe
if chunk.duration_seconds <= self.chunk_duration_threshold:
continue
file_obj = io.BytesIO(chunk.export(format="mp3").read())
if blob.source is not None:
file_obj.name = blob.source + f"_part_{split_number}.mp3"
else:
file_obj.name = f"part_{split_number}.mp3"
# Transcribe
print(f"Transcribing part {split_number + 1}!") # noqa: T201
attempts = 0
while attempts < 3:
try:
if is_openai_v1():
transcript = client.audio.transcriptions.create(
model="whisper-1", file=file_obj, **self._create_params
)
else:
transcript = openai.Audio.transcribe("whisper-1", file_obj) # type: ignore[attr-defined]
break
except Exception as e:
attempts += 1
print(f"Attempt {attempts} failed. Exception: {str(e)}") # noqa: T201
time.sleep(5)
else:
print("Failed to transcribe after 3 attempts.") # noqa: T201
continue
yield Document(
page_content=transcript.text
if not isinstance(transcript, str)
else transcript,
metadata={"source": blob.source, "chunk": split_number},
)
class OpenAIWhisperParserLocal(BaseBlobParser):
"""Transcribe and parse audio files with OpenAI Whisper model.
Audio transcription with OpenAI Whisper model locally from transformers.
Parameters:
device - device to use
NOTE: By default uses the gpu if available,
if you want to use cpu, please set device = "cpu"
lang_model - whisper model to use, for example "openai/whisper-medium"
forced_decoder_ids - id states for decoder in multilanguage model,
usage example:
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-medium")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="transcribe")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="translate")
"""
def __init__(
self,
device: str = "0",
lang_model: Optional[str] = None,
batch_size: int = 8,
chunk_length: int = 30,
forced_decoder_ids: Optional[Tuple[Dict]] = None,
):
"""Initialize the parser.
Args:
device: device to use.
lang_model: whisper model to use, for example "openai/whisper-medium".
Defaults to None.
forced_decoder_ids: id states for decoder in a multilanguage model.
Defaults to None.
batch_size: batch size used for decoding
Defaults to 8.
chunk_length: chunk length used during inference.
Defaults to 30s.
"""
try:
from transformers import pipeline
except ImportError:
raise ImportError(
"transformers package not found, please install it with "
"`pip install transformers`"
)
try:
import torch
except ImportError:
raise ImportError(
"torch package not found, please install it with " "`pip install torch`"
)
# Determine the device to use
if device == "cpu":
self.device = "cpu"
else:
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
if self.device == "cpu":
default_model = "openai/whisper-base"
self.lang_model = lang_model if lang_model else default_model
else:
# Set the language model based on the device and available memory
mem = torch.cuda.get_device_properties(self.device).total_memory / (1024**2)
if mem < 5000:
rec_model = "openai/whisper-base"
elif mem < 7000:
rec_model = "openai/whisper-small"
elif mem < 12000:
rec_model = "openai/whisper-medium"
else:
rec_model = "openai/whisper-large"
self.lang_model = lang_model if lang_model else rec_model
print("Using the following model: ", self.lang_model) # noqa: T201
self.batch_size = batch_size
# load model for inference
self.pipe = pipeline(
"automatic-speech-recognition",
model=self.lang_model,
chunk_length_s=chunk_length,
device=self.device,
)
if forced_decoder_ids is not None:
try:
self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
except Exception as exception_text:
logger.info(
"Unable to set forced_decoder_ids parameter for whisper model"
f"Text of exception: {exception_text}"
"Therefore whisper model will use default mode for decoder"
)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with `pip install pydub`"
)
try:
import librosa
except ImportError:
raise ImportError(
"librosa package not found, please install it with "
"`pip install librosa`"
)
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
file_obj = io.BytesIO(audio.export(format="mp3").read())
# Transcribe
print(f"Transcribing part {blob.path}!") # noqa: T201
y, sr = librosa.load(file_obj, sr=16000)
prediction = self.pipe(y.copy(), batch_size=self.batch_size)["text"]
yield Document(
page_content=prediction,
metadata={"source": blob.source},
)
class YandexSTTParser(BaseBlobParser):
"""Transcribe and parse audio files.
Audio transcription is with OpenAI Whisper model."""
def __init__(
self,
*,
api_key: Optional[str] = None,
iam_token: Optional[str] = None,
model: str = "general",
language: str = "auto",
):
"""Initialize the parser.
Args:
api_key: API key for a service account
with the `ai.speechkit-stt.user` role.
iam_token: IAM token for a service account
with the `ai.speechkit-stt.user` role.
model: Recognition model name.
Defaults to general.
language: The language in ISO 639-1 format.
Defaults to automatic language recognition.
Either `api_key` or `iam_token` must be provided, but not both.
"""
if (api_key is None) == (iam_token is None):
raise ValueError(
"Either 'api_key' or 'iam_token' must be provided, but not both."
)
self.api_key = api_key
self.iam_token = iam_token
self.model = model
self.language = language
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
from speechkit import configure_credentials, creds, model_repository
from speechkit.stt import AudioProcessingType
except ImportError:
raise ImportError(
"yandex-speechkit package not found, please install it with "
"`pip install yandex-speechkit`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
if self.api_key:
configure_credentials(
yandex_credentials=creds.YandexCredentials(api_key=self.api_key)
)
else:
configure_credentials(
yandex_credentials=creds.YandexCredentials(iam_token=self.iam_token)
)
audio = AudioSegment.from_file(blob.path)
model = model_repository.recognition_model()
model.model = self.model
model.language = self.language
model.audio_processing_type = AudioProcessingType.Full
result = model.transcribe(audio)
for res in result:
yield Document(
page_content=res.normalized_text,
metadata={"source": blob.source},
)
class FasterWhisperParser(BaseBlobParser):
"""Transcribe and parse audio files with faster-whisper.
faster-whisper is a reimplementation of OpenAI's Whisper model using CTranslate2,
which is up to 4 times faster than openai/whisper for the same accuracy while using
less memory. The efficiency can be further improved with 8-bit quantization on both
CPU and GPU.
It can automatically detect the following 14 languages and transcribe the text
into their respective languages: en, zh, fr, de, ja, ko, ru, es, th, it, pt, vi,
ar, tr.
The gitbub repository for faster-whisper is :
https://github.com/SYSTRAN/faster-whisper
Example: Load a YouTube video and transcribe the video speech into a document.
.. code-block:: python
from langchain.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.audio
import FasterWhisperParser
from langchain.document_loaders.blob_loaders.youtube_audio
import YoutubeAudioLoader
url="https://www.youtube.com/watch?v=your_video"
save_dir="your_dir/"
loader = GenericLoader(
YoutubeAudioLoader([url],save_dir),
FasterWhisperParser()
)
docs = loader.load()
"""
def __init__(
self,
*,
device: Optional[str] = "cuda",
model_size: Optional[str] = None,
):
"""Initialize the parser.
Args:
device: It can be "cuda" or "cpu" based on the available device.
model_size: There are four model sizes to choose from: "base", "small",
"medium", and "large-v3", based on the available GPU memory.
"""
try:
import torch
except ImportError:
raise ImportError(
"torch package not found, please install it with `pip install torch`"
)
# Determine the device to use
if device == "cpu":
self.device = "cpu"
else:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# Determine the model_size
if self.device == "cpu":
self.model_size = "base"
else:
# Set the model_size based on the available memory
mem = torch.cuda.get_device_properties(self.device).total_memory / (1024**2)
if mem < 1000:
self.model_size = "base"
elif mem < 3000:
self.model_size = "small"
elif mem < 5000:
self.model_size = "medium"
else:
self.model_size = "large-v3"
# If the user has assigned a model size, then use the assigned size
if model_size is not None:
if model_size in ["base", "small", "medium", "large-v3"]:
self.model_size = model_size
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with `pip install pydub`"
)
try:
from faster_whisper import WhisperModel
except ImportError:
raise ImportError(
"faster_whisper package not found, please install it with "
"`pip install faster-whisper`"
)
# get the audio
if isinstance(blob.data, bytes):
# blob contains the audio
audio = AudioSegment.from_file(io.BytesIO(blob.data))
elif blob.data is None and blob.path:
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
else:
raise ValueError("Unable to get audio from blob")
file_obj = io.BytesIO(audio.export(format="mp3").read())
# Transcribe
model = WhisperModel(
self.model_size, device=self.device, compute_type="float16"
)
segments, info = model.transcribe(file_obj, beam_size=5)
for segment in segments:
yield Document(
page_content=segment.text,
metadata={
"source": blob.source,
"timestamps": "[%.2fs -> %.2fs]" % (segment.start, segment.end),
"language": info.language,
"probability": "%d%%" % round(info.language_probability * 100),
**blob.metadata,
},
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/grobid.py | import logging
from typing import Dict, Iterator, List, Union
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class ServerUnavailableException(Exception):
"""Exception raised when the Grobid server is unavailable."""
pass
class GrobidParser(BaseBlobParser):
"""Load article `PDF` files using `Grobid`."""
def __init__(
self,
segment_sentences: bool,
grobid_server: str = "http://localhost:8070/api/processFulltextDocument",
) -> None:
self.segment_sentences = segment_sentences
self.grobid_server = grobid_server
try:
requests.get(grobid_server)
except requests.exceptions.RequestException:
logger.error(
"GROBID server does not appear up and running, \
please ensure Grobid is installed and the server is running"
)
raise ServerUnavailableException
def process_xml(
self, file_path: str, xml_data: str, segment_sentences: bool
) -> Iterator[Document]:
"""Process the XML file from Grobin."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"`bs4` package not found, please install it with " "`pip install bs4`"
)
soup = BeautifulSoup(xml_data, "xml")
sections = soup.find_all("div")
titles = soup.find_all("title")
if titles:
title = titles[0].text
else:
title = "No title found"
chunks = []
for section in sections:
sect = section.find("head")
if sect is not None:
for i, paragraph in enumerate(section.find_all("p")):
chunk_bboxes = []
paragraph_text = []
for i, sentence in enumerate(paragraph.find_all("s")):
paragraph_text.append(sentence.text)
sbboxes = []
if sentence.get("coords") is not None:
for bbox in sentence.get("coords").split(";"):
box = bbox.split(",")
sbboxes.append(
{
"page": box[0],
"x": box[1],
"y": box[2],
"h": box[3],
"w": box[4],
}
)
chunk_bboxes.append(sbboxes)
if (segment_sentences is True) and (len(sbboxes) > 0):
fpage, lpage = sbboxes[0]["page"], sbboxes[-1]["page"]
sentence_dict = {
"text": sentence.text,
"para": str(i),
"bboxes": [sbboxes],
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(sentence_dict)
if segment_sentences is not True:
fpage, lpage = (
chunk_bboxes[0][0]["page"],
chunk_bboxes[-1][-1]["page"],
)
paragraph_dict = {
"text": "".join(paragraph_text),
"para": str(i),
"bboxes": chunk_bboxes,
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(paragraph_dict)
yield from [
Document(
page_content=chunk["text"],
metadata=dict(
{
"text": str(chunk["text"]),
"para": str(chunk["para"]),
"bboxes": str(chunk["bboxes"]),
"pages": str(chunk["pages"]),
"section_title": str(chunk["section_title"]),
"section_number": str(chunk["section_number"]),
"paper_title": str(title),
"file_path": str(file_path),
}
),
)
for chunk in chunks
]
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
file_path = blob.source
if file_path is None:
raise ValueError("blob.source cannot be None.")
pdf = open(file_path, "rb")
files = {"input": (file_path, pdf, "application/pdf", {"Expires": "0"})}
try:
data: Dict[str, Union[str, List[str]]] = {}
for param in ["generateIDs", "consolidateHeader", "segmentSentences"]:
data[param] = "1"
data["teiCoordinates"] = ["head", "s"]
files = files or {}
r = requests.request(
"POST",
self.grobid_server,
headers=None,
params=None,
files=files,
data=data,
timeout=60,
)
xml_data = r.text
except requests.exceptions.ReadTimeout:
logger.error("GROBID server timed out. Return None.")
xml_data = None
if xml_data is None:
return iter([])
else:
return self.process_xml(file_path, xml_data, self.segment_sentences)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/__init__.py | import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.audio import (
OpenAIWhisperParser,
)
from langchain_community.document_loaders.parsers.doc_intelligence import (
AzureAIDocumentIntelligenceParser,
)
from langchain_community.document_loaders.parsers.docai import (
DocAIParser,
)
from langchain_community.document_loaders.parsers.grobid import (
GrobidParser,
)
from langchain_community.document_loaders.parsers.html import (
BS4HTMLParser,
)
from langchain_community.document_loaders.parsers.language import (
LanguageParser,
)
from langchain_community.document_loaders.parsers.pdf import (
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
from langchain_community.document_loaders.parsers.vsdx import (
VsdxParser,
)
_module_lookup = {
"AzureAIDocumentIntelligenceParser": "langchain_community.document_loaders.parsers.doc_intelligence", # noqa: E501
"BS4HTMLParser": "langchain_community.document_loaders.parsers.html",
"DocAIParser": "langchain_community.document_loaders.parsers.docai",
"GrobidParser": "langchain_community.document_loaders.parsers.grobid",
"LanguageParser": "langchain_community.document_loaders.parsers.language",
"OpenAIWhisperParser": "langchain_community.document_loaders.parsers.audio",
"PDFMinerParser": "langchain_community.document_loaders.parsers.pdf",
"PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf",
"PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
"VsdxParser": "langchain_community.document_loaders.parsers.vsdx",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"AzureAIDocumentIntelligenceParser",
"BS4HTMLParser",
"DocAIParser",
"GrobidParser",
"LanguageParser",
"OpenAIWhisperParser",
"PDFMinerParser",
"PDFPlumberParser",
"PyMuPDFParser",
"PyPDFParser",
"PyPDFium2Parser",
"VsdxParser",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/docai.py | """Module contains a PDF parser based on Document AI from Google Cloud.
You need to install two libraries to use this parser:
pip install google-cloud-documentai
pip install google-cloud-documentai-toolbox
"""
import logging
import re
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from langchain_core.utils.iter import batch_iterate
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.api_core.operation import Operation
from google.cloud.documentai import DocumentProcessorServiceClient
logger = logging.getLogger(__name__)
@dataclass
class DocAIParsingResults:
"""Dataclass to store Document AI parsing results."""
source_path: str
parsed_path: str
@deprecated(
since="0.0.32",
removal="1.0",
alternative_import="langchain_google_community.DocAIParser",
)
class DocAIParser(BaseBlobParser):
"""`Google Cloud Document AI` parser.
For a detailed explanation of Document AI, refer to the product documentation.
https://cloud.google.com/document-ai/docs/overview
"""
def __init__(
self,
*,
client: Optional["DocumentProcessorServiceClient"] = None,
location: Optional[str] = None,
gcs_output_path: Optional[str] = None,
processor_name: Optional[str] = None,
):
"""Initializes the parser.
Args:
client: a DocumentProcessorServiceClient to use
location: a Google Cloud location where a Document AI processor is located
gcs_output_path: a path on Google Cloud Storage to store parsing results
processor_name: full resource name of a Document AI processor or processor
version
You should provide either a client or location (and then a client
would be instantiated).
"""
if bool(client) == bool(location):
raise ValueError(
"You must specify either a client or a location to instantiate "
"a client."
)
pattern = r"projects\/[0-9]+\/locations\/[a-z\-0-9]+\/processors\/[a-z0-9]+"
if processor_name and not re.fullmatch(pattern, processor_name):
raise ValueError(
f"Processor name {processor_name} has the wrong format. If your "
"prediction endpoint looks like https://us-documentai.googleapis.com"
"/v1/projects/PROJECT_ID/locations/us/processors/PROCESSOR_ID:process,"
" use only projects/PROJECT_ID/locations/us/processors/PROCESSOR_ID "
"part."
)
self._gcs_output_path = gcs_output_path
self._processor_name = processor_name
if client:
self._client = client
else:
try:
from google.api_core.client_options import ClientOptions
from google.cloud.documentai import DocumentProcessorServiceClient
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
options = ClientOptions(
api_endpoint=f"{location}-documentai.googleapis.com"
)
self._client = DocumentProcessorServiceClient(
client_options=options,
client_info=get_client_info(module="document-ai"),
)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Parses a blob lazily.
Args:
blobs: a Blob to parse
This is a long-running operation. A recommended way is to batch
documents together and use the `batch_parse()` method.
"""
yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path)
def online_process(
self,
blob: Blob,
enable_native_pdf_parsing: bool = True,
field_mask: Optional[str] = None,
page_range: Optional[List[int]] = None,
) -> Iterator[Document]:
"""Parses a blob lazily using online processing.
Args:
blob: a blob to parse.
enable_native_pdf_parsing: enable pdf embedded text extraction
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
page_range: list of page numbers to parse. If `None`,
entire document will be parsed.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import (
IndividualPageSelector,
OcrConfig,
ProcessOptions,
)
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
try:
from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout
except ImportError as exc:
raise ImportError(
"documentai_toolbox package not found, please install it with"
" `pip install google-cloud-documentai-toolbox`"
) from exc
ocr_config = (
OcrConfig(enable_native_pdf_parsing=enable_native_pdf_parsing)
if enable_native_pdf_parsing
else None
)
individual_page_selector = (
IndividualPageSelector(pages=page_range) if page_range else None
)
response = self._client.process_document(
documentai.ProcessRequest(
name=self._processor_name,
gcs_document=documentai.GcsDocument(
gcs_uri=blob.path,
mime_type=blob.mimetype or "application/pdf",
),
process_options=ProcessOptions(
ocr_config=ocr_config,
individual_page_selector=individual_page_selector,
),
skip_human_review=True,
field_mask=field_mask,
)
)
yield from (
Document(
page_content=_text_from_layout(page.layout, response.document.text),
metadata={
"page": page.page_number,
"source": blob.path,
},
)
for page in response.document.pages
)
def batch_parse(
self,
blobs: Sequence[Blob],
gcs_output_path: Optional[str] = None,
timeout_sec: int = 3600,
check_in_interval_sec: int = 60,
) -> Iterator[Document]:
"""Parses a list of blobs lazily.
Args:
blobs: a list of blobs to parse.
gcs_output_path: a path on Google Cloud Storage to store parsing results.
timeout_sec: a timeout to wait for Document AI to complete, in seconds.
check_in_interval_sec: an interval to wait until next check
whether parsing operations have been completed, in seconds
This is a long-running operation. A recommended way is to decouple
parsing from creating LangChain Documents:
>>> operations = parser.docai_parse(blobs, gcs_path)
>>> parser.is_running(operations)
You can get operations names and save them:
>>> names = [op.operation.name for op in operations]
And when all operations are finished, you can use their results:
>>> operations = parser.operations_from_names(operation_names)
>>> results = parser.get_results(operations)
>>> docs = parser.parse_from_results(results)
"""
output_path = gcs_output_path or self._gcs_output_path
if not output_path:
raise ValueError(
"An output path on Google Cloud Storage should be provided."
)
operations = self.docai_parse(blobs, gcs_output_path=output_path)
operation_names = [op.operation.name for op in operations]
logger.debug(
"Started parsing with Document AI, submitted operations %s", operation_names
)
time_elapsed = 0
while self.is_running(operations):
time.sleep(check_in_interval_sec)
time_elapsed += check_in_interval_sec
if time_elapsed > timeout_sec:
raise TimeoutError(
"Timeout exceeded! Check operations " f"{operation_names} later!"
)
logger.debug(".")
results = self.get_results(operations=operations)
yield from self.parse_from_results(results)
def parse_from_results(
self, results: List[DocAIParsingResults]
) -> Iterator[Document]:
try:
from google.cloud.documentai_toolbox.utilities.gcs_utilities import (
split_gcs_uri,
)
from google.cloud.documentai_toolbox.wrappers.document import _get_shards
from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout
except ImportError as exc:
raise ImportError(
"documentai_toolbox package not found, please install it with"
" `pip install google-cloud-documentai-toolbox`"
) from exc
for result in results:
gcs_bucket_name, gcs_prefix = split_gcs_uri(result.parsed_path)
shards = _get_shards(gcs_bucket_name, gcs_prefix)
yield from (
Document(
page_content=_text_from_layout(page.layout, shard.text),
metadata={"page": page.page_number, "source": result.source_path},
)
for shard in shards
for page in shard.pages
)
def operations_from_names(self, operation_names: List[str]) -> List["Operation"]:
"""Initializes Long-Running Operations from their names."""
try:
from google.longrunning.operations_pb2 import (
GetOperationRequest, # type: ignore
)
except ImportError as exc:
raise ImportError(
"long running operations package not found, please install it with"
" `pip install gapic-google-longrunning`"
) from exc
return [
self._client.get_operation(request=GetOperationRequest(name=name))
for name in operation_names
]
def is_running(self, operations: List["Operation"]) -> bool:
return any(not op.done() for op in operations)
def docai_parse(
self,
blobs: Sequence[Blob],
*,
gcs_output_path: Optional[str] = None,
processor_name: Optional[str] = None,
batch_size: int = 1000,
enable_native_pdf_parsing: bool = True,
field_mask: Optional[str] = None,
) -> List["Operation"]:
"""Runs Google Document AI PDF Batch Processing on a list of blobs.
Args:
blobs: a list of blobs to be parsed
gcs_output_path: a path (folder) on GCS to store results
processor_name: name of a Document AI processor.
batch_size: amount of documents per batch
enable_native_pdf_parsing: a config option for the parser
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
Document AI has a 1000 file limit per batch, so batches larger than that need
to be split into multiple requests.
Batch processing is an async long-running operation
and results are stored in a output GCS bucket.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
output_path = gcs_output_path or self._gcs_output_path
if output_path is None:
raise ValueError(
"An output path on Google Cloud Storage should be provided."
)
processor_name = processor_name or self._processor_name
if processor_name is None:
raise ValueError("A Document AI processor name should be provided.")
operations = []
for batch in batch_iterate(size=batch_size, iterable=blobs):
input_config = documentai.BatchDocumentsInputConfig(
gcs_documents=documentai.GcsDocuments(
documents=[
documentai.GcsDocument(
gcs_uri=blob.path,
mime_type=blob.mimetype or "application/pdf",
)
for blob in batch
]
)
)
output_config = documentai.DocumentOutputConfig(
gcs_output_config=documentai.DocumentOutputConfig.GcsOutputConfig(
gcs_uri=output_path, field_mask=field_mask
)
)
process_options = (
ProcessOptions(
ocr_config=OcrConfig(
enable_native_pdf_parsing=enable_native_pdf_parsing
)
)
if enable_native_pdf_parsing
else None
)
operations.append(
self._client.batch_process_documents(
documentai.BatchProcessRequest(
name=processor_name,
input_documents=input_config,
document_output_config=output_config,
process_options=process_options,
skip_human_review=True,
)
)
)
return operations
def get_results(self, operations: List["Operation"]) -> List[DocAIParsingResults]:
try:
from google.cloud.documentai_v1 import BatchProcessMetadata
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
return [
DocAIParsingResults(
source_path=status.input_gcs_source,
parsed_path=status.output_gcs_destination,
)
for op in operations
for status in (
op.metadata.individual_process_statuses
if isinstance(op.metadata, BatchProcessMetadata)
else BatchProcessMetadata.deserialize(
op.metadata.value
).individual_process_statuses
)
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/msword.py | from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class MsWordParser(BaseBlobParser):
"""Parse the Microsoft Word documents from a blob."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Parse a Microsoft Word document into the Document iterator.
Args:
blob: The blob to parse.
Returns: An iterator of Documents.
"""
try:
from unstructured.partition.doc import partition_doc
from unstructured.partition.docx import partition_docx
except ImportError as e:
raise ImportError(
"Could not import unstructured, please install with `pip install "
"unstructured`."
) from e
mime_type_parser = {
"application/msword": partition_doc,
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": (
partition_docx
),
}
if blob.mimetype not in ( # type: ignore[attr-defined]
"application/msword",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
):
raise ValueError("This blob type is not supported for this parser.")
with blob.as_bytes_io() as word_document: # type: ignore[attr-defined]
elements = mime_type_parser[blob.mimetype](file=word_document) # type: ignore[attr-defined] # type: ignore[operator] # type: ignore[operator] # type: ignore[operator] # type: ignore[operator] # type: ignore[operator] # type: ignore[operator]
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": blob.source} # type: ignore[attr-defined]
yield Document(page_content=text, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/pdf.py | """Module contains common parsers for PDFs."""
from __future__ import annotations
import warnings
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
Mapping,
Optional,
Sequence,
Union,
)
from urllib.parse import urlparse
import numpy as np
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
if TYPE_CHECKING:
import fitz.fitz
import pdfminer.layout
import pdfplumber.page
import pypdf._page
import pypdfium2._helpers.page
from pypdf import PageObject
from textractor.data.text_linearization_config import TextLinearizationConfig
_PDF_FILTER_WITH_LOSS = ["DCTDecode", "DCT", "JPXDecode"]
_PDF_FILTER_WITHOUT_LOSS = [
"LZWDecode",
"LZW",
"FlateDecode",
"Fl",
"ASCII85Decode",
"A85",
"ASCIIHexDecode",
"AHx",
"RunLengthDecode",
"RL",
"CCITTFaxDecode",
"CCF",
"JBIG2Decode",
]
def extract_from_images_with_rapidocr(
images: Sequence[Union[Iterable[np.ndarray], bytes]],
) -> str:
"""Extract text from images with RapidOCR.
Args:
images: Images to extract text from.
Returns:
Text extracted from images.
Raises:
ImportError: If `rapidocr-onnxruntime` package is not installed.
"""
try:
from rapidocr_onnxruntime import RapidOCR
except ImportError:
raise ImportError(
"`rapidocr-onnxruntime` package not found, please install it with "
"`pip install rapidocr-onnxruntime`"
)
ocr = RapidOCR()
text = ""
for img in images:
result, _ = ocr(img)
if result:
result = [text[1] for text in result]
text += "\n".join(result)
return text
class PyPDFParser(BaseBlobParser):
"""Load `PDF` using `pypdf`"""
def __init__(
self,
password: Optional[Union[str, bytes]] = None,
extract_images: bool = False,
*,
extraction_mode: str = "plain",
extraction_kwargs: Optional[Dict[str, Any]] = None,
):
self.password = password
self.extract_images = extract_images
self.extraction_mode = extraction_mode
self.extraction_kwargs = extraction_kwargs or {}
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
try:
import pypdf
except ImportError:
raise ImportError(
"`pypdf` package not found, please install it with "
"`pip install pypdf`"
)
def _extract_text_from_page(page: "PageObject") -> str:
"""
Extract text from image given the version of pypdf.
"""
if pypdf.__version__.startswith("3"):
return page.extract_text()
else:
return page.extract_text(
extraction_mode=self.extraction_mode, # type: ignore[arg-type]
**self.extraction_kwargs, # type: ignore[arg-type]
)
with blob.as_bytes_io() as pdf_file_obj: # type: ignore[attr-defined]
pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password)
yield from [
Document(
page_content=_extract_text_from_page(page=page)
+ self._extract_images_from_page(page),
metadata={"source": blob.source, "page": page_number}, # type: ignore[attr-defined]
)
for page_number, page in enumerate(pdf_reader.pages)
]
def _extract_images_from_page(self, page: pypdf._page.PageObject) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images or "/XObject" not in page["/Resources"].keys(): # type: ignore[attr-defined]
return ""
xObject = page["/Resources"]["/XObject"].get_object() # type: ignore
images = []
for obj in xObject:
if xObject[obj]["/Subtype"] == "/Image":
if xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITHOUT_LOSS:
height, width = xObject[obj]["/Height"], xObject[obj]["/Width"]
images.append(
np.frombuffer(xObject[obj].get_data(), dtype=np.uint8).reshape(
height, width, -1
)
)
elif xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITH_LOSS:
images.append(xObject[obj].get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class PDFMinerParser(BaseBlobParser):
"""Parse `PDF` using `PDFMiner`."""
def __init__(self, extract_images: bool = False, *, concatenate_pages: bool = True):
"""Initialize a parser based on PDFMiner.
Args:
extract_images: Whether to extract images from PDF.
concatenate_pages: If True, concatenate all PDF pages into one a single
document. Otherwise, return one document per page.
"""
self.extract_images = extract_images
self.concatenate_pages = concatenate_pages
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
if not self.extract_images:
try:
from pdfminer.high_level import extract_text
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
with blob.as_bytes_io() as pdf_file_obj: # type: ignore[attr-defined]
if self.concatenate_pages:
text = extract_text(pdf_file_obj)
metadata = {"source": blob.source} # type: ignore[attr-defined]
yield Document(page_content=text, metadata=metadata)
else:
from pdfminer.pdfpage import PDFPage
pages = PDFPage.get_pages(pdf_file_obj)
for i, _ in enumerate(pages):
text = extract_text(pdf_file_obj, page_numbers=[i])
metadata = {"source": blob.source, "page": str(i)} # type: ignore[attr-defined]
yield Document(page_content=text, metadata=metadata)
else:
import io
from pdfminer.converter import PDFPageAggregator, TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
text_io = io.StringIO()
with blob.as_bytes_io() as pdf_file_obj: # type: ignore[attr-defined]
pages = PDFPage.get_pages(pdf_file_obj)
rsrcmgr = PDFResourceManager()
device_for_text = TextConverter(rsrcmgr, text_io, laparams=LAParams())
device_for_image = PDFPageAggregator(rsrcmgr, laparams=LAParams())
interpreter_for_text = PDFPageInterpreter(rsrcmgr, device_for_text)
interpreter_for_image = PDFPageInterpreter(rsrcmgr, device_for_image)
for i, page in enumerate(pages):
interpreter_for_text.process_page(page)
interpreter_for_image.process_page(page)
content = text_io.getvalue() + self._extract_images_from_page(
device_for_image.get_result()
)
text_io.truncate(0)
text_io.seek(0)
metadata = {"source": blob.source, "page": str(i)} # type: ignore[attr-defined]
yield Document(page_content=content, metadata=metadata)
def _extract_images_from_page(self, page: pdfminer.layout.LTPage) -> str:
"""Extract images from page and get the text with RapidOCR."""
import pdfminer
def get_image(layout_object: Any) -> Any:
if isinstance(layout_object, pdfminer.layout.LTImage):
return layout_object
if isinstance(layout_object, pdfminer.layout.LTContainer):
for child in layout_object:
return get_image(child)
else:
return None
images = []
for img in list(filter(bool, map(get_image, page))):
if img.stream["Filter"].name in _PDF_FILTER_WITHOUT_LOSS:
images.append(
np.frombuffer(img.stream.get_data(), dtype=np.uint8).reshape(
img.stream["Height"], img.stream["Width"], -1
)
)
elif img.stream["Filter"].name in _PDF_FILTER_WITH_LOSS:
images.append(img.stream.get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class PyMuPDFParser(BaseBlobParser):
"""Parse `PDF` using `PyMuPDF`."""
def __init__(
self,
text_kwargs: Optional[Mapping[str, Any]] = None,
extract_images: bool = False,
) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``.
"""
self.text_kwargs = text_kwargs or {}
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path: # type: ignore[attr-defined]
if blob.data is None: # type: ignore[attr-defined]
doc = fitz.open(file_path)
else:
doc = fitz.open(stream=file_path, filetype="pdf")
yield from [
Document(
page_content=self._get_page_content(doc, page, blob),
metadata=self._extract_metadata(doc, page, blob),
)
for page in doc
]
def _get_page_content(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
) -> str:
"""
Get the text of the page using PyMuPDF and RapidOCR and issue a warning
if it is empty.
"""
content = page.get_text(**self.text_kwargs) + self._extract_images_from_page(
doc, page
)
if not content:
warnings.warn(
f"Warning: Empty content on page "
f"{page.number} of document {blob.source}"
)
return content
def _extract_metadata(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
) -> dict:
"""Extract metadata from the document and page."""
return dict(
{
"source": blob.source, # type: ignore[attr-defined]
"file_path": blob.source, # type: ignore[attr-defined]
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if isinstance(doc.metadata[k], (str, int))
},
)
def _extract_images_from_page(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page
) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
import fitz
img_list = page.get_images()
imgs = []
for img in img_list:
xref = img[0]
pix = fitz.Pixmap(doc, xref)
imgs.append(
np.frombuffer(pix.samples, dtype=np.uint8).reshape(
pix.height, pix.width, -1
)
)
return extract_from_images_with_rapidocr(imgs)
class PyPDFium2Parser(BaseBlobParser):
"""Parse `PDF` with `PyPDFium2`."""
def __init__(self, extract_images: bool = False) -> None:
"""Initialize the parser."""
try:
import pypdfium2 # noqa:F401
except ImportError:
raise ImportError(
"pypdfium2 package not found, please install it with"
" `pip install pypdfium2`"
)
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
import pypdfium2
# pypdfium2 is really finicky with respect to closing things,
# if done incorrectly creates seg faults.
with blob.as_bytes_io() as file_path: # type: ignore[attr-defined]
pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True)
try:
for page_number, page in enumerate(pdf_reader):
text_page = page.get_textpage()
content = text_page.get_text_range()
text_page.close()
content += "\n" + self._extract_images_from_page(page)
page.close()
metadata = {"source": blob.source, "page": page_number} # type: ignore[attr-defined]
yield Document(page_content=content, metadata=metadata)
finally:
pdf_reader.close()
def _extract_images_from_page(self, page: pypdfium2._helpers.page.PdfPage) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
import pypdfium2.raw as pdfium_c
images = list(page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,)))
images = list(map(lambda x: x.get_bitmap().to_numpy(), images))
return extract_from_images_with_rapidocr(images)
class PDFPlumberParser(BaseBlobParser):
"""Parse `PDF` with `PDFPlumber`."""
def __init__(
self,
text_kwargs: Optional[Mapping[str, Any]] = None,
dedupe: bool = False,
extract_images: bool = False,
) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
dedupe: Avoiding the error of duplicate characters if `dedupe=True`.
"""
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
import pdfplumber
with blob.as_bytes_io() as file_path: # type: ignore[attr-defined]
doc = pdfplumber.open(file_path) # open document
yield from [
Document(
page_content=self._process_page_content(page)
+ "\n"
+ self._extract_images_from_page(page),
metadata=dict(
{
"source": blob.source, # type: ignore[attr-defined]
"file_path": blob.source, # type: ignore[attr-defined]
"page": page.page_number - 1,
"total_pages": len(doc.pages),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc.pages
]
def _process_page_content(self, page: pdfplumber.page.Page) -> str:
"""Process the page content based on dedupe."""
if self.dedupe:
return page.dedupe_chars().extract_text(**self.text_kwargs)
return page.extract_text(**self.text_kwargs)
def _extract_images_from_page(self, page: pdfplumber.page.Page) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
images = []
for img in page.images:
if img["stream"]["Filter"].name in _PDF_FILTER_WITHOUT_LOSS:
images.append(
np.frombuffer(img["stream"].get_data(), dtype=np.uint8).reshape(
img["stream"]["Height"], img["stream"]["Width"], -1
)
)
elif img["stream"]["Filter"].name in _PDF_FILTER_WITH_LOSS:
images.append(img["stream"].get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class AmazonTextractPDFParser(BaseBlobParser):
"""Send `PDF` files to `Amazon Textract` and parse them.
For parsing multi-page PDFs, they have to reside on S3.
The AmazonTextractPDFLoader calls the
[Amazon Textract Service](https://aws.amazon.com/textract/)
to convert PDFs into a Document structure.
Single and multi-page documents are supported with up to 3000 pages
and 512 MB of size.
For the call to be successful an AWS account is required,
similar to the
[AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
requirements.
Besides the AWS configuration, it is very similar to the other PDF
loaders, while also supporting JPEG, PNG and TIFF and non-native
PDF formats.
```python
from langchain_community.document_loaders import AmazonTextractPDFLoader
loader=AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
documents = loader.load()
```
One feature is the linearization of the output.
When using the features LAYOUT, FORMS or TABLES together with Textract
```python
from langchain_community.document_loaders import AmazonTextractPDFLoader
# you can mix and match each of the features
loader=AmazonTextractPDFLoader(
"example_data/alejandro_rosalez_sample-small.jpeg",
textract_features=["TABLES", "LAYOUT"])
documents = loader.load()
```
it will generate output that formats the text in reading order and
try to output the information in a tabular structure or
output the key/value pairs with a colon (key: value).
This helps most LLMs to achieve better accuracy when
processing these texts.
"""
def __init__(
self,
textract_features: Optional[Sequence[int]] = None,
client: Optional[Any] = None,
*,
linearization_config: Optional["TextLinearizationConfig"] = None,
) -> None:
"""Initializes the parser.
Args:
textract_features: Features to be used for extraction, each feature
should be passed as an int that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client
linearization_config: Config to be used for linearization of the output
should be an instance of TextLinearizationConfig from
the `textractor` pkg
"""
try:
import textractcaller as tc
import textractor.entities.document as textractor
self.tc = tc
self.textractor = textractor
if textract_features is not None:
self.textract_features = [
tc.Textract_Features(f) for f in textract_features
]
else:
self.textract_features = []
if linearization_config is not None:
self.linearization_config = linearization_config
else:
self.linearization_config = self.textractor.TextLinearizationConfig(
hide_figure_layout=True,
title_prefix="# ",
section_header_prefix="## ",
list_element_prefix="*",
)
except ImportError:
raise ImportError(
"Could not import amazon-textract-caller or "
"amazon-textract-textractor python package. Please install it "
"with `pip install amazon-textract-caller` & "
"`pip install amazon-textract-textractor`."
)
if not client:
try:
import boto3
self.boto3_textract_client = boto3.client("textract")
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
else:
self.boto3_textract_client = client
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs
the blob.data is taken
"""
url_parse_result = urlparse(str(blob.path)) if blob.path else None # type: ignore[attr-defined]
# Either call with S3 path (multi-page) or with bytes (single-page)
if (
url_parse_result
and url_parse_result.scheme == "s3"
and url_parse_result.netloc
):
textract_response_json = self.tc.call_textract(
input_document=str(blob.path), # type: ignore[attr-defined]
features=self.textract_features,
boto3_textract_client=self.boto3_textract_client,
)
else:
textract_response_json = self.tc.call_textract(
input_document=blob.as_bytes(), # type: ignore[attr-defined]
features=self.textract_features,
call_mode=self.tc.Textract_Call_Mode.FORCE_SYNC,
boto3_textract_client=self.boto3_textract_client,
)
document = self.textractor.Document.open(textract_response_json)
for idx, page in enumerate(document.pages):
yield Document(
page_content=page.get_text(config=self.linearization_config),
metadata={"source": blob.source, "page": idx + 1}, # type: ignore[attr-defined]
)
class DocumentIntelligenceParser(BaseBlobParser):
"""Loads a PDF with Azure Document Intelligence
(formerly Form Recognizer) and chunks at character level."""
def __init__(self, client: Any, model: str):
warnings.warn(
"langchain_community.document_loaders.parsers.pdf.DocumentIntelligenceParser"
"and langchain_community.document_loaders.pdf.DocumentIntelligenceLoader"
" are deprecated. Please upgrade to "
"langchain_community.document_loaders.DocumentIntelligenceLoader "
"for any file parsing purpose using Azure Document Intelligence "
"service."
)
self.client = client
self.model = model
def _generate_docs(self, blob: Blob, result: Any) -> Iterator[Document]: # type: ignore[valid-type]
for p in result.pages:
content = " ".join([line.content for line in p.lines])
d = Document(
page_content=content,
metadata={
"source": blob.source, # type: ignore[attr-defined]
"page": p.page_number,
},
)
yield d
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
with blob.as_bytes_io() as file_obj: # type: ignore[attr-defined]
poller = self.client.begin_analyze_document(self.model, file_obj)
result = poller.result()
docs = self._generate_docs(blob, result)
yield from docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/lua.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_definition_statement
name: (identifier)) @function
(local_function_definition_statement
name: (identifier)) @function
]
""".strip()
class LuaSegmenter(TreeSitterSegmenter):
"""Code segmenter for Lua."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("lua")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"-- {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/php.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_definition) @function
(class_declaration) @class
(interface_declaration) @interface
(trait_declaration) @trait
(enum_declaration) @enum
(namespace_definition) @namespace
]
""".strip()
class PHPSegmenter(TreeSitterSegmenter):
"""Code segmenter for PHP."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("php")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py | from abc import abstractmethod
from typing import TYPE_CHECKING, List
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language, Parser
class TreeSitterSegmenter(CodeSegmenter):
"""Abstract class for `CodeSegmenter`s that use the tree-sitter library."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import tree_sitter # noqa: F401
import tree_sitter_languages # noqa: F401
except ImportError:
raise ImportError(
"Could not import tree_sitter/tree_sitter_languages Python packages. "
"Please install them with "
"`pip install tree-sitter tree-sitter-languages`."
)
def is_valid(self) -> bool:
language = self.get_language()
error_query = language.query("(ERROR) @error")
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
return len(error_query.captures(tree.root_node)) == 0
def extract_functions_classes(self) -> List[str]:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
captures = query.captures(tree.root_node)
processed_lines = set()
chunks = []
for node, name in captures:
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
processed_lines.update(lines)
chunk_text = node.text.decode("UTF-8")
chunks.append(chunk_text)
return chunks
def simplify_code(self) -> str:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
processed_lines = set()
simplified_lines = self.source_lines[:]
for node, name in query.captures(tree.root_node):
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
simplified_lines[start_line] = self.make_line_comment(
f"Code for: {self.source_lines[start_line]}"
)
for line_num in range(start_line + 1, end_line + 1):
simplified_lines[line_num] = None # type: ignore
processed_lines.update(lines)
return "\n".join(line for line in simplified_lines if line is not None)
def get_parser(self) -> "Parser":
from tree_sitter import Parser
parser = Parser()
parser.set_language(self.get_language())
return parser
@abstractmethod
def get_language(self) -> "Language":
raise NotImplementedError() # pragma: no cover
@abstractmethod
def get_chunk_query(self) -> str:
raise NotImplementedError() # pragma: no cover
@abstractmethod
def make_line_comment(self, text: str) -> str:
raise NotImplementedError() # pragma: no cover
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/elixir.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(call target: ((identifier) @_identifier
(#any-of? @_identifier "defmodule" "defprotocol" "defimpl"))) @module
(call target: ((identifier) @_identifier
(#any-of? @_identifier "def" "defmacro" "defmacrop" "defp"))) @function
(unary_operator operator: "@" operand: (call target: ((identifier) @_identifier
(#any-of? @_identifier "moduledoc" "typedoc""doc")))) @comment
]
""".strip()
class ElixirSegmenter(TreeSitterSegmenter):
"""Code segmenter for Elixir."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("elixir")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"# {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/cpp.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(class_specifier
body: (field_declaration_list)) @class
(struct_specifier
body: (field_declaration_list)) @struct
(union_specifier
body: (field_declaration_list)) @union
(function_definition) @function
]
""".strip()
class CPPSegmenter(TreeSitterSegmenter):
"""Code segmenter for C++."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("cpp")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/go.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_declaration) @function
(type_declaration) @type
]
""".strip()
class GoSegmenter(TreeSitterSegmenter):
"""Code segmenter for Go."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("go")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/perl.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_definition) @subroutine
]
""".strip()
class PerlSegmenter(TreeSitterSegmenter):
"""Code segmenter for Perl."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("perl")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"# {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/ruby.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(method) @method
(module) @module
(class) @class
]
""".strip()
class RubySegmenter(TreeSitterSegmenter):
"""Code segmenter for Ruby."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("ruby")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"# {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/scala.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(class_definition) @class
(function_definition) @function
(object_definition) @object
(trait_definition) @trait
]
""".strip()
class ScalaSegmenter(TreeSitterSegmenter):
"""Code segmenter for Scala."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("scala")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/python.py | import ast
from typing import Any, List, Tuple
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
class PythonSegmenter(CodeSegmenter):
"""Code segmenter for `Python`."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
def is_valid(self) -> bool:
try:
ast.parse(self.code)
return True
except SyntaxError:
return False
def _extract_code(self, node: Any) -> str:
start = node.lineno - 1
end = node.end_lineno
return "\n".join(self.source_lines[start:end])
def extract_functions_classes(self) -> List[str]:
tree = ast.parse(self.code)
functions_classes = []
for node in ast.iter_child_nodes(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
functions_classes.append(self._extract_code(node))
return functions_classes
def simplify_code(self) -> str:
tree = ast.parse(self.code)
simplified_lines = self.source_lines[:]
indices_to_del: List[Tuple[int, int]] = []
for node in ast.iter_child_nodes(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
start, end = node.lineno - 1, node.end_lineno
simplified_lines[start] = f"# Code for: {simplified_lines[start]}"
assert isinstance(end, int)
indices_to_del.append((start + 1, end))
for start, end in reversed(indices_to_del):
del simplified_lines[start + 0 : end]
return "\n".join(simplified_lines)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/typescript.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_declaration) @function
(class_declaration) @class
(interface_declaration) @interface
(enum_declaration) @enum
]
""".strip()
class TypeScriptSegmenter(TreeSitterSegmenter):
"""Code segmenter for TypeScript."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("typescript")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/code_segmenter.py | from abc import ABC, abstractmethod
from typing import List
class CodeSegmenter(ABC):
"""Abstract class for the code segmenter."""
def __init__(self, code: str):
self.code = code
def is_valid(self) -> bool:
return True
@abstractmethod
def simplify_code(self) -> str:
raise NotImplementedError() # pragma: no cover
@abstractmethod
def extract_functions_classes(self) -> List[str]:
raise NotImplementedError() # pragma: no cover
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/c.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(struct_specifier
body: (field_declaration_list)) @struct
(enum_specifier
body: (enumerator_list)) @enum
(union_specifier
body: (field_declaration_list)) @union
(function_definition) @function
]
""".strip()
class CSegmenter(TreeSitterSegmenter):
"""Code segmenter for C."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("c")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/rust.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_item
name: (identifier)
body: (block)) @function
(struct_item) @struct
(trait_item) @trait
]
""".strip()
class RustSegmenter(TreeSitterSegmenter):
"""Code segmenter for Rust."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("rust")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/kotlin.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(function_declaration) @function
(class_declaration) @class
]
""".strip()
class KotlinSegmenter(TreeSitterSegmenter):
"""Code segmenter for Kotlin."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("kotlin")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/java.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(class_declaration) @class
(interface_declaration) @interface
(enum_declaration) @enum
]
""".strip()
class JavaSegmenter(TreeSitterSegmenter):
"""Code segmenter for Java."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("java")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/language_parser.py | from __future__ import annotations
from typing import Any, Dict, Iterator, Literal, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.language.c import CSegmenter
from langchain_community.document_loaders.parsers.language.cobol import CobolSegmenter
from langchain_community.document_loaders.parsers.language.cpp import CPPSegmenter
from langchain_community.document_loaders.parsers.language.csharp import CSharpSegmenter
from langchain_community.document_loaders.parsers.language.elixir import ElixirSegmenter
from langchain_community.document_loaders.parsers.language.go import GoSegmenter
from langchain_community.document_loaders.parsers.language.java import JavaSegmenter
from langchain_community.document_loaders.parsers.language.javascript import (
JavaScriptSegmenter,
)
from langchain_community.document_loaders.parsers.language.kotlin import KotlinSegmenter
from langchain_community.document_loaders.parsers.language.lua import LuaSegmenter
from langchain_community.document_loaders.parsers.language.perl import PerlSegmenter
from langchain_community.document_loaders.parsers.language.php import PHPSegmenter
from langchain_community.document_loaders.parsers.language.python import PythonSegmenter
from langchain_community.document_loaders.parsers.language.ruby import RubySegmenter
from langchain_community.document_loaders.parsers.language.rust import RustSegmenter
from langchain_community.document_loaders.parsers.language.scala import ScalaSegmenter
from langchain_community.document_loaders.parsers.language.typescript import (
TypeScriptSegmenter,
)
LANGUAGE_EXTENSIONS: Dict[str, str] = {
"py": "python",
"js": "js",
"cobol": "cobol",
"c": "c",
"cpp": "cpp",
"cs": "csharp",
"rb": "ruby",
"scala": "scala",
"rs": "rust",
"go": "go",
"kt": "kotlin",
"lua": "lua",
"pl": "perl",
"ts": "ts",
"java": "java",
"php": "php",
"ex": "elixir",
"exs": "elixir",
}
LANGUAGE_SEGMENTERS: Dict[str, Any] = {
"python": PythonSegmenter,
"js": JavaScriptSegmenter,
"cobol": CobolSegmenter,
"c": CSegmenter,
"cpp": CPPSegmenter,
"csharp": CSharpSegmenter,
"ruby": RubySegmenter,
"rust": RustSegmenter,
"scala": ScalaSegmenter,
"go": GoSegmenter,
"kotlin": KotlinSegmenter,
"lua": LuaSegmenter,
"perl": PerlSegmenter,
"ts": TypeScriptSegmenter,
"java": JavaSegmenter,
"php": PHPSegmenter,
"elixir": ElixirSegmenter,
}
Language = Literal[
"cpp",
"go",
"java",
"kotlin",
"js",
"ts",
"php",
"proto",
"python",
"rst",
"ruby",
"rust",
"scala",
"swift",
"markdown",
"latex",
"html",
"sol",
"csharp",
"cobol",
"c",
"lua",
"perl",
"elixir",
]
class LanguageParser(BaseBlobParser):
"""Parse using the respective programming language syntax.
Each top-level function and class in the code is loaded into separate documents.
Furthermore, an extra document is generated, containing the remaining top-level code
that excludes the already segmented functions and classes.
This approach can potentially improve the accuracy of QA models over source code.
The supported languages for code parsing are:
- C: "c" (*)
- C++: "cpp" (*)
- C#: "csharp" (*)
- COBOL: "cobol"
- Elixir: "elixir"
- Go: "go" (*)
- Java: "java" (*)
- JavaScript: "js" (requires package `esprima`)
- Kotlin: "kotlin" (*)
- Lua: "lua" (*)
- Perl: "perl" (*)
- Python: "python"
- Ruby: "ruby" (*)
- Rust: "rust" (*)
- Scala: "scala" (*)
- TypeScript: "ts" (*)
Items marked with (*) require the packages `tree_sitter` and
`tree_sitter_languages`. It is straightforward to add support for additional
languages using `tree_sitter`, although this currently requires modifying LangChain.
The language used for parsing can be configured, along with the minimum number of
lines required to activate the splitting based on syntax.
If a language is not explicitly specified, `LanguageParser` will infer one from
filename extensions, if present.
Examples:
.. code-block:: python
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py", ".js"],
parser=LanguageParser()
)
docs = loader.load()
Example instantiations to manually select the language:
.. code-block:: python
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py"],
parser=LanguageParser(language="python")
)
Example instantiations to set number of lines threshold:
.. code-block:: python
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py"],
parser=LanguageParser(parser_threshold=200)
)
"""
def __init__(self, language: Optional[Language] = None, parser_threshold: int = 0):
"""
Language parser that split code using the respective language syntax.
Args:
language: If None (default), it will try to infer language from source.
parser_threshold: Minimum lines needed to activate parsing (0 by default).
"""
if language and language not in LANGUAGE_SEGMENTERS:
raise Exception(f"No parser available for {language}")
self.language = language
self.parser_threshold = parser_threshold
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
code = blob.as_string()
language = self.language or (
LANGUAGE_EXTENSIONS.get(blob.source.rsplit(".", 1)[-1])
if isinstance(blob.source, str)
else None
)
if language is None:
yield Document(
page_content=code,
metadata={
"source": blob.source,
},
)
return
if self.parser_threshold >= len(code.splitlines()):
yield Document(
page_content=code,
metadata={
"source": blob.source,
"language": language,
},
)
return
self.Segmenter = LANGUAGE_SEGMENTERS[language]
segmenter = self.Segmenter(blob.as_string())
if not segmenter.is_valid():
yield Document(
page_content=code,
metadata={
"source": blob.source,
},
)
return
for functions_classes in segmenter.extract_functions_classes():
yield Document(
page_content=functions_classes,
metadata={
"source": blob.source,
"content_type": "functions_classes",
"language": language,
},
)
yield Document(
page_content=segmenter.simplify_code(),
metadata={
"source": blob.source,
"content_type": "simplified_code",
"language": language,
},
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/__init__.py | from langchain_community.document_loaders.parsers.language.language_parser import (
LanguageParser,
)
__all__ = ["LanguageParser"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/javascript.py | from typing import Any, List, Tuple
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
class JavaScriptSegmenter(CodeSegmenter):
"""Code segmenter for JavaScript."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import esprima # noqa: F401
except ImportError:
raise ImportError(
"Could not import esprima Python package. "
"Please install it with `pip install esprima`."
)
def is_valid(self) -> bool:
import esprima
try:
esprima.parseScript(self.code)
return True
except esprima.Error:
return False
def _extract_code(self, node: Any) -> str:
start = node.loc.start.line - 1
end = node.loc.end.line
return "\n".join(self.source_lines[start:end])
def extract_functions_classes(self) -> List[str]:
import esprima
tree = esprima.parseScript(self.code, loc=True)
functions_classes = []
for node in tree.body:
if isinstance(
node,
(esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration),
):
functions_classes.append(self._extract_code(node))
return functions_classes
def simplify_code(self) -> str:
import esprima
tree = esprima.parseScript(self.code, loc=True)
simplified_lines = self.source_lines[:]
indices_to_del: List[Tuple[int, int]] = []
for node in tree.body:
if isinstance(
node,
(esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration),
):
start, end = node.loc.start.line - 1, node.loc.end.line
simplified_lines[start] = f"// Code for: {simplified_lines[start]}"
indices_to_del.append((start + 1, end))
for start, end in reversed(indices_to_del):
del simplified_lines[start + 0 : end]
return "\n".join(line for line in simplified_lines)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/csharp.py | from typing import TYPE_CHECKING
from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import ( # noqa: E501
TreeSitterSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language
CHUNK_QUERY = """
[
(namespace_declaration) @namespace
(class_declaration) @class
(method_declaration) @method
(interface_declaration) @interface
(enum_declaration) @enum
(struct_declaration) @struct
(record_declaration) @record
]
""".strip()
class CSharpSegmenter(TreeSitterSegmenter):
"""Code segmenter for C#."""
def get_language(self) -> "Language":
from tree_sitter_languages import get_language
return get_language("c_sharp")
def get_chunk_query(self) -> str:
return CHUNK_QUERY
def make_line_comment(self, text: str) -> str:
return f"// {text}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/language/cobol.py | import re
from typing import Callable, List, Pattern
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN: Pattern = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN: Pattern = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN: Pattern = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/html/__init__.py | from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
__all__ = ["BS4HTMLParser"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/parsers/html/bs4.py | """Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Any, Dict, Iterator, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class BS4HTMLParser(BaseBlobParser):
"""Parse HTML files using `Beautiful Soup`."""
def __init__(
self,
*,
features: str = "lxml",
get_text_separator: str = "",
**kwargs: Any,
) -> None:
"""Initialize a bs4 based HTML parser."""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.bs_kwargs = {"features": features, **kwargs}
self.get_text_separator = get_text_separator
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with blob.as_bytes_io() as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": blob.source,
"title": title,
}
yield Document(page_content=text, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/redis.py | import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
from langchain_community.utilities.redis import get_client
logger = logging.getLogger(__name__)
class RedisChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Redis database.
Setup:
Install ``redis`` python package.
.. code-block:: bash
pip install redis
Instantiate:
.. code-block:: python
from langchain_community.chat_message_histories import RedisChatMessageHistory
history = RedisChatMessageHistory(
session_id = "your-session-id",
url="redis://your-host:your-port:your-database", # redis://localhost:6379/0
)
Add and retrieve messages:
.. code-block:: python
# Add single message
history.add_message(message)
# Add batch messages
history.add_messages([message1, message2, message3, ...])
# Add human message
history.add_user_message(human_message)
# Add ai message
history.add_ai_message(ai_message)
# Retrieve messages
messages = history.messages
""" # noqa: E501
def __init__(
self,
session_id: str,
url: str = "redis://localhost:6379/0",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
"""Initialize with a RedisChatMessageHistory instance.
Args:
session_id: str
The ID for single chat session. Used to form keys with `key_prefix`.
url: Optional[str]
String parameter configuration for connecting to the redis.
key_prefix: Optional[str]
The prefix of the key, combined with `session id` to form the key.
ttl: Optional[int]
Set the expiration time of `key`, the unit is seconds.
"""
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
self.redis_client = get_client(redis_url=url)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve the messages from Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m.decode("utf-8")) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
@messages.setter
def messages(self, messages: List[BaseMessage]) -> None:
raise NotImplementedError(
"Direct assignment to 'messages' is not allowed."
" Use the 'add_messages' instead."
)
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
def clear(self) -> None:
"""Clear session memory from Redis"""
self.redis_client.delete(self.key)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/kafka.py | """Kafka-based chat message history by using confluent-kafka-python.
confluent-kafka-python is under Apache 2.0 license.
https://github.com/confluentinc/confluent-kafka-python
"""
from __future__ import annotations
import json
import logging
import time
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Sequence
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage, message_to_dict, messages_from_dict
if TYPE_CHECKING:
from confluent_kafka import TopicPartition
from confluent_kafka.admin import AdminClient
logger = logging.getLogger(__name__)
BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"
DEFAULT_TTL_MS = 604800000 # 7 days
DEFAULT_REPLICATION_FACTOR = 1
DEFAULT_PARTITION = 3
class ConsumeStartPosition(Enum):
"""Consume start position for Kafka consumer to get chat history messages.
LAST_CONSUMED: Continue from the last consumed offset.
EARLIEST: Start consuming from the beginning.
LATEST: Start consuming from the latest offset.
"""
LAST_CONSUMED = 1
EARLIEST = 2
LATEST = 3
def ensure_topic_exists(
admin_client: AdminClient,
topic_name: str,
replication_factor: int,
partition: int,
ttl_ms: int,
) -> int:
"""Create topic if it doesn't exist, and return the number of partitions.
If the topic already exists, we don't change the topic configuration.
"""
from confluent_kafka.admin import NewTopic
try:
topic_metadata = admin_client.list_topics().topics
if topic_name in topic_metadata:
num_partitions = len(topic_metadata[topic_name].partitions)
logger.info(
f"Topic {topic_name} already exists with {num_partitions} partitions"
)
return num_partitions
except Exception as e:
logger.error(f"Failed to list topics: {e}")
raise e
topics = [
NewTopic(
topic_name,
num_partitions=partition,
replication_factor=replication_factor,
config={"retention.ms": str(ttl_ms)},
)
]
try:
futures = admin_client.create_topics(topics)
for _, f in futures.items():
f.result() # result is None
logger.info(f"Topic {topic_name} created")
except Exception as e:
logger.error(f"Failed to create topic {topic_name}: {e}")
raise e
return partition
class KafkaChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in Kafka.
Setup:
Install ``confluent-kafka-python``.
.. code-block:: bash
pip install confluent_kafka
Instantiate:
.. code-block:: python
from langchain_community.chat_message_histories import KafkaChatMessageHistory
history = KafkaChatMessageHistory(
session_id="your_session_id",
bootstrap_servers="host:port",
)
Add and retrieve messages:
.. code-block:: python
# Add messages
history.add_messages([message1, message2, message3, ...])
# Retrieve messages
message_batch_0 = history.messages
# retrieve messages after message_batch_0
message_batch_1 = history.messages
# Reset to beginning and retrieve messages
messages_from_beginning = history.messages_from_beginning()
Retrieving messages is stateful. Internally, it uses Kafka consumer to read.
The consumed offset is maintained persistently.
To retrieve messages, you can use the following methods:
- `messages`:
continue consuming chat messages from last one.
- `messages_from_beginning`:
reset the consumer to the beginning of the chat history and return messages.
Optional parameters:
1. `max_message_count`: maximum number of messages to return.
2. `max_time_sec`: maximum time in seconds to wait for messages.
- `messages_from_latest`:
reset to end of the chat history and try consuming messages.
Optional parameters same as above.
- `messages_from_last_consumed`:
continuing from the last consumed message, similar to `messages`.
Optional parameters same as above.
`max_message_count` and `max_time_sec` are used to avoid blocking indefinitely
when retrieving messages. As a result, the method to retrieve messages may not
return all messages. Change `max_message_count` and `max_time_sec` to retrieve
all history messages.
""" # noqa: E501
def __init__(
self,
session_id: str,
bootstrap_servers: str,
ttl_ms: int = DEFAULT_TTL_MS,
replication_factor: int = DEFAULT_REPLICATION_FACTOR,
partition: int = DEFAULT_PARTITION,
):
"""
Args:
session_id: The ID for single chat session. It is used as Kafka topic name.
bootstrap_servers:
Comma-separated host/port pairs to establish connection to Kafka cluster
https://kafka.apache.org/documentation.html#adminclientconfigs_bootstrap.servers
ttl_ms:
Time-to-live (milliseconds) for automatic expiration of entries.
Default 7 days. -1 for no expiration.
It translates to https://kafka.apache.org/documentation.html#topicconfigs_retention.ms
replication_factor: The replication factor for the topic. Default 1.
partition: The number of partitions for the topic. Default 3.
"""
try:
from confluent_kafka import Producer
from confluent_kafka.admin import AdminClient
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import confluent_kafka package. "
"Please install it with `pip install confluent_kafka`."
)
self.session_id = session_id
self.bootstrap_servers = bootstrap_servers
self.admin_client = AdminClient({BOOTSTRAP_SERVERS_CONFIG: bootstrap_servers})
self.num_partitions = ensure_topic_exists(
self.admin_client, session_id, replication_factor, partition, ttl_ms
)
self.producer = Producer({BOOTSTRAP_SERVERS_CONFIG: bootstrap_servers})
def add_messages(
self,
messages: Sequence[BaseMessage],
flush_timeout_seconds: float = 5.0,
) -> None:
"""Add messages to the chat history by producing to the Kafka topic."""
try:
for message in messages:
self.producer.produce(
topic=self.session_id,
value=json.dumps(message_to_dict(message)),
)
message_remaining = self.producer.flush(flush_timeout_seconds)
if message_remaining > 0:
logger.warning(f"{message_remaining} messages are still in-flight.")
except Exception as e:
logger.error(f"Failed to add messages to Kafka: {e}")
raise e
def __read_messages(
self,
consume_start_pos: ConsumeStartPosition,
max_message_count: Optional[int],
max_time_sec: Optional[float],
) -> List[BaseMessage]:
"""Retrieve messages from Kafka topic for the session.
Please note this method is stateful. Internally, it uses Kafka consumer
to consume messages, and maintains the consumed offset.
Args:
consume_start_pos: Start position for Kafka consumer.
max_message_count: Maximum number of messages to consume.
max_time_sec: Time limit in seconds to consume messages.
Returns:
List of messages.
"""
from confluent_kafka import OFFSET_BEGINNING, OFFSET_END, Consumer
consumer_config = {
BOOTSTRAP_SERVERS_CONFIG: self.bootstrap_servers,
"group.id": self.session_id,
"auto.offset.reset": "latest"
if consume_start_pos == ConsumeStartPosition.LATEST
else "earliest",
}
def assign_beginning(
assigned_consumer: Consumer, assigned_partitions: list[TopicPartition]
) -> None:
for p in assigned_partitions:
p.offset = OFFSET_BEGINNING
assigned_consumer.assign(assigned_partitions)
def assign_latest(
assigned_consumer: Consumer, assigned_partitions: list[TopicPartition]
) -> None:
for p in assigned_partitions:
p.offset = OFFSET_END
assigned_consumer.assign(assigned_partitions)
messages: List[dict] = []
consumer = Consumer(consumer_config)
try:
if consume_start_pos == ConsumeStartPosition.EARLIEST:
consumer.subscribe([self.session_id], on_assign=assign_beginning)
elif consume_start_pos == ConsumeStartPosition.LATEST:
consumer.subscribe([self.session_id], on_assign=assign_latest)
else:
consumer.subscribe([self.session_id])
start_time_sec = time.time()
while True:
if (
max_time_sec is not None
and time.time() - start_time_sec > max_time_sec
):
break
if max_message_count is not None and len(messages) >= max_message_count:
break
message = consumer.poll(timeout=1.0)
if message is None: # poll timeout
continue
if message.error() is not None: # error
logger.error(f"Consumer error: {message.error()}")
continue
if message.value() is None: # empty value
logger.warning("Empty message value")
continue
messages.append(json.loads(message.value()))
except Exception as e:
logger.error(f"Failed to consume messages from Kafka: {e}")
raise e
finally:
consumer.close()
return messages_from_dict(messages)
def messages_from_beginning(
self, max_message_count: Optional[int] = 5, max_time_sec: Optional[float] = 5.0
) -> List[BaseMessage]:
"""Retrieve messages from Kafka topic from the beginning.
This method resets the consumer to the beginning and consumes messages.
Args:
max_message_count: Maximum number of messages to consume.
max_time_sec: Time limit in seconds to consume messages.
Returns:
List of messages.
"""
return self.__read_messages(
consume_start_pos=ConsumeStartPosition.EARLIEST,
max_message_count=max_message_count,
max_time_sec=max_time_sec,
)
def messages_from_latest(
self, max_message_count: Optional[int] = 5, max_time_sec: Optional[float] = 5.0
) -> List[BaseMessage]:
"""Reset to the end offset. Try to consume messages if available.
Args:
max_message_count: Maximum number of messages to consume.
max_time_sec: Time limit in seconds to consume messages.
Returns:
List of messages.
"""
return self.__read_messages(
consume_start_pos=ConsumeStartPosition.LATEST,
max_message_count=max_message_count,
max_time_sec=max_time_sec,
)
def messages_from_last_consumed(
self, max_message_count: Optional[int] = 5, max_time_sec: Optional[float] = 5.0
) -> List[BaseMessage]:
"""Retrieve messages from Kafka topic from the last consumed message.
Please note this method is stateful. Internally, it uses Kafka consumer
to consume messages, and maintains the commit offset.
Args:
max_message_count: Maximum number of messages to consume.
max_time_sec: Time limit in seconds to consume messages.
Returns:
List of messages.
"""
return self.__read_messages(
consume_start_pos=ConsumeStartPosition.LAST_CONSUMED,
max_message_count=max_message_count,
max_time_sec=max_time_sec,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""
Retrieve the messages for the session, from Kafka topic continuously
from last consumed message. This method is stateful and maintains
consumed(committed) offset based on consumer group.
Alternatively, use messages_from_last_consumed() with specified parameters.
Use messages_from_beginning() to read from the earliest message.
Use messages_from_latest() to read from the latest message.
"""
return self.messages_from_last_consumed()
def clear(self) -> None:
"""Clear the chat history by deleting the Kafka topic."""
try:
futures = self.admin_client.delete_topics([self.session_id])
for _, f in futures.items():
f.result() # result is None
logger.info(f"Topic {self.session_id} deleted")
except Exception as e:
logger.error(f"Failed to delete topic {self.session_id}: {e}")
raise e
def close(self) -> None:
"""Release the resources.
Nothing to be released at this moment.
"""
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/file.py | import json
from pathlib import Path
from typing import List, Optional
from langchain_core.chat_history import (
BaseChatMessageHistory,
)
from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict
class FileChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in a local file."""
def __init__(
self,
file_path: str,
*,
encoding: Optional[str] = None,
ensure_ascii: bool = True,
) -> None:
"""Initialize the file path for the chat history.
Args:
file_path: The path to the local file to store the chat history.
encoding: The encoding to use for file operations. Defaults to None.
ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
"""
self.file_path = Path(file_path)
self.encoding = encoding
self.ensure_ascii = ensure_ascii
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text(encoding=self.encoding))
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(
json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
def clear(self) -> None:
"""Clear session memory from the local file"""
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/firestore.py | """Firestore Chat Message History."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from google.cloud.firestore import Client, DocumentReference
def _get_firestore_client() -> Client:
try:
import firebase_admin
from firebase_admin import firestore
except ImportError:
raise ImportError(
"Could not import firebase-admin python package. "
"Please install it with `pip install firebase-admin`."
)
# For multiple instances, only initialize the app once.
try:
firebase_admin.get_app()
except ValueError as e:
logger.debug("Initializing Firebase app: %s", e)
firebase_admin.initialize_app()
return firestore.client()
class FirestoreChatMessageHistory(BaseChatMessageHistory):
"""Chat message history backed by Google Firestore."""
def __init__(
self,
collection_name: str,
session_id: str,
user_id: str,
firestore_client: Optional[Client] = None,
):
"""
Initialize a new instance of the FirestoreChatMessageHistory class.
:param collection_name: The name of the collection to use.
:param session_id: The session ID for the chat..
:param user_id: The user ID for the chat.
"""
self.collection_name = collection_name
self.session_id = session_id
self.user_id = user_id
self._document: Optional[DocumentReference] = None
self.messages: List[BaseMessage] = []
self.firestore_client = firestore_client or _get_firestore_client()
self.prepare_firestore()
def prepare_firestore(self) -> None:
"""Prepare the Firestore client.
Use this function to make sure your database is ready.
"""
self._document = self.firestore_client.collection(
self.collection_name
).document(self.session_id)
self.load_messages()
def load_messages(self) -> None:
"""Retrieve the messages from Firestore"""
if not self._document:
raise ValueError("Document not initialized")
doc = self._document.get()
if doc.exists:
data = doc.to_dict()
if "messages" in data and len(data["messages"]) > 0:
self.messages = messages_from_dict(data["messages"])
def add_message(self, message: BaseMessage) -> None:
self.messages.append(message)
self.upsert_messages()
def upsert_messages(self, new_message: Optional[BaseMessage] = None) -> None:
"""Update the Firestore document."""
if not self._document:
raise ValueError("Document not initialized")
self._document.set(
{
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
def clear(self) -> None:
"""Clear session memory from this memory and Firestore."""
self.messages = []
if self._document:
self._document.delete()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/cassandra.py | """Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Session
from cassio.table.table_types import RowType
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]:
message_blobs = [row["body_blob"] for row in rows][::-1]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that is backed by Cassandra."""
def __init__(
self,
session_id: str,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
*,
setup_mode: SetupMode = SetupMode.SYNC,
) -> None:
"""
Initialize a new instance of CassandraChatMessageHistory.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space. If not provided, it is resolved from cassio.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF).
"""
try:
from cassio.table import ClusteredCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
kwargs: Dict[str, Any] = {}
if setup_mode == SetupMode.ASYNC:
kwargs["async_setup"] = True
self.table = ClusteredCassandraTable(
session=session,
keyspace=keyspace,
table=table_name,
ttl_seconds=ttl_seconds,
primary_key_type=["TEXT", "TIMEUUID"],
ordering_in_partition="DESC",
skip_provisioning=setup_mode == SetupMode.OFF,
**kwargs,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = self.table.get_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = await self.table.aget_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table
Args:
message: A message to write.
"""
this_row_id = uuid.uuid1()
self.table.put(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
for message in messages:
this_row_id = uuid.uuid1()
await self.table.aput(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.table.delete_partition(self.session_id)
async def aclear(self) -> None:
"""Clear session memory from DB"""
await self.table.adelete_partition(self.session_id)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/cosmos_db.py | """Azure CosmosDB Memory History."""
from __future__ import annotations
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.cosmos import ContainerProxy
class CosmosDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history backed by Azure CosmosDB."""
def __init__(
self,
cosmos_endpoint: str,
cosmos_database: str,
cosmos_container: str,
session_id: str,
user_id: str,
credential: Any = None,
connection_string: Optional[str] = None,
ttl: Optional[int] = None,
cosmos_client_kwargs: Optional[dict] = None,
):
"""
Initializes a new instance of the CosmosDBChatMessageHistory class.
Make sure to call prepare_cosmos or use the context manager to make
sure your database is ready.
Either a credential or a connection string must be provided.
:param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
:param cosmos_database: The name of the database to use.
:param cosmos_container: The name of the container to use.
:param session_id: The session ID to use, can be overwritten while loading.
:param user_id: The user ID to use, can be overwritten while loading.
:param credential: The credential to use to authenticate to Azure Cosmos DB.
:param connection_string: The connection string to use to authenticate.
:param ttl: The time to live (in seconds) to use for documents in the container.
:param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
"""
self.cosmos_endpoint = cosmos_endpoint
self.cosmos_database = cosmos_database
self.cosmos_container = cosmos_container
self.credential = credential
self.conn_string = connection_string
self.session_id = session_id
self.user_id = user_id
self.ttl = ttl
self.messages: List[BaseMessage] = []
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel
CosmosClient,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
if self.credential:
self._client = CosmosClient(
url=self.cosmos_endpoint,
credential=self.credential,
**cosmos_client_kwargs or {},
)
elif self.conn_string:
self._client = CosmosClient.from_connection_string(
conn_str=self.conn_string,
**cosmos_client_kwargs or {},
)
else:
raise ValueError("Either a connection string or a credential must be set.")
self._container: Optional[ContainerProxy] = None
def prepare_cosmos(self) -> None:
"""Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
"""
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel
PartitionKey,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
database = self._client.create_database_if_not_exists(self.cosmos_database)
self._container = database.create_container_if_not_exists(
self.cosmos_container,
partition_key=PartitionKey("/user_id"),
default_ttl=self.ttl,
)
self.load_messages()
def __enter__(self) -> "CosmosDBChatMessageHistory":
"""Context manager entry point."""
self._client.__enter__()
self.prepare_cosmos()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
def load_messages(self) -> None:
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError("Container not initialized")
try:
from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel
CosmosHttpResponseError,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
try:
item = self._container.read_item(
item=self.session_id, partition_key=self.user_id
)
except CosmosHttpResponseError:
logger.info("no session found")
return
if "messages" in item and len(item["messages"]) > 0:
self.messages = messages_from_dict(item["messages"])
def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
def upsert_messages(self) -> None:
"""Update the cosmosdb item."""
if not self._container:
raise ValueError("Container not initialized")
self._container.upsert_item(
body={
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
def clear(self) -> None:
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(
item=self.session_id, partition_key=self.user_id
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/in_memory.py | from langchain_core.chat_history import InMemoryChatMessageHistory as ChatMessageHistory
__all__ = [
"ChatMessageHistory",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/tidb.py | import json
import logging
from datetime import datetime
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage, message_to_dict, messages_from_dict
from sqlalchemy import create_engine, text
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
logger = logging.getLogger(__name__)
class TiDBChatMessageHistory(BaseChatMessageHistory):
"""
Represents a chat message history stored in a TiDB database.
"""
def __init__(
self,
session_id: str,
connection_string: str,
table_name: str = "langchain_message_store",
earliest_time: Optional[datetime] = None,
):
"""
Initializes a new instance of the TiDBChatMessageHistory class.
Args:
session_id (str): The ID of the chat session.
connection_string (str): The connection string for the TiDB database.
format: mysql+pymysql://<host>:<PASSWORD>@<host>:4000/<db>?ssl_ca=/etc/ssl/cert.pem&ssl_verify_cert=true&ssl_verify_identity=true
table_name (str, optional): the table name to store the chat messages.
Defaults to "langchain_message_store".
earliest_time (Optional[datetime], optional): The earliest time to retrieve messages from.
Defaults to None.
""" # noqa
self.session_id = session_id
self.table_name = table_name
self.earliest_time = earliest_time
self.cache: List = []
# Set up SQLAlchemy engine and session
self.engine = create_engine(connection_string)
Session = sessionmaker(bind=self.engine)
self.session = Session()
self._create_table_if_not_exists()
self._load_messages_to_cache()
def _create_table_if_not_exists(self) -> None:
"""
Creates a table if it does not already exist in the database.
"""
create_table_query = text(
f"""
CREATE TABLE IF NOT EXISTS {self.table_name} (
id INT AUTO_INCREMENT PRIMARY KEY,
session_id VARCHAR(255) NOT NULL,
message JSON NOT NULL,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
INDEX session_idx (session_id)
);"""
)
try:
self.session.execute(create_table_query)
self.session.commit()
except SQLAlchemyError as e:
logger.error(f"Error creating table: {e}")
self.session.rollback()
def _load_messages_to_cache(self) -> None:
"""
Loads messages from the database into the cache.
This method retrieves messages from the database table. The retrieved messages
are then stored in the cache for faster access.
Raises:
SQLAlchemyError: If there is an error executing the database query.
"""
time_condition = (
f"AND create_time >= '{self.earliest_time}'" if self.earliest_time else ""
)
query = text(
f"""
SELECT message FROM {self.table_name}
WHERE session_id = :session_id {time_condition}
ORDER BY id;
"""
)
try:
result = self.session.execute(query, {"session_id": self.session_id})
for record in result.fetchall():
message_dict = json.loads(record[0])
self.cache.append(messages_from_dict([message_dict])[0])
except SQLAlchemyError as e:
logger.error(f"Error loading messages to cache: {e}")
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""returns all messages"""
if len(self.cache) == 0:
self.reload_cache()
return self.cache
def add_message(self, message: BaseMessage) -> None:
"""adds a message to the database and cache"""
query = text(
f"INSERT INTO {self.table_name} (session_id, message) VALUES (:session_id, :message);" # noqa
)
try:
self.session.execute(
query,
{
"session_id": self.session_id,
"message": json.dumps(message_to_dict(message)),
},
)
self.session.commit()
self.cache.append(message)
except SQLAlchemyError as e:
logger.error(f"Error adding message: {e}")
self.session.rollback()
def clear(self) -> None:
"""clears all messages"""
query = text(f"DELETE FROM {self.table_name} WHERE session_id = :session_id;")
try:
self.session.execute(query, {"session_id": self.session_id})
self.session.commit()
self.cache.clear()
except SQLAlchemyError as e:
logger.error(f"Error clearing messages: {e}")
self.session.rollback()
def reload_cache(self) -> None:
"""reloads messages from database to cache"""
self.cache.clear()
self._load_messages_to_cache()
def __del__(self) -> None:
"""closes the session"""
self.session.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/upstash_redis.py | import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class UpstashRedisChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in an Upstash Redis database."""
def __init__(
self,
session_id: str,
url: str = "",
token: str = "",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash redis python package. "
"Please install it with `pip install upstash_redis`."
)
if url == "" or token == "":
raise ValueError(
"UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN are needed."
)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Upstash Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Upstash Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
def clear(self) -> None:
"""Clear session memory from Upstash Redis"""
self.redis_client.delete(self.key)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/dynamodb.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
messages_to_dict,
)
if TYPE_CHECKING:
from boto3.session import Session
logger = logging.getLogger(__name__)
class DynamoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table exists with name `table_name`
Args:
table_name: name of the DynamoDB table
session_id: arbitrary key that is used to store the messages
of a single chat session.
endpoint_url: URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don't have to
worry about setting the endpoint_url.
primary_key_name: name of the primary key of the DynamoDB table. This argument
is optional, defaulting to "SessionId".
key: an optional dictionary with a custom primary and secondary key.
This argument is optional, but useful when using composite dynamodb keys, or
isolating records based off of application details such as a user id.
This may also contain global and local secondary index keys.
kms_key_id: an optional AWS KMS Key ID, AWS KMS Key ARN, or AWS KMS Alias for
client-side encryption
ttl: Optional Time-to-live (TTL) in seconds. Allows you to define a per-item
expiration timestamp that indicates when an item can be deleted from the
table. DynamoDB handles deletion of expired items without consuming
write throughput. To enable this feature on the table, follow the
[AWS DynamoDB documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/time-to-live-ttl-how-to.html)
history_size: Maximum number of messages to store. If None then there is no
limit. If not None then only the latest `history_size` messages are stored.
history_messages_key: Key for the chat history where the messages
are stored and updated
"""
def __init__(
self,
table_name: str,
session_id: str,
endpoint_url: Optional[str] = None,
primary_key_name: str = "SessionId",
key: Optional[Dict[str, str]] = None,
boto3_session: Optional[Session] = None,
kms_key_id: Optional[str] = None,
ttl: Optional[int] = None,
ttl_key_name: str = "expireAt",
history_size: Optional[int] = None,
history_messages_key: Optional[str] = "History",
):
if boto3_session:
client = boto3_session.resource("dynamodb", endpoint_url=endpoint_url)
else:
try:
import boto3
except ImportError as e:
raise ImportError(
"Unable to import boto3, please install with `pip install boto3`."
) from e
if endpoint_url:
client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
else:
client = boto3.resource("dynamodb")
self.table = client.Table(table_name)
self.session_id = session_id
self.key: Dict = key or {primary_key_name: session_id}
self.ttl = ttl
self.ttl_key_name = ttl_key_name
self.history_size = history_size
self.history_messages_key = history_messages_key
if kms_key_id:
try:
from dynamodb_encryption_sdk.encrypted.table import EncryptedTable
from dynamodb_encryption_sdk.identifiers import CryptoAction
from dynamodb_encryption_sdk.material_providers.aws_kms import (
AwsKmsCryptographicMaterialsProvider,
)
from dynamodb_encryption_sdk.structures import AttributeActions
except ImportError as e:
raise ImportError(
"Unable to import dynamodb_encryption_sdk, please install with "
"`pip install dynamodb-encryption-sdk`."
) from e
actions = AttributeActions(
default_action=CryptoAction.DO_NOTHING,
attribute_actions={
self.history_messages_key: CryptoAction.ENCRYPT_AND_SIGN
},
)
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=kms_key_id)
self.table = EncryptedTable(
table=self.table,
materials_provider=aws_kms_cmp,
attribute_actions=actions,
auto_refresh_table_indexes=False,
)
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve the messages from DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
response = None
try:
response = self.table.get_item(Key=self.key)
except ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
logger.warning("No record found with session id: %s", self.session_id)
else:
logger.error(error)
if response and "Item" in response:
items = response["Item"][self.history_messages_key]
else:
items = []
messages = messages_from_dict(items)
return messages
@messages.setter
def messages(self, messages: List[BaseMessage]) -> None:
raise NotImplementedError(
"Direct assignment to 'messages' is not allowed."
" Use the 'add_messages' instead."
)
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
messages = messages_to_dict(self.messages)
_message = message_to_dict(message)
messages.append(_message)
if self.history_size:
messages = messages[-self.history_size :]
try:
if self.ttl:
import time
expireAt = int(time.time()) + self.ttl
self.table.put_item(
Item={
**self.key,
self.history_messages_key: messages,
self.ttl_key_name: expireAt,
}
)
else:
self.table.put_item(
Item={**self.key, self.history_messages_key: messages}
)
except ClientError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
"Unable to import botocore, please install with `pip install botocore`."
) from e
try:
self.table.delete_item(Key=self.key)
except ClientError as err:
logger.error(err)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/sql.py | import contextlib
import json
import logging
from abc import ABC, abstractmethod
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Sequence,
Union,
cast,
)
from langchain_core._api import deprecated, warn_deprecated
from sqlalchemy import Column, Integer, Text, delete, select
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.ext.asyncio import (
AsyncEngine,
AsyncSession,
create_async_engine,
)
from sqlalchemy.orm import (
Session as SQLSession,
)
from sqlalchemy.orm import (
declarative_base,
scoped_session,
sessionmaker,
)
try:
from sqlalchemy.ext.asyncio import async_sessionmaker
except ImportError:
# dummy for sqlalchemy < 2
async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore
logger = logging.getLogger(__name__)
class BaseMessageConverter(ABC):
"""Convert BaseMessage to the SQLAlchemy model."""
@abstractmethod
def from_sql_model(self, sql_message: Any) -> BaseMessage:
"""Convert a SQLAlchemy model to a BaseMessage instance."""
raise NotImplementedError
@abstractmethod
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
"""Convert a BaseMessage instance to a SQLAlchemy model."""
raise NotImplementedError
@abstractmethod
def get_sql_model_class(self) -> Any:
"""Get the SQLAlchemy model class."""
raise NotImplementedError
def create_message_model(table_name: str, DynamicBase: Any) -> Any:
"""
Create a message model for a given table name.
Args:
table_name: The name of the table to use.
DynamicBase: The base class to use for the model.
Returns:
The model class.
"""
# Model declared inside a function to have a dynamic table name.
class Message(DynamicBase): # type: ignore[valid-type, misc]
__tablename__ = table_name
id = Column(Integer, primary_key=True)
session_id = Column(Text)
message = Column(Text)
return Message
class DefaultMessageConverter(BaseMessageConverter):
"""The default message converter for SQLChatMessageHistory."""
def __init__(self, table_name: str):
self.model_class = create_message_model(table_name, declarative_base())
def from_sql_model(self, sql_message: Any) -> BaseMessage:
return messages_from_dict([json.loads(sql_message.message)])[0]
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
return self.model_class(
session_id=session_id, message=json.dumps(message_to_dict(message))
)
def get_sql_model_class(self) -> Any:
return self.model_class
DBConnection = Union[AsyncEngine, Engine, str]
_warned_once_already = False
class SQLChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in an SQL database.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage
from langchain_community.chat_message_histories import SQLChatMessageHistory
# create sync sql message history by connection_string
message_history = SQLChatMessageHistory(
session_id='foo', connection_string='sqlite///:memory.db'
)
message_history.add_message(HumanMessage("hello"))
message_history.message
# create async sql message history using aiosqlite
# from sqlalchemy.ext.asyncio import create_async_engine
#
# async_engine = create_async_engine("sqlite+aiosqlite:///memory.db")
# async_message_history = SQLChatMessageHistory(
# session_id='foo', connection=async_engine,
# )
# await async_message_history.aadd_message(HumanMessage("hello"))
# await async_message_history.aget_messages()
"""
@property
@deprecated("0.2.2", removal="1.0", alternative="session_maker")
def Session(self) -> Union[scoped_session, async_sessionmaker]:
return self.session_maker
def __init__(
self,
session_id: str,
connection_string: Optional[str] = None,
table_name: str = "message_store",
session_id_field_name: str = "session_id",
custom_message_converter: Optional[BaseMessageConverter] = None,
connection: Union[None, DBConnection] = None,
engine_args: Optional[Dict[str, Any]] = None,
async_mode: Optional[bool] = None, # Use only if connection is a string
):
"""Initialize with a SQLChatMessageHistory instance.
Args:
session_id: Indicates the id of the same session.
connection_string: String parameter configuration for connecting
to the database.
table_name: Table name used to save data.
session_id_field_name: The name of field of `session_id`.
custom_message_converter: Custom message converter for converting
database data and `BaseMessage`
connection: Database connection object, which can be a string containing
connection configuration, Engine object or AsyncEngine object.
engine_args: Additional configuration for creating database engines.
async_mode: Whether it is an asynchronous connection.
"""
assert not (
connection_string and connection
), "connection_string and connection are mutually exclusive"
if connection_string:
global _warned_once_already
if not _warned_once_already:
warn_deprecated(
since="0.2.2",
removal="1.0",
name="connection_string",
alternative="connection",
)
_warned_once_already = True
connection = connection_string
self.connection_string = connection_string
if isinstance(connection, str):
self.async_mode = async_mode
if async_mode:
self.async_engine = create_async_engine(
connection, **(engine_args or {})
)
else:
self.engine = create_engine(url=connection, **(engine_args or {}))
elif isinstance(connection, Engine):
self.async_mode = False
self.engine = connection
elif isinstance(connection, AsyncEngine):
self.async_mode = True
self.async_engine = connection
else:
raise ValueError(
"connection should be a connection string or an instance of "
"sqlalchemy.engine.Engine or sqlalchemy.ext.asyncio.engine.AsyncEngine"
)
# To be consistent with others SQL implementations, rename to session_maker
self.session_maker: Union[scoped_session, async_sessionmaker]
if self.async_mode:
self.session_maker = async_sessionmaker(bind=self.async_engine)
else:
self.session_maker = scoped_session(sessionmaker(bind=self.engine))
self.session_id_field_name = session_id_field_name
self.converter = custom_message_converter or DefaultMessageConverter(table_name)
self.sql_model_class = self.converter.get_sql_model_class()
if not hasattr(self.sql_model_class, session_id_field_name):
raise ValueError("SQL model class must have session_id column")
self._table_created = False
if not self.async_mode:
self._create_table_if_not_exists()
self.session_id = session_id
def _create_table_if_not_exists(self) -> None:
self.sql_model_class.metadata.create_all(self.engine)
self._table_created = True
async def _acreate_table_if_not_exists(self) -> None:
if not self._table_created:
assert self.async_mode, "This method must be called with async_mode"
async with self.async_engine.begin() as conn:
await conn.run_sync(self.sql_model_class.metadata.create_all)
self._table_created = True
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all messages from db"""
with self._make_sync_session() as session:
result = (
session.query(self.sql_model_class)
.where(
getattr(self.sql_model_class, self.session_id_field_name)
== self.session_id
)
.order_by(self.sql_model_class.id.asc())
)
messages = []
for record in result:
messages.append(self.converter.from_sql_model(record))
return messages
def get_messages(self) -> List[BaseMessage]:
return self.messages
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all messages from db"""
await self._acreate_table_if_not_exists()
async with self._make_async_session() as session:
stmt = (
select(self.sql_model_class)
.where(
getattr(self.sql_model_class, self.session_id_field_name)
== self.session_id
)
.order_by(self.sql_model_class.id.asc())
)
result = await session.execute(stmt)
messages = []
for record in result.scalars():
messages.append(self.converter.from_sql_model(record))
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in db"""
with self._make_sync_session() as session:
session.add(self.converter.to_sql_model(message, self.session_id))
session.commit()
async def aadd_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.
Args:
message: A BaseMessage object to store.
"""
await self._acreate_table_if_not_exists()
async with self._make_async_session() as session:
session.add(self.converter.to_sql_model(message, self.session_id))
await session.commit()
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
# Add all messages in one transaction
with self._make_sync_session() as session:
for message in messages:
session.add(self.converter.to_sql_model(message, self.session_id))
session.commit()
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
# Add all messages in one transaction
await self._acreate_table_if_not_exists()
async with self.session_maker() as session:
for message in messages:
session.add(self.converter.to_sql_model(message, self.session_id))
await session.commit()
def clear(self) -> None:
"""Clear session memory from db"""
with self._make_sync_session() as session:
session.query(self.sql_model_class).filter(
getattr(self.sql_model_class, self.session_id_field_name)
== self.session_id
).delete()
session.commit()
async def aclear(self) -> None:
"""Clear session memory from db"""
await self._acreate_table_if_not_exists()
async with self._make_async_session() as session:
stmt = delete(self.sql_model_class).filter(
getattr(self.sql_model_class, self.session_id_field_name)
== self.session_id
)
await session.execute(stmt)
await session.commit()
@contextlib.contextmanager
def _make_sync_session(self) -> Generator[SQLSession, None, None]:
"""Make an async session."""
if self.async_mode:
raise ValueError(
"Attempting to use a sync method in when async mode is turned on. "
"Please use the corresponding async method instead."
)
with self.session_maker() as session:
yield cast(SQLSession, session)
@contextlib.asynccontextmanager
async def _make_async_session(self) -> AsyncGenerator[AsyncSession, None]:
"""Make an async session."""
if not self.async_mode:
raise ValueError(
"Attempting to use an async method in when sync mode is turned on. "
"Please use the corresponding async method instead."
)
async with self.session_maker() as session:
yield cast(AsyncSession, session)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/neo4j.py | from typing import List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage, messages_from_dict
from langchain_core.utils import get_from_dict_or_env
from langchain_community.graphs import Neo4jGraph
@deprecated(
since="0.3.8",
removal="1.0",
alternative_import="langchain_neo4j.Neo4jChatMessageHistory",
)
class Neo4jChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Neo4j database."""
def __init__(
self,
session_id: Union[str, int],
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = "neo4j",
node_label: str = "Session",
window: int = 3,
*,
graph: Optional[Neo4jGraph] = None,
):
try:
import neo4j
except ImportError:
raise ImportError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
# Make sure session id is not null
if not session_id:
raise ValueError("Please ensure that the session_id parameter is provided")
# Graph object takes precedent over env or input params
if graph:
self._driver = graph._driver
self._database = graph._database
else:
# Handle if the credentials are environment variables
url = get_from_dict_or_env({"url": url}, "url", "NEO4J_URI")
username = get_from_dict_or_env(
{"username": username}, "username", "NEO4J_USERNAME"
)
password = get_from_dict_or_env(
{"password": password}, "password", "NEO4J_PASSWORD"
)
database = get_from_dict_or_env(
{"database": database}, "database", "NEO4J_DATABASE", "neo4j"
)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct"
)
self._session_id = session_id
self._node_label = node_label
self._window = window
# Create session node
self._driver.execute_query(
f"MERGE (s:`{self._node_label}` {{id:$session_id}})",
{"session_id": self._session_id},
).summary
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve the messages from Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
"WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0.."
f"{self._window*2}]-() WITH p, length(p) AS length "
"ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node "
"RETURN {data:{content: node.content}, type:node.type} AS result"
)
records, _, _ = self._driver.execute_query(
query, {"session_id": self._session_id}
)
messages = messages_from_dict([el["result"] for el in records])
return messages
@messages.setter
def messages(self, messages: List[BaseMessage]) -> None:
raise NotImplementedError(
"Direct assignment to 'messages' is not allowed."
" Use the 'add_messages' instead."
)
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`) WHERE s.id = $session_id "
"OPTIONAL MATCH (s)-[lm:LAST_MESSAGE]->(last_message) "
"CREATE (s)-[:LAST_MESSAGE]->(new:Message) "
"SET new += {type:$type, content:$content} "
"WITH new, lm, last_message WHERE last_message IS NOT NULL "
"CREATE (last_message)-[:NEXT]->(new) "
"DELETE lm"
)
self._driver.execute_query(
query,
{
"type": message.type,
"content": message.content,
"session_id": self._session_id,
},
).summary
def clear(self) -> None:
"""Clear session memory from Neo4j"""
query = (
f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
"WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT]-() "
"WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 "
"UNWIND nodes(p) as node DETACH DELETE node;"
)
self._driver.execute_query(query, {"session_id": self._session_id}).summary
def __del__(self) -> None:
if self._driver:
self._driver.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/rocksetdb.py | from datetime import datetime
from time import sleep
from typing import Any, Callable, List, Union
from uuid import uuid4
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class RocksetChatMessageHistory(BaseChatMessageHistory):
"""Uses Rockset to store chat messages.
To use, ensure that the `rockset` python package installed.
Example:
.. code-block:: python
from langchain_community.chat_message_histories import (
RocksetChatMessageHistory
)
from rockset import RocksetClient
history = RocksetChatMessageHistory(
session_id="MySession",
client=RocksetClient(),
collection="langchain_demo",
sync=True
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
print(history.messages) # noqa: T201
"""
# You should set these values based on your VI.
# These values are configured for the typical
# free VI. Read more about VIs here:
# https://rockset.com/docs/instances
SLEEP_INTERVAL_MS: int = 5
ADD_TIMEOUT_MS: int = 5000
CREATE_TIMEOUT_MS: int = 20000
def _wait_until(self, method: Callable, timeout: int, **method_params: Any) -> None:
"""Sleeps until meth() evaluates to true. Passes kwargs into
meth.
"""
start = datetime.now()
while not method(**method_params):
curr = datetime.now()
if (curr - start).total_seconds() * 1000 > timeout:
raise TimeoutError(f"{method} timed out at {timeout} ms")
sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000)
def _query(self, query: str, **query_params: Any) -> List[Any]:
"""Executes an SQL statement and returns the result
Args:
- query: The SQL string
- **query_params: Parameters to pass into the query
"""
return self.client.sql(query, params=query_params).results
def _create_collection(self) -> None:
"""Creates a collection for this message history"""
self.client.Collections.create_s3_collection(
name=self.collection, workspace=self.workspace
)
def _collection_exists(self) -> bool:
"""Checks whether a collection exists for this message history"""
try:
self.client.Collections.get(collection=self.collection)
except self.rockset.exceptions.NotFoundException:
return False
return True
def _collection_is_ready(self) -> bool:
"""Checks whether the collection for this message history is ready
to be queried
"""
return (
self.client.Collections.get(collection=self.collection).data.status
== "READY"
)
def _document_exists(self) -> bool:
return (
len(
self._query(
f"""
SELECT 1
FROM {self.location}
WHERE _id=:session_id
LIMIT 1
""",
session_id=self.session_id,
)
)
!= 0
)
def _wait_until_collection_created(self) -> None:
"""Sleeps until the collection for this message history is ready
to be queried
"""
self._wait_until(
lambda: self._collection_is_ready(),
RocksetChatMessageHistory.CREATE_TIMEOUT_MS,
)
def _wait_until_message_added(self, message_id: str) -> None:
"""Sleeps until a message is added to the messages list"""
self._wait_until(
lambda message_id: len(
self._query(
f"""
SELECT *
FROM UNNEST((
SELECT {self.messages_key}
FROM {self.location}
WHERE _id = :session_id
)) AS message
WHERE message.data.additional_kwargs.id = :message_id
LIMIT 1
""",
session_id=self.session_id,
message_id=message_id,
),
)
!= 0,
RocksetChatMessageHistory.ADD_TIMEOUT_MS,
message_id=message_id,
)
def _create_empty_doc(self) -> None:
"""Creates or replaces a document for this message history with no
messages"""
self.client.Documents.add_documents(
collection=self.collection,
workspace=self.workspace,
data=[{"_id": self.session_id, self.messages_key: []}],
)
def __init__(
self,
session_id: str,
client: Any,
collection: str,
workspace: str = "commons",
messages_key: str = "messages",
sync: bool = False,
message_uuid_method: Callable[[], Union[str, int]] = lambda: str(uuid4()),
) -> None:
"""Constructs a new RocksetChatMessageHistory.
Args:
- session_id: The ID of the chat session
- client: The RocksetClient object to use to query
- collection: The name of the collection to use to store chat
messages. If a collection with the given name
does not exist in the workspace, it is created.
- workspace: The workspace containing `collection`. Defaults
to `"commons"`
- messages_key: The DB column containing message history.
Defaults to `"messages"`
- sync: Whether to wait for messages to be added. Defaults
to `False`. NOTE: setting this to `True` will slow
down performance.
- message_uuid_method: The method that generates message IDs.
If set, all messages will have an `id` field within the
`additional_kwargs` property. If this param is not set
and `sync` is `False`, message IDs will not be created.
If this param is not set and `sync` is `True`, the
`uuid.uuid4` method will be used to create message IDs.
"""
try:
import rockset
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
if not isinstance(client, rockset.RocksetClient):
raise ValueError(
f"client should be an instance of rockset.RocksetClient, "
f"got {type(client)}"
)
self.session_id = session_id
self.client = client
self.collection = collection
self.workspace = workspace
self.location = f'"{self.workspace}"."{self.collection}"'
self.rockset = rockset
self.messages_key = messages_key
self.message_uuid_method = message_uuid_method
self.sync = sync
try:
self.client.set_application("langchain")
except AttributeError:
# ignore
pass
if not self._collection_exists():
self._create_collection()
self._wait_until_collection_created()
self._create_empty_doc()
elif not self._document_exists():
self._create_empty_doc()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Messages in this chat history."""
return messages_from_dict(
self._query(
f"""
SELECT *
FROM UNNEST ((
SELECT "{self.messages_key}"
FROM {self.location}
WHERE _id = :session_id
))
""",
session_id=self.session_id,
)
)
def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the history.
Args:
message: A BaseMessage object to store.
"""
if self.sync and "id" not in message.additional_kwargs:
message.additional_kwargs["id"] = self.message_uuid_method()
self.client.Documents.patch_documents(
collection=self.collection,
workspace=self.workspace,
data=[
self.rockset.model.patch_document.PatchDocument(
id=self.session_id,
patch=[
self.rockset.model.patch_operation.PatchOperation(
op="ADD",
path=f"/{self.messages_key}/-",
value=message_to_dict(message),
)
],
)
],
)
if self.sync:
self._wait_until_message_added(message.additional_kwargs["id"])
def clear(self) -> None:
"""Removes all messages from the chat history"""
self._create_empty_doc()
if self.sync:
self._wait_until(
lambda: not self.messages,
RocksetChatMessageHistory.ADD_TIMEOUT_MS,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/zep.py | from __future__ import annotations
import logging
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
if TYPE_CHECKING:
from zep_python import Memory, MemorySearchResult, Message, NotFoundError
logger = logging.getLogger(__name__)
class SearchScope(str, Enum):
"""Scope for the document search. Messages or Summaries?"""
messages = "messages"
"""Search chat history messages."""
summary = "summary"
"""Search chat history summaries."""
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class ZepChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that uses Zep as a backend.
Recommended usage::
# Set up Zep Chat History
zep_chat_history = ZepChatMessageHistory(
session_id=session_id,
url=ZEP_API_URL,
api_key=<your_api_key>,
)
# Use a standard ConversationBufferMemory to encapsulate the Zep chat history
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=zep_chat_history
)
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
This class is a thin wrapper around the zep-python package. Additional
Zep functionality is exposed via the `zep_summary` and `zep_messages`
properties.
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
) -> None:
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url, api_key=api_key)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve messages from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
messages: List[BaseMessage] = []
# Extract summary, if present, and messages
if zep_memory.summary:
if len(zep_memory.summary.content) > 0:
messages.append(SystemMessage(content=zep_memory.summary.content))
if zep_memory.messages:
msg: Message
for msg in zep_memory.messages:
metadata: Dict = {
"uuid": msg.uuid,
"created_at": msg.created_at,
"token_count": msg.token_count,
"metadata": msg.metadata,
}
if msg.role == "ai":
messages.append(
AIMessage(content=msg.content, additional_kwargs=metadata)
)
else:
messages.append(
HumanMessage(content=msg.content, additional_kwargs=metadata)
)
return messages
@property
def zep_messages(self) -> List[Message]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
return zep_memory.messages
@property
def zep_summary(self) -> Optional[str]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.summary:
return None
return zep_memory.summary.content
def _get_memory(self) -> Optional[Memory]:
"""Retrieve memory from Zep"""
from zep_python import NotFoundError
try:
zep_memory: Memory = self.zep_client.memory.get_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Returning None"
)
return None
return zep_memory
def add_user_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(HumanMessage(content=message), metadata=metadata)
def add_ai_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(AIMessage(content=message), metadata=metadata)
def add_message(
self, message: BaseMessage, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Append the message to the Zep memory history"""
from zep_python import Memory, Message
zep_message = Message(
content=message.content, role=message.type, metadata=metadata
)
zep_memory = Memory(messages=[zep_message])
self.zep_client.memory.add_memory(self.session_id, zep_memory)
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Append the messages to the Zep memory history"""
from zep_python import Memory, Message
zep_messages = [
Message(
content=message.content,
role=message.type,
metadata=message.additional_kwargs.get("metadata", None),
)
for message in messages
]
zep_memory = Memory(messages=zep_messages)
self.zep_client.memory.add_memory(self.session_id, zep_memory)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Append the messages to the Zep memory history asynchronously"""
from zep_python import Memory, Message
zep_messages = [
Message(
content=message.content,
role=message.type,
metadata=message.additional_kwargs.get("metadata", None),
)
for message in messages
]
zep_memory = Memory(messages=zep_messages)
await self.zep_client.memory.aadd_memory(self.session_id, zep_memory)
def search(
self,
query: str,
metadata: Optional[Dict] = None,
search_scope: SearchScope = SearchScope.messages,
search_type: SearchType = SearchType.similarity,
mmr_lambda: Optional[float] = None,
limit: Optional[int] = None,
) -> List[MemorySearchResult]:
"""Search Zep memory for messages matching the query"""
from zep_python import MemorySearchPayload
payload = MemorySearchPayload(
text=query,
metadata=metadata,
search_scope=search_scope,
search_type=search_type,
mmr_lambda=mmr_lambda,
)
return self.zep_client.memory.search_memory(
self.session_id, payload, limit=limit
)
def clear(self) -> None:
"""Clear session memory from Zep. Note that Zep is long-term storage for memory
and this is not advised unless you have specific data retention requirements.
"""
try:
self.zep_client.memory.delete_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
async def aclear(self) -> None:
"""Clear session memory from Zep asynchronously.
Note that Zep is long-term storage for memory and this is not advised
unless you have specific data retention requirements.
"""
try:
await self.zep_client.memory.adelete_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/streamlit.py | from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage
class StreamlitChatMessageHistory(BaseChatMessageHistory):
"""
Chat message history that stores messages in Streamlit session state.
Args:
key: The key to use in Streamlit session state for storing messages.
"""
def __init__(self, key: str = "langchain_messages"):
try:
import streamlit as st
except ImportError as e:
raise ImportError(
"Unable to import streamlit, please run `pip install streamlit`."
) from e
if key not in st.session_state:
st.session_state[key] = []
self._messages = st.session_state[key]
self._key = key
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve the current list of messages"""
return self._messages
@messages.setter
def messages(self, value: List[BaseMessage]) -> None:
"""Set the messages list with a new value"""
import streamlit as st
st.session_state[self._key] = value
self._messages = st.session_state[self._key]
def add_message(self, message: BaseMessage) -> None:
"""Add a message to the session memory"""
self.messages.append(message)
def clear(self) -> None:
"""Clear session memory"""
self.messages.clear()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/astradb.py | """Astra DB - based chat message history, based on astrapy."""
from __future__ import annotations
import json
import time
from typing import TYPE_CHECKING, List, Optional, Sequence
from langchain_community.utilities.astradb import (
SetupMode,
_AstraDBCollectionEnvironment,
)
if TYPE_CHECKING:
from astrapy.db import AstraDB, AsyncAstraDB
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_COLLECTION_NAME = "langchain_message_store"
@deprecated(
since="0.0.25",
removal="1.0",
alternative_import="langchain_astradb.AstraDBChatMessageHistory",
)
class AstraDBChatMessageHistory(BaseChatMessageHistory):
def __init__(
self,
*,
session_id: str,
collection_name: str = DEFAULT_COLLECTION_NAME,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[AstraDB] = None,
async_astra_db_client: Optional[AsyncAstraDB] = None,
namespace: Optional[str] = None,
setup_mode: SetupMode = SetupMode.SYNC,
pre_delete_collection: bool = False,
) -> None:
"""Chat message history that stores history in Astra DB.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
collection_name: name of the Astra DB collection to create/use.
token: API token for Astra DB usage.
api_endpoint: full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client: *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
async_astra_db_client: *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AsyncAstraDB' instance.
namespace: namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
setup_mode: mode used to create the Astra DB collection (SYNC, ASYNC or
OFF).
pre_delete_collection: whether to delete the collection
before creating it. If False and the collection already exists,
the collection will be used as is.
"""
self.astra_env = _AstraDBCollectionEnvironment(
collection_name=collection_name,
token=token,
api_endpoint=api_endpoint,
astra_db_client=astra_db_client,
async_astra_db_client=async_astra_db_client,
namespace=namespace,
setup_mode=setup_mode,
pre_delete_collection=pre_delete_collection,
)
self.collection = self.astra_env.collection
self.async_collection = self.astra_env.async_collection
self.session_id = session_id
self.collection_name = collection_name
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
self.astra_env.ensure_db_setup()
message_blobs = [
doc["body_blob"]
for doc in sorted(
self.collection.paginated_find(
filter={
"session_id": self.session_id,
},
projection={
"timestamp": 1,
"body_blob": 1,
},
),
key=lambda _doc: _doc["timestamp"],
)
]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
@messages.setter
def messages(self, messages: List[BaseMessage]) -> None:
raise NotImplementedError("Use add_messages instead")
async def aget_messages(self) -> List[BaseMessage]:
await self.astra_env.aensure_db_setup()
docs = self.async_collection.paginated_find(
filter={
"session_id": self.session_id,
},
projection={
"timestamp": 1,
"body_blob": 1,
},
)
sorted_docs = sorted(
[doc async for doc in docs],
key=lambda _doc: _doc["timestamp"],
)
message_blobs = [doc["body_blob"] for doc in sorted_docs]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
self.astra_env.ensure_db_setup()
docs = [
{
"timestamp": time.time(),
"session_id": self.session_id,
"body_blob": json.dumps(message_to_dict(message)),
}
for message in messages
]
self.collection.chunked_insert_many(docs)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
await self.astra_env.aensure_db_setup()
docs = [
{
"timestamp": time.time(),
"session_id": self.session_id,
"body_blob": json.dumps(message_to_dict(message)),
}
for message in messages
]
await self.async_collection.chunked_insert_many(docs)
def clear(self) -> None:
self.astra_env.ensure_db_setup()
self.collection.delete_many(filter={"session_id": self.session_id})
async def aclear(self) -> None:
await self.astra_env.aensure_db_setup()
await self.async_collection.delete_many(filter={"session_id": self.session_id})
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/momento.py | from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
from langchain_core.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, ie the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
@classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
api_key: Optional[str] = None,
auth_token: Optional[str] = None, # for backwards compatibility
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
# Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility
try:
api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
except ValueError:
api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
credentials = CredentialProvider.from_string(api_key)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(message_to_dict(message))
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/__init__.py | """**Chat message history** stores a history of the message interactions in a chat.
**Class hierarchy:**
.. code-block::
BaseChatMessageHistory --> <name>ChatMessageHistory # Examples: FileChatMessageHistory, PostgresChatMessageHistory
**Main helpers:**
.. code-block::
AIMessage, HumanMessage, BaseMessage
""" # noqa: E501
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.chat_message_histories.astradb import (
AstraDBChatMessageHistory,
)
from langchain_community.chat_message_histories.cassandra import (
CassandraChatMessageHistory,
)
from langchain_community.chat_message_histories.cosmos_db import (
CosmosDBChatMessageHistory,
)
from langchain_community.chat_message_histories.dynamodb import (
DynamoDBChatMessageHistory,
)
from langchain_community.chat_message_histories.elasticsearch import (
ElasticsearchChatMessageHistory,
)
from langchain_community.chat_message_histories.file import (
FileChatMessageHistory,
)
from langchain_community.chat_message_histories.firestore import (
FirestoreChatMessageHistory,
)
from langchain_community.chat_message_histories.in_memory import (
ChatMessageHistory,
)
from langchain_community.chat_message_histories.kafka import (
KafkaChatMessageHistory,
)
from langchain_community.chat_message_histories.momento import (
MomentoChatMessageHistory,
)
from langchain_community.chat_message_histories.mongodb import (
MongoDBChatMessageHistory,
)
from langchain_community.chat_message_histories.neo4j import (
Neo4jChatMessageHistory,
)
from langchain_community.chat_message_histories.postgres import (
PostgresChatMessageHistory,
)
from langchain_community.chat_message_histories.redis import (
RedisChatMessageHistory,
)
from langchain_community.chat_message_histories.rocksetdb import (
RocksetChatMessageHistory,
)
from langchain_community.chat_message_histories.singlestoredb import (
SingleStoreDBChatMessageHistory,
)
from langchain_community.chat_message_histories.sql import (
SQLChatMessageHistory,
)
from langchain_community.chat_message_histories.streamlit import (
StreamlitChatMessageHistory,
)
from langchain_community.chat_message_histories.tidb import (
TiDBChatMessageHistory,
)
from langchain_community.chat_message_histories.upstash_redis import (
UpstashRedisChatMessageHistory,
)
from langchain_community.chat_message_histories.xata import (
XataChatMessageHistory,
)
from langchain_community.chat_message_histories.zep import (
ZepChatMessageHistory,
)
from langchain_community.chat_message_histories.zep_cloud import (
ZepCloudChatMessageHistory,
)
__all__ = [
"AstraDBChatMessageHistory",
"CassandraChatMessageHistory",
"ChatMessageHistory",
"CosmosDBChatMessageHistory",
"DynamoDBChatMessageHistory",
"ElasticsearchChatMessageHistory",
"FileChatMessageHistory",
"FirestoreChatMessageHistory",
"MomentoChatMessageHistory",
"MongoDBChatMessageHistory",
"Neo4jChatMessageHistory",
"PostgresChatMessageHistory",
"RedisChatMessageHistory",
"RocksetChatMessageHistory",
"SQLChatMessageHistory",
"SingleStoreDBChatMessageHistory",
"StreamlitChatMessageHistory",
"TiDBChatMessageHistory",
"UpstashRedisChatMessageHistory",
"XataChatMessageHistory",
"ZepChatMessageHistory",
"ZepCloudChatMessageHistory",
"KafkaChatMessageHistory",
]
_module_lookup = {
"AstraDBChatMessageHistory": "langchain_community.chat_message_histories.astradb",
"CassandraChatMessageHistory": "langchain_community.chat_message_histories.cassandra", # noqa: E501
"ChatMessageHistory": "langchain_community.chat_message_histories.in_memory",
"CosmosDBChatMessageHistory": "langchain_community.chat_message_histories.cosmos_db", # noqa: E501
"DynamoDBChatMessageHistory": "langchain_community.chat_message_histories.dynamodb",
"ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories.elasticsearch", # noqa: E501
"FileChatMessageHistory": "langchain_community.chat_message_histories.file",
"FirestoreChatMessageHistory": "langchain_community.chat_message_histories.firestore", # noqa: E501
"MomentoChatMessageHistory": "langchain_community.chat_message_histories.momento",
"MongoDBChatMessageHistory": "langchain_community.chat_message_histories.mongodb",
"Neo4jChatMessageHistory": "langchain_community.chat_message_histories.neo4j",
"PostgresChatMessageHistory": "langchain_community.chat_message_histories.postgres",
"RedisChatMessageHistory": "langchain_community.chat_message_histories.redis",
"RocksetChatMessageHistory": "langchain_community.chat_message_histories.rocksetdb",
"SQLChatMessageHistory": "langchain_community.chat_message_histories.sql",
"SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories.singlestoredb", # noqa: E501
"StreamlitChatMessageHistory": "langchain_community.chat_message_histories.streamlit", # noqa: E501
"TiDBChatMessageHistory": "langchain_community.chat_message_histories.tidb",
"UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories.upstash_redis", # noqa: E501
"XataChatMessageHistory": "langchain_community.chat_message_histories.xata",
"ZepChatMessageHistory": "langchain_community.chat_message_histories.zep",
"ZepCloudChatMessageHistory": "langchain_community.chat_message_histories.zep_cloud", # noqa: E501
"KafkaChatMessageHistory": "langchain_community.chat_message_histories.kafka",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/elasticsearch.py | import json
import logging
from time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
logger = logging.getLogger(__name__)
@deprecated("0.0.27", alternative="Use langchain-elasticsearch package", pending=True)
class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in Elasticsearch.
Args:
es_url: URL of the Elasticsearch instance to connect to.
es_cloud_id: Cloud ID of the Elasticsearch instance to connect to.
es_user: Username to use when connecting to Elasticsearch.
es_password: Password to use when connecting to Elasticsearch.
es_api_key: API key to use when connecting to Elasticsearch.
es_connection: Optional pre-existing Elasticsearch connection.
ensure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True.
index: Name of the index to use.
session_id: Arbitrary key that is used to store the messages
of a single chat session.
"""
def __init__(
self,
index: str,
session_id: str,
*,
es_connection: Optional["Elasticsearch"] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_api_key: Optional[str] = None,
es_password: Optional[str] = None,
ensure_ascii: Optional[bool] = True,
):
self.index: str = index
self.session_id: str = session_id
self.ensure_ascii = ensure_ascii
# Initialize Elasticsearch client from passed client arg or connection info
if es_connection is not None:
self.client = es_connection.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self.client = ElasticsearchChatMessageHistory.connect_to_elasticsearch(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
if self.client.indices.exists(index=index):
logger.debug(
f"Chat history index {index} already exists, skipping creation."
)
else:
logger.debug(f"Creating index {index} for storing chat history.")
self.client.indices.create(
index=index,
mappings={
"properties": {
"session_id": {"type": "keyword"},
"created_at": {"type": "date"},
"history": {"type": "text"},
}
},
)
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain-py-ms/{__version__}"
@staticmethod
def connect_to_elasticsearch(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> "Elasticsearch":
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
es_client = elasticsearch.Elasticsearch(
**connection_params,
headers={"user-agent": ElasticsearchChatMessageHistory.get_user_agent()},
)
try:
es_client.info()
except Exception as err:
logger.error(f"Error connecting to Elasticsearch: {err}")
raise err
return es_client
@property
def messages(self) -> List[BaseMessage]:
"""Retrieve the messages from Elasticsearch"""
try:
from elasticsearch import ApiError
result = self.client.search(
index=self.index,
query={"term": {"session_id": self.session_id}},
sort="created_at:asc",
)
except ApiError as err:
logger.error(f"Could not retrieve messages from Elasticsearch: {err}")
raise err
if result and len(result["hits"]["hits"]) > 0:
items = [
json.loads(document["_source"]["history"])
for document in result["hits"]["hits"]
]
else:
items = []
return messages_from_dict(items)
@messages.setter
def messages(self, messages: List[BaseMessage]) -> None:
raise NotImplementedError(
"Direct assignment to 'messages' is not allowed."
" Use the 'add_messages' instead."
)
def add_message(self, message: BaseMessage) -> None:
"""Add a message to the chat session in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.index(
index=self.index,
document={
"session_id": self.session_id,
"created_at": round(time() * 1000),
"history": json.dumps(
message_to_dict(message),
ensure_ascii=bool(self.ensure_ascii),
),
},
refresh=True,
)
except ApiError as err:
logger.error(f"Could not add message to Elasticsearch: {err}")
raise err
def clear(self) -> None:
"""Clear session memory in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.delete_by_query(
index=self.index,
query={"term": {"session_id": self.session_id}},
refresh=True,
)
except ApiError as err:
logger.error(f"Could not clear session memory in Elasticsearch: {err}")
raise err
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/xata.py | import json
from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class XataChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Xata database."""
def __init__(
self,
session_id: str,
db_url: str,
api_key: str,
branch_name: str = "main",
table_name: str = "messages",
create_table: bool = True,
) -> None:
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ImportError(
"Could not import xata python package. "
"Please install it with `pip install xata`."
)
self._client = XataClient(
api_key=api_key, db_url=db_url, branch_name=branch_name
)
self._table_name = table_name
self._session_id = session_id
if create_table:
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
r = self._client.table().get_schema(self._table_name)
if r.status_code <= 299:
return
if r.status_code != 404:
raise Exception(
f"Error checking if table exists in Xata: {r.status_code} {r}"
)
r = self._client.table().create(self._table_name)
if r.status_code > 299:
raise Exception(f"Error creating table in Xata: {r.status_code} {r}")
r = self._client.table().set_schema(
self._table_name,
payload={
"columns": [
{"name": "sessionId", "type": "string"},
{"name": "type", "type": "string"},
{"name": "role", "type": "string"},
{"name": "content", "type": "text"},
{"name": "name", "type": "string"},
{"name": "additionalKwargs", "type": "json"},
]
},
)
if r.status_code > 299:
raise Exception(f"Error setting table schema in Xata: {r.status_code} {r}")
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the Xata table"""
msg = message_to_dict(message)
r = self._client.records().insert(
self._table_name,
{
"sessionId": self._session_id,
"type": msg["type"],
"content": message.content,
"additionalKwargs": json.dumps(message.additional_kwargs),
"role": msg["data"].get("role"),
"name": msg["data"].get("name"),
},
)
if r.status_code > 299:
raise Exception(f"Error adding message to Xata: {r.status_code} {r}")
@property
def messages(self) -> List[BaseMessage]: # type: ignore
r = self._client.data().query(
self._table_name,
payload={
"filter": {
"sessionId": self._session_id,
},
"sort": {"xata.createdAt": "asc"},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
msgs = messages_from_dict(
[
{
"type": m["type"],
"data": {
"content": m["content"],
"role": m.get("role"),
"name": m.get("name"),
"additional_kwargs": json.loads(m["additionalKwargs"]),
},
}
for m in r["records"]
]
)
return msgs
def clear(self) -> None:
"""Delete session from Xata table."""
while True:
r = self._client.data().query(
self._table_name,
payload={
"columns": ["id"],
"filter": {
"sessionId": self._session_id,
},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
ids = [rec["id"] for rec in r["records"]]
if len(ids) == 0:
break
operations = [
{"delete": {"table": self._table_name, "id": id}} for id in ids
]
self._client.records().transaction(payload={"operations": operations})
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/singlestoredb.py | import json
import logging
import re
from typing import (
Any,
List,
)
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class SingleStoreDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a SingleStoreDB database."""
def __init__(
self,
session_id: str,
*,
table_name: str = "message_store",
id_field: str = "id",
session_id_field: str = "session_id",
message_field: str = "message",
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30,
**kwargs: Any,
):
"""Initialize with necessary components.
Args:
table_name (str, optional): Specifies the name of the table in use.
Defaults to "message_store".
id_field (str, optional): Specifies the name of the id field in the table.
Defaults to "id".
session_id_field (str, optional): Specifies the name of the session_id
field in the table. Defaults to "session_id".
message_field (str, optional): Specifies the name of the message field
in the table. Defaults to "message".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain_community.chat_message_histories import (
SingleStoreDBChatMessageHistory
)
message_history = SingleStoreDBChatMessageHistory(
session_id="my-session",
host="https://user:password@127.0.0.1:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain_community.chat_message_histories import (
SingleStoreDBChatMessageHistory
)
message_history = SingleStoreDBChatMessageHistory(
session_id="my-session",
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain_community.chat_message_histories import (
SingleStoreDBChatMessageHistory
)
os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
message_history = SingleStoreDBChatMessageHistory("my-session")
"""
self.table_name = self._sanitize_input(table_name)
self.session_id = self._sanitize_input(session_id)
self.id_field = self._sanitize_input(id_field)
self.session_id_field = self._sanitize_input(session_id_field)
self.message_field = self._sanitize_input(message_field)
# Pass the rest of the kwargs to the connection.
self.connection_kwargs = kwargs
# Add connection attributes to the connection kwargs.
if "conn_attrs" not in self.connection_kwargs:
self.connection_kwargs["conn_attrs"] = dict()
self.connection_kwargs["conn_attrs"]["_connector_name"] = "langchain python sdk"
self.connection_kwargs["conn_attrs"]["_connector_version"] = "2.1.0"
# Create a connection pool.
try:
from sqlalchemy.pool import QueuePool
except ImportError:
raise ImportError(
"Could not import sqlalchemy.pool python package. "
"Please install it with `pip install singlestoredb`."
)
self.connection_pool = QueuePool(
self._get_connection,
max_overflow=max_overflow,
pool_size=pool_size,
timeout=timeout,
)
self.table_created = False
def _sanitize_input(self, input_str: str) -> str:
# Remove characters that are not alphanumeric or underscores
return re.sub(r"[^a-zA-Z0-9_]", "", input_str)
def _get_connection(self) -> Any:
try:
import singlestoredb as s2
except ImportError:
raise ImportError(
"Could not import singlestoredb python package. "
"Please install it with `pip install singlestoredb`."
)
return s2.connect(**self.connection_kwargs)
def _create_table_if_not_exists(self) -> None:
"""Create table if it doesn't exist."""
if self.table_created:
return
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""CREATE TABLE IF NOT EXISTS {}
({} BIGINT PRIMARY KEY AUTO_INCREMENT,
{} TEXT NOT NULL,
{} JSON NOT NULL);""".format(
self.table_name,
self.id_field,
self.session_id_field,
self.message_field,
),
)
self.table_created = True
finally:
cur.close()
finally:
conn.close()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from SingleStoreDB"""
self._create_table_if_not_exists()
conn = self.connection_pool.connect()
items = []
try:
cur = conn.cursor()
try:
cur.execute(
"""SELECT {} FROM {} WHERE {} = %s""".format(
self.message_field,
self.table_name,
self.session_id_field,
),
(self.session_id),
)
for row in cur.fetchall():
items.append(row[0])
finally:
cur.close()
finally:
conn.close()
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in SingleStoreDB"""
self._create_table_if_not_exists()
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""INSERT INTO {} ({}, {}) VALUES (%s, %s)""".format(
self.table_name,
self.session_id_field,
self.message_field,
),
(self.session_id, json.dumps(message_to_dict(message))),
)
finally:
cur.close()
finally:
conn.close()
def clear(self) -> None:
"""Clear session memory from SingleStoreDB"""
self._create_table_if_not_exists()
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""DELETE FROM {} WHERE {} = %s""".format(
self.table_name,
self.session_id_field,
),
(self.session_id),
)
finally:
cur.close()
finally:
conn.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/zep_cloud.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
)
if TYPE_CHECKING:
from zep_cloud import (
Memory,
MemoryGetRequestMemoryType,
MemorySearchResult,
Message,
NotFoundError,
RoleType,
SearchScope,
SearchType,
)
logger = logging.getLogger(__name__)
def condense_zep_memory_into_human_message(zep_memory: Memory) -> BaseMessage:
"""Condense Zep memory into a human message.
Args:
zep_memory: The Zep memory object.
Returns:
BaseMessage: The human message.
"""
prompt = ""
if zep_memory.facts:
prompt = "\n".join(zep_memory.facts)
if zep_memory.summary and zep_memory.summary.content:
prompt += "\n" + zep_memory.summary.content
for msg in zep_memory.messages or []:
prompt += f"\n{msg.role or msg.role_type}: {msg.content}"
return HumanMessage(content=prompt)
def get_zep_message_role_type(role: str) -> RoleType:
"""Get the Zep role type from the role string.
Args:
role: The role string. One of "human", "ai", "system",
"function", "tool".
Returns:
RoleType: The Zep role type. One of "user", "assistant",
"system", "function", "tool".
"""
if role == "human":
return "user"
elif role == "ai":
return "assistant"
elif role == "system":
return "system"
elif role == "function":
return "function"
elif role == "tool":
return "tool"
else:
return "system"
class ZepCloudChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that uses Zep Cloud as a backend.
Recommended usage::
# Set up Zep Chat History
zep_chat_history = ZepChatMessageHistory(
session_id=session_id,
api_key=<your_api_key>,
)
# Use a standard ConversationBufferMemory to encapsulate the Zep chat history
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=zep_chat_history
)
Zep - Recall, understand, and extract data from chat histories.
Power personalized AI experiences.
Zep is a long-term memory service for AI Assistant apps.
With Zep, you can provide AI assistants with the
ability to recall past conversations,
no matter how distant,
while also reducing hallucinations, latency, and cost.
see Zep Cloud Docs: https://help.getzep.com
This class is a thin wrapper around the zep-python package. Additional
Zep functionality is exposed via the `zep_summary`, `zep_messages` and `zep_facts`
properties.
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
def __init__(
self,
session_id: str,
api_key: str,
*,
memory_type: Optional[MemoryGetRequestMemoryType] = None,
lastn: Optional[int] = None,
ai_prefix: Optional[str] = None,
human_prefix: Optional[str] = None,
summary_instruction: Optional[str] = None,
) -> None:
try:
from zep_cloud.client import AsyncZep, Zep
except ImportError:
raise ImportError(
"Could not import zep-cloud package. "
"Please install it with `pip install zep-cloud`."
)
self.zep_client = Zep(api_key=api_key)
self.zep_client_async = AsyncZep(api_key=api_key)
self.session_id = session_id
self.memory_type = memory_type or "perpetual"
self.lastn = lastn
self.ai_prefix = ai_prefix or "ai"
self.human_prefix = human_prefix or "human"
self.summary_instruction = summary_instruction
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve messages from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
return [condense_zep_memory_into_human_message(zep_memory)]
@property
def zep_messages(self) -> List[Message]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
return zep_memory.messages or []
@property
def zep_summary(self) -> Optional[str]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.summary:
return None
return zep_memory.summary.content
@property
def zep_facts(self) -> Optional[List[str]]:
"""Retrieve conversation facts from Zep memory"""
if self.memory_type != "perpetual":
return None
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.facts:
return None
return zep_memory.facts
def _get_memory(self) -> Optional[Memory]:
"""Retrieve memory from Zep"""
from zep_cloud import NotFoundError
try:
zep_memory: Memory = self.zep_client.memory.get(
self.session_id, memory_type=self.memory_type, lastn=self.lastn
)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Returning None"
)
return None
return zep_memory
def add_user_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(HumanMessage(content=message), metadata=metadata)
def add_ai_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(AIMessage(content=message), metadata=metadata)
def add_message(
self, message: BaseMessage, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Append the message to the Zep memory history"""
from zep_cloud import Message
self.zep_client.memory.add(
self.session_id,
messages=[
Message(
content=str(message.content),
role=message.type,
role_type=get_zep_message_role_type(message.type),
metadata=metadata,
)
],
)
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Append the messages to the Zep memory history"""
from zep_cloud import Message
zep_messages = [
Message(
content=str(message.content),
role=message.type,
role_type=get_zep_message_role_type(message.type),
metadata=message.additional_kwargs.get("metadata", None),
)
for message in messages
]
self.zep_client.memory.add(self.session_id, messages=zep_messages)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Append the messages to the Zep memory history asynchronously"""
from zep_cloud import Message
zep_messages = [
Message(
content=str(message.content),
role=message.type,
role_type=get_zep_message_role_type(message.type),
metadata=message.additional_kwargs.get("metadata", None),
)
for message in messages
]
await self.zep_client_async.memory.add(self.session_id, messages=zep_messages)
def search(
self,
query: str,
metadata: Optional[Dict] = None,
search_scope: SearchScope = "messages",
search_type: SearchType = "similarity",
mmr_lambda: Optional[float] = None,
limit: Optional[int] = None,
) -> List[MemorySearchResult]:
"""Search Zep memory for messages matching the query"""
return self.zep_client.memory.search(
self.session_id,
text=query,
metadata=metadata,
search_scope=search_scope,
search_type=search_type,
mmr_lambda=mmr_lambda,
limit=limit,
)
def clear(self) -> None:
"""Clear session memory from Zep. Note that Zep is long-term storage for memory
and this is not advised unless you have specific data retention requirements.
"""
try:
self.zep_client.memory.delete(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
async def aclear(self) -> None:
"""Clear session memory from Zep asynchronously.
Note that Zep is long-term storage for memory and this is not advised
unless you have specific data retention requirements.
"""
try:
await self.zep_client_async.memory.delete(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/postgres.py | import json
import logging
from typing import List
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
@deprecated(
since="0.0.31",
message=(
"This class is deprecated and will be removed in a future version. "
"You can swap to using the `PostgresChatMessageHistory`"
" implementation in `langchain_postgres`. "
"Please do not submit further PRs to this class."
"See <https://github.com/langchain-ai/langchain-postgres>"
),
alternative="from langchain_postgres import PostgresChatMessageHistory;",
pending=True,
)
class PostgresChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Postgres database.
**DEPRECATED**: This class is deprecated and will be removed in a future version.
Use the `PostgresChatMessageHistory` implementation in `langchain_postgres`.
"""
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from PostgreSQL"""
query = (
f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
)
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query, (self.session_id, json.dumps(message_to_dict(message)))
)
self.connection.commit()
def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_message_histories/mongodb.py | import json
import logging
from typing import List
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"
@deprecated(
since="0.0.25",
removal="1.0",
alternative_import="langchain_mongodb.MongoDBChatMessageHistory",
)
class MongoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in MongoDB.
Args:
connection_string: connection string to connect to MongoDB
session_id: arbitrary key that is used to store the messages
of a single chat session.
database_name: name of the database to use
collection_name: name of the collection to use
create_index: whether to create an index with name SessionId. Set to False if
such an index already exists.
"""
def __init__(
self,
connection_string: str,
session_id: str,
database_name: str = DEFAULT_DBNAME,
collection_name: str = DEFAULT_COLLECTION_NAME,
create_index: bool = True,
):
from pymongo import MongoClient, errors
self.connection_string = connection_string
self.session_id = session_id
self.database_name = database_name
self.collection_name = collection_name
try:
self.client: MongoClient = MongoClient(connection_string)
except errors.ConnectionFailure as error:
logger.error(error)
self.db = self.client[database_name]
self.collection = self.db[collection_name]
if create_index:
self.collection.create_index("SessionId")
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from MongoDB"""
from pymongo import errors
try:
cursor = self.collection.find({"SessionId": self.session_id})
except errors.OperationFailure as error:
logger.error(error)
if cursor:
items = [json.loads(document["History"]) for document in cursor]
else:
items = []
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in MongoDB"""
from pymongo import errors
try:
self.collection.insert_one(
{
"SessionId": self.session_id,
"History": json.dumps(message_to_dict(message)),
}
)
except errors.WriteError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from MongoDB"""
from pymongo import errors
try:
self.collection.delete_many({"SessionId": self.session_id})
except errors.WriteError as err:
logger.error(err)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/arthur_callback.py | """ArthurAI's Callback Handler."""
from __future__ import annotations
import os
import uuid
from collections import defaultdict
from datetime import datetime
from time import time
from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional
import numpy as np
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
if TYPE_CHECKING:
import arthurai
from arthurai.core.models import ArthurModel
PROMPT_TOKENS = "prompt_tokens"
COMPLETION_TOKENS = "completion_tokens"
TOKEN_USAGE = "token_usage"
FINISH_REASON = "finish_reason"
DURATION = "duration"
def _lazy_load_arthur() -> arthurai:
"""Lazy load Arthur."""
try:
import arthurai
except ImportError as e:
raise ImportError(
"To use the ArthurCallbackHandler you need the"
" `arthurai` package. Please install it with"
" `pip install arthurai`.",
e,
)
return arthurai
class ArthurCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Arthur platform.
Arthur helps enterprise teams optimize model operations
and performance at scale. The Arthur API tracks model
performance, explainability, and fairness across tabular,
NLP, and CV models. Our API is model- and platform-agnostic,
and continuously scales with complex and dynamic enterprise needs.
To learn more about Arthur, visit our website at
https://www.arthur.ai/ or read the Arthur docs at
https://docs.arthur.ai/
"""
def __init__(
self,
arthur_model: ArthurModel,
) -> None:
"""Initialize callback handler."""
super().__init__()
arthurai = _lazy_load_arthur()
Stage = arthurai.common.constants.Stage
ValueType = arthurai.common.constants.ValueType
self.arthur_model = arthur_model
# save the attributes of this model to be used when preparing
# inferences to log to Arthur in on_llm_end()
self.attr_names = set([a.name for a in self.arthur_model.get_attributes()])
self.input_attr = [
x
for x in self.arthur_model.get_attributes()
if x.stage == Stage.ModelPipelineInput
and x.value_type == ValueType.Unstructured_Text
][0].name
self.output_attr = [
x
for x in self.arthur_model.get_attributes()
if x.stage == Stage.PredictedValue
and x.value_type == ValueType.Unstructured_Text
][0].name
self.token_likelihood_attr = None
if (
len(
[
x
for x in self.arthur_model.get_attributes()
if x.value_type == ValueType.TokenLikelihoods
]
)
> 0
):
self.token_likelihood_attr = [
x
for x in self.arthur_model.get_attributes()
if x.value_type == ValueType.TokenLikelihoods
][0].name
self.run_map: DefaultDict[str, Any] = defaultdict(dict)
@classmethod
def from_credentials(
cls,
model_id: str,
arthur_url: Optional[str] = "https://app.arthur.ai",
arthur_login: Optional[str] = None,
arthur_password: Optional[str] = None,
) -> ArthurCallbackHandler:
"""Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
arthur_url (str, optional): The URL of the Arthur instance to log to.
Defaults to "https://app.arthur.ai".
arthur_login (str, optional): The login to use to connect to Arthur.
Defaults to None.
arthur_password (str, optional): The password to use to connect to
Arthur. Defaults to None.
Returns:
ArthurCallbackHandler: The initialized callback handler.
"""
arthurai = _lazy_load_arthur()
ArthurAI = arthurai.ArthurAI
ResponseClientError = arthurai.common.exceptions.ResponseClientError
# connect to Arthur
if arthur_login is None:
try:
arthur_api_key = os.environ["ARTHUR_API_KEY"]
except KeyError:
raise ValueError(
"No Arthur authentication provided. Either give"
" a login to the ArthurCallbackHandler"
" or set an ARTHUR_API_KEY as an environment variable."
)
arthur = ArthurAI(url=arthur_url, access_key=arthur_api_key)
else:
if arthur_password is None:
arthur = ArthurAI(url=arthur_url, login=arthur_login)
else:
arthur = ArthurAI(
url=arthur_url, login=arthur_login, password=arthur_password
)
# get model from Arthur by the provided model ID
try:
arthur_model = arthur.get_model(model_id)
except ResponseClientError:
raise ValueError(
f"Was unable to retrieve model with id {model_id} from Arthur."
" Make sure the ID corresponds to a model that is currently"
" registered with your Arthur account."
)
return cls(arthur_model)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""On LLM start, save the input prompts"""
run_id = kwargs["run_id"]
self.run_map[run_id]["input_texts"] = prompts
self.run_map[run_id]["start_time"] = time()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""On LLM end, send data to Arthur."""
try:
import pytz
except ImportError as e:
raise ImportError(
"Could not import pytz. Please install it with 'pip install pytz'."
) from e
run_id = kwargs["run_id"]
# get the run params from this run ID,
# or raise an error if this run ID has no corresponding metadata in self.run_map
try:
run_map_data = self.run_map[run_id]
except KeyError as e:
raise KeyError(
"This function has been called with a run_id"
" that was never registered in on_llm_start()."
" Restart and try running the LLM again"
) from e
# mark the duration time between on_llm_start() and on_llm_end()
time_from_start_to_end = time() - run_map_data["start_time"]
# create inferences to log to Arthur
inferences = []
for i, generations in enumerate(response.generations):
for generation in generations:
inference = {
"partner_inference_id": str(uuid.uuid4()),
"inference_timestamp": datetime.now(tz=pytz.UTC),
self.input_attr: run_map_data["input_texts"][i],
self.output_attr: generation.text,
}
if generation.generation_info is not None:
# add finish reason to the inference
# if generation info contains a finish reason and
# if the ArthurModel was registered to monitor finish_reason
if (
FINISH_REASON in generation.generation_info
and FINISH_REASON in self.attr_names
):
inference[FINISH_REASON] = generation.generation_info[
FINISH_REASON
]
# add token likelihoods data to the inference if the ArthurModel
# was registered to monitor token likelihoods
logprobs_data = generation.generation_info["logprobs"]
if (
logprobs_data is not None
and self.token_likelihood_attr is not None
):
logprobs = logprobs_data["top_logprobs"]
likelihoods = [
{k: np.exp(v) for k, v in logprobs[i].items()}
for i in range(len(logprobs))
]
inference[self.token_likelihood_attr] = likelihoods
# add token usage counts to the inference if the
# ArthurModel was registered to monitor token usage
if (
isinstance(response.llm_output, dict)
and TOKEN_USAGE in response.llm_output
):
token_usage = response.llm_output[TOKEN_USAGE]
if (
PROMPT_TOKENS in token_usage
and PROMPT_TOKENS in self.attr_names
):
inference[PROMPT_TOKENS] = token_usage[PROMPT_TOKENS]
if (
COMPLETION_TOKENS in token_usage
and COMPLETION_TOKENS in self.attr_names
):
inference[COMPLETION_TOKENS] = token_usage[COMPLETION_TOKENS]
# add inference duration to the inference if the ArthurModel
# was registered to monitor inference duration
if DURATION in self.attr_names:
inference[DURATION] = time_from_start_to_end
inferences.append(inference)
# send inferences to arthur
self.arthur_model.send_inferences(inferences)
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""On chain start, do nothing."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""On chain end, do nothing."""
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""On new token, pass."""
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
def on_tool_end(
self,
output: Any,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/fiddler_callback.py | import time
from typing import Any, Dict, List, Optional
from uuid import UUID
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
from langchain_community.callbacks.utils import import_pandas
# Define constants
# LLMResult keys
TOKEN_USAGE = "token_usage"
TOTAL_TOKENS = "total_tokens"
PROMPT_TOKENS = "prompt_tokens"
COMPLETION_TOKENS = "completion_tokens"
RUN_ID = "run_id"
MODEL_NAME = "model_name"
GOOD = "good"
BAD = "bad"
NEUTRAL = "neutral"
SUCCESS = "success"
FAILURE = "failure"
# Default values
DEFAULT_MAX_TOKEN = 65536
DEFAULT_MAX_DURATION = 120000
# Fiddler specific constants
PROMPT = "prompt"
RESPONSE = "response"
CONTEXT = "context"
DURATION = "duration"
FEEDBACK = "feedback"
LLM_STATUS = "llm_status"
FEEDBACK_POSSIBLE_VALUES = [GOOD, BAD, NEUTRAL]
# Define a dataset dictionary
_dataset_dict = {
PROMPT: ["fiddler"] * 10,
RESPONSE: ["fiddler"] * 10,
CONTEXT: ["fiddler"] * 10,
FEEDBACK: ["good"] * 10,
LLM_STATUS: ["success"] * 10,
MODEL_NAME: ["fiddler"] * 10,
RUN_ID: ["123e4567-e89b-12d3-a456-426614174000"] * 10,
TOTAL_TOKENS: [0, DEFAULT_MAX_TOKEN] * 5,
PROMPT_TOKENS: [0, DEFAULT_MAX_TOKEN] * 5,
COMPLETION_TOKENS: [0, DEFAULT_MAX_TOKEN] * 5,
DURATION: [1, DEFAULT_MAX_DURATION] * 5,
}
def import_fiddler() -> Any:
"""Import the fiddler python package and raise an error if it is not installed."""
return guard_import("fiddler", pip_name="fiddler-client")
# First, define custom callback handler implementations
class FiddlerCallbackHandler(BaseCallbackHandler):
def __init__(
self,
url: str,
org: str,
project: str,
model: str,
api_key: str,
) -> None:
"""
Initialize Fiddler callback handler.
Args:
url: Fiddler URL (e.g. https://demo.fiddler.ai).
Make sure to include the protocol (http/https).
org: Fiddler organization id
project: Fiddler project name to publish events to
model: Fiddler model name to publish events to
api_key: Fiddler authentication token
"""
super().__init__()
# Initialize Fiddler client and other necessary properties
self.fdl = import_fiddler()
self.pd = import_pandas()
self.url = url
self.org = org
self.project = project
self.model = model
self.api_key = api_key
self._df = self.pd.DataFrame(_dataset_dict)
self.run_id_prompts: Dict[UUID, List[str]] = {}
self.run_id_response: Dict[UUID, List[str]] = {}
self.run_id_starttime: Dict[UUID, int] = {}
# Initialize Fiddler client here
self.fiddler_client = self.fdl.FiddlerApi(url, org_id=org, auth_token=api_key)
if self.project not in self.fiddler_client.get_project_names():
print( # noqa: T201
f"adding project {self.project}." "This only has to be done once."
)
try:
self.fiddler_client.add_project(self.project)
except Exception as e:
print( # noqa: T201
f"Error adding project {self.project}:"
"{e}. Fiddler integration will not work."
)
raise e
dataset_info = self.fdl.DatasetInfo.from_dataframe(
self._df, max_inferred_cardinality=0
)
# Set feedback column to categorical
for i in range(len(dataset_info.columns)):
if dataset_info.columns[i].name == FEEDBACK:
dataset_info.columns[i].data_type = self.fdl.DataType.CATEGORY
dataset_info.columns[i].possible_values = FEEDBACK_POSSIBLE_VALUES
elif dataset_info.columns[i].name == LLM_STATUS:
dataset_info.columns[i].data_type = self.fdl.DataType.CATEGORY
dataset_info.columns[i].possible_values = [SUCCESS, FAILURE]
if self.model not in self.fiddler_client.get_model_names(self.project):
if self.model not in self.fiddler_client.get_dataset_names(self.project):
print( # noqa: T201
f"adding dataset {self.model} to project {self.project}."
"This only has to be done once."
)
try:
self.fiddler_client.upload_dataset(
project_id=self.project,
dataset_id=self.model,
dataset={"train": self._df},
info=dataset_info,
)
except Exception as e:
print( # noqa: T201
f"Error adding dataset {self.model}: {e}."
"Fiddler integration will not work."
)
raise e
model_info = self.fdl.ModelInfo.from_dataset_info(
dataset_info=dataset_info,
dataset_id="train",
model_task=self.fdl.ModelTask.LLM,
features=[PROMPT, CONTEXT, RESPONSE],
target=FEEDBACK,
metadata_cols=[
RUN_ID,
TOTAL_TOKENS,
PROMPT_TOKENS,
COMPLETION_TOKENS,
MODEL_NAME,
DURATION,
],
custom_features=self.custom_features,
)
print( # noqa: T201
f"adding model {self.model} to project {self.project}."
"This only has to be done once."
)
try:
self.fiddler_client.add_model(
project_id=self.project,
dataset_id=self.model,
model_id=self.model,
model_info=model_info,
)
except Exception as e:
print( # noqa: T201
f"Error adding model {self.model}: {e}."
"Fiddler integration will not work."
)
raise e
@property
def custom_features(self) -> list:
"""
Define custom features for the model to automatically enrich the data with.
Here, we enable the following enrichments:
- Automatic Embedding generation for prompt and response
- Text Statistics such as:
- Automated Readability Index
- Coleman Liau Index
- Dale Chall Readability Score
- Difficult Words
- Flesch Reading Ease
- Flesch Kincaid Grade
- Gunning Fog
- Linsear Write Formula
- PII - Personal Identifiable Information
- Sentiment Analysis
"""
return [
self.fdl.Enrichment(
name="Prompt Embedding",
enrichment="embedding",
columns=[PROMPT],
),
self.fdl.TextEmbedding(
name="Prompt CF",
source_column=PROMPT,
column="Prompt Embedding",
),
self.fdl.Enrichment(
name="Response Embedding",
enrichment="embedding",
columns=[RESPONSE],
),
self.fdl.TextEmbedding(
name="Response CF",
source_column=RESPONSE,
column="Response Embedding",
),
self.fdl.Enrichment(
name="Text Statistics",
enrichment="textstat",
columns=[PROMPT, RESPONSE],
config={
"statistics": [
"automated_readability_index",
"coleman_liau_index",
"dale_chall_readability_score",
"difficult_words",
"flesch_reading_ease",
"flesch_kincaid_grade",
"gunning_fog",
"linsear_write_formula",
]
},
),
self.fdl.Enrichment(
name="PII",
enrichment="pii",
columns=[PROMPT, RESPONSE],
),
self.fdl.Enrichment(
name="Sentiment",
enrichment="sentiment",
columns=[PROMPT, RESPONSE],
),
]
def _publish_events(
self,
run_id: UUID,
prompt_responses: List[str],
duration: int,
llm_status: str,
model_name: Optional[str] = "",
token_usage_dict: Optional[Dict[str, Any]] = None,
) -> None:
"""
Publish events to fiddler
"""
prompt_count = len(self.run_id_prompts[run_id])
df = self.pd.DataFrame(
{
PROMPT: self.run_id_prompts[run_id],
RESPONSE: prompt_responses,
RUN_ID: [str(run_id)] * prompt_count,
DURATION: [duration] * prompt_count,
LLM_STATUS: [llm_status] * prompt_count,
MODEL_NAME: [model_name] * prompt_count,
}
)
if token_usage_dict:
for key, value in token_usage_dict.items():
df[key] = [value] * prompt_count if isinstance(value, int) else value
try:
if df.shape[0] > 1:
self.fiddler_client.publish_events_batch(self.project, self.model, df)
else:
df_dict = df.to_dict(orient="records")
self.fiddler_client.publish_event(
self.project, self.model, event=df_dict[0]
)
except Exception as e:
print( # noqa: T201
f"Error publishing events to fiddler: {e}. continuing..."
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
run_id = kwargs[RUN_ID]
self.run_id_prompts[run_id] = prompts
self.run_id_starttime[run_id] = int(time.time() * 1000)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
flattened_llmresult = response.flatten()
run_id = kwargs[RUN_ID]
run_duration = int(time.time() * 1000) - self.run_id_starttime[run_id]
model_name = ""
token_usage_dict = {}
if isinstance(response.llm_output, dict):
token_usage_dict = {
k: v
for k, v in response.llm_output.items()
if k in [TOTAL_TOKENS, PROMPT_TOKENS, COMPLETION_TOKENS]
}
model_name = response.llm_output.get(MODEL_NAME, "")
prompt_responses = [
llmresult.generations[0][0].text for llmresult in flattened_llmresult
]
self._publish_events(
run_id,
prompt_responses,
run_duration,
SUCCESS,
model_name,
token_usage_dict,
)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
run_id = kwargs[RUN_ID]
duration = int(time.time() * 1000) - self.run_id_starttime[run_id]
self._publish_events(
run_id, [""] * len(self.run_id_prompts[run_id]), duration, FAILURE
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/context_callback.py | """Callback handler for Context AI"""
import os
from typing import Any, Dict, List
from uuid import UUID
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
def import_context() -> Any:
"""Import the `getcontext` package."""
return (
guard_import("getcontext", pip_name="python-context"),
guard_import("getcontext.token", pip_name="python-context").Credential,
guard_import(
"getcontext.generated.models", pip_name="python-context"
).Conversation,
guard_import("getcontext.generated.models", pip_name="python-context").Message,
guard_import(
"getcontext.generated.models", pip_name="python-context"
).MessageRole,
guard_import("getcontext.generated.models", pip_name="python-context").Rating,
)
class ContextCallbackHandler(BaseCallbackHandler):
"""Callback Handler that records transcripts to the Context service.
(https://context.ai).
Keyword Args:
token (optional): The token with which to authenticate requests to Context.
Visit https://with.context.ai/settings to generate a token.
If not provided, the value of the `CONTEXT_TOKEN` environment
variable will be used.
Raises:
ImportError: if the `context-python` package is not installed.
Chat Example:
>>> from langchain_community.llms import ChatOpenAI
>>> from langchain_community.callbacks import ContextCallbackHandler
>>> context_callback = ContextCallbackHandler(
... token="<CONTEXT_TOKEN_HERE>",
... )
>>> chat = ChatOpenAI(
... temperature=0,
... headers={"user_id": "123"},
... callbacks=[context_callback],
... openai_api_key="API_KEY_HERE",
... )
>>> messages = [
... SystemMessage(content="You translate English to French."),
... HumanMessage(content="I love programming with LangChain."),
... ]
>>> chat.invoke(messages)
Chain Example:
>>> from langchain.chains import LLMChain
>>> from langchain_community.chat_models import ChatOpenAI
>>> from langchain_community.callbacks import ContextCallbackHandler
>>> context_callback = ContextCallbackHandler(
... token="<CONTEXT_TOKEN_HERE>",
... )
>>> human_message_prompt = HumanMessagePromptTemplate(
... prompt=PromptTemplate(
... template="What is a good name for a company that makes {product}?",
... input_variables=["product"],
... ),
... )
>>> chat_prompt_template = ChatPromptTemplate.from_messages(
... [human_message_prompt]
... )
>>> callback = ContextCallbackHandler(token)
>>> # Note: the same callback object must be shared between the
... LLM and the chain.
>>> chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
>>> chain = LLMChain(
... llm=chat,
... prompt=chat_prompt_template,
... callbacks=[callback]
... )
>>> chain.run("colorful socks")
"""
def __init__(self, token: str = "", verbose: bool = False, **kwargs: Any) -> None:
(
self.context,
self.credential,
self.conversation_model,
self.message_model,
self.message_role_model,
self.rating_model,
) = import_context()
token = token or os.environ.get("CONTEXT_TOKEN") or ""
self.client = self.context.ContextAPI(credential=self.credential(token))
self.chain_run_id = None
self.llm_model = None
self.messages: List[Any] = []
self.metadata: Dict[str, str] = {}
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
**kwargs: Any,
) -> Any:
"""Run when the chat model is started."""
llm_model = kwargs.get("invocation_params", {}).get("model", None)
if llm_model is not None:
self.metadata["model"] = llm_model
if len(messages) == 0:
return
for message in messages[0]:
role = self.message_role_model.SYSTEM
if message.type == "human":
role = self.message_role_model.USER
elif message.type == "system":
role = self.message_role_model.SYSTEM
elif message.type == "ai":
role = self.message_role_model.ASSISTANT
self.messages.append(
self.message_model(
message=message.content,
role=role,
)
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends."""
if len(response.generations) == 0 or len(response.generations[0]) == 0:
return
if not self.chain_run_id:
generation = response.generations[0][0]
self.messages.append(
self.message_model(
message=generation.text,
role=self.message_role_model.ASSISTANT,
)
)
self._log_conversation()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts."""
self.chain_run_id = kwargs.get("run_id", None)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends."""
self.messages.append(
self.message_model(
message=outputs["text"],
role=self.message_role_model.ASSISTANT,
)
)
self._log_conversation()
self.chain_run_id = None
def _log_conversation(self) -> None:
"""Log the conversation to the context API."""
if len(self.messages) == 0:
return
self.client.log.conversation_upsert(
body={
"conversation": self.conversation_model(
messages=self.messages,
metadata=self.metadata,
)
}
)
self.messages = []
self.metadata = {}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/argilla_callback.py | import os
import warnings
from typing import Any, Dict, List, Optional, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from packaging.version import parse
class ArgillaCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs into Argilla.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset` in
Argilla, please visit
https://docs.argilla.io/en/latest/tutorials_and_integrations/integrations/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
Examples:
>>> from langchain_community.llms import OpenAI
>>> from langchain_community.callbacks import ArgillaCallbackHandler
>>> argilla_callback = ArgillaCallbackHandler(
... dataset_name="my-dataset",
... workspace_name="my-workspace",
... api_url="http://localhost:6900",
... api_key="argilla.apikey",
... )
>>> llm = OpenAI(
... temperature=0,
... callbacks=[argilla_callback],
... verbose=True,
... openai_api_key="API_KEY_HERE",
... )
>>> llm.generate([
... "What is the best NLP-annotation tool out there? (no bias at all)",
... ])
"Argilla, no doubt about it."
"""
REPO_URL: str = "https://github.com/argilla-io/argilla"
ISSUES_URL: str = f"{REPO_URL}/issues"
BLOG_URL: str = "https://docs.argilla.io/en/latest/tutorials_and_integrations/integrations/use_argilla_callback_in_langchain.html"
DEFAULT_API_URL: str = "http://localhost:6900"
def __init__(
self,
dataset_name: str,
workspace_name: Optional[str] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
"""Initializes the `ArgillaCallbackHandler`.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset`
in Argilla, please visit
https://docs.argilla.io/en/latest/tutorials_and_integrations/integrations/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
"""
super().__init__()
# Import Argilla (not via `import_argilla` to keep hints in IDEs)
try:
import argilla as rg
self.ARGILLA_VERSION = rg.__version__
except ImportError:
raise ImportError(
"To use the Argilla callback manager you need to have the `argilla` "
"Python package installed. Please install it with `pip install argilla`"
)
# Check whether the Argilla version is compatible
if parse(self.ARGILLA_VERSION) < parse("1.8.0"):
raise ImportError(
f"The installed `argilla` version is {self.ARGILLA_VERSION} but "
"`ArgillaCallbackHandler` requires at least version 1.8.0. Please "
"upgrade `argilla` with `pip install --upgrade argilla`."
)
# Show a warning message if Argilla will assume the default values will be used
if api_url is None and os.getenv("ARGILLA_API_URL") is None:
warnings.warn(
(
"Since `api_url` is None, and the env var `ARGILLA_API_URL` is not"
f" set, it will default to `{self.DEFAULT_API_URL}`, which is the"
" default API URL in Argilla Quickstart."
),
)
api_url = self.DEFAULT_API_URL
if api_key is None and os.getenv("ARGILLA_API_KEY") is None:
self.DEFAULT_API_KEY = (
"admin.apikey"
if parse(self.ARGILLA_VERSION) < parse("1.11.0")
else "owner.apikey"
)
warnings.warn(
(
"Since `api_key` is None, and the env var `ARGILLA_API_KEY` is not"
f" set, it will default to `{self.DEFAULT_API_KEY}`, which is the"
" default API key in Argilla Quickstart."
),
)
api_key = self.DEFAULT_API_KEY
# Connect to Argilla with the provided credentials, if applicable
try:
rg.init(api_key=api_key, api_url=api_url)
except Exception as e:
raise ConnectionError(
f"Could not connect to Argilla with exception: '{e}'.\n"
"Please check your `api_key` and `api_url`, and make sure that "
"the Argilla server is up and running. If the problem persists "
f"please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
# Set the Argilla variables
self.dataset_name = dataset_name
self.workspace_name = workspace_name or rg.get_workspace()
# Retrieve the `FeedbackDataset` from Argilla (without existing records)
try:
extra_args = {}
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
warnings.warn(
f"You have Argilla {self.ARGILLA_VERSION}, but Argilla 1.14.0 or"
" higher is recommended.",
UserWarning,
)
extra_args = {"with_records": False}
self.dataset = rg.FeedbackDataset.from_argilla(
name=self.dataset_name,
workspace=self.workspace_name,
**extra_args,
)
except Exception as e:
raise FileNotFoundError(
f"`FeedbackDataset` retrieval from Argilla failed with exception `{e}`."
f"\nPlease check that the dataset with name={self.dataset_name} in the"
f" workspace={self.workspace_name} exists in advance. If you need help"
" on how to create a `langchain`-compatible `FeedbackDataset` in"
f" Argilla, please visit {self.BLOG_URL}. If the problem persists"
f" please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
supported_fields = ["prompt", "response"]
if supported_fields != [field.name for field in self.dataset.fields]:
raise ValueError(
f"`FeedbackDataset` with name={self.dataset_name} in the workspace="
f"{self.workspace_name} had fields that are not supported yet for the"
f"`langchain` integration. Supported fields are: {supported_fields},"
f" and the current `FeedbackDataset` fields are {[field.name for field in self.dataset.fields]}." # noqa: E501
" For more information on how to create a `langchain`-compatible"
f" `FeedbackDataset` in Argilla, please visit {self.BLOG_URL}."
)
self.prompts: Dict[str, List[str]] = {}
warnings.warn(
(
"The `ArgillaCallbackHandler` is currently in beta and is subject to"
" change based on updates to `langchain`. Please report any issues to"
f" {self.ISSUES_URL} as an `integration` issue."
),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Save the prompts in memory when an LLM starts."""
self.prompts.update({str(kwargs["parent_run_id"] or kwargs["run_id"]): prompts})
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log records to Argilla when an LLM ends."""
# Do nothing if there's a parent_run_id, since we will log the records when
# the chain ends
if kwargs["parent_run_id"]:
return
# Creates the records and adds them to the `FeedbackDataset`
prompts = self.prompts[str(kwargs["run_id"])]
for prompt, generations in zip(prompts, response.generations):
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": generation.text.strip(),
},
}
for generation in generations
]
)
# Pop current run from `self.runs`
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts.
"""
if "input" in inputs:
self.prompts.update(
{
str(kwargs["parent_run_id"] or kwargs["run_id"]): (
inputs["input"]
if isinstance(inputs["input"], list)
else [inputs["input"]]
)
}
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""If either the `parent_run_id` or the `run_id` is in `self.prompts`, then
log the outputs to Argilla, and pop the run from `self.prompts`. The behavior
differs if the output is a list or not.
"""
if not any(
key in self.prompts
for key in [str(kwargs["parent_run_id"]), str(kwargs["run_id"])]
):
return
prompts: List = self.prompts.get(str(kwargs["parent_run_id"])) or cast(
List, self.prompts.get(str(kwargs["run_id"]), [])
)
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, list):
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": output["text"].strip(),
},
}
for prompt, output in zip(prompts, chain_output_val)
]
)
else:
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": " ".join(prompts),
"response": chain_output_val.strip(),
},
}
]
)
# Pop current run from `self.runs`
if str(kwargs["parent_run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["parent_run_id"]))
if str(kwargs["run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: Any,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/clearml_callback.py | from __future__ import annotations
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
if TYPE_CHECKING:
import pandas as pd
def import_clearml() -> Any:
"""Import the clearml python package and raise an error if it is not installed."""
return guard_import("clearml")
class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to ClearML.
Parameters:
job_type (str): The type of clearml task such as "inference", "testing" or "qc"
project_name (str): The clearml project name
tags (list): Tags to add to the task
task_name (str): Name of the clearml task
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics
stream_logs (bool): Whether to stream callback actions to ClearML
This handler will utilize the associated callback method and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action. It then logs the response to the ClearML console.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
project_name: Optional[str] = "langchain_callback_demo",
tags: Optional[Sequence] = None,
task_name: Optional[str] = None,
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
) -> None:
"""Initialize callback handler."""
clearml = import_clearml()
spacy = import_spacy()
super().__init__()
self.task_type = task_type
self.project_name = project_name
self.tags = tags
self.task_name = task_name
self.visualize = visualize
self.complexity_metrics = complexity_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
# Check if ClearML task already exists (e.g. in pipeline)
if clearml.Task.current_task():
self.task = clearml.Task.current_task()
else:
self.task = clearml.Task.init(
task_type=self.task_type,
project_name=self.project_name,
tags=self.tags,
task_name=self.task_name,
output_uri=True,
)
self.logger = self.task.get_logger()
warning = (
"The clearml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/allegroai/clearml/issues with the tag `langchain`."
)
self.logger.report_text(warning, level=30, print_console=True)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.logger.report_text(prompt_resp)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(self.analyze_text(generation.text))
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.logger.report_text(generation_resp)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs.get("input", inputs.get("human_input"))
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
else:
raise ValueError("Unexpected data format provided!")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_chain_end",
"outputs": outputs.get("output", outputs.get("text")),
}
)
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.on_tool_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_finish_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_action_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def analyze_text(self, text: str) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
textstat = import_textstat()
spacy = import_spacy()
if self.complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(
text
),
"dale_chall_readability_score": textstat.dale_chall_readability_score(
text
),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update(text_complexity_metrics)
if self.visualize and self.nlp and self.temp_dir.name is not None:
doc = self.nlp(text)
dep_out = spacy.displacy.render(doc, style="dep", jupyter=False, page=True)
dep_output_path = Path(
self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
)
dep_output_path.open("w", encoding="utf-8").write(dep_out)
ent_out = spacy.displacy.render(doc, style="ent", jupyter=False, page=True)
ent_output_path = Path(
self.temp_dir.name, hash_string(f"ent-{text}") + ".html"
)
ent_output_path.open("w", encoding="utf-8").write(ent_out)
self.logger.report_media(
"Dependencies Plot", text, local_path=dep_output_path
)
self.logger.report_media("Entities Plot", text, local_path=ent_output_path)
return resp
@staticmethod
def _build_llm_df(
base_df: pd.DataFrame, base_df_fields: Sequence, rename_map: Mapping
) -> pd.DataFrame:
base_df_fields = [field for field in base_df_fields if field in base_df]
rename_map = {
map_entry_k: map_entry_v
for map_entry_k, map_entry_v in rename_map.items()
if map_entry_k in base_df_fields
}
llm_df = base_df[base_df_fields].dropna(axis=1)
if rename_map:
llm_df = llm_df.rename(rename_map, axis=1)
return llm_df
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = ClearMLCallbackHandler._build_llm_df(
base_df=on_llm_end_records_df,
base_df_fields=["step", "prompts"]
+ (["name"] if "name" in on_llm_end_records_df else ["id"]),
rename_map={"step": "prompt_step"},
)
complexity_metrics_columns = []
visualizations_columns: List = []
if self.complexity_metrics:
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
llm_outputs_df = ClearMLCallbackHandler._build_llm_df(
on_llm_end_records_df,
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns,
{"step": "output_step", "text": "output"},
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
return session_analysis_df
def flush_tracker(
self,
name: Optional[str] = None,
langchain_asset: Any = None,
finish: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
pd = import_pandas()
clearml = import_clearml()
# Log the action records
self.logger.report_table(
"Action Records", name, table_plot=pd.DataFrame(self.action_records)
)
# Session analysis
session_analysis_df = self._create_session_analysis_df()
self.logger.report_table(
"Session Analysis", name, table_plot=session_analysis_df
)
if self.stream_logs:
self.logger.report_text(
{
"action_records": pd.DataFrame(self.action_records),
"session_analysis": session_analysis_df,
}
)
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, "model.json")
try:
langchain_asset.save(langchain_asset_path)
# Create output model and connect it to the task
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except NotImplementedError as e:
print("Could not save model.") # noqa: T201
print(repr(e)) # noqa: T201
pass
# Cleanup after adding everything to ClearML
self.task.flush(wait_for_uploads=True)
self.temp_dir.cleanup()
self.temp_dir = tempfile.TemporaryDirectory()
self.reset_callback_meta()
if finish:
self.task.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/llmonitor_callback.py | import importlib.metadata
import logging
import os
import traceback
import warnings
from contextvars import ContextVar
from typing import Any, Dict, List, Union, cast
from uuid import UUID
import requests
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from packaging.version import parse
logger = logging.getLogger(__name__)
DEFAULT_API_URL = "https://app.llmonitor.com"
user_ctx = ContextVar[Union[str, None]]("user_ctx", default=None)
user_props_ctx = ContextVar[Union[str, None]]("user_props_ctx", default=None)
PARAMS_TO_CAPTURE = [
"temperature",
"top_p",
"top_k",
"stop",
"presence_penalty",
"frequence_penalty",
"seed",
"function_call",
"functions",
"tools",
"tool_choice",
"response_format",
"max_tokens",
"logit_bias",
]
class UserContextManager:
"""Context manager for LLMonitor user context."""
def __init__(self, user_id: str, user_props: Any = None) -> None:
user_ctx.set(user_id)
user_props_ctx.set(user_props)
def __enter__(self) -> Any:
pass
def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> Any:
user_ctx.set(None)
user_props_ctx.set(None)
def identify(user_id: str, user_props: Any = None) -> UserContextManager:
"""Builds an LLMonitor UserContextManager
Parameters:
- `user_id`: The user id.
- `user_props`: The user properties.
Returns:
A context manager that sets the user context.
"""
return UserContextManager(user_id, user_props)
def _serialize(obj: Any) -> Union[Dict[str, Any], List[Any], Any]:
if hasattr(obj, "to_json"):
return obj.to_json()
if isinstance(obj, dict):
return {key: _serialize(value) for key, value in obj.items()}
if isinstance(obj, list):
return [_serialize(element) for element in obj]
return obj
def _parse_input(raw_input: Any) -> Any:
if not raw_input:
return None
# if it's an array of 1, just parse the first element
if isinstance(raw_input, list) and len(raw_input) == 1:
return _parse_input(raw_input[0])
if not isinstance(raw_input, dict):
return _serialize(raw_input)
input_value = raw_input.get("input")
inputs_value = raw_input.get("inputs")
question_value = raw_input.get("question")
query_value = raw_input.get("query")
if input_value:
return input_value
if inputs_value:
return inputs_value
if question_value:
return question_value
if query_value:
return query_value
return _serialize(raw_input)
def _parse_output(raw_output: dict) -> Any:
if not raw_output:
return None
if not isinstance(raw_output, dict):
return _serialize(raw_output)
text_value = raw_output.get("text")
output_value = raw_output.get("output")
output_text_value = raw_output.get("output_text")
answer_value = raw_output.get("answer")
result_value = raw_output.get("result")
if text_value:
return text_value
if answer_value:
return answer_value
if output_value:
return output_value
if output_text_value:
return output_text_value
if result_value:
return result_value
return _serialize(raw_output)
def _parse_lc_role(
role: str,
) -> str:
if role == "human":
return "user"
else:
return role
def _get_user_id(metadata: Any) -> Any:
if user_ctx.get() is not None:
return user_ctx.get()
metadata = metadata or {}
user_id = metadata.get("user_id")
if user_id is None:
user_id = metadata.get("userId") # legacy, to delete in the future
return user_id
def _get_user_props(metadata: Any) -> Any:
if user_props_ctx.get() is not None:
return user_props_ctx.get()
metadata = metadata or {}
return metadata.get("user_props", None)
def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
keys = ["function_call", "tool_calls", "tool_call_id", "name"]
parsed = {"text": message.content, "role": _parse_lc_role(message.type)}
parsed.update(
{
key: cast(Any, message.additional_kwargs.get(key))
for key in keys
if message.additional_kwargs.get(key) is not None
}
)
return parsed
def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) -> List[Dict[str, Any]]:
return [_parse_lc_message(message) for message in messages]
class LLMonitorCallbackHandler(BaseCallbackHandler):
"""Callback Handler for LLMonitor`.
#### Parameters:
- `app_id`: The app id of the app you want to report to. Defaults to
`None`, which means that `LLMONITOR_APP_ID` will be used.
- `api_url`: The url of the LLMonitor API. Defaults to `None`,
which means that either `LLMONITOR_API_URL` environment variable
or `https://app.llmonitor.com` will be used.
#### Raises:
- `ValueError`: if `app_id` is not provided either as an
argument or as an environment variable.
- `ConnectionError`: if the connection to the API fails.
#### Example:
```python
from langchain_community.llms import OpenAI
from langchain_community.callbacks import LLMonitorCallbackHandler
llmonitor_callback = LLMonitorCallbackHandler()
llm = OpenAI(callbacks=[llmonitor_callback],
metadata={"userId": "user-123"})
llm.invoke("Hello, how are you?")
```
"""
__api_url: str
__app_id: str
__verbose: bool
__llmonitor_version: str
__has_valid_config: bool
def __init__(
self,
app_id: Union[str, None] = None,
api_url: Union[str, None] = None,
verbose: bool = False,
) -> None:
super().__init__()
self.__has_valid_config = True
try:
import llmonitor
self.__llmonitor_version = importlib.metadata.version("llmonitor")
self.__track_event = llmonitor.track_event
except ImportError:
logger.warning(
"""[LLMonitor] To use the LLMonitor callback handler you need to
have the `llmonitor` Python package installed. Please install it
with `pip install llmonitor`"""
)
self.__has_valid_config = False
return
if parse(self.__llmonitor_version) < parse("0.0.32"):
logger.warning(
f"""[LLMonitor] The installed `llmonitor` version is
{self.__llmonitor_version}
but `LLMonitorCallbackHandler` requires at least version 0.0.32
upgrade `llmonitor` with `pip install --upgrade llmonitor`"""
)
self.__has_valid_config = False
self.__has_valid_config = True
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))
_app_id = app_id or os.getenv("LLMONITOR_APP_ID")
if _app_id is None:
logger.warning(
"""[LLMonitor] app_id must be provided either as an argument or
as an environment variable"""
)
self.__has_valid_config = False
else:
self.__app_id = _app_id
if self.__has_valid_config is False:
return None
try:
res = requests.get(f"{self.__api_url}/api/app/{self.__app_id}")
if not res.ok:
raise ConnectionError()
except Exception:
logger.warning(
f"""[LLMonitor] Could not connect to the LLMonitor API at
{self.__api_url}"""
)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get("invocation_params", {})
params.update(
serialized.get("kwargs", {})
) # Sometimes, for example with ChatAnthropic, `invocation_params` is empty
name = (
params.get("model")
or params.get("model_name")
or params.get("model_id")
)
if not name and "anthropic" in params.get("_type"):
name = "claude-2"
extra = {
param: params.get(param)
for param in PARAMS_TO_CAPTURE
if params.get(param) is not None
}
input = _parse_input(prompts)
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
extra=extra,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_llm_start: {e}")
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get("invocation_params", {})
params.update(
serialized.get("kwargs", {})
) # Sometimes, for example with ChatAnthropic, `invocation_params` is empty
name = (
params.get("model")
or params.get("model_name")
or params.get("model_id")
)
if not name and "anthropic" in params.get("_type"):
name = "claude-2"
extra = {
param: params.get(param)
for param in PARAMS_TO_CAPTURE
if params.get(param) is not None
}
input = _parse_lc_messages(messages[0])
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
extra=extra,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chat_model_start: {e}")
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
token_usage = (response.llm_output or {}).get("token_usage", {})
parsed_output: Any = [
_parse_lc_message(generation.message)
if hasattr(generation, "message")
else generation.text
for generation in response.generations[0]
]
# if it's an array of 1, just parse the first element
if len(parsed_output) == 1:
parsed_output = parsed_output[0]
self.__track_event(
"llm",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=parsed_output,
token_usage={
"prompt": token_usage.get("prompt_tokens"),
"completion": token_usage.get("completion_tokens"),
},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_llm_end: {e}")
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
name = serialized.get("name")
self.__track_event(
"tool",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input_str,
tags=tags,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_start: {e}")
def on_tool_end(
self,
output: Any,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
**kwargs: Any,
) -> None:
output = str(output)
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_end: {e}")
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = serialized.get("id", [None, None, None, None])[3]
type = "chain"
metadata = metadata or {}
agentName = metadata.get("agent_name")
if agentName is None:
agentName = metadata.get("agentName")
if name == "AgentExecutor" or name == "PlanAndExecute":
type = "agent"
if agentName is not None:
type = "agent"
name = agentName
if parent_run_id is not None:
type = "chain"
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
input = _parse_input(inputs)
self.__track_event(
type,
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_start: {e}")
def on_chain_end(
self,
outputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(outputs)
self.__track_event(
"chain",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_end: {e}")
def on_agent_action(
self,
action: AgentAction,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = action.tool
input = _parse_input(action.tool_input)
self.__track_event(
"tool",
"start",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_agent_action: {e}")
def on_agent_finish(
self,
finish: AgentFinish,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(finish.return_values)
self.__track_event(
"agent",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_agent_finish: {e}")
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"chain",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_error: {e}")
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_error: {e}")
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"llm",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_llm_error: {e}")
__all__ = ["LLMonitorCallbackHandler", "identify"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/labelstudio_callback.py | import os
import warnings
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import UUID
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage, ChatMessage
from langchain_core.outputs import Generation, LLMResult
class LabelStudioMode(Enum):
"""Label Studio mode enumerator."""
PROMPT = "prompt"
CHAT = "chat"
def get_default_label_configs(
mode: Union[str, LabelStudioMode],
) -> Tuple[str, LabelStudioMode]:
"""Get default Label Studio configs for the given mode.
Parameters:
mode: Label Studio mode ("prompt" or "chat")
Returns: Tuple of Label Studio config and mode
"""
_default_label_configs = {
LabelStudioMode.PROMPT.value: """
<View>
<Style>
.prompt-box {
background-color: white;
border-radius: 10px;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
padding: 20px;
}
</Style>
<View className="root">
<View className="prompt-box">
<Text name="prompt" value="$prompt"/>
</View>
<TextArea name="response" toName="prompt"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="prompt"/>
</View>""",
LabelStudioMode.CHAT.value: """
<View>
<View className="root">
<Paragraphs name="dialogue"
value="$prompt"
layout="dialogue"
textKey="content"
nameKey="role"
granularity="sentence"/>
<Header value="Final response:"/>
<TextArea name="response" toName="dialogue"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="dialogue"/>
</View>""",
}
if isinstance(mode, str):
mode = LabelStudioMode(mode)
return _default_label_configs[mode.value], mode
class LabelStudioCallbackHandler(BaseCallbackHandler):
"""Label Studio callback handler.
Provides the ability to send predictions to Label Studio
for human evaluation, feedback and annotation.
Parameters:
api_key: Label Studio API key
url: Label Studio URL
project_id: Label Studio project ID
project_name: Label Studio project name
project_config: Label Studio project config (XML)
mode: Label Studio mode ("prompt" or "chat")
Examples:
>>> from langchain_community.llms import OpenAI
>>> from langchain_community.callbacks import LabelStudioCallbackHandler
>>> handler = LabelStudioCallbackHandler(
... api_key='<your_key_here>',
... url='http://localhost:8080',
... project_name='LangChain-%Y-%m-%d',
... mode='prompt'
... )
>>> llm = OpenAI(callbacks=[handler])
>>> llm.invoke('Tell me a story about a dog.')
"""
DEFAULT_PROJECT_NAME: str = "LangChain-%Y-%m-%d"
def __init__(
self,
api_key: Optional[str] = None,
url: Optional[str] = None,
project_id: Optional[int] = None,
project_name: str = DEFAULT_PROJECT_NAME,
project_config: Optional[str] = None,
mode: Union[str, LabelStudioMode] = LabelStudioMode.PROMPT,
):
super().__init__()
# Import LabelStudio SDK
try:
import label_studio_sdk as ls
except ImportError:
raise ImportError(
f"You're using {self.__class__.__name__} in your code,"
f" but you don't have the LabelStudio SDK "
f"Python package installed or upgraded to the latest version. "
f"Please run `pip install -U label-studio-sdk`"
f" before using this callback."
)
# Check if Label Studio API key is provided
if not api_key:
if os.getenv("LABEL_STUDIO_API_KEY"):
api_key = str(os.getenv("LABEL_STUDIO_API_KEY"))
else:
raise ValueError(
f"You're using {self.__class__.__name__} in your code,"
f" Label Studio API key is not provided. "
f"Please provide Label Studio API key: "
f"go to the Label Studio instance, navigate to "
f"Account & Settings -> Access Token and copy the key. "
f"Use the key as a parameter for the callback: "
f"{self.__class__.__name__}"
f"(label_studio_api_key='<your_key_here>', ...) or "
f"set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>"
)
self.api_key = api_key
if not url:
if os.getenv("LABEL_STUDIO_URL"):
url = os.getenv("LABEL_STUDIO_URL")
else:
warnings.warn(
f"Label Studio URL is not provided, "
f"using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}"
f"If you want to provide your own URL, use the parameter: "
f"{self.__class__.__name__}"
f"(label_studio_url='<your_url_here>', ...) "
f"or set the environment variable LABEL_STUDIO_URL=<your_url_here>"
)
url = ls.LABEL_STUDIO_DEFAULT_URL
self.url = url
# Maps run_id to prompts
self.payload: Dict[str, Dict] = {}
self.ls_client = ls.Client(url=self.url, api_key=self.api_key)
self.project_name = project_name
if project_config:
self.project_config = project_config
self.mode = None
else:
self.project_config, self.mode = get_default_label_configs(mode)
self.project_id = project_id or os.getenv("LABEL_STUDIO_PROJECT_ID")
if self.project_id is not None:
self.ls_project = self.ls_client.get_project(int(self.project_id))
else:
project_title = datetime.today().strftime(self.project_name)
existing_projects = self.ls_client.get_projects(title=project_title)
if existing_projects:
self.ls_project = existing_projects[0]
self.project_id = self.ls_project.id
else:
self.ls_project = self.ls_client.create_project(
title=project_title, label_config=self.project_config
)
self.project_id = self.ls_project.id
self.parsed_label_config = self.ls_project.parsed_label_config
# Find the first TextArea tag
# "from_name", "to_name", "value" will be used to create predictions
self.from_name, self.to_name, self.value, self.input_type = (
None,
None,
None,
None,
)
for tag_name, tag_info in self.parsed_label_config.items():
if tag_info["type"] == "TextArea":
self.from_name = tag_name
self.to_name = tag_info["to_name"][0]
self.value = tag_info["inputs"][0]["value"]
self.input_type = tag_info["inputs"][0]["type"]
break
if not self.from_name:
error_message = (
f'Label Studio project "{self.project_name}" '
f"does not have a TextArea tag. "
f"Please add a TextArea tag to the project."
)
if self.mode == LabelStudioMode.PROMPT:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
' and select "Generative AI -> '
'Supervised Language Model Fine-tuning" template.'
)
else:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
" and check available templates under "
'"Generative AI" section.'
)
raise ValueError(error_message)
def add_prompts_generations(
self, run_id: str, generations: List[List[Generation]]
) -> None:
# Create tasks in Label Studio
tasks = []
prompts = self.payload[run_id]["prompts"]
model_version = (
self.payload[run_id]["kwargs"]
.get("invocation_params", {})
.get("model_name")
)
for prompt, generation in zip(prompts, generations):
tasks.append(
{
"data": {
self.value: prompt,
"run_id": run_id,
},
"predictions": [
{
"result": [
{
"from_name": self.from_name,
"to_name": self.to_name,
"type": "textarea",
"value": {"text": [g.text for g in generation]},
}
],
"model_version": model_version,
}
],
}
)
self.ls_project.import_tasks(tasks)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Text":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
f'To make it work with the mode="chat", '
f"the input type should be <Text>.\n"
f"Read more here https://labelstud.io/tags/text"
)
run_id = str(kwargs["run_id"])
self.payload[run_id] = {"prompts": prompts, "kwargs": kwargs}
def _get_message_role(self, message: BaseMessage) -> str:
"""Get the role of the message."""
if isinstance(message, ChatMessage):
return message.role
else:
return message.__class__.__name__
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Paragraphs":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
f'To make it work with the mode="chat", '
f"the input type should be <Paragraphs>.\n"
f"Read more here https://labelstud.io/tags/paragraphs"
)
prompts = []
for message_list in messages:
dialog = []
for message in message_list:
dialog.append(
{
"role": self._get_message_role(message),
"content": message.content,
}
)
prompts.append(dialog)
self.payload[str(run_id)] = {
"prompts": prompts,
"tags": tags,
"metadata": metadata,
"run_id": run_id,
"parent_run_id": parent_run_id,
"kwargs": kwargs,
}
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Create a new Label Studio task for each prompt and generation."""
run_id = str(kwargs["run_id"])
# Submit results to Label Studio
self.add_prompts_generations(run_id, response.generations)
# Pop current run from `self.runs`
self.payload.pop(run_id)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/aim_callback.py | from copy import deepcopy
from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
def import_aim() -> Any:
"""Import the aim python package and raise an error if it is not installed."""
return guard_import("aim")
class BaseMetadataCallbackHandler:
"""Callback handler for the metadata and associated function states for callbacks.
Attributes:
step (int): The current step.
starts (int): The number of times the start method has been called.
ends (int): The number of times the end method has been called.
errors (int): The number of times the error method has been called.
text_ctr (int): The number of times the text method has been called.
ignore_llm_ (bool): Whether to ignore llm callbacks.
ignore_chain_ (bool): Whether to ignore chain callbacks.
ignore_agent_ (bool): Whether to ignore agent callbacks.
ignore_retriever_ (bool): Whether to ignore retriever callbacks.
always_verbose_ (bool): Whether to always be verbose.
chain_starts (int): The number of times the chain start method has been called.
chain_ends (int): The number of times the chain end method has been called.
llm_starts (int): The number of times the llm start method has been called.
llm_ends (int): The number of times the llm end method has been called.
llm_streams (int): The number of times the text method has been called.
tool_starts (int): The number of times the tool start method has been called.
tool_ends (int): The number of times the tool end method has been called.
agent_ends (int): The number of times the agent end method has been called.
"""
def __init__(self) -> None:
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return self.always_verbose_
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def get_custom_callback_meta(self) -> Dict[str, Any]:
return {
"step": self.step,
"starts": self.starts,
"ends": self.ends,
"errors": self.errors,
"text_ctr": self.text_ctr,
"chain_starts": self.chain_starts,
"chain_ends": self.chain_ends,
"llm_starts": self.llm_starts,
"llm_ends": self.llm_ends,
"llm_streams": self.llm_streams,
"tool_starts": self.tool_starts,
"tool_ends": self.tool_ends,
"agent_ends": self.agent_ends,
}
def reset_callback_meta(self) -> None:
"""Reset the callback metadata."""
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
return None
class AimCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Aim.
Parameters:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
This handler will utilize the associated callback method called and formats
the input of each callback function with metadata regarding the state of LLM run
and then logs the response to Aim.
"""
def __init__(
self,
repo: Optional[str] = None,
experiment_name: Optional[str] = None,
system_tracking_interval: Optional[int] = 10,
log_system_params: bool = True,
) -> None:
"""Initialize callback handler."""
super().__init__()
aim = import_aim()
self.repo = repo
self.experiment_name = experiment_name
self.system_tracking_interval = system_tracking_interval
self.log_system_params = log_system_params
self._run = aim.Run(
repo=self.repo,
experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params,
)
self._run_hash = self._run.hash
self.action_records: list = []
def setup(self, **kwargs: Any) -> None:
aim = import_aim()
if not self._run:
if self._run_hash:
self._run = aim.Run(
self._run_hash,
repo=self.repo,
system_tracking_interval=self.system_tracking_interval,
)
else:
self._run = aim.Run(
repo=self.repo,
experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params,
)
self._run_hash = self._run.hash
if kwargs:
for key, value in kwargs.items():
self._run.set(key, value, strict=False)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
aim = import_aim()
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = {"action": "on_llm_start"}
resp.update(self.get_custom_callback_meta())
prompts_res = deepcopy(prompts)
self._run.track(
[aim.Text(prompt) for prompt in prompts_res],
name="on_llm_start",
context=resp,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
aim = import_aim()
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = {"action": "on_llm_end"}
resp.update(self.get_custom_callback_meta())
response_res = deepcopy(response)
generated = [
aim.Text(generation.text)
for generations in response_res.generations
for generation in generations
]
self._run.track(
generated,
name="on_llm_end",
context=resp,
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
aim = import_aim()
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = {"action": "on_chain_start"}
resp.update(self.get_custom_callback_meta())
inputs_res = deepcopy(inputs)
self._run.track(
aim.Text(inputs_res["input"]), name="on_chain_start", context=resp
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
aim = import_aim()
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = {"action": "on_chain_end"}
resp.update(self.get_custom_callback_meta())
outputs_res = deepcopy(outputs)
self._run.track(
aim.Text(outputs_res["output"]), name="on_chain_end", context=resp
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
aim = import_aim()
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = {"action": "on_tool_start"}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(input_str), name="on_tool_start", context=resp)
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
aim = import_aim()
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = {"action": "on_tool_end"}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(output), name="on_tool_end", context=resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
aim = import_aim()
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = {"action": "on_agent_finish"}
resp.update(self.get_custom_callback_meta())
finish_res = deepcopy(finish)
text = "OUTPUT:\n{}\n\nLOG:\n{}".format(
finish_res.return_values["output"], finish_res.log
)
self._run.track(aim.Text(text), name="on_agent_finish", context=resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
aim = import_aim()
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = {
"action": "on_agent_action",
"tool": action.tool,
}
resp.update(self.get_custom_callback_meta())
action_res = deepcopy(action)
text = "TOOL INPUT:\n{}\n\nLOG:\n{}".format(
action_res.tool_input, action_res.log
)
self._run.track(aim.Text(text), name="on_agent_action", context=resp)
def flush_tracker(
self,
repo: Optional[str] = None,
experiment_name: Optional[str] = None,
system_tracking_interval: Optional[int] = 10,
log_system_params: bool = True,
langchain_asset: Any = None,
reset: bool = True,
finish: bool = False,
) -> None:
"""Flush the tracker and reset the session.
Args:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
Returns:
None
"""
if langchain_asset:
try:
for key, value in langchain_asset.dict().items():
self._run.set(key, value, strict=False)
except Exception:
pass
if finish or reset:
self._run.close()
self.reset_callback_meta()
if reset:
aim = import_aim()
self.repo = repo if repo else self.repo
self.experiment_name = (
experiment_name if experiment_name else self.experiment_name
)
self.system_tracking_interval = (
system_tracking_interval
if system_tracking_interval
else self.system_tracking_interval
)
self.log_system_params = (
log_system_params if log_system_params else self.log_system_params
)
self._run = aim.Run(
repo=self.repo,
experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params,
)
self._run_hash = self._run.hash
self.action_records = []
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/upstash_ratelimit_callback.py | """Ratelimiting Handler to limit requests or tokens"""
import logging
from typing import Any, Dict, List, Literal, Optional
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
logger = logging.getLogger(__name__)
try:
from upstash_ratelimit import Ratelimit
except ImportError:
Ratelimit = None
class UpstashRatelimitError(Exception):
"""
Upstash Ratelimit Error
Raised when the rate limit is reached in `UpstashRatelimitHandler`
"""
def __init__(
self,
message: str,
type: Literal["token", "request"],
limit: Optional[int] = None,
reset: Optional[float] = None,
):
"""
Args:
message (str): error message
type (str): The kind of the limit which was reached. One of
"token" or "request"
limit (Optional[int]): The limit which was reached. Passed when type
is request
reset (Optional[int]): unix timestamp in milliseconds when the limits
are reset. Passed when type is request
"""
# Call the base class constructor with the parameters it needs
super().__init__(message)
self.type = type
self.limit = limit
self.reset = reset
class UpstashRatelimitHandler(BaseCallbackHandler):
"""
Callback to handle rate limiting based on the number of requests
or the number of tokens in the input.
It uses Upstash Ratelimit to track the ratelimit which utilizes
Upstash Redis to track the state.
Should not be passed to the chain when initialising the chain.
This is because the handler has a state which should be fresh
every time invoke is called. Instead, initialise and pass a handler
every time you invoke.
"""
raise_error: bool = True
_checked: bool = False
def __init__(
self,
identifier: str,
*,
token_ratelimit: Optional[Ratelimit] = None,
request_ratelimit: Optional[Ratelimit] = None,
include_output_tokens: bool = False,
):
"""
Creates UpstashRatelimitHandler. Must be passed an identifier to
ratelimit like a user id or an ip address.
Additionally, it must be passed at least one of token_ratelimit
or request_ratelimit parameters.
Args:
identifier Union[int, str]: the identifier
token_ratelimit Optional[Ratelimit]: Ratelimit to limit the
number of tokens. Only works with OpenAI models since only
these models provide the number of tokens as information
in their output.
request_ratelimit Optional[Ratelimit]: Ratelimit to limit the
number of requests
include_output_tokens bool: Whether to count output tokens when
rate limiting based on number of tokens. Only used when
`token_ratelimit` is passed. False by default.
Example:
.. code-block:: python
from upstash_redis import Redis
from upstash_ratelimit import Ratelimit, FixedWindow
redis = Redis.from_env()
ratelimit = Ratelimit(
redis=redis,
# fixed window to allow 10 requests every 10 seconds:
limiter=FixedWindow(max_requests=10, window=10),
)
user_id = "foo"
handler = UpstashRatelimitHandler(
identifier=user_id,
request_ratelimit=ratelimit
)
# Initialize a simple runnable to test
chain = RunnableLambda(str)
# pass handler as callback:
output = chain.invoke(
"input",
config={
"callbacks": [handler]
}
)
"""
if not any([token_ratelimit, request_ratelimit]):
raise ValueError(
"You must pass at least one of input_token_ratelimit or"
" request_ratelimit parameters for handler to work."
)
self.identifier = identifier
self.token_ratelimit = token_ratelimit
self.request_ratelimit = request_ratelimit
self.include_output_tokens = include_output_tokens
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""
Run when chain starts running.
on_chain_start runs multiple times during a chain execution. To make
sure that it's only called once, we keep a bool state `_checked`. If
not `self._checked`, we call limit with `request_ratelimit` and raise
`UpstashRatelimitError` if the identifier is rate limited.
"""
if self.request_ratelimit and not self._checked:
response = self.request_ratelimit.limit(self.identifier)
if not response.allowed:
raise UpstashRatelimitError(
"Request limit reached!", "request", response.limit, response.reset
)
self._checked = True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""
Run when LLM starts running
"""
if self.token_ratelimit:
remaining = self.token_ratelimit.get_remaining(self.identifier)
if remaining <= 0:
raise UpstashRatelimitError("Token limit reached!", "token")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""
Run when LLM ends running
If the `include_output_tokens` is set to True, number of tokens
in LLM completion are counted for rate limiting
"""
if self.token_ratelimit:
try:
llm_output = response.llm_output or {}
token_usage = llm_output["token_usage"]
token_count = (
token_usage["total_tokens"]
if self.include_output_tokens
else token_usage["prompt_tokens"]
)
except KeyError:
raise ValueError(
"LLM response doesn't include"
" `token_usage: {total_tokens: int, prompt_tokens: int}`"
" field. To use UpstashRatelimitHandler with token_ratelimit,"
" either use a model which returns token_usage (like "
" OpenAI models) or rate limit only with request_ratelimit."
)
# call limit to add the completion tokens to rate limit
# but don't raise exception since we already generated
# the tokens and would rather continue execution.
self.token_ratelimit.limit(self.identifier, rate=token_count)
def reset(self, identifier: Optional[str] = None) -> "UpstashRatelimitHandler":
"""
Creates a new UpstashRatelimitHandler object with the same
ratelimit configurations but with a new identifier if it's
provided.
Also resets the state of the handler.
"""
return UpstashRatelimitHandler(
identifier=identifier or self.identifier,
token_ratelimit=self.token_ratelimit,
request_ratelimit=self.request_ratelimit,
include_output_tokens=self.include_output_tokens,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/trubrics_callback.py | import os
from typing import Any, Dict, List, Optional
from uuid import UUID
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import LLMResult
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class TrubricsCallbackHandler(BaseCallbackHandler):
"""
Callback handler for Trubrics.
Args:
project: a trubrics project, default project is "default"
email: a trubrics account email, can equally be set in env variables
password: a trubrics account password, can equally be set in env variables
**kwargs: all other kwargs are parsed and set to trubrics prompt variables,
or added to the `metadata` dict
"""
def __init__(
self,
project: str = "default",
email: Optional[str] = None,
password: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__()
try:
from trubrics import Trubrics
except ImportError:
raise ImportError(
"The TrubricsCallbackHandler requires installation of "
"the trubrics package. "
"Please install it with `pip install trubrics`."
)
self.trubrics = Trubrics(
project=project,
email=email or os.environ["TRUBRICS_EMAIL"],
password=password or os.environ["TRUBRICS_PASSWORD"],
)
self.config_model: dict = {}
self.prompt: Optional[str] = None
self.messages: Optional[list] = None
self.trubrics_kwargs: Optional[dict] = kwargs if kwargs else None
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
self.prompt = prompts[0]
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
self.messages = [_convert_message_to_dict(message) for message in messages[0]]
self.prompt = self.messages[-1]["content"]
def on_llm_end(self, response: LLMResult, run_id: UUID, **kwargs: Any) -> None:
tags = ["langchain"]
user_id = None
session_id = None
metadata: dict = {"langchain_run_id": run_id}
if self.messages:
metadata["messages"] = self.messages
if self.trubrics_kwargs:
if self.trubrics_kwargs.get("tags"):
tags.append(*self.trubrics_kwargs.pop("tags"))
user_id = self.trubrics_kwargs.pop("user_id", None)
session_id = self.trubrics_kwargs.pop("session_id", None)
metadata.update(self.trubrics_kwargs)
for generation in response.generations:
self.trubrics.log_prompt(
config_model={
"model": response.llm_output.get("model_name")
if response.llm_output
else "NA"
},
prompt=self.prompt,
generation=generation[0].text,
user_id=user_id,
session_id=session_id,
tags=tags,
metadata=metadata,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/whylabs_callback.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils import get_from_env, guard_import
if TYPE_CHECKING:
from whylogs.api.logger.logger import Logger
diagnostic_logger = logging.getLogger(__name__)
def import_langkit(
sentiment: bool = False,
toxicity: bool = False,
themes: bool = False,
) -> Any:
"""Import the langkit python package and raise an error if it is not installed.
Args:
sentiment: Whether to import the langkit.sentiment module. Defaults to False.
toxicity: Whether to import the langkit.toxicity module. Defaults to False.
themes: Whether to import the langkit.themes module. Defaults to False.
Returns:
The imported langkit module.
"""
langkit = guard_import("langkit")
guard_import("langkit.regexes")
guard_import("langkit.textstat")
if sentiment:
guard_import("langkit.sentiment")
if toxicity:
guard_import("langkit.toxicity")
if themes:
guard_import("langkit.themes")
return langkit
class WhyLabsCallbackHandler(BaseCallbackHandler):
"""
Callback Handler for logging to WhyLabs. This callback handler utilizes
`langkit` to extract features from the prompts & responses when interacting with
an LLM. These features can be used to guardrail, evaluate, and observe interactions
over time to detect issues relating to hallucinations, prompt engineering,
or output validation. LangKit is an LLM monitoring toolkit developed by WhyLabs.
Here are some examples of what can be monitored with LangKit:
* Text Quality
- readability score
- complexity and grade scores
* Text Relevance
- Similarity scores between prompt/responses
- Similarity scores against user-defined themes
- Topic classification
* Security and Privacy
- patterns - count of strings matching a user-defined regex pattern group
- jailbreaks - similarity scores with respect to known jailbreak attempts
- prompt injection - similarity scores with respect to known prompt attacks
- refusals - similarity scores with respect to known LLM refusal responses
* Sentiment and Toxicity
- sentiment analysis
- toxicity analysis
For more information, see https://docs.whylabs.ai/docs/language-model-monitoring
or check out the LangKit repo here: https://github.com/whylabs/langkit
---
Args:
api_key (Optional[str]): WhyLabs API key. Optional because the preferred
way to specify the API key is with environment variable
WHYLABS_API_KEY.
org_id (Optional[str]): WhyLabs organization id to write profiles to.
Optional because the preferred way to specify the organization id is
with environment variable WHYLABS_DEFAULT_ORG_ID.
dataset_id (Optional[str]): WhyLabs dataset id to write profiles to.
Optional because the preferred way to specify the dataset id is
with environment variable WHYLABS_DEFAULT_DATASET_ID.
sentiment (bool): Whether to enable sentiment analysis. Defaults to False.
toxicity (bool): Whether to enable toxicity analysis. Defaults to False.
themes (bool): Whether to enable theme analysis. Defaults to False.
"""
def __init__(self, logger: Logger, handler: Any):
"""Initiate the rolling logger."""
super().__init__()
if hasattr(handler, "init"):
handler.init(self)
if hasattr(handler, "_get_callbacks"):
self._callbacks = handler._get_callbacks()
else:
self._callbacks = dict()
diagnostic_logger.warning("initialized handler without callbacks.")
self._logger = logger
def flush(self) -> None:
"""Explicitly write current profile if using a rolling logger."""
if self._logger and hasattr(self._logger, "_do_rollover"):
self._logger._do_rollover()
diagnostic_logger.info("Flushing WhyLabs logger, writing profile...")
def close(self) -> None:
"""Close any loggers to allow writing out of any profiles before exiting."""
if self._logger and hasattr(self._logger, "close"):
self._logger.close()
diagnostic_logger.info("Closing WhyLabs logger, see you next time!")
def __enter__(self) -> WhyLabsCallbackHandler:
return self
def __exit__(
self, exception_type: Any, exception_value: Any, traceback: Any
) -> None:
self.close()
@classmethod
def from_params(
cls,
*,
api_key: Optional[str] = None,
org_id: Optional[str] = None,
dataset_id: Optional[str] = None,
sentiment: bool = False,
toxicity: bool = False,
themes: bool = False,
logger: Optional[Logger] = None,
) -> WhyLabsCallbackHandler:
"""Instantiate whylogs Logger from params.
Args:
api_key (Optional[str]): WhyLabs API key. Optional because the preferred
way to specify the API key is with environment variable
WHYLABS_API_KEY.
org_id (Optional[str]): WhyLabs organization id to write profiles to.
If not set must be specified in environment variable
WHYLABS_DEFAULT_ORG_ID.
dataset_id (Optional[str]): The model or dataset this callback is gathering
telemetry for. If not set must be specified in environment variable
WHYLABS_DEFAULT_DATASET_ID.
sentiment (bool): If True will initialize a model to perform
sentiment analysis compound score. Defaults to False and will not gather
this metric.
toxicity (bool): If True will initialize a model to score
toxicity. Defaults to False and will not gather this metric.
themes (bool): If True will initialize a model to calculate
distance to configured themes. Defaults to None and will not gather this
metric.
logger (Optional[Logger]): If specified will bind the configured logger as
the telemetry gathering agent. Defaults to LangKit schema with periodic
WhyLabs writer.
"""
# langkit library will import necessary whylogs libraries
import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes)
why = guard_import("whylogs")
get_callback_instance = guard_import(
"langkit.callback_handler"
).get_callback_instance
WhyLabsWriter = guard_import("whylogs.api.writer.whylabs").WhyLabsWriter
udf_schema = guard_import("whylogs.experimental.core.udf_schema").udf_schema
if logger is None:
api_key = api_key or get_from_env("api_key", "WHYLABS_API_KEY")
org_id = org_id or get_from_env("org_id", "WHYLABS_DEFAULT_ORG_ID")
dataset_id = dataset_id or get_from_env(
"dataset_id", "WHYLABS_DEFAULT_DATASET_ID"
)
whylabs_writer = WhyLabsWriter(
api_key=api_key, org_id=org_id, dataset_id=dataset_id
)
whylabs_logger = why.logger(
mode="rolling", interval=5, when="M", schema=udf_schema()
)
whylabs_logger.append_writer(writer=whylabs_writer)
else:
diagnostic_logger.info("Using passed in whylogs logger {logger}")
whylabs_logger = logger
callback_handler_cls = get_callback_instance(logger=whylabs_logger, impl=cls)
diagnostic_logger.info(
"Started whylogs Logger with WhyLabsWriter and initialized LangKit. 📝"
)
return callback_handler_cls
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/human.py | from typing import Any, Awaitable, Callable, Dict, Optional
from uuid import UUID
from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
def _default_approve(_input: str) -> bool:
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
msg += "\n\n" + _input + "\n"
resp = input(msg)
return resp.lower() in ("yes", "y")
async def _adefault_approve(_input: str) -> bool:
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
msg += "\n\n" + _input + "\n"
resp = input(msg)
return resp.lower() in ("yes", "y")
def _default_true(_: Dict[str, Any]) -> bool:
return True
class HumanRejectedException(Exception):
"""Exception to raise when a person manually review and rejects a value."""
class HumanApprovalCallbackHandler(BaseCallbackHandler):
"""Callback for manually validating values."""
raise_error: bool = True
def __init__(
self,
approve: Callable[[Any], bool] = _default_approve,
should_check: Callable[[Dict[str, Any]], bool] = _default_true,
):
self._approve = approve
self._should_check = should_check
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if self._should_check(serialized) and not self._approve(input_str):
raise HumanRejectedException(
f"Inputs {input_str} to tool {serialized} were rejected."
)
class AsyncHumanApprovalCallbackHandler(AsyncCallbackHandler):
"""Asynchronous callback for manually validating values."""
raise_error: bool = True
def __init__(
self,
approve: Callable[[Any], Awaitable[bool]] = _adefault_approve,
should_check: Callable[[Dict[str, Any]], bool] = _default_true,
):
self._approve = approve
self._should_check = should_check
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if self._should_check(serialized) and not await self._approve(input_str):
raise HumanRejectedException(
f"Inputs {input_str} to tool {serialized} were rejected."
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/confident_callback.py | # flake8: noqa
import os
import warnings
from typing import Any, Dict, List, Optional, Union
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.outputs import LLMResult
class DeepEvalCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs into deepeval.
Args:
implementation_name: name of the `implementation` in deepeval
metrics: A list of metrics
Raises:
ImportError: if the `deepeval` package is not installed.
Examples:
>>> from langchain_community.llms import OpenAI
>>> from langchain_community.callbacks import DeepEvalCallbackHandler
>>> from deepeval.metrics import AnswerRelevancy
>>> metric = AnswerRelevancy(minimum_score=0.3)
>>> deepeval_callback = DeepEvalCallbackHandler(
... implementation_name="exampleImplementation",
... metrics=[metric],
... )
>>> llm = OpenAI(
... temperature=0,
... callbacks=[deepeval_callback],
... verbose=True,
... openai_api_key="API_KEY_HERE",
... )
>>> llm.generate([
... "What is the best evaluation tool out there? (no bias at all)",
... ])
"Deepeval, no doubt about it."
"""
REPO_URL: str = "https://github.com/confident-ai/deepeval"
ISSUES_URL: str = f"{REPO_URL}/issues"
BLOG_URL: str = "https://docs.confident-ai.com" # noqa: E501
def __init__(
self,
metrics: List[Any],
implementation_name: Optional[str] = None,
) -> None:
"""Initializes the `deepevalCallbackHandler`.
Args:
implementation_name: Name of the implementation you want.
metrics: What metrics do you want to track?
Raises:
ImportError: if the `deepeval` package is not installed.
ConnectionError: if the connection to deepeval fails.
"""
super().__init__()
# Import deepeval (not via `import_deepeval` to keep hints in IDEs)
try:
import deepeval # ignore: F401,I001
except ImportError:
raise ImportError(
"""To use the deepeval callback manager you need to have the
`deepeval` Python package installed. Please install it with
`pip install deepeval`"""
)
if os.path.exists(".deepeval"):
warnings.warn(
"""You are currently not logging anything to the dashboard, we
recommend using `deepeval login`."""
)
# Set the deepeval variables
self.implementation_name = implementation_name
self.metrics = metrics
warnings.warn(
(
"The `DeepEvalCallbackHandler` is currently in beta and is subject to"
" change based on updates to `langchain`. Please report any issues to"
f" {self.ISSUES_URL} as an `integration` issue."
),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Store the prompts"""
self.prompts = prompts
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log records to deepeval when an LLM ends."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from deepeval.metrics.bias_classifier import UnBiasedMetric
from deepeval.metrics.metric import Metric
from deepeval.metrics.toxic_classifier import NonToxicMetric
for metric in self.metrics:
for i, generation in enumerate(response.generations):
# Here, we only measure the first generation's output
output = generation[0].text
query = self.prompts[i]
if isinstance(metric, AnswerRelevancy):
result = metric.measure(
output=output,
query=query,
)
print(f"Answer Relevancy: {result}") # noqa: T201
elif isinstance(metric, UnBiasedMetric):
score = metric.measure(output)
print(f"Bias Score: {score}") # noqa: T201
elif isinstance(metric, NonToxicMetric):
score = metric.measure(output)
print(f"Toxic Score: {score}") # noqa: T201
else:
raise ValueError(
f"""Metric {metric.__name__} is not supported by deepeval
callbacks."""
)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Do nothing when chain starts"""
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Do nothing when chain ends."""
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: Any,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/uptrain_callback.py | """
UpTrain Callback Handler
UpTrain is an open-source platform to evaluate and improve LLM applications. It provides
grades for 20+ preconfigured checks (covering language, code, embedding use cases),
performs root cause analyses on instances of failure cases and provides guidance for
resolving them.
This module contains a callback handler for integrating UpTrain seamlessly into your
pipeline and facilitating diverse evaluations. The callback handler automates various
evaluations to assess the performance and effectiveness of the components within the
pipeline.
The evaluations conducted include:
1. RAG:
- Context Relevance: Determines the relevance of the context extracted from the query
to the response.
- Factual Accuracy: Assesses if the Language Model (LLM) is providing accurate
information or hallucinating.
- Response Completeness: Checks if the response contains all the information
requested by the query.
2. Multi Query Generation:
MultiQueryRetriever generates multiple variants of a question with similar meanings
to the original question. This evaluation includes previous assessments and adds:
- Multi Query Accuracy: Ensures that the multi-queries generated convey the same
meaning as the original query.
3. Context Compression and Reranking:
Re-ranking involves reordering nodes based on relevance to the query and selecting
top n nodes.
Due to the potential reduction in the number of nodes after re-ranking, the following
evaluations
are performed in addition to the RAG evaluations:
- Context Reranking: Determines if the order of re-ranked nodes is more relevant to
the query than the original order.
- Context Conciseness: Examines whether the reduced number of nodes still provides
all the required information.
These evaluations collectively ensure the robustness and effectiveness of the RAG query
engine, MultiQueryRetriever, and the re-ranking process within the pipeline.
Useful links:
Github: https://github.com/uptrain-ai/uptrain
Website: https://uptrain.ai/
Docs: https://docs.uptrain.ai/getting-started/introduction
"""
import logging
import sys
from collections import defaultdict
from typing import (
Any,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Set,
)
from uuid import UUID
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def import_uptrain() -> Any:
"""Import the `uptrain` package."""
return guard_import("uptrain")
class UpTrainDataSchema:
"""The UpTrain data schema for tracking evaluation results.
Args:
project_name (str): The project name to be shown in UpTrain dashboard.
Attributes:
project_name (str): The project name to be shown in UpTrain dashboard.
uptrain_results (DefaultDict[str, Any]): Dictionary to store evaluation results.
eval_types (Set[str]): Set to store the types of evaluations.
query (str): Query for the RAG evaluation.
context (str): Context for the RAG evaluation.
response (str): Response for the RAG evaluation.
old_context (List[str]): Old context nodes for Context Conciseness evaluation.
new_context (List[str]): New context nodes for Context Conciseness evaluation.
context_conciseness_run_id (str): Run ID for Context Conciseness evaluation.
multi_queries (List[str]): List of multi queries for Multi Query evaluation.
multi_query_run_id (str): Run ID for Multi Query evaluation.
multi_query_daugher_run_id (str): Run ID for Multi Query daughter evaluation.
"""
def __init__(self, project_name: str) -> None:
"""Initialize the UpTrain data schema."""
# For tracking project name and results
self.project_name: str = project_name
self.uptrain_results: DefaultDict[str, Any] = defaultdict(list)
# For tracking event types
self.eval_types: Set[str] = set()
## RAG
self.query: str = ""
self.context: str = ""
self.response: str = ""
## CONTEXT CONCISENESS
self.old_context: List[str] = []
self.new_context: List[str] = []
self.context_conciseness_run_id: UUID = UUID(int=0)
# MULTI QUERY
self.multi_queries: List[str] = []
self.multi_query_run_id: UUID = UUID(int=0)
self.multi_query_daugher_run_id: UUID = UUID(int=0)
class UpTrainCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs evaluation results to uptrain and the console.
Args:
project_name (str): The project name to be shown in UpTrain dashboard.
key_type (str): Type of key to use. Must be 'uptrain' or 'openai'.
api_key (str): API key for the UpTrain or OpenAI API.
(This key is required to perform evaluations using GPT.)
Raises:
ValueError: If the key type is invalid.
ImportError: If the `uptrain` package is not installed.
"""
def __init__(
self,
*,
project_name: str = "langchain",
key_type: str = "openai",
api_key: str = "sk-****************", # The API key to use for evaluation
model: str = "gpt-3.5-turbo", # The model to use for evaluation
log_results: bool = True,
) -> None:
"""Initializes the `UpTrainCallbackHandler`."""
super().__init__()
uptrain = import_uptrain()
self.log_results = log_results
# Set uptrain variables
self.schema = UpTrainDataSchema(project_name=project_name)
self.first_score_printed_flag = False
if key_type == "uptrain":
settings = uptrain.Settings(uptrain_access_token=api_key, model=model)
self.uptrain_client = uptrain.APIClient(settings=settings)
elif key_type == "openai":
settings = uptrain.Settings(
openai_api_key=api_key, evaluate_locally=True, model=model
)
self.uptrain_client = uptrain.EvalLLM(settings=settings)
else:
raise ValueError("Invalid key type: Must be 'uptrain' or 'openai'")
def uptrain_evaluate(
self,
evaluation_name: str,
data: List[Dict[str, Any]],
checks: List[str],
) -> None:
"""Run an evaluation on the UpTrain server using UpTrain client."""
if self.uptrain_client.__class__.__name__ == "APIClient":
uptrain_result = self.uptrain_client.log_and_evaluate(
project_name=self.schema.project_name,
evaluation_name=evaluation_name,
data=data,
checks=checks,
)
else:
uptrain_result = self.uptrain_client.evaluate(
project_name=self.schema.project_name,
evaluation_name=evaluation_name,
data=data,
checks=checks,
)
self.schema.uptrain_results[self.schema.project_name].append(uptrain_result)
score_name_map = {
"score_context_relevance": "Context Relevance Score",
"score_factual_accuracy": "Factual Accuracy Score",
"score_response_completeness": "Response Completeness Score",
"score_sub_query_completeness": "Sub Query Completeness Score",
"score_context_reranking": "Context Reranking Score",
"score_context_conciseness": "Context Conciseness Score",
"score_multi_query_accuracy": "Multi Query Accuracy Score",
}
if self.log_results:
# Set logger level to INFO to print the evaluation results
logger.setLevel(logging.INFO)
for row in uptrain_result:
columns = list(row.keys())
for column in columns:
if column == "question":
logger.info(f"\nQuestion: {row[column]}")
self.first_score_printed_flag = False
elif column == "response":
logger.info(f"Response: {row[column]}")
self.first_score_printed_flag = False
elif column == "variants":
logger.info("Multi Queries:")
for variant in row[column]:
logger.info(f" - {variant}")
self.first_score_printed_flag = False
elif column.startswith("score"):
if not self.first_score_printed_flag:
logger.info("")
self.first_score_printed_flag = True
if column in score_name_map:
logger.info(f"{score_name_map[column]}: {row[column]}")
else:
logger.info(f"{column}: {row[column]}")
if self.log_results:
# Set logger level back to WARNING
# (We are doing this to avoid printing the logs from HTTP requests)
logger.setLevel(logging.WARNING)
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
"""Log records to uptrain when an LLM ends."""
uptrain = import_uptrain()
self.schema.response = response.generations[0][0].text
if (
"qa_rag" in self.schema.eval_types
and parent_run_id != self.schema.multi_query_daugher_run_id
):
data = [
{
"question": self.schema.query,
"context": self.schema.context,
"response": self.schema.response,
}
]
self.uptrain_evaluate(
evaluation_name="rag",
data=data,
checks=[
uptrain.Evals.CONTEXT_RELEVANCE,
uptrain.Evals.FACTUAL_ACCURACY,
uptrain.Evals.RESPONSE_COMPLETENESS,
],
)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
tags: Optional[List[str]] = None,
parent_run_id: Optional[UUID] = None,
metadata: Optional[Dict[str, Any]] = None,
run_type: Optional[str] = None,
name: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when chain starts"""
if parent_run_id == self.schema.multi_query_run_id:
self.schema.multi_query_daugher_run_id = run_id
if isinstance(inputs, dict) and set(inputs.keys()) == {"context", "question"}:
self.schema.eval_types.add("qa_rag")
context = ""
if isinstance(inputs["context"], Document):
context = inputs["context"].page_content
elif isinstance(inputs["context"], list):
for doc in inputs["context"]:
context += doc.page_content + "\n"
elif isinstance(inputs["context"], str):
context = inputs["context"]
self.schema.context = context
self.schema.query = inputs["question"]
pass
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
if "contextual_compression" in serialized["id"]:
self.schema.eval_types.add("contextual_compression")
self.schema.query = query
self.schema.context_conciseness_run_id = run_id
if "multi_query" in serialized["id"]:
self.schema.eval_types.add("multi_query")
self.schema.multi_query_run_id = run_id
self.schema.query = query
elif "multi_query" in self.schema.eval_types:
self.schema.multi_queries.append(query)
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
"""Run when Retriever ends running."""
uptrain = import_uptrain()
if run_id == self.schema.multi_query_run_id:
data = [
{
"question": self.schema.query,
"variants": self.schema.multi_queries,
}
]
self.uptrain_evaluate(
evaluation_name="multi_query",
data=data,
checks=[uptrain.Evals.MULTI_QUERY_ACCURACY],
)
if "contextual_compression" in self.schema.eval_types:
if parent_run_id == self.schema.context_conciseness_run_id:
for doc in documents:
self.schema.old_context.append(doc.page_content)
elif run_id == self.schema.context_conciseness_run_id:
for doc in documents:
self.schema.new_context.append(doc.page_content)
context = "\n".join(
[
f"{index}. {string}"
for index, string in enumerate(self.schema.old_context, start=1)
]
)
reranked_context = "\n".join(
[
f"{index}. {string}"
for index, string in enumerate(self.schema.new_context, start=1)
]
)
data = [
{
"question": self.schema.query,
"context": context,
"concise_context": reranked_context,
"reranked_context": reranked_context,
}
]
self.uptrain_evaluate(
evaluation_name="context_reranking",
data=data,
checks=[
uptrain.Evals.CONTEXT_CONCISENESS,
uptrain.Evals.CONTEXT_RERANKING,
],
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/flyte_callback.py | """FlyteKit callback handler."""
from __future__ import annotations
import logging
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
if TYPE_CHECKING:
import flytekit
from flytekitplugins.deck import renderer
logger = logging.getLogger(__name__)
def import_flytekit() -> Tuple[flytekit, renderer]:
"""Import flytekit and flytekitplugins-deck-standard."""
return (
guard_import("flytekit"),
guard_import(
"flytekitplugins.deck", pip_name="flytekitplugins-deck-standard"
).renderer,
)
def analyze_text(
text: str,
nlp: Any = None,
textstat: Any = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
if textstat is not None:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update({"text_complexity_metrics": text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
spacy = import_spacy()
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style="dep", jupyter=False, page=True)
ent_out = spacy.displacy.render(doc, style="ent", jupyter=False, page=True)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback handler that is used within a Flyte task."""
def __init__(self) -> None:
"""Initialize callback handler."""
flytekit, renderer = import_flytekit()
self.pandas = import_pandas()
self.textstat = None
try:
self.textstat = import_textstat()
except ImportError:
logger.warning(
"Textstat library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Textstat."
)
spacy = None
try:
spacy = import_spacy()
except ImportError:
logger.warning(
"Spacy library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Spacy."
)
super().__init__()
self.nlp = None
if spacy:
try:
self.nlp = spacy.load("en_core_web_sm")
except OSError:
logger.warning(
"FlyteCallbackHandler uses spacy's en_core_web_sm model"
" for certain metrics. To download,"
" run the following command in your terminal:"
" `python -m spacy download en_core_web_sm`"
)
self.table_renderer = renderer.TableRenderer
self.markdown_renderer = renderer.MarkdownRenderer
self.deck = flytekit.Deck(
"LangChain Metrics",
self.markdown_renderer().to_html("## LangChain Metrics"),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
prompt_responses = []
for prompt in prompts:
prompt_responses.append(prompt)
resp.update({"prompts": prompt_responses})
self.deck.append(self.markdown_renderer().to_html("### LLM Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### LLM End"))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp])))
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
if self.nlp or self.textstat:
generation_resp.update(
analyze_text(
generation.text, nlp=self.nlp, textstat=self.textstat
)
)
complexity_metrics: Dict[str, float] = generation_resp.pop(
"text_complexity_metrics"
)
self.deck.append(
self.markdown_renderer().to_html("#### Text Complexity Metrics")
)
self.deck.append(
self.table_renderer().to_html(
self.pandas.DataFrame([complexity_metrics])
)
+ "\n"
)
dependency_tree = generation_resp["dependency_tree"]
self.deck.append(
self.markdown_renderer().to_html("#### Dependency Tree")
)
self.deck.append(dependency_tree)
entities = generation_resp["entities"]
self.deck.append(self.markdown_renderer().to_html("#### Entities"))
self.deck.append(entities)
else:
self.deck.append(
self.markdown_renderer().to_html("#### Generated Response")
)
self.deck.append(self.markdown_renderer().to_html(generation.text))
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.deck.append(self.markdown_renderer().to_html("### Chain Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([input_resp])) + "\n"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Chain End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### On Text"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Finish"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Action"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/comet_ml_callback.py | import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import guard_import
import langchain_community
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
LANGCHAIN_MODEL_NAME = "langchain-model"
def import_comet_ml() -> Any:
"""Import comet_ml and raise an error if it is not installed."""
return guard_import("comet_ml")
def _get_experiment(
workspace: Optional[str] = None, project_name: Optional[str] = None
) -> Any:
comet_ml = import_comet_ml()
experiment = comet_ml.Experiment(
workspace=workspace,
project_name=project_name,
)
return experiment
def _fetch_text_complexity_metrics(text: str) -> dict:
textstat = import_textstat()
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
return text_complexity_metrics
def _summarize_metrics_for_generated_outputs(metrics: Sequence) -> dict:
pd = import_pandas()
metrics_df = pd.DataFrame(metrics)
metrics_summary = metrics_df.describe()
return metrics_summary.to_dict()
class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Comet.
Parameters:
job_type (str): The type of comet_ml task such as "inference",
"testing" or "qc"
project_name (str): The comet_ml project name
tags (list): Tags to add to the task
task_name (str): Name of the comet_ml task
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics
stream_logs (bool): Whether to stream callback actions to Comet
This handler will utilize the associated callback method and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action. It then logs the response to Comet.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
stream_logs: bool = True,
) -> None:
"""Initialize callback handler."""
self.comet_ml = import_comet_ml()
super().__init__()
self.task_type = task_type
self.workspace = workspace
self.project_name = project_name
self.tags = tags
self.visualizations = visualizations
self.complexity_metrics = complexity_metrics
self.custom_metrics = custom_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
self.experiment = _get_experiment(workspace, project_name)
self.experiment.log_other("Created from", "langchain")
if tags:
self.experiment.add_tags(tags)
self.name = name
if self.name:
self.experiment.set_name(self.name)
warning = (
"The comet_ml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/comet-ml/issue-tracking/issues with the tag "
"`langchain`."
)
self.comet_ml.LOGGER.warning(warning)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
if self.visualizations:
spacy = import_spacy()
self.nlp = spacy.load("en_core_web_sm")
else:
self.nlp = None
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_start"})
metadata.update(flatten_dict(serialized))
metadata.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(metadata)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self._log_stream(prompt, metadata, self.step)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.action_records.append(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_end"})
metadata.update(flatten_dict(response.llm_output or {}))
metadata.update(self.get_custom_callback_meta())
output_complexity_metrics = []
output_custom_metrics = []
for prompt_idx, generations in enumerate(response.generations):
for gen_idx, generation in enumerate(generations):
text = generation.text
generation_resp = deepcopy(metadata)
generation_resp.update(flatten_dict(generation.dict()))
complexity_metrics = self._get_complexity_metrics(text)
if complexity_metrics:
output_complexity_metrics.append(complexity_metrics)
generation_resp.update(complexity_metrics)
custom_metrics = self._get_custom_metrics(
generation, prompt_idx, gen_idx
)
if custom_metrics:
output_custom_metrics.append(custom_metrics)
generation_resp.update(custom_metrics)
if self.stream_logs:
self._log_stream(text, metadata, self.step)
self.action_records.append(generation_resp)
self.on_llm_end_records.append(generation_resp)
self._log_text_metrics(output_complexity_metrics, step=self.step)
self._log_text_metrics(output_custom_metrics, step=self.step)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for chain_input_key, chain_input_val in inputs.items():
if isinstance(chain_input_val, str):
input_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_input_val, resp, self.step)
input_resp.update({chain_input_key: chain_input_val})
self.action_records.append(input_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Input Value for {chain_input_key} will not be logged"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_chain_end"})
resp.update(self.get_custom_callback_meta())
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, str):
output_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_output_val, resp, self.step)
output_resp.update({chain_output_key: chain_output_val})
self.action_records.append(output_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Output Value for {chain_output_key} will not be logged"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(input_str, resp, self.step)
resp.update({"input_str": input_str})
self.action_records.append(resp)
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(text, resp, self.step)
resp.update({"text": text})
self.action_records.append(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
output = finish.return_values["output"]
log = finish.log
resp.update({"action": "on_agent_finish", "log": log})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
tool = action.tool
tool_input = str(action.tool_input)
log = action.log
resp = self._init_resp()
resp.update({"action": "on_agent_action", "log": log, "tool": tool})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(tool_input, resp, self.step)
resp.update({"tool_input": tool_input})
self.action_records.append(resp)
def _get_complexity_metrics(self, text: str) -> dict:
"""Compute text complexity metrics using textstat.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
if self.complexity_metrics:
text_complexity_metrics = _fetch_text_complexity_metrics(text)
resp.update(text_complexity_metrics)
return resp
def _get_custom_metrics(
self, generation: Generation, prompt_idx: int, gen_idx: int
) -> dict:
"""Compute Custom Metrics for an LLM Generated Output
Args:
generation (LLMResult): Output generation from an LLM
prompt_idx (int): List index of the input prompt
gen_idx (int): List index of the generated output
Returns:
dict: A dictionary containing the custom metrics.
"""
resp = {}
if self.custom_metrics:
custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx)
resp.update(custom_metrics)
return resp
def flush_tracker(
self,
langchain_asset: Any = None,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = "comet-langchain-demo",
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
finish: bool = False,
reset: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
self._log_session(langchain_asset)
if langchain_asset:
try:
self._log_model(langchain_asset)
except Exception:
self.comet_ml.LOGGER.error(
"Failed to export agent or LLM to Comet",
exc_info=True,
extra={"show_traceback": True},
)
if finish:
self.experiment.end()
if reset:
self._reset(
task_type,
workspace,
project_name,
tags,
name,
visualizations,
complexity_metrics,
custom_metrics,
)
def _log_stream(self, prompt: str, metadata: dict, step: int) -> None:
self.experiment.log_text(prompt, metadata=metadata, step=step)
def _log_model(self, langchain_asset: Any) -> None:
model_parameters = self._get_llm_parameters(langchain_asset)
self.experiment.log_parameters(model_parameters, prefix="model")
langchain_asset_path = Path(self.temp_dir.name, "model.json")
model_name = self.name if self.name else LANGCHAIN_MODEL_NAME
try:
if hasattr(langchain_asset, "save"):
langchain_asset.save(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
except (ValueError, AttributeError, NotImplementedError) as e:
if hasattr(langchain_asset, "save_agent"):
langchain_asset.save_agent(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
else:
self.comet_ml.LOGGER.error(
f"{e}"
" Could not save Langchain Asset "
f"for {langchain_asset.__class__.__name__}"
)
def _log_session(self, langchain_asset: Optional[Any] = None) -> None:
try:
llm_session_df = self._create_session_analysis_dataframe(langchain_asset)
# Log the cleaned dataframe as a table
self.experiment.log_table("langchain-llm-session.csv", llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
metadata = {"langchain_version": str(langchain_community.__version__)}
# Log the langchain low-level records as a JSON file directly
self.experiment.log_asset_data(
self.action_records, "langchain-action_records.json", metadata=metadata
)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
self._log_visualizations(llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log visualizations to Comet",
exc_info=True,
extra={"show_traceback": True},
)
def _log_text_metrics(self, metrics: Sequence[dict], step: int) -> None:
if not metrics:
return
metrics_summary = _summarize_metrics_for_generated_outputs(metrics)
for key, value in metrics_summary.items():
self.experiment.log_metrics(value, prefix=key, step=step)
def _log_visualizations(self, session_df: Any) -> None:
if not (self.visualizations and self.nlp):
return
spacy = import_spacy()
prompts = session_df["prompts"].tolist()
outputs = session_df["text"].tolist()
for idx, (prompt, output) in enumerate(zip(prompts, outputs)):
doc = self.nlp(output)
sentence_spans = list(doc.sents)
for visualization in self.visualizations:
try:
html = spacy.displacy.render(
sentence_spans,
style=visualization,
options={"compact": True},
jupyter=False,
page=True,
)
self.experiment.log_asset_data(
html,
name=f"langchain-viz-{visualization}-{idx}.html",
metadata={"prompt": prompt},
step=idx,
)
except Exception as e:
self.comet_ml.LOGGER.warning(
e, exc_info=True, extra={"show_traceback": True}
)
return
def _reset(
self,
task_type: Optional[str] = None,
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
) -> None:
_task_type = task_type if task_type else self.task_type
_workspace = workspace if workspace else self.workspace
_project_name = project_name if project_name else self.project_name
_tags = tags if tags else self.tags
_name = name if name else self.name
_visualizations = visualizations if visualizations else self.visualizations
_complexity_metrics = (
complexity_metrics if complexity_metrics else self.complexity_metrics
)
_custom_metrics = custom_metrics if custom_metrics else self.custom_metrics
self.__init__( # type: ignore
task_type=_task_type,
workspace=_workspace,
project_name=_project_name,
tags=_tags,
name=_name,
visualizations=_visualizations,
complexity_metrics=_complexity_metrics,
custom_metrics=_custom_metrics,
)
self.reset_callback_meta()
self.temp_dir = tempfile.TemporaryDirectory()
def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> dict:
pd = import_pandas()
llm_parameters = self._get_llm_parameters(langchain_asset)
num_generations_per_prompt = llm_parameters.get("n", 1)
llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
# Repeat each input row based on the number of outputs generated per prompt
llm_start_records_df = llm_start_records_df.loc[
llm_start_records_df.index.repeat(num_generations_per_prompt)
].reset_index(drop=True)
llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_session_df = pd.merge(
llm_start_records_df,
llm_end_records_df,
left_index=True,
right_index=True,
suffixes=["_llm_start", "_llm_end"],
)
return llm_session_df
def _get_llm_parameters(self, langchain_asset: Any = None) -> dict:
if not langchain_asset:
return {}
try:
if hasattr(langchain_asset, "agent"):
llm_parameters = langchain_asset.agent.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm_chain"):
llm_parameters = langchain_asset.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm"):
llm_parameters = langchain_asset.llm.dict()
else:
llm_parameters = langchain_asset.dict()
except Exception:
return {}
return llm_parameters
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/sagemaker_callback.py | import json
import os
import shutil
import tempfile
from copy import deepcopy
from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_community.callbacks.utils import (
flatten_dict,
)
def save_json(data: dict, file_path: str) -> None:
"""Save dict to local file path.
Parameters:
data (dict): The dictionary to be saved.
file_path (str): Local file path.
"""
with open(file_path, "w") as outfile:
json.dump(data, outfile)
class SageMakerCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs prompt artifacts and metrics to SageMaker Experiments.
Parameters:
run (sagemaker.experiments.run.Run): Run object where the experiment is logged.
"""
def __init__(self, run: Any) -> None:
"""Initialize callback handler."""
super().__init__()
self.run = run
self.metrics = {
"step": 0,
"starts": 0,
"ends": 0,
"errors": 0,
"text_ctr": 0,
"chain_starts": 0,
"chain_ends": 0,
"llm_starts": 0,
"llm_ends": 0,
"llm_streams": 0,
"tool_starts": 0,
"tool_ends": 0,
"agent_ends": 0,
}
# Create a temporary directory
self.temp_dir = tempfile.mkdtemp()
def _reset(self) -> None:
for k, v in self.metrics.items():
self.metrics[k] = 0
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] += 1
self.metrics["starts"] += 1
llm_starts = self.metrics["llm_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp["prompt"] = prompt
self.jsonf(
prompt_resp,
self.temp_dir,
f"llm_start_{llm_starts}_prompt_{idx}",
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.metrics["step"] += 1
self.metrics["llm_streams"] += 1
llm_streams = self.metrics["llm_streams"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"llm_new_tokens_{llm_streams}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
self.metrics["llm_ends"] += 1
self.metrics["ends"] += 1
llm_ends = self.metrics["llm_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
self.jsonf(
resp,
self.temp_dir,
f"llm_end_{llm_ends}_generation_{idx}",
)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.metrics["step"] += 1
self.metrics["chain_starts"] += 1
self.metrics["starts"] += 1
chain_starts = self.metrics["chain_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.jsonf(input_resp, self.temp_dir, f"chain_start_{chain_starts}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"chain_end_{chain_ends}")
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_start_{tool_starts}")
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
self.metrics["step"] += 1
self.metrics["tool_ends"] += 1
self.metrics["ends"] += 1
tool_ends = self.metrics["tool_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_end_{tool_ends}")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.metrics["step"] += 1
self.metrics["text_ctr"] += 1
text_ctr = self.metrics["text_ctr"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"on_text_{text_ctr}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
self.metrics["agent_ends"] += 1
self.metrics["ends"] += 1
agent_ends = self.metrics["agent_ends"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_finish_{agent_ends}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_action_{tool_starts}")
def jsonf(
self,
data: Dict[str, Any],
data_dir: str,
filename: str,
is_output: Optional[bool] = True,
) -> None:
"""To log the input data as json file artifact."""
file_path = os.path.join(data_dir, f"{filename}.json")
save_json(data, file_path)
self.run.log_file(file_path, name=filename, is_output=is_output)
def flush_tracker(self) -> None:
"""Reset the steps and delete the temporary local directory."""
self._reset()
shutil.rmtree(self.temp_dir)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/promptlayer_callback.py | """Callback handler for promptlayer."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from uuid import UUID
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
LLMResult,
)
if TYPE_CHECKING:
import promptlayer
def _lazy_import_promptlayer() -> promptlayer:
"""Lazy import promptlayer to avoid circular imports."""
try:
import promptlayer
except ImportError:
raise ImportError(
"The PromptLayerCallbackHandler requires the promptlayer package. "
" Please install it with `pip install promptlayer`."
)
return promptlayer
class PromptLayerCallbackHandler(BaseCallbackHandler):
"""Callback handler for promptlayer."""
def __init__(
self,
pl_id_callback: Optional[Callable[..., Any]] = None,
pl_tags: Optional[List[str]] = None,
) -> None:
"""Initialize the PromptLayerCallbackHandler."""
_lazy_import_promptlayer()
self.pl_id_callback = pl_id_callback
self.pl_tags = pl_tags or []
self.runs: Dict[UUID, Dict[str, Any]] = {}
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
self.runs[run_id] = {
"messages": [self._create_message_dicts(m)[0] for m in messages],
"invocation_params": kwargs.get("invocation_params", {}),
"name": ".".join(serialized["id"]),
"request_start_time": datetime.datetime.now().timestamp(),
"tags": tags,
}
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
self.runs[run_id] = {
"prompts": prompts,
"invocation_params": kwargs.get("invocation_params", {}),
"name": ".".join(serialized["id"]),
"request_start_time": datetime.datetime.now().timestamp(),
"tags": tags,
}
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
from promptlayer.utils import get_api_key, promptlayer_api_request
run_info = self.runs.get(run_id, {})
if not run_info:
return
run_info["request_end_time"] = datetime.datetime.now().timestamp()
for i in range(len(response.generations)):
generation = response.generations[i][0]
resp = {
"text": generation.text,
"llm_output": response.llm_output,
}
model_params = run_info.get("invocation_params", {})
is_chat_model = run_info.get("messages", None) is not None
model_input = (
run_info.get("messages", [])[i]
if is_chat_model
else [run_info.get("prompts", [])[i]]
)
model_response = (
[self._convert_message_to_dict(generation.message)]
if is_chat_model and isinstance(generation, ChatGeneration)
else resp
)
pl_request_id = promptlayer_api_request(
run_info.get("name"),
"langchain",
model_input,
model_params,
self.pl_tags,
model_response,
run_info.get("request_start_time"),
run_info.get("request_end_time"),
get_api_key(),
return_pl_id=bool(self.pl_id_callback is not None),
metadata={
"_langchain_run_id": str(run_id),
"_langchain_parent_run_id": str(parent_run_id),
"_langchain_tags": str(run_info.get("tags", [])),
},
)
if self.pl_id_callback:
self.pl_id_callback(pl_request_id)
def _convert_message_to_dict(self, message: BaseMessage) -> Dict[str, Any]:
if isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def _create_message_dicts(
self, messages: List[BaseMessage]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {}
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/utils.py | import hashlib
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple, Union
from langchain_core.utils import guard_import
def import_spacy() -> Any:
"""Import the spacy python package and raise an error if it is not installed."""
return guard_import("spacy")
def import_pandas() -> Any:
"""Import the pandas python package and raise an error if it is not installed."""
return guard_import("pandas")
def import_textstat() -> Any:
"""Import the textstat python package and raise an error if it is not installed."""
return guard_import("textstat")
def _flatten_dict(
nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
) -> Iterable[Tuple[str, Any]]:
"""
Generator that yields flattened items from a nested dictionary for a flat dict.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Yields:
(str, any): A key-value pair from the flattened dictionary.
"""
for key, value in nested_dict.items():
new_key = parent_key + sep + key if parent_key else key
if isinstance(value, dict):
yield from _flatten_dict(value, new_key, sep)
else:
yield new_key, value
def flatten_dict(
nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
) -> Dict[str, Any]:
"""Flatten a nested dictionary into a flat dictionary.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Returns:
(dict): A flat dictionary.
"""
flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)}
return flat_dict
def hash_string(s: str) -> str:
"""Hash a string using sha1.
Parameters:
s (str): The string to hash.
Returns:
(str): The hashed string.
"""
return hashlib.sha1(s.encode("utf-8")).hexdigest()
def load_json(json_path: Union[str, Path]) -> str:
"""Load json file to a string.
Parameters:
json_path (str): The path to the json file.
Returns:
(str): The string representation of the json file.
"""
with open(json_path, "r") as f:
data = f.read()
return data
class BaseMetadataCallbackHandler:
"""Handle the metadata and associated function states for callbacks.
Attributes:
step (int): The current step.
starts (int): The number of times the start method has been called.
ends (int): The number of times the end method has been called.
errors (int): The number of times the error method has been called.
text_ctr (int): The number of times the text method has been called.
ignore_llm_ (bool): Whether to ignore llm callbacks.
ignore_chain_ (bool): Whether to ignore chain callbacks.
ignore_agent_ (bool): Whether to ignore agent callbacks.
ignore_retriever_ (bool): Whether to ignore retriever callbacks.
always_verbose_ (bool): Whether to always be verbose.
chain_starts (int): The number of times the chain start method has been called.
chain_ends (int): The number of times the chain end method has been called.
llm_starts (int): The number of times the llm start method has been called.
llm_ends (int): The number of times the llm end method has been called.
llm_streams (int): The number of times the text method has been called.
tool_starts (int): The number of times the tool start method has been called.
tool_ends (int): The number of times the tool end method has been called.
agent_ends (int): The number of times the agent end method has been called.
on_llm_start_records (list): A list of records of the on_llm_start method.
on_llm_token_records (list): A list of records of the on_llm_token method.
on_llm_end_records (list): A list of records of the on_llm_end method.
on_chain_start_records (list): A list of records of the on_chain_start method.
on_chain_end_records (list): A list of records of the on_chain_end method.
on_tool_start_records (list): A list of records of the on_tool_start method.
on_tool_end_records (list): A list of records of the on_tool_end method.
on_agent_finish_records (list): A list of records of the on_agent_end method.
"""
def __init__(self) -> None:
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
self.on_llm_start_records: list = []
self.on_llm_token_records: list = []
self.on_llm_end_records: list = []
self.on_chain_start_records: list = []
self.on_chain_end_records: list = []
self.on_tool_start_records: list = []
self.on_tool_end_records: list = []
self.on_text_records: list = []
self.on_agent_finish_records: list = []
self.on_agent_action_records: list = []
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return self.always_verbose_
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
def get_custom_callback_meta(self) -> Dict[str, Any]:
return {
"step": self.step,
"starts": self.starts,
"ends": self.ends,
"errors": self.errors,
"text_ctr": self.text_ctr,
"chain_starts": self.chain_starts,
"chain_ends": self.chain_ends,
"llm_starts": self.llm_starts,
"llm_ends": self.llm_ends,
"llm_streams": self.llm_streams,
"tool_starts": self.tool_starts,
"tool_ends": self.tool_ends,
"agent_ends": self.agent_ends,
}
def reset_callback_meta(self) -> None:
"""Reset the callback metadata."""
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
self.on_llm_start_records = []
self.on_llm_token_records = []
self.on_llm_end_records = []
self.on_chain_start_records = []
self.on_chain_end_records = []
self.on_tool_start_records = []
self.on_tool_end_records = []
self.on_text_records = []
self.on_agent_finish_records = []
self.on_agent_action_records = []
return None
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/arize_callback.py | from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_community.callbacks.utils import import_pandas
class ArizeCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Arize."""
def __init__(
self,
model_id: Optional[str] = None,
model_version: Optional[str] = None,
SPACE_KEY: Optional[str] = None,
API_KEY: Optional[str] = None,
) -> None:
"""Initialize callback handler."""
super().__init__()
self.model_id = model_id
self.model_version = model_version
self.space_key = SPACE_KEY
self.api_key = API_KEY
self.prompt_records: List[str] = []
self.response_records: List[str] = []
self.prediction_ids: List[str] = []
self.pred_timestamps: List[int] = []
self.response_embeddings: List[float] = []
self.prompt_embeddings: List[float] = []
self.prompt_tokens = 0
self.completion_tokens = 0
self.total_tokens = 0
self.step = 0
from arize.pandas.embeddings import EmbeddingGenerator, UseCases
from arize.pandas.logger import Client
self.generator = EmbeddingGenerator.from_use_case(
use_case=UseCases.NLP.SEQUENCE_CLASSIFICATION,
model_name="distilbert-base-uncased",
tokenizer_max_length=512,
batch_size=256,
)
self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
raise ValueError("❌ CHANGE SPACE AND API KEYS")
else:
print("✅ Arize client setup done! Now you can start using Arize!") # noqa: T201
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
for prompt in prompts:
self.prompt_records.append(prompt.replace("\n", ""))
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
pd = import_pandas()
from arize.utils.types import (
EmbeddingColumnNames,
Environments,
ModelTypes,
Schema,
)
# Safe check if 'llm_output' and 'token_usage' exist
if response.llm_output and "token_usage" in response.llm_output:
self.prompt_tokens = response.llm_output["token_usage"].get(
"prompt_tokens", 0
)
self.total_tokens = response.llm_output["token_usage"].get(
"total_tokens", 0
)
self.completion_tokens = response.llm_output["token_usage"].get(
"completion_tokens", 0
)
else:
self.prompt_tokens = self.total_tokens = self.completion_tokens = (
0 # assign default value
)
for generations in response.generations:
for generation in generations:
prompt = self.prompt_records[self.step]
self.step = self.step + 1
prompt_embedding = pd.Series(
self.generator.generate_embeddings(
text_col=pd.Series(prompt.replace("\n", " "))
).reset_index(drop=True)
)
# Assigning text to response_text instead of response
response_text = generation.text.replace("\n", " ")
response_embedding = pd.Series(
self.generator.generate_embeddings(
text_col=pd.Series(generation.text.replace("\n", " "))
).reset_index(drop=True)
)
pred_timestamp = datetime.now().timestamp()
# Define the columns and data
columns = [
"prediction_ts",
"response",
"prompt",
"response_vector",
"prompt_vector",
"prompt_token",
"completion_token",
"total_token",
]
data = [
[
pred_timestamp,
response_text,
prompt,
response_embedding[0],
prompt_embedding[0],
self.prompt_tokens,
self.total_tokens,
self.completion_tokens,
]
]
# Create the DataFrame
df = pd.DataFrame(data, columns=columns)
# Declare prompt and response columns
prompt_columns = EmbeddingColumnNames(
vector_column_name="prompt_vector", data_column_name="prompt"
)
response_columns = EmbeddingColumnNames(
vector_column_name="response_vector", data_column_name="response"
)
schema = Schema(
timestamp_column_name="prediction_ts",
tag_column_names=[
"prompt_token",
"completion_token",
"total_token",
],
prompt_column_names=prompt_columns,
response_column_names=response_columns,
)
response_from_arize = self.arize_client.log(
dataframe=df,
schema=schema,
model_id=self.model_id,
model_version=self.model_version,
model_type=ModelTypes.GENERATIVE_LLM,
environment=Environments.PRODUCTION,
)
if response_from_arize.status_code == 200:
print("✅ Successfully logged data to Arize!") # noqa: T201
else:
print(f'❌ Logging failed "{response_from_arize.text}"') # noqa: T201
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing."""
pass
def on_tool_end(
self,
output: Any,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
pass
def on_text(self, text: str, **kwargs: Any) -> None:
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/__init__.py | """**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.callbacks.aim_callback import (
AimCallbackHandler,
)
from langchain_community.callbacks.argilla_callback import (
ArgillaCallbackHandler,
)
from langchain_community.callbacks.arize_callback import (
ArizeCallbackHandler,
)
from langchain_community.callbacks.arthur_callback import (
ArthurCallbackHandler,
)
from langchain_community.callbacks.clearml_callback import (
ClearMLCallbackHandler,
)
from langchain_community.callbacks.comet_ml_callback import (
CometCallbackHandler,
)
from langchain_community.callbacks.context_callback import (
ContextCallbackHandler,
)
from langchain_community.callbacks.fiddler_callback import (
FiddlerCallbackHandler,
)
from langchain_community.callbacks.flyte_callback import (
FlyteCallbackHandler,
)
from langchain_community.callbacks.human import (
HumanApprovalCallbackHandler,
)
from langchain_community.callbacks.infino_callback import (
InfinoCallbackHandler,
)
from langchain_community.callbacks.labelstudio_callback import (
LabelStudioCallbackHandler,
)
from langchain_community.callbacks.llmonitor_callback import (
LLMonitorCallbackHandler,
)
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
from langchain_community.callbacks.mlflow_callback import (
MlflowCallbackHandler,
)
from langchain_community.callbacks.openai_info import (
OpenAICallbackHandler,
)
from langchain_community.callbacks.promptlayer_callback import (
PromptLayerCallbackHandler,
)
from langchain_community.callbacks.sagemaker_callback import (
SageMakerCallbackHandler,
)
from langchain_community.callbacks.streamlit import (
LLMThoughtLabeler,
StreamlitCallbackHandler,
)
from langchain_community.callbacks.trubrics_callback import (
TrubricsCallbackHandler,
)
from langchain_community.callbacks.upstash_ratelimit_callback import (
UpstashRatelimitError,
UpstashRatelimitHandler, # noqa: F401
)
from langchain_community.callbacks.uptrain_callback import (
UpTrainCallbackHandler,
)
from langchain_community.callbacks.wandb_callback import (
WandbCallbackHandler,
)
from langchain_community.callbacks.whylabs_callback import (
WhyLabsCallbackHandler,
)
_module_lookup = {
"AimCallbackHandler": "langchain_community.callbacks.aim_callback",
"ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback",
"ArizeCallbackHandler": "langchain_community.callbacks.arize_callback",
"ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback",
"ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback",
"CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback",
"ContextCallbackHandler": "langchain_community.callbacks.context_callback",
"FiddlerCallbackHandler": "langchain_community.callbacks.fiddler_callback",
"FlyteCallbackHandler": "langchain_community.callbacks.flyte_callback",
"HumanApprovalCallbackHandler": "langchain_community.callbacks.human",
"InfinoCallbackHandler": "langchain_community.callbacks.infino_callback",
"LLMThoughtLabeler": "langchain_community.callbacks.streamlit",
"LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback",
"LabelStudioCallbackHandler": "langchain_community.callbacks.labelstudio_callback",
"MlflowCallbackHandler": "langchain_community.callbacks.mlflow_callback",
"OpenAICallbackHandler": "langchain_community.callbacks.openai_info",
"PromptLayerCallbackHandler": "langchain_community.callbacks.promptlayer_callback",
"SageMakerCallbackHandler": "langchain_community.callbacks.sagemaker_callback",
"StreamlitCallbackHandler": "langchain_community.callbacks.streamlit",
"TrubricsCallbackHandler": "langchain_community.callbacks.trubrics_callback",
"UpstashRatelimitError": "langchain_community.callbacks.upstash_ratelimit_callback",
"UpstashRatelimitHandler": "langchain_community.callbacks.upstash_ratelimit_callback", # noqa
"UpTrainCallbackHandler": "langchain_community.callbacks.uptrain_callback",
"WandbCallbackHandler": "langchain_community.callbacks.wandb_callback",
"WhyLabsCallbackHandler": "langchain_community.callbacks.whylabs_callback",
"get_openai_callback": "langchain_community.callbacks.manager",
"wandb_tracing_enabled": "langchain_community.callbacks.manager",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"AimCallbackHandler",
"ArgillaCallbackHandler",
"ArizeCallbackHandler",
"ArthurCallbackHandler",
"ClearMLCallbackHandler",
"CometCallbackHandler",
"ContextCallbackHandler",
"FiddlerCallbackHandler",
"FlyteCallbackHandler",
"HumanApprovalCallbackHandler",
"InfinoCallbackHandler",
"LLMThoughtLabeler",
"LLMonitorCallbackHandler",
"LabelStudioCallbackHandler",
"MlflowCallbackHandler",
"OpenAICallbackHandler",
"PromptLayerCallbackHandler",
"SageMakerCallbackHandler",
"StreamlitCallbackHandler",
"TrubricsCallbackHandler",
"UpstashRatelimitError",
"UpstashRatelimitHandler",
"UpTrainCallbackHandler",
"WandbCallbackHandler",
"WhyLabsCallbackHandler",
"get_openai_callback",
"wandb_tracing_enabled",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/bedrock_anthropic_callback.py | import threading
from typing import Any, Dict, List, Union
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
MODEL_COST_PER_1K_INPUT_TOKENS = {
"anthropic.claude-instant-v1": 0.0008,
"anthropic.claude-v2": 0.008,
"anthropic.claude-v2:1": 0.008,
"anthropic.claude-3-sonnet-20240229-v1:0": 0.003,
"anthropic.claude-3-5-sonnet-20240620-v1:0": 0.003,
"anthropic.claude-3-5-sonnet-20241022-v2:0": 0.003,
"anthropic.claude-3-haiku-20240307-v1:0": 0.00025,
}
MODEL_COST_PER_1K_OUTPUT_TOKENS = {
"anthropic.claude-instant-v1": 0.0024,
"anthropic.claude-v2": 0.024,
"anthropic.claude-v2:1": 0.024,
"anthropic.claude-3-sonnet-20240229-v1:0": 0.015,
"anthropic.claude-3-5-sonnet-20240620-v1:0": 0.015,
"anthropic.claude-3-5-sonnet-20241022-v2:0": 0.015,
"anthropic.claude-3-haiku-20240307-v1:0": 0.00125,
}
def _get_anthropic_claude_token_cost(
prompt_tokens: int, completion_tokens: int, model_id: Union[str, None]
) -> float:
"""Get the cost of tokens for the Claude model."""
if model_id not in MODEL_COST_PER_1K_INPUT_TOKENS:
raise ValueError(
f"Unknown model: {model_id}. Please provide a valid Anthropic model name."
"Known models are: " + ", ".join(MODEL_COST_PER_1K_INPUT_TOKENS.keys())
)
return (prompt_tokens / 1000) * MODEL_COST_PER_1K_INPUT_TOKENS[model_id] + (
completion_tokens / 1000
) * MODEL_COST_PER_1K_OUTPUT_TOKENS[model_id]
class BedrockAnthropicTokenUsageCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks bedrock anthropic info."""
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
total_cost: float = 0.0
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
def __repr__(self) -> str:
return (
f"Tokens Used: {self.total_tokens}\n"
f"\tPrompt Tokens: {self.prompt_tokens}\n"
f"\tCompletion Tokens: {self.completion_tokens}\n"
f"Successful Requests: {self.successful_requests}\n"
f"Total Cost (USD): ${self.total_cost}"
)
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
pass
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Print out the token."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
if response.llm_output is None:
return None
if "usage" not in response.llm_output:
with self._lock:
self.successful_requests += 1
return None
# compute tokens and cost for this request
token_usage = response.llm_output["usage"]
completion_tokens = token_usage.get("completion_tokens", 0)
prompt_tokens = token_usage.get("prompt_tokens", 0)
total_tokens = token_usage.get("total_tokens", 0)
model_id = response.llm_output.get("model_id", None)
total_cost = _get_anthropic_claude_token_cost(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
model_id=model_id,
)
# update shared state behind lock
with self._lock:
self.total_cost += total_cost
self.total_tokens += total_tokens
self.prompt_tokens += prompt_tokens
self.completion_tokens += completion_tokens
self.successful_requests += 1
def __copy__(self) -> "BedrockAnthropicTokenUsageCallbackHandler":
"""Return a copy of the callback handler."""
return self
def __deepcopy__(self, memo: Any) -> "BedrockAnthropicTokenUsageCallbackHandler":
"""Return a deep copy of the callback handler."""
return self
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/infino_callback.py | import time
from typing import Any, Dict, List, Optional, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.utils import guard_import
def import_infino() -> Any:
"""Import the infino client."""
return guard_import("infinopy").InfinoClient()
def import_tiktoken() -> Any:
"""Import tiktoken for counting tokens for OpenAI models."""
return guard_import("tiktoken")
def get_num_tokens(string: str, openai_model_name: str) -> int:
"""Calculate num tokens for OpenAI with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/main
/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
tiktoken = import_tiktoken()
encoding = tiktoken.encoding_for_model(openai_model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
class InfinoCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Infino."""
def __init__(
self,
model_id: Optional[str] = None,
model_version: Optional[str] = None,
verbose: bool = False,
) -> None:
# Set Infino client
self.client = import_infino()
self.model_id = model_id
self.model_version = model_version
self.verbose = verbose
self.is_chat_openai_model = False
self.chat_openai_model_name = "gpt-3.5-turbo"
def _send_to_infino(
self,
key: str,
value: Any,
is_ts: bool = True,
) -> None:
"""Send the key-value to Infino.
Parameters:
key (str): the key to send to Infino.
value (Any): the value to send to Infino.
is_ts (bool): if True, the value is part of a time series, else it
is sent as a log message.
"""
payload = {
"date": int(time.time()),
key: value,
"labels": {
"model_id": self.model_id,
"model_version": self.model_version,
},
}
if self.verbose:
print(f"Tracking {key} with Infino: {payload}") # noqa: T201
# Append to Infino time series only if is_ts is True, otherwise
# append to Infino log.
if is_ts:
self.client.append_ts(payload)
else:
self.client.append_log(payload)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
"""Log the prompts to Infino, and set start time and error flag."""
for prompt in prompts:
self._send_to_infino("prompt", prompt, is_ts=False)
# Set the error flag to indicate no error (this will get overridden
# in on_llm_error if an error occurs).
self.error = 0
# Set the start time (so that we can calculate the request
# duration in on_llm_end).
self.start_time = time.time()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log the latency, error, token usage, and response to Infino."""
# Calculate and track the request latency.
self.end_time = time.time()
duration = self.end_time - self.start_time
self._send_to_infino("latency", duration)
# Track success or error flag.
self._send_to_infino("error", self.error)
# Track prompt response.
for generations in response.generations:
for generation in generations:
self._send_to_infino("prompt_response", generation.text, is_ts=False)
# Track token usage (for non-chat models).
if (response.llm_output is not None) and isinstance(response.llm_output, Dict):
token_usage = response.llm_output["token_usage"]
if token_usage is not None:
prompt_tokens = token_usage["prompt_tokens"]
total_tokens = token_usage["total_tokens"]
completion_tokens = token_usage["completion_tokens"]
self._send_to_infino("prompt_tokens", prompt_tokens)
self._send_to_infino("total_tokens", total_tokens)
self._send_to_infino("completion_tokens", completion_tokens)
# Track completion token usage (for openai chat models).
if self.is_chat_openai_model:
messages = " ".join(
cast(str, cast(ChatGeneration, generation).message.content)
for generation in generations
)
completion_tokens = get_num_tokens(
messages, openai_model_name=self.chat_openai_model_name
)
self._send_to_infino("completion_tokens", completion_tokens)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Set the error flag."""
self.error = 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Do nothing when LLM chain starts."""
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Do nothing when LLM chain ends."""
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Need to log the error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
"""Run when LLM starts running."""
# Currently, for chat models, we only support input prompts for ChatOpenAI.
# Check if this model is a ChatOpenAI model.
values = serialized.get("id")
if values:
for value in values:
if value == "ChatOpenAI":
self.is_chat_openai_model = True
break
# Track prompt tokens for ChatOpenAI model.
if self.is_chat_openai_model:
invocation_params = kwargs.get("invocation_params")
if invocation_params:
model_name = invocation_params.get("model_name")
if model_name:
self.chat_openai_model_name = model_name
prompt_tokens = 0
for message_list in messages:
message_string = " ".join(
cast(str, msg.content) for msg in message_list
)
num_tokens = get_num_tokens(
message_string,
openai_model_name=self.chat_openai_model_name,
)
prompt_tokens += num_tokens
self._send_to_infino("prompt_tokens", prompt_tokens)
if self.verbose:
print( # noqa: T201
f"on_chat_model_start: is_chat_openai_model= \
{self.is_chat_openai_model}, \
chat_openai_model_name={self.chat_openai_model_name}"
)
# Send the prompt to infino
prompt = " ".join(
cast(str, msg.content) for sublist in messages for msg in sublist
)
self._send_to_infino("prompt", prompt, is_ts=False)
# Set the error flag to indicate no error (this will get overridden
# in on_llm_error if an error occurs).
self.error = 0
# Set the start time (so that we can calculate the request
# duration in on_llm_end).
self.start_time = time.time()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/manager.py | from __future__ import annotations
import logging
from contextlib import contextmanager
from contextvars import ContextVar
from typing import (
Generator,
Optional,
)
from langchain_core.tracers.context import register_configure_hook
from langchain_community.callbacks.bedrock_anthropic_callback import (
BedrockAnthropicTokenUsageCallbackHandler,
)
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.tracers.comet import CometTracer
from langchain_community.callbacks.tracers.wandb import WandbTracer
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
bedrock_anthropic_callback_var: (ContextVar)[
Optional[BedrockAnthropicTokenUsageCallbackHandler]
] = ContextVar("bedrock_anthropic_callback", default=None)
wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar(
"tracing_wandb_callback", default=None
)
comet_tracing_callback_var: ContextVar[Optional[CometTracer]] = ContextVar(
"tracing_comet_callback", default=None
)
register_configure_hook(openai_callback_var, True)
register_configure_hook(bedrock_anthropic_callback_var, True)
register_configure_hook(
wandb_tracing_callback_var, True, WandbTracer, "LANGCHAIN_WANDB_TRACING"
)
register_configure_hook(
comet_tracing_callback_var, True, CometTracer, "LANGCHAIN_COMET_TRACING"
)
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def get_bedrock_anthropic_callback() -> (
Generator[BedrockAnthropicTokenUsageCallbackHandler, None, None]
):
"""Get the Bedrock anthropic callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
BedrockAnthropicTokenUsageCallbackHandler:
The Bedrock anthropic callback handler.
Example:
>>> with get_bedrock_anthropic_callback() as cb:
... # Use the Bedrock anthropic callback handler
"""
cb = BedrockAnthropicTokenUsageCallbackHandler()
bedrock_anthropic_callback_var.set(cb)
yield cb
bedrock_anthropic_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/wandb_callback.py | import json
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain_core._api import warn_deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.utils import guard_import
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
)
def import_wandb() -> Any:
"""Import the wandb python package and raise an error if it is not installed."""
return guard_import("wandb")
def load_json_to_dict(json_path: Union[str, Path]) -> dict:
"""Load json file to a dictionary.
Parameters:
json_path (str): The path to the json file.
Returns:
(dict): The dictionary representation of the json file.
"""
with open(json_path, "r") as f:
data = json.load(f)
return data
def analyze_text(
text: str,
complexity_metrics: bool = True,
visualize: bool = True,
nlp: Any = None,
output_dir: Optional[Union[str, Path]] = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
complexity_metrics (bool): Whether to compute complexity metrics.
visualize (bool): Whether to visualize the text.
nlp (spacy.lang): The spacy language model to use for visualization.
output_dir (str): The directory to save the visualization files to.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized in a wandb.Html element.
"""
resp = {}
textstat = import_textstat()
wandb = import_wandb()
spacy = import_spacy()
if complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update(text_complexity_metrics)
if visualize and nlp and output_dir is not None:
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style="dep", jupyter=False, page=True)
dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html")
dep_output_path.open("w", encoding="utf-8").write(dep_out)
ent_out = spacy.displacy.render(doc, style="ent", jupyter=False, page=True)
ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html")
ent_output_path.open("w", encoding="utf-8").write(ent_out)
text_visualizations = {
"dependency_tree": wandb.Html(str(dep_output_path)),
"entities": wandb.Html(str(ent_output_path)),
}
resp.update(text_visualizations)
return resp
def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(wandb.Html): The html element."""
wandb = import_wandb()
formatted_prompt = prompt.replace("\n", "<br>")
formatted_generation = generation.replace("\n", "<br>")
return wandb.Html(
f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
""",
inject=False,
)
class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Weights and Biases.
Parameters:
job_type (str): The type of job.
project (str): The project to log to.
entity (str): The entity to log to.
tags (list): The tags to log.
group (str): The group to log to.
name (str): The name of the run.
notes (str): The notes to log.
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics.
stream_logs (bool): Whether to stream callback actions to W&B
This handler will utilize the associated callback method called and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action. It then logs the response using the run.log() method to Weights and Biases.
"""
def __init__(
self,
job_type: Optional[str] = None,
project: Optional[str] = "langchain_callback_demo",
entity: Optional[str] = None,
tags: Optional[Sequence] = None,
group: Optional[str] = None,
name: Optional[str] = None,
notes: Optional[str] = None,
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
) -> None:
"""Initialize callback handler."""
wandb = import_wandb()
import_pandas()
import_textstat()
spacy = import_spacy()
super().__init__()
self.job_type = job_type
self.project = project
self.entity = entity
self.tags = tags
self.group = group
self.name = name
self.notes = notes
self.visualize = visualize
self.complexity_metrics = complexity_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
self.run = wandb.init(
job_type=self.job_type,
project=self.project,
entity=self.entity,
tags=self.tags,
group=self.group,
name=self.name,
notes=self.notes,
)
warning = (
"DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor "
"of the `WandbTracer`. Please update your code to use the `WandbTracer` "
"instead."
)
wandb.termwarn(
warning,
repeat=False,
)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
warn_deprecated(
"0.3.8",
pending=False,
message=(
"Please use the WeaveTracer instead of the WandbCallbackHandler. "
"The WeaveTracer is a more flexible and powerful tool for logging "
"and tracing your LangChain callables."
"Find more information at https://weave-docs.wandb.ai/guides/integrations/langchain"
),
alternative=(
"Please instantiate the WeaveTracer from "
"weave.integrations.langchain import WeaveTracer ."
"For autologging simply use weave.init() and log all traces "
"from your LangChain callables."
),
)
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.run.log(prompt_resp)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(
analyze_text(
generation.text,
complexity_metrics=self.complexity_metrics,
visualize=self.visualize,
nlp=self.nlp,
output_dir=self.temp_dir.name,
)
)
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.run.log(generation_resp)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs["input"]
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
else:
raise ValueError("Unexpected data format provided!")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.on_tool_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_finish_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_action_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = (
on_llm_start_records_df[["step", "prompts", "name"]]
.dropna(axis=1)
.rename({"step": "prompt_step"}, axis=1)
)
complexity_metrics_columns = []
visualizations_columns = []
if self.complexity_metrics:
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
if self.visualize:
visualizations_columns = ["dependency_tree", "entities"]
llm_outputs_df = (
on_llm_end_records_df[
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns
]
.dropna(axis=1)
.rename({"step": "output_step", "text": "output"}, axis=1)
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df["chat_html"] = session_analysis_df[
["prompts", "output"]
].apply(
lambda row: construct_html_from_prompt_and_generation(
row["prompts"], row["output"]
),
axis=1,
)
return session_analysis_df
def flush_tracker(
self,
langchain_asset: Any = None,
reset: bool = True,
finish: bool = False,
job_type: Optional[str] = None,
project: Optional[str] = None,
entity: Optional[str] = None,
tags: Optional[Sequence] = None,
group: Optional[str] = None,
name: Optional[str] = None,
notes: Optional[str] = None,
visualize: Optional[bool] = None,
complexity_metrics: Optional[bool] = None,
) -> None:
"""Flush the tracker and reset the session.
Args:
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
job_type: The job type.
project: The project.
entity: The entity.
tags: The tags.
group: The group.
name: The name.
notes: The notes.
visualize: Whether to visualize.
complexity_metrics: Whether to compute complexity metrics.
Returns:
None
"""
pd = import_pandas()
wandb = import_wandb()
action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records))
session_analysis_table = wandb.Table(
dataframe=self._create_session_analysis_df()
)
self.run.log(
{
"action_records": action_records_table,
"session_analysis": session_analysis_table,
}
)
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, "model.json")
model_artifact = wandb.Artifact(name="model", type="model")
model_artifact.add(action_records_table, name="action_records")
model_artifact.add(session_analysis_table, name="session_analysis")
try:
langchain_asset.save(langchain_asset_path)
model_artifact.add_file(str(langchain_asset_path))
model_artifact.metadata = load_json_to_dict(langchain_asset_path)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
model_artifact.add_file(str(langchain_asset_path))
model_artifact.metadata = load_json_to_dict(langchain_asset_path)
except NotImplementedError as e:
print("Could not save model.") # noqa: T201
print(repr(e)) # noqa: T201
pass
self.run.log_artifact(model_artifact)
if finish or reset:
self.run.finish()
self.temp_dir.cleanup()
self.reset_callback_meta()
if reset:
self.__init__( # type: ignore
job_type=job_type if job_type else self.job_type,
project=project if project else self.project,
entity=entity if entity else self.entity,
tags=tags if tags else self.tags,
group=group if group else self.group,
name=name if name else self.name,
notes=notes if notes else self.notes,
visualize=visualize if visualize else self.visualize,
complexity_metrics=(
complexity_metrics
if complexity_metrics
else self.complexity_metrics
),
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/mlflow_callback.py | import logging
import os
import random
import string
import tempfile
import traceback
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
from langchain_core.utils import get_from_dict_or_env, guard_import
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
)
logger = logging.getLogger(__name__)
def import_mlflow() -> Any:
"""Import the mlflow python package and raise an error if it is not installed."""
return guard_import("mlflow")
def mlflow_callback_metrics() -> List[str]:
"""Get the metrics to log to MLFlow."""
return [
"step",
"starts",
"ends",
"errors",
"text_ctr",
"chain_starts",
"chain_ends",
"llm_starts",
"llm_ends",
"llm_streams",
"tool_starts",
"tool_ends",
"agent_ends",
"retriever_starts",
"retriever_ends",
]
def get_text_complexity_metrics() -> List[str]:
"""Get the text complexity metrics from textstat."""
return [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
# "text_standard"
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
def analyze_text(
text: str,
nlp: Any = None,
textstat: Any = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
textstat: The textstat library to use for complexity metrics calculation.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
if textstat is not None:
text_complexity_metrics = {
key: getattr(textstat, key)(text) for key in get_text_complexity_metrics()
}
resp.update({"text_complexity_metrics": text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
spacy = import_spacy()
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style="dep", jupyter=False, page=True)
ent_out = spacy.displacy.render(doc, style="ent", jupyter=False, page=True)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(str): The html string."""
formatted_prompt = prompt.replace("\n", "<br>")
formatted_generation = generation.replace("\n", "<br>")
return f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
"""
class MlflowLogger:
"""Callback Handler that logs metrics and artifacts to mlflow server.
Parameters:
name (str): Name of the run.
experiment (str): Name of the experiment.
tags (dict): Tags to be attached for the run.
tracking_uri (str): MLflow tracking server uri.
This handler implements the helper functions to initialize,
log metrics and artifacts to the mlflow server.
"""
def __init__(self, **kwargs: Any):
self.mlflow = import_mlflow()
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
self.mlflow.set_tracking_uri("databricks")
self.mlf_expid = self.mlflow.tracking.fluent._get_experiment_id()
self.mlf_exp = self.mlflow.get_experiment(self.mlf_expid)
else:
tracking_uri = get_from_dict_or_env(
kwargs, "tracking_uri", "MLFLOW_TRACKING_URI", ""
)
self.mlflow.set_tracking_uri(tracking_uri)
if run_id := kwargs.get("run_id"):
self.mlf_expid = self.mlflow.get_run(run_id).info.experiment_id
else:
# User can set other env variables described here
# > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server
experiment_name = get_from_dict_or_env(
kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME"
)
self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name)
if self.mlf_exp is not None:
self.mlf_expid = self.mlf_exp.experiment_id
else:
self.mlf_expid = self.mlflow.create_experiment(experiment_name)
self.start_run(
kwargs["run_name"], kwargs["run_tags"], kwargs.get("run_id", None)
)
self.dir = kwargs.get("artifacts_dir", "")
def start_run(
self, name: str, tags: Dict[str, str], run_id: Optional[str] = None
) -> None:
"""
If run_id is provided, it will reuse the run with the given run_id.
Otherwise, it starts a new run, auto generates the random suffix for name.
"""
if run_id is None:
if name.endswith("-%"):
rname = "".join(
random.choices(string.ascii_uppercase + string.digits, k=7)
)
name = name[:-1] + rname
run = self.mlflow.MlflowClient().create_run(
self.mlf_expid, run_name=name, tags=tags
)
run_id = run.info.run_id
self.run_id = run_id
def finish_run(self) -> None:
"""To finish the run."""
self.mlflow.end_run()
def metric(self, key: str, value: float) -> None:
"""To log metric to mlflow server."""
self.mlflow.log_metric(key, value, run_id=self.run_id)
def metrics(
self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0
) -> None:
"""To log all metrics in the input dict."""
self.mlflow.log_metrics(data, run_id=self.run_id)
def jsonf(self, data: Dict[str, Any], filename: str) -> None:
"""To log the input data as json file artifact."""
self.mlflow.log_dict(
data, os.path.join(self.dir, f"{filename}.json"), run_id=self.run_id
)
def table(self, name: str, dataframe: Any) -> None:
"""To log the input pandas dataframe as a html table"""
self.html(dataframe.to_html(), f"table_{name}")
def html(self, html: str, filename: str) -> None:
"""To log the input html string as html file artifact."""
self.mlflow.log_text(
html, os.path.join(self.dir, f"{filename}.html"), run_id=self.run_id
)
def text(self, text: str, filename: str) -> None:
"""To log the input text as text file artifact."""
self.mlflow.log_text(
text, os.path.join(self.dir, f"{filename}.txt"), run_id=self.run_id
)
def artifact(self, path: str) -> None:
"""To upload the file from given path as artifact."""
self.mlflow.log_artifact(path, run_id=self.run_id)
def langchain_artifact(self, chain: Any) -> None:
self.mlflow.langchain.log_model(chain, "langchain-model", run_id=self.run_id)
class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs metrics and artifacts to mlflow server.
Parameters:
name (str): Name of the run.
experiment (str): Name of the experiment.
tags (dict): Tags to be attached for the run.
tracking_uri (str): MLflow tracking server uri.
This handler will utilize the associated callback method called and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action. It then logs the response to mlflow server.
"""
def __init__(
self,
name: Optional[str] = "langchainrun-%",
experiment: Optional[str] = "langchain",
tags: Optional[Dict] = None,
tracking_uri: Optional[str] = None,
run_id: Optional[str] = None,
artifacts_dir: str = "",
) -> None:
"""Initialize callback handler."""
import_pandas()
import_mlflow()
super().__init__()
self.name = name
self.experiment = experiment
self.tags = tags or {}
self.tracking_uri = tracking_uri
self.run_id = run_id
self.artifacts_dir = artifacts_dir
self.temp_dir = tempfile.TemporaryDirectory()
self.mlflg = MlflowLogger(
tracking_uri=self.tracking_uri,
experiment_name=self.experiment,
run_name=self.name,
run_tags=self.tags,
run_id=self.run_id,
artifacts_dir=self.artifacts_dir,
)
self.action_records: list = []
self.nlp = None
try:
spacy = import_spacy()
except ImportError as e:
logger.warning(e.msg)
else:
try:
self.nlp = spacy.load("en_core_web_sm")
except OSError:
logger.warning(
"Run `python -m spacy download en_core_web_sm` "
"to download en_core_web_sm model for text visualization."
)
try:
self.textstat = import_textstat()
except ImportError as e:
logger.warning(e.msg)
self.textstat = None
self.metrics = {key: 0 for key in mlflow_callback_metrics()}
self.records: Dict[str, Any] = {
"on_llm_start_records": [],
"on_llm_token_records": [],
"on_llm_end_records": [],
"on_chain_start_records": [],
"on_chain_end_records": [],
"on_tool_start_records": [],
"on_tool_end_records": [],
"on_text_records": [],
"on_agent_finish_records": [],
"on_agent_action_records": [],
"on_retriever_start_records": [],
"on_retriever_end_records": [],
"action_records": [],
}
def _reset(self) -> None:
for k, v in self.metrics.items():
self.metrics[k] = 0
for k, v in self.records.items():
self.records[k] = []
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] += 1
self.metrics["starts"] += 1
llm_starts = self.metrics["llm_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp["prompt"] = prompt
self.records["on_llm_start_records"].append(prompt_resp)
self.records["action_records"].append(prompt_resp)
self.mlflg.jsonf(prompt_resp, f"llm_start_{llm_starts}_prompt_{idx}")
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.metrics["step"] += 1
self.metrics["llm_streams"] += 1
llm_streams = self.metrics["llm_streams"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_llm_token_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
self.metrics["llm_ends"] += 1
self.metrics["ends"] += 1
llm_ends = self.metrics["llm_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(
analyze_text(
generation.text,
nlp=self.nlp,
textstat=self.textstat,
)
)
if "text_complexity_metrics" in generation_resp:
complexity_metrics: Dict[str, float] = generation_resp.pop(
"text_complexity_metrics"
)
self.mlflg.metrics(
complexity_metrics,
step=self.metrics["step"],
)
self.records["on_llm_end_records"].append(generation_resp)
self.records["action_records"].append(generation_resp)
self.mlflg.jsonf(resp, f"llm_end_{llm_ends}_generation_{idx}")
if "dependency_tree" in generation_resp:
dependency_tree = generation_resp["dependency_tree"]
self.mlflg.html(
dependency_tree, "dep-" + hash_string(generation.text)
)
if "entities" in generation_resp:
entities = generation_resp["entities"]
self.mlflg.html(entities, "ent-" + hash_string(generation.text))
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.metrics["step"] += 1
self.metrics["chain_starts"] += 1
self.metrics["starts"] += 1
chain_starts = self.metrics["chain_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
if isinstance(inputs, dict):
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
elif isinstance(inputs, list):
chain_input = ",".join([str(input) for input in inputs])
else:
chain_input = str(inputs)
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.records["on_chain_start_records"].append(input_resp)
self.records["action_records"].append(input_resp)
self.mlflg.jsonf(input_resp, f"chain_start_{chain_starts}")
def on_chain_end(
self, outputs: Union[Dict[str, Any], str, List[str]], **kwargs: Any
) -> None:
"""Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
if isinstance(outputs, dict):
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
elif isinstance(outputs, list):
chain_output = ",".join(map(str, outputs))
else:
chain_output = str(outputs)
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_chain_end_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"chain_end_{chain_ends}")
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_tool_start_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"tool_start_{tool_starts}")
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running."""
output = str(output)
self.metrics["step"] += 1
self.metrics["tool_ends"] += 1
self.metrics["ends"] += 1
tool_ends = self.metrics["tool_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_tool_end_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"tool_end_{tool_ends}")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when text is received.
"""
self.metrics["step"] += 1
self.metrics["text_ctr"] += 1
text_ctr = self.metrics["text_ctr"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_text_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"on_text_{text_ctr}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
self.metrics["agent_ends"] += 1
self.metrics["ends"] += 1
agent_ends = self.metrics["agent_ends"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_agent_finish_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"agent_finish_{agent_ends}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_agent_action_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"agent_action_{tool_starts}")
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
**kwargs: Any,
) -> Any:
"""Run when Retriever starts running."""
self.metrics["step"] += 1
self.metrics["retriever_starts"] += 1
self.metrics["starts"] += 1
retriever_starts = self.metrics["retriever_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_retriever_start", "query": query})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_retriever_start_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"retriever_start_{retriever_starts}")
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> Any:
"""Run when Retriever ends running."""
self.metrics["step"] += 1
self.metrics["retriever_ends"] += 1
self.metrics["ends"] += 1
retriever_ends = self.metrics["retriever_ends"]
resp: Dict[str, Any] = {}
retriever_documents = [
{
"page_content": doc.page_content,
"metadata": {
k: (
str(v)
if not isinstance(v, list)
else ",".join(str(x) for x in v)
)
for k, v in doc.metadata.items()
},
}
for doc in documents
]
resp.update({"action": "on_retriever_end", "documents": retriever_documents})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_retriever_end_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"retriever_end_{retriever_ends}")
def on_retriever_error(self, error: BaseException, **kwargs: Any) -> Any:
"""Run when Retriever errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.records["on_llm_start_records"])
on_llm_end_records_df = pd.DataFrame(self.records["on_llm_end_records"])
llm_input_columns = ["step", "prompt"]
if "name" in on_llm_start_records_df.columns:
llm_input_columns.append("name")
elif "id" in on_llm_start_records_df.columns:
# id is llm class's full import path. For example:
# ["langchain", "llms", "openai", "AzureOpenAI"]
on_llm_start_records_df["name"] = on_llm_start_records_df["id"].apply(
lambda id_: id_[-1]
)
llm_input_columns.append("name")
llm_input_prompts_df = (
on_llm_start_records_df[llm_input_columns]
.dropna(axis=1)
.rename({"step": "prompt_step"}, axis=1)
)
complexity_metrics_columns = (
get_text_complexity_metrics() if self.textstat is not None else []
)
visualizations_columns = (
["dependency_tree", "entities"] if self.nlp is not None else []
)
token_usage_columns = [
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
token_usage_columns = [
x for x in token_usage_columns if x in on_llm_end_records_df.columns
]
llm_outputs_df = (
on_llm_end_records_df[
[
"step",
"text",
]
+ token_usage_columns
+ complexity_metrics_columns
+ visualizations_columns
]
.dropna(axis=1)
.rename({"step": "output_step", "text": "output"}, axis=1)
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df["chat_html"] = session_analysis_df[
["prompt", "output"]
].apply(
lambda row: construct_html_from_prompt_and_generation(
row["prompt"], row["output"]
),
axis=1,
)
return session_analysis_df
def _contain_llm_records(self) -> bool:
return bool(self.records["on_llm_start_records"])
def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None:
pd = import_pandas()
self.mlflg.table("action_records", pd.DataFrame(self.records["action_records"]))
if self._contain_llm_records():
session_analysis_df = self._create_session_analysis_df()
chat_html = session_analysis_df.pop("chat_html")
chat_html = chat_html.replace("\n", "", regex=True)
self.mlflg.table("session_analysis", pd.DataFrame(session_analysis_df))
self.mlflg.html("".join(chat_html.tolist()), "chat_html")
if langchain_asset:
# To avoid circular import error
# mlflow only supports LLMChain asset
if "langchain.chains.llm.LLMChain" in str(type(langchain_asset)):
self.mlflg.langchain_artifact(langchain_asset)
else:
langchain_asset_path = str(Path(self.temp_dir.name, "model.json"))
try:
langchain_asset.save(langchain_asset_path)
self.mlflg.artifact(langchain_asset_path)
except ValueError:
try:
langchain_asset.save_agent(langchain_asset_path)
self.mlflg.artifact(langchain_asset_path)
except AttributeError:
print("Could not save model.") # noqa: T201
traceback.print_exc()
pass
except NotImplementedError:
print("Could not save model.") # noqa: T201
traceback.print_exc()
pass
except NotImplementedError:
print("Could not save model.") # noqa: T201
traceback.print_exc()
pass
if finish:
self.mlflg.finish_run()
self._reset()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/callbacks/openai_info.py | """Callback Handler that prints to std out."""
import threading
from typing import Any, Dict, List
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration, LLMResult
MODEL_COST_PER_1K_TOKENS = {
# OpenAI o1-preview input
"o1-preview": 0.015,
"o1-preview-2024-09-12": 0.015,
# OpenAI o1-preview output
"o1-preview-completion": 0.06,
"o1-preview-2024-09-12-completion": 0.06,
# OpenAI o1-mini input
"o1-mini": 0.003,
"o1-mini-2024-09-12": 0.003,
# OpenAI o1-mini output
"o1-mini-completion": 0.012,
"o1-mini-2024-09-12-completion": 0.012,
# GPT-4o-mini input
"gpt-4o-mini": 0.00015,
"gpt-4o-mini-2024-07-18": 0.00015,
# GPT-4o-mini output
"gpt-4o-mini-completion": 0.0006,
"gpt-4o-mini-2024-07-18-completion": 0.0006,
# GPT-4o input
"gpt-4o": 0.0025,
"gpt-4o-2024-05-13": 0.005,
"gpt-4o-2024-08-06": 0.0025,
# GPT-4o output
"gpt-4o-completion": 0.01,
"gpt-4o-2024-05-13-completion": 0.015,
"gpt-4o-2024-08-06-completion": 0.01,
# GPT-4 input
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
"gpt-4-32k-0314": 0.06,
"gpt-4-32k-0613": 0.06,
"gpt-4-vision-preview": 0.01,
"gpt-4-1106-preview": 0.01,
"gpt-4-0125-preview": 0.01,
"gpt-4-turbo-preview": 0.01,
"gpt-4-turbo": 0.01,
"gpt-4-turbo-2024-04-09": 0.01,
# GPT-4 output
"gpt-4-completion": 0.06,
"gpt-4-0314-completion": 0.06,
"gpt-4-0613-completion": 0.06,
"gpt-4-32k-completion": 0.12,
"gpt-4-32k-0314-completion": 0.12,
"gpt-4-32k-0613-completion": 0.12,
"gpt-4-vision-preview-completion": 0.03,
"gpt-4-1106-preview-completion": 0.03,
"gpt-4-0125-preview-completion": 0.03,
"gpt-4-turbo-preview-completion": 0.03,
"gpt-4-turbo-completion": 0.03,
"gpt-4-turbo-2024-04-09-completion": 0.03,
# GPT-3.5 input
# gpt-3.5-turbo points at gpt-3.5-turbo-0613 until Feb 16, 2024.
# Switches to gpt-3.5-turbo-0125 after.
"gpt-3.5-turbo": 0.0015,
"gpt-3.5-turbo-0125": 0.0005,
"gpt-3.5-turbo-0301": 0.0015,
"gpt-3.5-turbo-0613": 0.0015,
"gpt-3.5-turbo-1106": 0.001,
"gpt-3.5-turbo-instruct": 0.0015,
"gpt-3.5-turbo-16k": 0.003,
"gpt-3.5-turbo-16k-0613": 0.003,
# GPT-3.5 output
# gpt-3.5-turbo points at gpt-3.5-turbo-0613 until Feb 16, 2024.
# Switches to gpt-3.5-turbo-0125 after.
"gpt-3.5-turbo-completion": 0.002,
"gpt-3.5-turbo-0125-completion": 0.0015,
"gpt-3.5-turbo-0301-completion": 0.002,
"gpt-3.5-turbo-0613-completion": 0.002,
"gpt-3.5-turbo-1106-completion": 0.002,
"gpt-3.5-turbo-instruct-completion": 0.002,
"gpt-3.5-turbo-16k-completion": 0.004,
"gpt-3.5-turbo-16k-0613-completion": 0.004,
# Azure GPT-35 input
"gpt-35-turbo": 0.0015, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0125": 0.0005,
"gpt-35-turbo-0301": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613": 0.0015,
"gpt-35-turbo-instruct": 0.0015,
"gpt-35-turbo-16k": 0.003,
"gpt-35-turbo-16k-0613": 0.003,
# Azure GPT-35 output
"gpt-35-turbo-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0125-completion": 0.0015,
"gpt-35-turbo-0301-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613-completion": 0.002,
"gpt-35-turbo-instruct-completion": 0.002,
"gpt-35-turbo-16k-completion": 0.004,
"gpt-35-turbo-16k-0613-completion": 0.004,
# Others
"text-ada-001": 0.0004,
"ada": 0.0004,
"text-babbage-001": 0.0005,
"babbage": 0.0005,
"text-curie-001": 0.002,
"curie": 0.002,
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
# Fine Tuned input
"babbage-002-finetuned": 0.0016,
"davinci-002-finetuned": 0.012,
"gpt-3.5-turbo-0613-finetuned": 0.003,
"gpt-3.5-turbo-1106-finetuned": 0.003,
"gpt-3.5-turbo-0125-finetuned": 0.003,
"gpt-4o-mini-2024-07-18-finetuned": 0.0003,
# Fine Tuned output
"babbage-002-finetuned-completion": 0.0016,
"davinci-002-finetuned-completion": 0.012,
"gpt-3.5-turbo-0613-finetuned-completion": 0.006,
"gpt-3.5-turbo-1106-finetuned-completion": 0.006,
"gpt-3.5-turbo-0125-finetuned-completion": 0.006,
"gpt-4o-mini-2024-07-18-finetuned-completion": 0.0012,
# Azure Fine Tuned input
"babbage-002-azure-finetuned": 0.0004,
"davinci-002-azure-finetuned": 0.002,
"gpt-35-turbo-0613-azure-finetuned": 0.0015,
# Azure Fine Tuned output
"babbage-002-azure-finetuned-completion": 0.0004,
"davinci-002-azure-finetuned-completion": 0.002,
"gpt-35-turbo-0613-azure-finetuned-completion": 0.002,
# Legacy fine-tuned models
"ada-finetuned-legacy": 0.0016,
"babbage-finetuned-legacy": 0.0024,
"curie-finetuned-legacy": 0.012,
"davinci-finetuned-legacy": 0.12,
}
def standardize_model_name(
model_name: str,
is_completion: bool = False,
) -> str:
"""
Standardize the model name to a format that can be used in the OpenAI API.
Args:
model_name: Model name to standardize.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Standardized model name.
"""
model_name = model_name.lower()
if ".ft-" in model_name:
model_name = model_name.split(".ft-")[0] + "-azure-finetuned"
if ":ft-" in model_name:
model_name = model_name.split(":")[0] + "-finetuned-legacy"
if "ft:" in model_name:
model_name = model_name.split(":")[1] + "-finetuned"
if is_completion and (
model_name.startswith("gpt-4")
or model_name.startswith("gpt-3.5")
or model_name.startswith("gpt-35")
or model_name.startswith("o1-")
or ("finetuned" in model_name and "legacy" not in model_name)
):
return model_name + "-completion"
else:
return model_name
def get_openai_token_cost_for_model(
model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
"""
Get the cost in USD for a given model and number of tokens.
Args:
model_name: Name of the model
num_tokens: Number of tokens.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Cost in USD.
"""
model_name = standardize_model_name(model_name, is_completion=is_completion)
if model_name not in MODEL_COST_PER_1K_TOKENS:
raise ValueError(
f"Unknown model: {model_name}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
)
return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
class OpenAICallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks OpenAI info."""
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
total_cost: float = 0.0
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
def __repr__(self) -> str:
return (
f"Tokens Used: {self.total_tokens}\n"
f"\tPrompt Tokens: {self.prompt_tokens}\n"
f"\tCompletion Tokens: {self.completion_tokens}\n"
f"Successful Requests: {self.successful_requests}\n"
f"Total Cost (USD): ${self.total_cost}"
)
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
pass
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Print out the token."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
response_metadata = message.response_metadata
else:
usage_metadata = None
response_metadata = None
except AttributeError:
usage_metadata = None
response_metadata = None
else:
usage_metadata = None
response_metadata = None
if usage_metadata:
token_usage = {"total_tokens": usage_metadata["total_tokens"]}
completion_tokens = usage_metadata["output_tokens"]
prompt_tokens = usage_metadata["input_tokens"]
if response_model_name := (response_metadata or {}).get("model_name"):
model_name = standardize_model_name(response_model_name)
elif response.llm_output is None:
model_name = ""
else:
model_name = standardize_model_name(
response.llm_output.get("model_name", "")
)
else:
if response.llm_output is None:
return None
if "token_usage" not in response.llm_output:
with self._lock:
self.successful_requests += 1
return None
# compute tokens and cost for this request
token_usage = response.llm_output["token_usage"]
completion_tokens = token_usage.get("completion_tokens", 0)
prompt_tokens = token_usage.get("prompt_tokens", 0)
model_name = standardize_model_name(
response.llm_output.get("model_name", "")
)
if model_name in MODEL_COST_PER_1K_TOKENS:
completion_cost = get_openai_token_cost_for_model(
model_name, completion_tokens, is_completion=True
)
prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
else:
completion_cost = 0
prompt_cost = 0
# update shared state behind lock
with self._lock:
self.total_cost += prompt_cost + completion_cost
self.total_tokens += token_usage.get("total_tokens", 0)
self.prompt_tokens += prompt_tokens
self.completion_tokens += completion_tokens
self.successful_requests += 1
def __copy__(self) -> "OpenAICallbackHandler":
"""Return a copy of the callback handler."""
return self
def __deepcopy__(self, memo: Any) -> "OpenAICallbackHandler":
"""Return a deep copy of the callback handler."""
return self
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/tracers/comet.py | from types import ModuleType, SimpleNamespace
from typing import TYPE_CHECKING, Any, Callable, Dict
from langchain_core.tracers import BaseTracer
from langchain_core.utils import guard_import
if TYPE_CHECKING:
from uuid import UUID
from comet_llm import Span
from comet_llm.chains.chain import Chain
from langchain_community.callbacks.tracers.schemas import Run
def _get_run_type(run: "Run") -> str:
if isinstance(run.run_type, str):
return run.run_type
elif hasattr(run.run_type, "value"):
return run.run_type.value
else:
return str(run.run_type)
def import_comet_llm_api() -> SimpleNamespace:
"""Import comet_llm api and raise an error if it is not installed."""
comet_llm = guard_import("comet_llm")
comet_llm_chains = guard_import("comet_llm.chains")
return SimpleNamespace(
chain=comet_llm_chains.chain,
span=comet_llm_chains.span,
chain_api=comet_llm_chains.api,
experiment_info=comet_llm.experiment_info,
flush=comet_llm.flush,
)
class CometTracer(BaseTracer):
"""Comet Tracer."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the Comet Tracer."""
super().__init__(**kwargs)
self._span_map: Dict["UUID", "Span"] = {}
"""Map from run id to span."""
self._chains_map: Dict["UUID", "Chain"] = {}
"""Map from run id to chain."""
self._initialize_comet_modules()
def _initialize_comet_modules(self) -> None:
comet_llm_api = import_comet_llm_api()
self._chain: ModuleType = comet_llm_api.chain
self._span: ModuleType = comet_llm_api.span
self._chain_api: ModuleType = comet_llm_api.chain_api
self._experiment_info: ModuleType = comet_llm_api.experiment_info
self._flush: Callable[[], None] = comet_llm_api.flush
def _persist_run(self, run: "Run") -> None:
run_dict: Dict[str, Any] = run.dict()
chain_ = self._chains_map[run.id]
chain_.set_outputs(outputs=run_dict["outputs"])
self._chain_api.log_chain(chain_)
def _process_start_trace(self, run: "Run") -> None:
run_dict: Dict[str, Any] = run.dict()
if not run.parent_run_id:
# This is the first run, which maps to a chain
metadata = run_dict["extra"].get("metadata", None)
chain_: "Chain" = self._chain.Chain(
inputs=run_dict["inputs"],
metadata=metadata,
experiment_info=self._experiment_info.get(),
)
self._chains_map[run.id] = chain_
else:
span: "Span" = self._span.Span(
inputs=run_dict["inputs"],
category=_get_run_type(run),
metadata=run_dict["extra"],
name=run.name,
)
span.__api__start__(self._chains_map[run.parent_run_id])
self._chains_map[run.id] = self._chains_map[run.parent_run_id]
self._span_map[run.id] = span
def _process_end_trace(self, run: "Run") -> None:
run_dict: Dict[str, Any] = run.dict()
if not run.parent_run_id:
pass
# Langchain will call _persist_run for us
else:
span = self._span_map[run.id]
span.set_outputs(outputs=run_dict["outputs"])
span.__api__end__()
def flush(self) -> None:
self._flush()
def _on_llm_start(self, run: "Run") -> None:
"""Process the LLM Run upon start."""
self._process_start_trace(run)
def _on_llm_end(self, run: "Run") -> None:
"""Process the LLM Run."""
self._process_end_trace(run)
def _on_llm_error(self, run: "Run") -> None:
"""Process the LLM Run upon error."""
self._process_end_trace(run)
def _on_chain_start(self, run: "Run") -> None:
"""Process the Chain Run upon start."""
self._process_start_trace(run)
def _on_chain_end(self, run: "Run") -> None:
"""Process the Chain Run."""
self._process_end_trace(run)
def _on_chain_error(self, run: "Run") -> None:
"""Process the Chain Run upon error."""
self._process_end_trace(run)
def _on_tool_start(self, run: "Run") -> None:
"""Process the Tool Run upon start."""
self._process_start_trace(run)
def _on_tool_end(self, run: "Run") -> None:
"""Process the Tool Run."""
self._process_end_trace(run)
def _on_tool_error(self, run: "Run") -> None:
"""Process the Tool Run upon error."""
self._process_end_trace(run)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/tracers/__init__.py | """Tracers that record execution of LangChain runs."""
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.langchain_v1 import LangChainTracerV1
from langchain_core.tracers.stdout import (
ConsoleCallbackHandler,
FunctionCallbackHandler,
)
from langchain_community.callbacks.tracers.wandb import WandbTracer
__all__ = [
"ConsoleCallbackHandler",
"FunctionCallbackHandler",
"LangChainTracer",
"LangChainTracerV1",
"WandbTracer",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/tracers/wandb.py | """A Tracer Implementation that records activity to Weights & Biases."""
from __future__ import annotations
import json
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
)
from langchain_core._api import warn_deprecated
from langchain_core.output_parsers.pydantic import PydanticBaseModel
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
if TYPE_CHECKING:
from wandb import Settings as WBSettings
from wandb.sdk.data_types.trace_tree import Trace
from wandb.sdk.lib.paths import StrPath
from wandb.wandb_run import Run as WBRun
PRINT_WARNINGS = True
def _serialize_io(run_io: Optional[dict]) -> dict:
"""Utility to serialize the input and output of a run to store in wandb.
Currently, supports serializing pydantic models and protobuf messages.
:param run_io: The inputs and outputs of the run.
:return: The serialized inputs and outputs.
"""
if not run_io:
return {}
from google.protobuf.json_format import MessageToJson
from google.protobuf.message import Message
serialized_inputs = {}
for key, value in run_io.items():
if isinstance(value, Message):
serialized_inputs[key] = MessageToJson(value)
elif isinstance(value, PydanticBaseModel):
serialized_inputs[key] = (
value.model_dump_json()
if hasattr(value, "model_dump_json")
else value.json()
)
elif key == "input_documents":
serialized_inputs.update(
{f"input_document_{i}": doc.json() for i, doc in enumerate(value)}
)
else:
serialized_inputs[key] = value
return serialized_inputs
def flatten_run(run: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Utility to flatten a nest run object into a list of runs.
:param run: The base run to flatten.
:return: The flattened list of runs.
"""
def flatten(child_runs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Utility to recursively flatten a list of child runs in a run.
:param child_runs: The list of child runs to flatten.
:return: The flattened list of runs.
"""
if child_runs is None:
return []
result = []
for item in child_runs:
child_runs = item.pop("child_runs", [])
result.append(item)
result.extend(flatten(child_runs))
return result
return flatten([run])
def truncate_run_iterative(
runs: List[Dict[str, Any]], keep_keys: Tuple[str, ...] = ()
) -> List[Dict[str, Any]]:
"""Utility to truncate a list of runs dictionaries to only keep the specified
keys in each run.
:param runs: The list of runs to truncate.
:param keep_keys: The keys to keep in each run.
:return: The truncated list of runs.
"""
def truncate_single(run: Dict[str, Any]) -> Dict[str, Any]:
"""Utility to truncate a single run dictionary to only keep the specified
keys.
:param run: The run dictionary to truncate.
:return: The truncated run dictionary
"""
new_dict = {}
for key in run:
if key in keep_keys:
new_dict[key] = run.get(key)
return new_dict
return list(map(truncate_single, runs))
def modify_serialized_iterative(
runs: List[Dict[str, Any]],
exact_keys: Tuple[str, ...] = (),
partial_keys: Tuple[str, ...] = (),
) -> List[Dict[str, Any]]:
"""Utility to modify the serialized field of a list of runs dictionaries.
removes any keys that match the exact_keys and any keys that contain any of the
partial_keys.
recursively moves the dictionaries under the kwargs key to the top level.
changes the "id" field to a string "_kind" field that tells WBTraceTree how to
visualize the run. promotes the "serialized" field to the top level.
:param runs: The list of runs to modify.
:param exact_keys: A tuple of keys to remove from the serialized field.
:param partial_keys: A tuple of partial keys to remove from the serialized
field.
:return: The modified list of runs.
"""
def remove_exact_and_partial_keys(obj: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively removes exact and partial keys from a dictionary.
:param obj: The dictionary to remove keys from.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
obj = {
k: v
for k, v in obj.items()
if k not in exact_keys
and not any(partial in k for partial in partial_keys)
}
for k, v in obj.items():
obj[k] = remove_exact_and_partial_keys(v)
elif isinstance(obj, list):
obj = [remove_exact_and_partial_keys(x) for x in obj]
return obj
def handle_id_and_kwargs(obj: Dict[str, Any], root: bool = False) -> Dict[str, Any]:
"""Recursively handles the id and kwargs fields of a dictionary.
changes the id field to a string "_kind" field that tells WBTraceTree how
to visualize the run. recursively moves the dictionaries under the kwargs
key to the top level.
:param obj: a run dictionary with id and kwargs fields.
:param root: whether this is the root dictionary or the serialized
dictionary.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
if "data" in obj and isinstance(obj["data"], dict):
obj = obj["data"]
if ("id" in obj or "name" in obj) and not root:
_kind = obj.get("id")
if not _kind:
_kind = [obj.get("name")]
if isinstance(_kind, list):
obj["_kind"] = _kind[-1]
obj.pop("id", None)
obj.pop("name", None)
if "kwargs" in obj:
kwargs = obj.pop("kwargs")
for k, v in kwargs.items():
obj[k] = v
for k, v in obj.items():
obj[k] = handle_id_and_kwargs(v)
elif isinstance(obj, list):
obj = [handle_id_and_kwargs(x) for x in obj]
return obj
def transform_serialized(serialized: Dict[str, Any]) -> Dict[str, Any]:
"""Transforms the serialized field of a run dictionary to be compatible
with WBTraceTree.
:param serialized: The serialized field of a run dictionary.
:return: The transformed serialized field.
"""
serialized = handle_id_and_kwargs(serialized, root=True)
serialized = remove_exact_and_partial_keys(serialized)
return serialized
def transform_run(run: Dict[str, Any]) -> Dict[str, Any]:
"""Transforms a run dictionary to be compatible with WBTraceTree.
:param run: The run dictionary to transform.
:return: The transformed run dictionary.
"""
transformed_dict = transform_serialized(run)
serialized = transformed_dict.pop("serialized")
for k, v in serialized.items():
transformed_dict[k] = v
_kind = transformed_dict.get("_kind", None)
name = transformed_dict.pop("name", None)
if not name:
name = _kind
output_dict = {
f"{name}": transformed_dict,
}
return output_dict
return list(map(transform_run, runs))
def build_tree(runs: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Builds a nested dictionary from a list of runs.
:param runs: The list of runs to build the tree from.
:return: The nested dictionary representing the langchain Run in a tree
structure compatible with WBTraceTree.
"""
id_to_data = {}
child_to_parent = {}
for entity in runs:
for key, data in entity.items():
id_val = data.pop("id", None)
parent_run_id = data.pop("parent_run_id", None)
id_to_data[id_val] = {key: data}
if parent_run_id:
child_to_parent[id_val] = parent_run_id
for child_id, parent_id in child_to_parent.items():
parent_dict = id_to_data[parent_id]
parent_dict[next(iter(parent_dict))][next(iter(id_to_data[child_id]))] = (
id_to_data[child_id][next(iter(id_to_data[child_id]))]
)
root_dict = next(
data for id_val, data in id_to_data.items() if id_val not in child_to_parent
)
return root_dict
class WandbRunArgs(TypedDict):
"""Arguments for the WandbTracer."""
job_type: Optional[str]
dir: Optional[StrPath]
config: Union[Dict, str, None]
project: Optional[str]
entity: Optional[str]
reinit: Optional[bool]
tags: Optional[Sequence]
group: Optional[str]
name: Optional[str]
notes: Optional[str]
magic: Optional[Union[dict, str, bool]]
config_exclude_keys: Optional[List[str]]
config_include_keys: Optional[List[str]]
anonymous: Optional[str]
mode: Optional[str]
allow_val_change: Optional[bool]
resume: Optional[Union[bool, str]]
force: Optional[bool]
tensorboard: Optional[bool]
sync_tensorboard: Optional[bool]
monitor_gym: Optional[bool]
save_code: Optional[bool]
id: Optional[str]
settings: Union[WBSettings, Dict[str, Any], None]
class WandbTracer(BaseTracer):
"""Callback Handler that logs to Weights and Biases.
This handler will log the model architecture and run traces to Weights and Biases.
This will ensure that all LangChain activity is logged to W&B.
"""
_run: Optional[WBRun] = None
_run_args: Optional[WandbRunArgs] = None
def __init__(
self,
run_args: Optional[WandbRunArgs] = None,
io_serializer: Callable = _serialize_io,
**kwargs: Any,
) -> None:
"""Initializes the WandbTracer.
Parameters:
run_args: (dict, optional) Arguments to pass to `wandb.init()`. If not
provided, `wandb.init()` will be called with no arguments. Please
refer to the `wandb.init` for more details.
io_serializer: callable A function that serializes the input and outputs
of a run to store in wandb. Defaults to "_serialize_io"
To use W&B to monitor all LangChain activity, add this tracer like any other
LangChain callback:
```
from wandb.integration.langchain import WandbTracer
tracer = WandbTracer()
chain = LLMChain(llm, callbacks=[tracer])
# ...end of notebook / script:
tracer.finish()
```
"""
super().__init__(**kwargs)
try:
import wandb
from wandb.sdk.data_types import trace_tree
except ImportError as e:
raise ImportError(
"Could not import wandb python package."
"Please install it with `pip install -U wandb`."
) from e
self._wandb = wandb
self._trace_tree = trace_tree
self._run_args = run_args
self._ensure_run(should_print_url=(wandb.run is None))
self._io_serializer = io_serializer
warn_deprecated(
"0.3.8",
pending=False,
message=(
"Please use the `WeaveTracer` from the `weave` package instead of this."
"The `WeaveTracer` is a more flexible and powerful tool for logging "
"and tracing your LangChain callables."
"Find more information at https://weave-docs.wandb.ai/guides/integrations/langchain"
),
alternative=(
"Please instantiate the WeaveTracer from "
"`weave.integrations.langchain import WeaveTracer` ."
"For autologging simply use `weave.init()` and log all traces "
"from your LangChain callables."
),
)
def finish(self) -> None:
"""Waits for all asynchronous processes to finish and data to upload.
Proxy for `wandb.finish()`.
"""
self._wandb.finish()
def _ensure_run(self, should_print_url: bool = False) -> None:
"""Ensures an active W&B run exists.
If not, will start a new run with the provided run_args.
"""
if self._wandb.run is None:
run_args: Dict = {**(self._run_args or {})}
if "settings" not in run_args:
run_args["settings"] = {"silent": True}
self._wandb.init(**run_args)
if self._wandb.run is not None:
if should_print_url:
run_url = self._wandb.run.settings.run_url
self._wandb.termlog(
f"Streaming LangChain activity to W&B at {run_url}\n"
"`WandbTracer` is currently in beta.\n"
"Please report any issues to "
"https://github.com/wandb/wandb/issues with the tag "
"`langchain`."
)
self._wandb.run._label(repo="langchain")
def process_model_dict(self, run: Run) -> Optional[Dict[str, Any]]:
"""Utility to process a run for wandb model_dict serialization.
:param run: The run to process.
:return: The convert model_dict to pass to WBTraceTree.
"""
try:
data = json.loads(run.json())
processed = flatten_run(data)
keep_keys = (
"id",
"name",
"serialized",
"parent_run_id",
)
processed = truncate_run_iterative(processed, keep_keys=keep_keys)
exact_keys, partial_keys = (
("lc", "type", "graph"),
(
"api_key",
"input",
"output",
),
)
processed = modify_serialized_iterative(
processed, exact_keys=exact_keys, partial_keys=partial_keys
)
output = build_tree(processed)
return output
except Exception as e:
if PRINT_WARNINGS:
self._wandb.termerror(f"WARNING: Failed to serialize model: {e}")
return None
def _log_trace_from_run(self, run: Run) -> None:
"""Logs a LangChain Run to W*B as a W&B Trace."""
self._ensure_run()
def create_trace(
run: "Run", parent: Optional["Trace"] = None
) -> Optional["Trace"]:
"""
Create a trace for a given run and its child runs.
Args:
run (Run): The run for which to create a trace.
parent (Optional[Trace]): The parent trace.
If provided, the created trace is added as a child to the parent trace.
Returns:
Optional[Trace]: The created trace.
If an error occurs during the creation of the trace, None is returned.
Raises:
Exception: If an error occurs during the creation of the trace,
no exception is raised and a warning is printed.
"""
def get_metadata_dict(r: "Run") -> Dict[str, Any]:
"""
Extract metadata from a given run.
This function extracts metadata from a given run
and returns it as a dictionary.
Args:
r (Run): The run from which to extract metadata.
Returns:
Dict[str, Any]: A dictionary containing the extracted metadata.
"""
run_dict = json.loads(r.json())
metadata_dict = run_dict.get("metadata", {})
metadata_dict["run_id"] = run_dict.get("id")
metadata_dict["parent_run_id"] = run_dict.get("parent_run_id")
metadata_dict["tags"] = run_dict.get("tags")
metadata_dict["execution_order"] = run_dict.get(
"dotted_order", ""
).count(".")
return metadata_dict
try:
if run.run_type in ["llm", "tool"]:
run_type = run.run_type
elif run.run_type == "chain":
run_type = "agent" if "agent" in run.name.lower() else "chain"
else:
run_type = None
metadata = get_metadata_dict(run)
trace_tree = self._trace_tree.Trace(
name=run.name,
kind=run_type,
status_code="error" if run.error else "success",
start_time_ms=int(run.start_time.timestamp() * 1000)
if run.start_time is not None
else None,
end_time_ms=int(run.end_time.timestamp() * 1000)
if run.end_time is not None
else None,
metadata=metadata,
inputs=self._io_serializer(run.inputs),
outputs=self._io_serializer(run.outputs),
)
# If the run has child runs, recursively create traces for them
for child_run in run.child_runs:
create_trace(child_run, trace_tree)
if parent is None:
return trace_tree
else:
parent.add_child(trace_tree)
return parent
except Exception as e:
if PRINT_WARNINGS:
self._wandb.termwarn(
f"WARNING: Failed to serialize trace for run due to: {e}"
)
return None
run_trace = create_trace(run)
model_dict = self.process_model_dict(run)
if model_dict is not None and run_trace is not None:
run_trace._model_dict = model_dict
if self._wandb.run is not None and run_trace is not None:
run_trace.log("langchain_trace")
def _persist_run(self, run: "Run") -> None:
"""Persist a run."""
self._log_trace_from_run(run)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/streamlit/mutable_expander.py | from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
from streamlit.type_util import SupportsStr
class ChildType(Enum):
"""Enumerator of the child type."""
MARKDOWN = "MARKDOWN"
EXCEPTION = "EXCEPTION"
class ChildRecord(NamedTuple):
"""Child record as a NamedTuple."""
type: ChildType
kwargs: Dict[str, Any]
dg: DeltaGenerator
class MutableExpander:
"""Streamlit expander that can be renamed and dynamically expanded/collapsed."""
def __init__(self, parent_container: DeltaGenerator, label: str, expanded: bool):
"""Create a new MutableExpander.
Parameters
----------
parent_container
The `st.container` that the expander will be created inside.
The expander transparently deletes and recreates its underlying
`st.expander` instance when its label changes, and it uses
`parent_container` to ensure it recreates this underlying expander in the
same location onscreen.
label
The expander's initial label.
expanded
The expander's initial `expanded` value.
"""
self._label = label
self._expanded = expanded
self._parent_cursor = parent_container.empty()
self._container = self._parent_cursor.expander(label, expanded)
self._child_records: List[ChildRecord] = []
@property
def label(self) -> str:
"""Expander's label string."""
return self._label
@property
def expanded(self) -> bool:
"""True if the expander was created with `expanded=True`."""
return self._expanded
def clear(self) -> None:
"""Remove the container and its contents entirely. A cleared container can't
be reused.
"""
self._container = self._parent_cursor.empty()
self._child_records.clear()
def append_copy(self, other: MutableExpander) -> None:
"""Append a copy of another MutableExpander's children to this
MutableExpander.
"""
other_records = other._child_records.copy()
for record in other_records:
self._create_child(record.type, record.kwargs)
def update(
self, *, new_label: Optional[str] = None, new_expanded: Optional[bool] = None
) -> None:
"""Change the expander's label and expanded state"""
if new_label is None:
new_label = self._label
if new_expanded is None:
new_expanded = self._expanded
if self._label == new_label and self._expanded == new_expanded:
# No change!
return
self._label = new_label
self._expanded = new_expanded
self._container = self._parent_cursor.expander(new_label, new_expanded)
prev_records = self._child_records
self._child_records = []
# Replay all children into the new container
for record in prev_records:
self._create_child(record.type, record.kwargs)
def markdown(
self,
body: SupportsStr,
unsafe_allow_html: bool = False,
*,
help: Optional[str] = None,
index: Optional[int] = None,
) -> int:
"""Add a Markdown element to the container and return its index."""
kwargs = {"body": body, "unsafe_allow_html": unsafe_allow_html, "help": help}
new_dg = self._get_dg(index).markdown(**kwargs)
record = ChildRecord(ChildType.MARKDOWN, kwargs, new_dg)
return self._add_record(record, index)
def exception(
self, exception: BaseException, *, index: Optional[int] = None
) -> int:
"""Add an Exception element to the container and return its index."""
kwargs = {"exception": exception}
new_dg = self._get_dg(index).exception(**kwargs)
record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg)
return self._add_record(record, index)
def _create_child(self, type: ChildType, kwargs: Dict[str, Any]) -> None:
"""Create a new child with the given params"""
if type == ChildType.MARKDOWN:
self.markdown(**kwargs)
elif type == ChildType.EXCEPTION:
self.exception(**kwargs)
else:
raise RuntimeError(f"Unexpected child type {type}")
def _add_record(self, record: ChildRecord, index: Optional[int]) -> int:
"""Add a ChildRecord to self._children. If `index` is specified, replace
the existing record at that index. Otherwise, append the record to the
end of the list.
Return the index of the added record.
"""
if index is not None:
# Replace existing child
self._child_records[index] = record
return index
# Append new child
self._child_records.append(record)
return len(self._child_records) - 1
def _get_dg(self, index: Optional[int]) -> DeltaGenerator:
if index is not None:
# Existing index: reuse child's DeltaGenerator
assert 0 <= index < len(self._child_records), f"Bad index: {index}"
return self._child_records[index].dg
# No index: use container's DeltaGenerator
return self._container
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py | """Callback Handler that prints to streamlit."""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_community.callbacks.streamlit.mutable_expander import MutableExpander
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
def _convert_newlines(text: str) -> str:
"""Convert newline characters to markdown newline sequences
(space, space, newline).
"""
return text.replace("\n", " \n")
CHECKMARK_EMOJI = "✅"
THINKING_EMOJI = ":thinking_face:"
HISTORY_EMOJI = ":books:"
EXCEPTION_EMOJI = "⚠️"
class LLMThoughtState(Enum):
"""Enumerator of the LLMThought state."""
# The LLM is thinking about what to do next. We don't know which tool we'll run.
THINKING = "THINKING"
# The LLM has decided to run a tool. We don't have results from the tool yet.
RUNNING_TOOL = "RUNNING_TOOL"
# We have results from the tool.
COMPLETE = "COMPLETE"
class ToolRecord(NamedTuple):
"""Tool record as a NamedTuple."""
name: str
input_str: str
class LLMThoughtLabeler:
"""
Generates markdown labels for LLMThought containers. Pass a custom
subclass of this to StreamlitCallbackHandler to override its default
labeling logic.
"""
@staticmethod
def get_initial_label() -> str:
"""Return the markdown label for a new LLMThought that doesn't have
an associated tool yet.
"""
return f"{THINKING_EMOJI} **Thinking...**"
@staticmethod
def get_tool_label(tool: ToolRecord, is_complete: bool) -> str:
"""Return the label for an LLMThought that has an associated
tool.
Parameters
----------
tool
The tool's ToolRecord
is_complete
True if the thought is complete; False if the thought
is still receiving input.
Returns
-------
The markdown label for the thought's container.
"""
input = tool.input_str
name = tool.name
emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI
if name == "_Exception":
emoji = EXCEPTION_EMOJI
name = "Parsing error"
idx = min([60, len(input)])
input = input[0:idx]
if len(tool.input_str) > idx:
input = input + "..."
input = input.replace("\n", " ")
label = f"{emoji} **{name}:** {input}"
return label
@staticmethod
def get_history_label() -> str:
"""Return a markdown label for the special 'history' container
that contains overflow thoughts.
"""
return f"{HISTORY_EMOJI} **History**"
@staticmethod
def get_final_agent_thought_label() -> str:
"""Return the markdown label for the agent's final thought -
the "Now I have the answer" thought, that doesn't involve
a tool.
"""
return f"{CHECKMARK_EMOJI} **Complete!**"
class LLMThought:
"""A thought in the LLM's thought stream."""
def __init__(
self,
parent_container: DeltaGenerator,
labeler: LLMThoughtLabeler,
expanded: bool,
collapse_on_complete: bool,
):
"""Initialize the LLMThought.
Args:
parent_container: The container we're writing into.
labeler: The labeler to use for this thought.
expanded: Whether the thought should be expanded by default.
collapse_on_complete: Whether the thought should be collapsed.
"""
self._container = MutableExpander(
parent_container=parent_container,
label=labeler.get_initial_label(),
expanded=expanded,
)
self._state = LLMThoughtState.THINKING
self._llm_token_stream = ""
self._llm_token_writer_idx: Optional[int] = None
self._last_tool: Optional[ToolRecord] = None
self._collapse_on_complete = collapse_on_complete
self._labeler = labeler
@property
def container(self) -> MutableExpander:
"""The container we're writing into."""
return self._container
@property
def last_tool(self) -> Optional[ToolRecord]:
"""The last tool executed by this thought"""
return self._last_tool
def _reset_llm_token_stream(self) -> None:
self._llm_token_stream = ""
self._llm_token_writer_idx = None
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str]) -> None:
self._reset_llm_token_stream()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# This is only called when the LLM is initialized with `streaming=True`
self._llm_token_stream += _convert_newlines(token)
self._llm_token_writer_idx = self._container.markdown(
self._llm_token_stream, index=self._llm_token_writer_idx
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
# `response` is the concatenation of all the tokens received by the LLM.
# If we're receiving streaming tokens from `on_llm_new_token`, this response
# data is redundant
self._reset_llm_token_stream()
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
self._container.markdown("**LLM encountered an error...**")
self._container.exception(error)
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
# Called with the name of the tool we're about to run (in `serialized[name]`),
# and its input. We change our container's label to be the tool name.
self._state = LLMThoughtState.RUNNING_TOOL
tool_name = serialized["name"]
self._last_tool = ToolRecord(name=tool_name, input_str=input_str)
self._container.update(
new_label=self._labeler.get_tool_label(self._last_tool, is_complete=False)
)
def on_tool_end(
self,
output: Any,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
self._container.markdown(f"**{str(output)}**")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
self._container.markdown("**Tool encountered an error...**")
self._container.exception(error)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
# Called when we're about to kick off a new tool. The `action` data
# tells us the tool we're about to use, and the input we'll give it.
# We don't output anything here, because we'll receive this same data
# when `on_tool_start` is called immediately after.
pass
def complete(self, final_label: Optional[str] = None) -> None:
"""Finish the thought."""
if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
assert (
self._last_tool is not None
), "_last_tool should never be null when _state == RUNNING_TOOL"
final_label = self._labeler.get_tool_label(
self._last_tool, is_complete=True
)
self._state = LLMThoughtState.COMPLETE
if self._collapse_on_complete:
self._container.update(new_label=final_label, new_expanded=False)
else:
self._container.update(new_label=final_label)
def clear(self) -> None:
"""Remove the thought from the screen. A cleared thought can't be reused."""
self._container.clear()
class StreamlitCallbackHandler(BaseCallbackHandler):
"""Callback handler that writes to a Streamlit app."""
def __init__(
self,
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
):
"""Create a StreamlitCallbackHandler instance.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When
this threshold is reached, a new thought will cause the oldest thoughts to
be collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether
that expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
"""
self._parent_container = parent_container
self._history_parent = parent_container.container()
self._history_container: Optional[MutableExpander] = None
self._current_thought: Optional[LLMThought] = None
self._completed_thoughts: List[LLMThought] = []
self._max_thought_containers = max(max_thought_containers, 1)
self._expand_new_thoughts = expand_new_thoughts
self._collapse_completed_thoughts = collapse_completed_thoughts
self._thought_labeler = thought_labeler or LLMThoughtLabeler()
def _require_current_thought(self) -> LLMThought:
"""Return our current LLMThought. Raise an error if we have no current
thought.
"""
if self._current_thought is None:
raise RuntimeError("Current LLMThought is unexpectedly None!")
return self._current_thought
def _get_last_completed_thought(self) -> Optional[LLMThought]:
"""Return our most recent completed LLMThought, or None if we don't have one."""
if len(self._completed_thoughts) > 0:
return self._completed_thoughts[len(self._completed_thoughts) - 1]
return None
@property
def _num_thought_containers(self) -> int:
"""The number of 'thought containers' we're currently showing: the
number of completed thought containers, the history container (if it exists),
and the current thought container (if it exists).
"""
count = len(self._completed_thoughts)
if self._history_container is not None:
count += 1
if self._current_thought is not None:
count += 1
return count
def _complete_current_thought(self, final_label: Optional[str] = None) -> None:
"""Complete the current thought, optionally assigning it a new label.
Add it to our _completed_thoughts list.
"""
thought = self._require_current_thought()
thought.complete(final_label)
self._completed_thoughts.append(thought)
self._current_thought = None
def _prune_old_thought_containers(self) -> None:
"""If we have too many thoughts onscreen, move older thoughts to the
'history container.'
"""
while (
self._num_thought_containers > self._max_thought_containers
and len(self._completed_thoughts) > 0
):
# Create our history container if it doesn't exist, and if
# max_thought_containers is > 1. (if max_thought_containers is 1, we don't
# have room to show history.)
if self._history_container is None and self._max_thought_containers > 1:
self._history_container = MutableExpander(
self._history_parent,
label=self._thought_labeler.get_history_label(),
expanded=False,
)
oldest_thought = self._completed_thoughts.pop(0)
if self._history_container is not None:
self._history_container.markdown(oldest_thought.container.label)
self._history_container.append_copy(oldest_thought.container)
oldest_thought.clear()
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
if self._current_thought is None:
self._current_thought = LLMThought(
parent_container=self._parent_container,
expanded=self._expand_new_thoughts,
collapse_on_complete=self._collapse_completed_thoughts,
labeler=self._thought_labeler,
)
self._current_thought.on_llm_start(serialized, prompts)
# We don't prune_old_thought_containers here, because our container won't
# be visible until it has a child.
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self._require_current_thought().on_llm_new_token(token, **kwargs)
self._prune_old_thought_containers()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self._require_current_thought().on_llm_end(response, **kwargs)
self._prune_old_thought_containers()
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
self._require_current_thought().on_llm_error(error, **kwargs)
self._prune_old_thought_containers()
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
self._require_current_thought().on_tool_start(serialized, input_str, **kwargs)
self._prune_old_thought_containers()
def on_tool_end(
self,
output: Any,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
output = str(output)
self._require_current_thought().on_tool_end(
output, color, observation_prefix, llm_prefix, **kwargs
)
self._complete_current_thought()
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
self._require_current_thought().on_tool_error(error, **kwargs)
self._prune_old_thought_containers()
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
pass
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
self._require_current_thought().on_agent_action(action, color, **kwargs)
self._prune_old_thought_containers()
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
if self._current_thought is not None:
self._current_thought.complete(
self._thought_labeler.get_final_agent_thought_label()
)
self._current_thought = None
|
0 | lc_public_repos/langchain/libs/community/langchain_community/callbacks | lc_public_repos/langchain/libs/community/langchain_community/callbacks/streamlit/__init__.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from langchain_core.callbacks import BaseCallbackHandler
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
LLMThoughtLabeler as LLMThoughtLabeler,
)
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator
def StreamlitCallbackHandler(
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
# If we're using a version of Streamlit that implements StreamlitCallbackHandler,
# delegate to it instead of using our built-in handler. The official handler is
# guaranteed to support the same set of kwargs.
try:
from streamlit.external.langchain import (
StreamlitCallbackHandler as OfficialStreamlitCallbackHandler,
)
return OfficialStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
except ImportError:
return _InternalStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/wikipedia.py | """Wrapper around wikipedia API."""
from typing import Union
from langchain_core.documents import Document
from langchain_community.docstore.base import Docstore
class Wikipedia(Docstore):
"""Wikipedia API."""
def __init__(self) -> None:
"""Check that wikipedia package is installed."""
try:
import wikipedia # noqa: F401
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
def search(self, search: str) -> Union[str, Document]:
"""Try to search for wiki page.
If page exists, return the page summary, and a PageWithLookups object.
If page does not exist, return similar entries.
Args:
search: search string.
Returns: a Document object or error message.
"""
import wikipedia
try:
page_content = wikipedia.page(search).content
url = wikipedia.page(search).url
result: Union[str, Document] = Document(
page_content=page_content, metadata={"page": url}
)
except wikipedia.PageError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
except wikipedia.DisambiguationError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
return result
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/in_memory.py | """Simple in memory docstore in the form of a dict."""
from typing import Dict, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.docstore.base import AddableMixin, Docstore
class InMemoryDocstore(Docstore, AddableMixin):
"""Simple in memory docstore in the form of a dict."""
def __init__(self, _dict: Optional[Dict[str, Document]] = None):
"""Initialize with dict."""
self._dict = _dict if _dict is not None else {}
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
self._dict = {**self._dict, **texts}
def delete(self, ids: List) -> None:
"""Deleting IDs from in memory dictionary."""
overlapping = set(ids).intersection(self._dict)
if not overlapping:
raise ValueError(f"Tried to delete ids that does not exist: {ids}")
for _id in ids:
self._dict.pop(_id)
def search(self, search: str) -> Union[str, Document]:
"""Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f"ID {search} not found."
else:
return self._dict[search]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/base.py | """Interface to access to place that stores documents."""
from abc import ABC, abstractmethod
from typing import Dict, List, Union
from langchain_core.documents import Document
class Docstore(ABC):
"""Interface to access to place that stores documents."""
@abstractmethod
def search(self, search: str) -> Union[str, Document]:
"""Search for document.
If page exists, return the page summary, and a Document object.
If page does not exist, return similar entries.
"""
def delete(self, ids: List) -> None:
"""Deleting IDs from in memory dictionary."""
raise NotImplementedError
class AddableMixin(ABC):
"""Mixin class that supports adding texts."""
@abstractmethod
def add(self, texts: Dict[str, Document]) -> None:
"""Add more documents."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/arbitrary_fn.py | from typing import Callable, Union
from langchain_core.documents import Document
from langchain_community.docstore.base import Docstore
class DocstoreFn(Docstore):
"""Docstore via arbitrary lookup function.
This is useful when:
* it's expensive to construct an InMemoryDocstore/dict
* you retrieve documents from remote sources
* you just want to reuse existing objects
"""
def __init__(
self,
lookup_fn: Callable[[str], Union[Document, str]],
):
self._lookup_fn = lookup_fn
def search(self, search: str) -> Document:
"""Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
"""
r = self._lookup_fn(search)
if isinstance(r, str):
# NOTE: assume the search string is the source ID
return Document(page_content=r, metadata={"source": search})
elif isinstance(r, Document):
return r
raise ValueError(f"Unexpected type of document {type(r)}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/docstore/document.py | from langchain_core.documents import Document
__all__ = ["Document"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.