sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/chunk_strategy_type.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Chunk strategy type enumeration."""
from enum import StrEnum
class ChunkerType(StrEnum):
"""ChunkerType class definition."""
Tokens = "tokens"
Sentence = "sentence"
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/chunk_strategy_type.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/chunker.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing the 'Chunker' class."""
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Any
from graphrag_chunking.text_chunk import TextChunk
class Chunker(ABC):
    """Interface implemented by all document chunkers."""

    @abstractmethod
    def __init__(self, **kwargs: Any) -> None:
        """Initialize the chunker from keyword arguments."""

    @abstractmethod
    def chunk(
        self, text: str, transform: Callable[[str], str] | None = None
    ) -> list[TextChunk]:
        """Split `text` into chunks, optionally transforming each chunk's text."""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/chunker.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/chunker_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'ChunkerFactory', 'register_chunker', and 'create_chunker'."""
from collections.abc import Callable
from graphrag_common.factory.factory import Factory, ServiceScope
from graphrag_chunking.chunk_strategy_type import ChunkerType
from graphrag_chunking.chunker import Chunker
from graphrag_chunking.chunking_config import ChunkingConfig
class ChunkerFactory(Factory[Chunker]):
    """Per-class singleton factory that builds Chunker instances by strategy name."""


# Shared module-level factory instance.
chunker_factory = ChunkerFactory()
def register_chunker(
    chunker_type: str,
    chunker_initializer: Callable[..., Chunker],
    scope: ServiceScope = "transient",
) -> None:
    """Register a custom chunker implementation with the shared factory.

    Args
    ----
    - chunker_type: str
        The chunker id to register under.
    - chunker_initializer: Callable[..., Chunker]
        Callable that constructs the chunker.
    - scope: ServiceScope (default: "transient")
        Service lifetime for the registered chunker.
    """
    chunker_factory.register(chunker_type, chunker_initializer, scope)
def create_chunker(
config: ChunkingConfig,
encode: Callable[[str], list[int]] | None = None,
decode: Callable[[list[int]], str] | None = None,
) -> Chunker:
"""Create a chunker implementation based on the given configuration.
Args
----
- config: ChunkingConfig
The chunker configuration to use.
Returns
-------
Chunker
The created chunker implementation.
"""
config_model = config.model_dump()
if encode is not None:
config_model["encode"] = encode
if decode is not None:
config_model["decode"] = decode
chunker_strategy = config.type
if chunker_strategy not in chunker_factory:
match chunker_strategy:
case ChunkerType.Tokens:
from graphrag_chunking.token_chunker import TokenChunker
register_chunker(ChunkerType.Tokens, TokenChunker)
case ChunkerType.Sentence:
from graphrag_chunking.sentence_chunker import SentenceChunker
register_chunker(ChunkerType.Sentence, SentenceChunker)
case _:
msg = f"ChunkingConfig.strategy '{chunker_strategy}' is not registered in the ChunkerFactory. Registered types: {', '.join(chunker_factory.keys())}."
raise ValueError(msg)
return chunker_factory.create(chunker_strategy, init_args=config_model)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/chunker_factory.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/chunking_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Parameterization settings for the default configuration."""
from pydantic import BaseModel, ConfigDict, Field
from graphrag_chunking.chunk_strategy_type import ChunkerType
class ChunkingConfig(BaseModel):
    """Configuration section for chunking."""

    # Extra keys are preserved so custom chunker implementations can receive
    # additional constructor arguments through the config.
    model_config = ConfigDict(extra="allow")

    type: str = Field(
        default=ChunkerType.Tokens,
        description="The chunking type to use.",
    )
    encoding_model: str | None = Field(
        default=None,
        description="The encoding model to use.",
    )
    size: int = Field(
        default=1200,
        description="The chunk size to use.",
    )
    overlap: int = Field(
        default=100,
        description="The chunk overlap to use.",
    )
    prepend_metadata: list[str] | None = Field(
        default=None,
        description="Metadata fields from the source document to prepend on each chunk.",
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/chunking_config.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/create_chunk_results.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'create_chunk_results' function."""
from collections.abc import Callable
from graphrag_chunking.text_chunk import TextChunk
def create_chunk_results(
    chunks: list[str],
    transform: Callable[[str], str] | None = None,
    encode: Callable[[str], list[int]] | None = None,
) -> list[TextChunk]:
    """Build TextChunk results from a list of raw chunk strings.

    Index assignments are 0-based and assume the chunks were not stripped
    relative to the source text (offsets are cumulative lengths).
    """
    results: list[TextChunk] = []
    cursor = 0
    for position, raw in enumerate(chunks):
        # Inclusive end offset of this chunk within the source text.
        last = cursor + len(raw) - 1
        chunk_result = TextChunk(
            original=raw,
            text=transform(raw) if transform else raw,
            index=position,
            start_char=cursor,
            end_char=last,
        )
        if encode:
            chunk_result.token_count = len(encode(chunk_result.text))
        results.append(chunk_result)
        # The next chunk starts immediately after this one.
        cursor = last + 1
    return results
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/create_chunk_results.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/sentence_chunker.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'SentenceChunker' class."""
from collections.abc import Callable
from typing import Any
import nltk
from graphrag_chunking.bootstrap_nltk import bootstrap
from graphrag_chunking.chunker import Chunker
from graphrag_chunking.create_chunk_results import create_chunk_results
from graphrag_chunking.text_chunk import TextChunk
class SentenceChunker(Chunker):
    """A chunker that splits text into sentence-based chunks."""

    def __init__(
        self, encode: Callable[[str], list[int]] | None = None, **kwargs: Any
    ) -> None:
        """Create a sentence chunker instance, bootstrapping nltk data."""
        self._encode = encode
        bootstrap()

    def chunk(
        self, text: str, transform: Callable[[str], str] | None = None
    ) -> list[TextChunk]:
        """Chunk the text into sentence-based chunks."""
        sentences = nltk.sent_tokenize(text.strip())
        results = create_chunk_results(
            sentences, transform=transform, encode=self._encode
        )
        # nltk's sentence tokenizer drops inter-sentence whitespace, so the
        # cumulative offsets can lag behind the true positions in the source;
        # realign each chunk by locating its text in the original string.
        for position, chunk in enumerate(results):
            shift = text.find(chunk.text, chunk.start_char) - chunk.start_char
            if shift > 0:
                chunk.start_char += shift
                chunk.end_char += shift
                # Carry the shift forward so the next chunk's search start
                # does not fall too far behind its actual position.
                if position < len(results) - 1:
                    results[position + 1].start_char += shift
                    results[position + 1].end_char += shift
        return results
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/sentence_chunker.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/text_chunk.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""The TextChunk dataclass."""
from dataclasses import dataclass
@dataclass
class TextChunk:
"""Result of chunking a document."""
original: str
"""Raw original text chunk before any transformation."""
text: str
"""The final text content of this chunk."""
index: int
"""Zero-based index of this chunk within the source document."""
start_char: int
"""Character index where the raw chunk text begins in the source document."""
end_char: int
"""Character index where the raw chunk text ends in the source document."""
token_count: int | None = None
"""Number of tokens in the final chunk text, if computed."""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/text_chunk.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/token_chunker.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'TokenChunker' class."""
from collections.abc import Callable
from typing import Any
from graphrag_chunking.chunker import Chunker
from graphrag_chunking.create_chunk_results import create_chunk_results
from graphrag_chunking.text_chunk import TextChunk
class TokenChunker(Chunker):
    """A chunker that splits text into token-based chunks."""

    def __init__(
        self,
        size: int,
        overlap: int,
        encode: Callable[[str], list[int]],
        decode: Callable[[list[int]], str],
        **kwargs: Any,
    ) -> None:
        """Create a token chunker instance.

        Args
        ----
        - size: maximum number of tokens per chunk.
        - overlap: tokens shared between consecutive chunks.
        - encode/decode: tokenizer round-trip functions.
        """
        self._size = size
        self._overlap = overlap
        self._encode = encode
        self._decode = decode

    def chunk(
        self, text: str, transform: Callable[[str], str] | None = None
    ) -> list[TextChunk]:
        """Chunk the text into token-based chunks."""
        pieces = split_text_on_tokens(
            text,
            chunk_size=self._size,
            chunk_overlap=self._overlap,
            encode=self._encode,
            decode=self._decode,
        )
        return create_chunk_results(pieces, transform=transform, encode=self._encode)
def split_text_on_tokens(
    text: str,
    chunk_size: int,
    chunk_overlap: int,
    encode: Callable[[str], list[int]],
    decode: Callable[[list[int]], str],
) -> list[str]:
    """Split a single text and return chunks using the tokenizer.

    Args
    ----
    - text: the text to split.
    - chunk_size: maximum number of tokens per chunk (must exceed chunk_overlap).
    - chunk_overlap: tokens repeated at the start of each subsequent chunk.
    - encode/decode: tokenizer round-trip functions.

    Raises
    ------
    ValueError
        If chunk_overlap >= chunk_size: the window would never advance,
        causing an infinite loop in the previous implementation.
    """
    if chunk_overlap >= chunk_size:
        msg = f"chunk_overlap ({chunk_overlap}) must be smaller than chunk_size ({chunk_size})."
        raise ValueError(msg)
    result: list[str] = []
    input_tokens = encode(text)
    start_idx = 0
    cur_idx = min(start_idx + chunk_size, len(input_tokens))
    chunk_tokens = input_tokens[start_idx:cur_idx]
    while start_idx < len(input_tokens):
        result.append(decode(list(chunk_tokens)))
        if cur_idx == len(input_tokens):
            break
        # Advance the window, keeping `chunk_overlap` tokens of context.
        start_idx += chunk_size - chunk_overlap
        cur_idx = min(start_idx + chunk_size, len(input_tokens))
        chunk_tokens = input_tokens[start_idx:cur_idx]
    return result
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/token_chunker.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-chunking/graphrag_chunking/transformers.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A collection of useful built-in transformers you can use for chunking."""
from collections.abc import Callable
from typing import Any
def add_metadata(
    metadata: dict[str, Any],
    delimiter: str = ": ",
    line_delimiter: str = "\n",
    append: bool = False,
) -> Callable[[str], str]:
    """Build a transformer that adds metadata to text, prepending by default.

    The metadata dict is rendered as one key/value pair per line.
    """

    def transformer(text: str) -> str:
        rows = [f"{key}{delimiter}{value}" for key, value in metadata.items()]
        block = line_delimiter.join(rows) + line_delimiter
        # Prepend by default; append when requested.
        if append:
            return text + block
        return block + text

    return transformer
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-chunking/graphrag_chunking/transformers.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-common/graphrag_common/config/load_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Load configuration."""
import json
import os
from collections.abc import Callable
from pathlib import Path
from string import Template
from typing import Any, TypeVar
import yaml
from dotenv import load_dotenv
T = TypeVar("T", covariant=True)
_default_config_files = ["settings.yaml", "settings.yml", "settings.json"]
class ConfigParsingError(ValueError):
    """Raised when a configuration file cannot be parsed or merged."""

    def __init__(self, msg: str) -> None:
        """Initialize the error with a human-readable message."""
        super().__init__(msg)
def _get_config_file_path(config_dir_or_file: Path) -> Path:
"""Resolve the config path from the given directory or file."""
config_dir_or_file = Path(config_dir_or_file)
if config_dir_or_file.is_file():
return config_dir_or_file
if not config_dir_or_file.is_dir():
msg = f"Invalid config path: {config_dir_or_file} is not a directory"
raise FileNotFoundError(msg)
for file in _default_config_files:
if (config_dir_or_file / file).is_file():
return config_dir_or_file / file
msg = f"No 'settings.[yaml|yml|json]' config file found in directory: {config_dir_or_file}"
raise FileNotFoundError(msg)
def _load_dotenv(env_file_path: Path, required: bool) -> None:
"""Load the .env file if it exists."""
if not env_file_path.is_file():
if not required:
return
msg = f"dot_env_path not found: {env_file_path}"
raise FileNotFoundError(msg)
load_dotenv(env_file_path)
def _parse_json(data: str) -> dict[str, Any]:
"""Parse JSON data."""
return json.loads(data)
def _parse_yaml(data: str) -> dict[str, Any]:
"""Parse YAML data."""
return yaml.safe_load(data)
def _get_parser_for_file(file_path: str | Path) -> Callable[[str], dict[str, Any]]:
"""Get the parser for the given file path."""
file_path = Path(file_path).resolve()
match file_path.suffix.lower():
case ".json":
return _parse_json
case ".yaml" | ".yml":
return _parse_yaml
case _:
msg = (
f"Failed to parse, {file_path}. Unsupported file extension, "
+ f"{file_path.suffix}. Pass in a custom config_parser argument or "
+ "use one of the supported file extensions, .json, .yaml, .yml, .toml."
)
raise ConfigParsingError(msg)
def _parse_env_variables(text: str) -> str:
"""Parse environment variables in the configuration text."""
try:
return Template(text).substitute(os.environ)
except KeyError as error:
msg = f"Environment variable not found: {error}"
raise ConfigParsingError(msg) from error
def _recursive_merge_dicts(dest: dict[str, Any], src: dict[str, Any]) -> None:
"""Recursively merge two dictionaries in place."""
for key, value in src.items():
if isinstance(value, dict):
if isinstance(dest.get(key), dict):
_recursive_merge_dicts(dest[key], value)
else:
dest[key] = value
else:
dest[key] = value
def load_config(
    config_initializer: Callable[..., T],
    config_path: str | Path | None = None,
    overrides: dict[str, Any] | None = None,
    set_cwd: bool = True,
    parse_env_vars: bool = True,
    load_dot_env_file: bool = True,
    dot_env_path: str | Path | None = None,
    config_parser: Callable[[str], dict[str, Any]] | None = None,
    file_encoding: str = "utf-8",
) -> T:
    """Load configuration from a file.

    Parameters
    ----------
    config_initializer : Callable[..., T]
        Configuration constructor; invoked as config_initializer(**config_data).
    config_path : str | Path | None, optional (default=None)
        Config directory (searched for settings.[yaml|yml|json]) or a config
        file path. Defaults to the current working directory.
    overrides : dict[str, Any] | None, optional (default=None)
        Values merged over the parsed configuration (e.g., from CLI flags).
    set_cwd : bool, optional (default=True)
        Change the working directory to the config file's directory so
        relative paths inside the configuration resolve correctly.
    parse_env_vars : bool, optional (default=True)
        Substitute environment-variable references in the config text.
    load_dot_env_file : bool, optional (default=True)
        Load a .env file before substituting environment variables.
    dot_env_path : str | Path | None, optional (default=None)
        Explicit .env file to load; defaults to one beside the config file.
    config_parser : Callable[[str], dict[str, Any]] | None, optional (default=None)
        Parser for the config text; inferred from the file extension if None
        (supported: .json, .yaml, .yml).
    file_encoding : str, optional (default="utf-8")
        Encoding used to read the config file.

    Returns
    -------
    T
        The initialized configuration object.

    Raises
    ------
    FileNotFoundError
        If the config file is missing, or an explicitly-provided .env file
        is missing.
    ConfigParsingError
        If env-var substitution fails, no parser can be inferred, parsing
        fails, or merging overrides fails.
    """
    # Resolve the settings file (defaults to searching the cwd).
    resolved = Path(config_path).resolve() if config_path else Path.cwd()
    config_file = _get_config_file_path(resolved)
    file_contents = config_file.read_text(encoding=file_encoding)
    if parse_env_vars:
        if load_dot_env_file:
            # An explicitly-provided .env path must exist; the implicit
            # sibling .env is optional.
            env_required = dot_env_path is not None
            env_file = (
                Path(dot_env_path) if dot_env_path else config_file.parent / ".env"
            )
            _load_dotenv(env_file, required=env_required)
        file_contents = _parse_env_variables(file_contents)
    parser = config_parser if config_parser is not None else _get_parser_for_file(config_file)
    try:
        config_data: dict[str, Any] = parser(file_contents)
    except Exception as error:
        msg = f"Failed to parse config_path: {config_file}. Error: {error}"
        raise ConfigParsingError(msg) from error
    if overrides is not None:
        try:
            _recursive_merge_dicts(config_data, overrides)
        except Exception as error:
            msg = f"Failed to merge overrides with config_path: {config_file}. Error: {error}"
            raise ConfigParsingError(msg) from error
    if set_cwd:
        # Relative paths inside the config resolve against its directory.
        os.chdir(config_file.parent)
    return config_initializer(**config_data)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-common/graphrag_common/config/load_config.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-common/graphrag_common/factory/factory.py | # Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""Factory ABC."""
from abc import ABC
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, ClassVar, Generic, Literal, TypeVar
from graphrag_common.hasher import hash_data
T = TypeVar("T", covariant=True)
ServiceScope = Literal["singleton", "transient"]
@dataclass
class _ServiceDescriptor(Generic[T]):
"""Descriptor for a service."""
scope: ServiceScope
initializer: Callable[..., T]
class Factory(ABC, Generic[T]):
"""Abstract base class for factories."""
_instance: ClassVar["Factory | None"] = None
def __new__(cls, *args: Any, **kwargs: Any) -> "Factory[T]":
"""Create a new instance of Factory if it does not exist."""
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
if not hasattr(self, "_initialized"):
self._service_initializers: dict[str, _ServiceDescriptor[T]] = {}
self._initialized_services: dict[str, T] = {}
self._initialized = True
def __contains__(self, strategy: str) -> bool:
"""Check if a strategy is registered."""
return strategy in self._service_initializers
def keys(self) -> list[str]:
"""Get a list of registered strategy names."""
return list(self._service_initializers.keys())
def register(
self,
strategy: str,
initializer: Callable[..., T],
scope: ServiceScope = "transient",
) -> None:
"""
Register a new service.
Args
----
strategy: str
The name of the strategy.
initializer: Callable[..., T]
A callable that creates an instance of T.
scope: ServiceScope (default: "transient")
The scope of the service ("singleton" or "transient").
Singleton services are cached based on their init args
so that the same instance is returned for the same init args.
"""
self._service_initializers[strategy] = _ServiceDescriptor(scope, initializer)
def create(self, strategy: str, init_args: dict[str, Any] | None = None) -> T:
"""
Create a service instance based on the strategy.
Args
----
strategy: str
The name of the strategy.
init_args: dict[str, Any] | None
A dictionary of keyword arguments to pass to the service initializer.
Returns
-------
An instance of T.
Raises
------
ValueError: If the strategy is not registered.
"""
if strategy not in self._service_initializers:
msg = f"Strategy '{strategy}' is not registered. Registered strategies are: {', '.join(list(self._service_initializers.keys()))}"
raise ValueError(msg)
# Delete entries with value None
# That way services can have default values
init_args = {k: v for k, v in (init_args or {}).items() if v is not None}
service_descriptor = self._service_initializers[strategy]
if service_descriptor.scope == "singleton":
cache_key = hash_data({
"strategy": strategy,
"init_args": init_args,
})
if cache_key not in self._initialized_services:
self._initialized_services[cache_key] = service_descriptor.initializer(
**init_args
)
return self._initialized_services[cache_key]
return service_descriptor.initializer(**(init_args or {}))
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-common/graphrag_common/factory/factory.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-common/graphrag_common/hasher/hasher.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""The GraphRAG hasher module."""
import hashlib
from collections.abc import Callable
from typing import Any
import yaml
Hasher = Callable[[str], str]
"""Type alias for a hasher function (data: str) -> str."""


def sha256_hasher(data: str) -> str:
    """Return the hex-encoded SHA-256 digest of the UTF-8 encoded input."""
    digest = hashlib.sha256(data.encode("utf-8"))
    return digest.hexdigest()
def make_yaml_serializable(data: Any) -> Any:
    """Convert data to a YAML-serializable format.

    Containers become tuples (sorted for sets and dicts) and every leaf
    becomes its string representation, yielding a deterministic structure.
    """
    if isinstance(data, dict):
        items = ((key, make_yaml_serializable(value)) for key, value in data.items())
        return tuple(sorted(items))
    if isinstance(data, set):
        return tuple(sorted(make_yaml_serializable(item) for item in data))
    if isinstance(data, (list, tuple)):
        return tuple(make_yaml_serializable(item) for item in data)
    return str(data)
def hash_data(data: Any, *, hasher: Hasher | None = None) -> str:
    """Hash the input data using the specified hasher function.

    Args
    ----
    data: Any
        The input data to hash; it is serialized with yaml so complex
        structures (classes, functions, ...) can be hashed too.
    hasher: Hasher | None (default: sha256_hasher)
        The hasher function to use, (data: str) -> str.

    Returns
    -------
    str
        The resulting hash of the input data.
    """
    hash_fn = hasher or sha256_hasher
    try:
        return hash_fn(yaml.dump(data, sort_keys=True))
    except TypeError:
        # Fall back to a deterministic string-based representation for
        # objects yaml cannot serialize directly.
        return hash_fn(yaml.dump(make_yaml_serializable(data), sort_keys=True))
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-common/graphrag_common/hasher/hasher.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/csv.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'CSVFileReader' model."""
import csv
import io
import logging
import sys
from graphrag_input.structured_file_reader import StructuredFileReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
try:
    # Allow arbitrarily large CSV fields; some platforms reject sys.maxsize.
    csv.field_size_limit(sys.maxsize)
except OverflowError:
    csv.field_size_limit(100 * 1024 * 1024)


class CSVFileReader(StructuredFileReader):
    """Reader implementation for csv files."""

    def __init__(self, file_pattern: str | None = None, **kwargs):
        # Default to matching any *.csv path when no pattern is supplied.
        super().__init__(
            file_pattern=file_pattern if file_pattern is not None else ".*\\.csv$",
            **kwargs,
        )

    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a csv file into a list of documents.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - list with a TextDocument for each row in the file.
        """
        contents = await self._storage.get(path, encoding=self._encoding)
        rows = list(csv.DictReader(io.StringIO(contents)))
        return await self.process_data_columns(rows, path)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/csv.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/get_property.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Utility for retrieving properties from nested dictionaries."""
from typing import Any
def get_property(data: dict[str, Any], path: str) -> Any:
    """Retrieve a property from a dictionary using dot notation.

    Parameters
    ----------
    data : dict[str, Any]
        The dictionary to retrieve the property from.
    path : str
        Dot-separated path to the property (e.g., "foo.bar.baz").

    Returns
    -------
    Any
        The value at the specified path.

    Raises
    ------
    KeyError
        If the path does not exist in the dictionary.
    """
    node: Any = data
    for segment in path.split("."):
        # Every intermediate node must be a dict containing the next segment.
        if not isinstance(node, dict) or segment not in node:
            msg = f"Property '{path}' not found"
            raise KeyError(msg)
        node = node[segment]
    return node
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/get_property.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/hashing.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Hashing utilities."""
from collections.abc import Iterable
from hashlib import sha512
from typing import Any
def gen_sha512_hash(item: dict[str, Any], hashcode: Iterable[str]) -> str:
    """Generate a SHA512 hash.

    Parameters
    ----------
    item : dict[str, Any]
        The dictionary containing values to hash.
    hashcode : Iterable[str]
        The keys whose values are concatenated (as strings) and hashed.

    Returns
    -------
    str
        The SHA512 hash as a hexadecimal string.
    """
    joined = "".join(str(item[key]) for key in hashcode)
    # Content-identity hash, not a security primitive.
    return sha512(joined.encode("utf-8"), usedforsecurity=False).hexdigest()
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/hashing.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/input_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Parameterization settings for the default configuration."""
from pydantic import BaseModel, ConfigDict, Field
from graphrag_input.input_type import InputType
class InputConfig(BaseModel):
    """The default configuration section for Input."""

    # Extra keys are preserved so custom reader implementations can receive
    # additional constructor arguments through the config.
    model_config = ConfigDict(extra="allow")

    type: str = Field(
        default=InputType.Text,
        description="The input file type to use.",
    )
    encoding: str | None = Field(
        default=None,
        description="The input file encoding to use.",
    )
    file_pattern: str | None = Field(
        default=None,
        description="The input file pattern to use.",
    )
    id_column: str | None = Field(
        default=None,
        description="The input ID column to use.",
    )
    title_column: str | None = Field(
        default=None,
        description="The input title column to use.",
    )
    text_column: str | None = Field(
        default=None,
        description="The input text column to use.",
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/input_config.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/input_reader.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'InputReader' model."""
from __future__ import annotations
import logging
import re
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import AsyncIterator
from graphrag_storage import Storage
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)


class InputReader(metaclass=ABCMeta):
    """Base class for readers that load TextDocuments from storage."""

    def __init__(
        self,
        storage: Storage,
        file_pattern: str,
        encoding: str = "utf-8",
        **kwargs,
    ):
        self._storage = storage
        self._encoding = encoding
        self._file_pattern = file_pattern

    async def read_files(self) -> list[TextDocument]:
        """Load all files from storage and return them as a single list."""
        return [doc async for doc in self]

    def __aiter__(self) -> AsyncIterator[TextDocument]:
        """Return the async iterator, enabling `async for doc in reader`."""
        return self._iterate_files()

    async def _iterate_files(self) -> AsyncIterator[TextDocument]:
        """Async generator that yields documents one at a time as files are loaded."""
        matches = list(self._storage.find(re.compile(self._file_pattern)))
        if not matches:
            logger.warning("No %s matches found in storage", self._file_pattern)
            return
        loaded = 0
        for path in matches:
            try:
                for doc in await self.read_file(path):
                    loaded += 1
                    yield doc
            except Exception as e:  # noqa: BLE001 (best-effort: skip unreadable files)
                logger.warning("Warning! Error loading file %s. Skipping...", path)
                logger.warning("Error: %s", e)
        logger.info(
            "Found %d %s files, loading %d",
            len(matches),
            self._file_pattern,
            loaded,
        )
        logger.info(
            "Total number of unfiltered %s rows: %d",
            self._file_pattern,
            loaded,
        )

    @abstractmethod
    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a file into a list of documents.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - List with an entry for each document in the file.
        """
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/input_reader.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/input_reader_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'InputReaderFactory' model."""
import logging
from collections.abc import Callable
from graphrag_common.factory import Factory
from graphrag_common.factory.factory import ServiceScope
from graphrag_storage.storage import Storage
from graphrag_input.input_config import InputConfig
from graphrag_input.input_reader import InputReader
from graphrag_input.input_type import InputType
logger = logging.getLogger(__name__)
class InputReaderFactory(Factory[InputReader]):
    """Factory for creating Input Reader instances."""


# Module-level singleton shared by register_input_reader/create_input_reader.
input_reader_factory = InputReaderFactory()
def register_input_reader(
    input_reader_type: str,
    input_reader_initializer: Callable[..., InputReader],
    scope: ServiceScope = "transient",
) -> None:
    """Register a custom input reader implementation.

    Args
    ----
    - input_reader_type: str
        The input reader id to register.
    - input_reader_initializer: Callable[..., InputReader]
        The input reader initializer to register.
    - scope: ServiceScope (default: "transient")
        The service scope to register the initializer under.
    """
    input_reader_factory.register(input_reader_type, input_reader_initializer, scope)
def create_input_reader(config: InputConfig, storage: Storage) -> InputReader:
    """Create an input reader implementation based on the given configuration.

    Args
    ----
    - config: InputConfig
        The input reader configuration to use.
    - storage: Storage
        The storage implementation to use for reading the files.

    Returns
    -------
    InputReader
        The created input reader implementation.

    Raises
    ------
    ValueError
        If `config.type` is not a built-in type and has not been registered.
    """
    config_model = config.model_dump()
    input_strategy = config.type
    # Built-in readers are registered lazily on first use so each reader module
    # is only imported when its input type is actually requested.
    if input_strategy not in input_reader_factory:
        match input_strategy:
            case InputType.Csv:
                from graphrag_input.csv import CSVFileReader
                register_input_reader(InputType.Csv, CSVFileReader)
            case InputType.Text:
                from graphrag_input.text import TextFileReader
                register_input_reader(InputType.Text, TextFileReader)
            case InputType.Json:
                from graphrag_input.json import JSONFileReader
                register_input_reader(InputType.Json, JSONFileReader)
            case InputType.JsonLines:
                from graphrag_input.jsonl import JSONLinesFileReader
                register_input_reader(InputType.JsonLines, JSONLinesFileReader)
            case InputType.MarkItDown:
                from graphrag_input.markitdown import MarkItDownFileReader
                register_input_reader(InputType.MarkItDown, MarkItDownFileReader)
            case _:
                msg = f"InputConfig.type '{input_strategy}' is not registered in the InputReaderFactory. Registered types: {', '.join(input_reader_factory.keys())}."
                raise ValueError(msg)
    # Inject the storage handle alongside the dumped config values; readers
    # receive it as a constructor argument.
    config_model["storage"] = storage
    return input_reader_factory.create(input_strategy, init_args=config_model)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/input_reader_factory.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/input_type.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing input file type enum."""
from enum import StrEnum
class InputType(StrEnum):
"""The input file type for the pipeline."""
Csv = "csv"
"""The CSV input type."""
Text = "text"
"""The text input type."""
Json = "json"
"""The JSON input type."""
JsonLines = "jsonl"
"""The JSON Lines input type."""
MarkItDown = "markitdown"
"""The MarkItDown input type."""
def __repr__(self):
"""Get a string representation."""
return f'"{self.value}"'
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/input_type.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/json.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'JSONFileReader' model."""
import json
import logging
from graphrag_input.structured_file_reader import StructuredFileReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
class JSONFileReader(StructuredFileReader):
    """Reader implementation for json files."""

    def __init__(self, file_pattern: str | None = None, **kwargs):
        """Create the reader; defaults to matching files ending in `.json`."""
        if file_pattern is None:
            file_pattern = ".*\\.json$"
        super().__init__(file_pattern=file_pattern, **kwargs)

    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a JSON file into a list of documents.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - list with a TextDocument for each row in the file.
        """
        raw_text = await self._storage.get(path, encoding=self._encoding)
        parsed = json.loads(raw_text)
        # A JSON file may hold either a single object or an array of objects;
        # normalize both shapes to a list of row dicts.
        if isinstance(parsed, list):
            rows = parsed
        else:
            rows = [parsed]
        return await self.process_data_columns(rows, path)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/json.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/jsonl.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'JSONLinesFileReader' model."""
import json
import logging
from graphrag_input.structured_file_reader import StructuredFileReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
class JSONLinesFileReader(StructuredFileReader):
    """Reader implementation for json lines files."""

    def __init__(self, file_pattern: str | None = None, **kwargs):
        """Create the reader; defaults to matching files ending in `.jsonl`."""
        if file_pattern is None:
            file_pattern = ".*\\.jsonl$"
        super().__init__(file_pattern=file_pattern, **kwargs)

    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a JSON lines file into a list of documents.

        This differs from standard JSON files in that each line is a separate JSON object.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - list with a TextDocument for each row in the file.
        """
        raw_text = await self._storage.get(path, encoding=self._encoding)
        # One JSON object per line -> one row dict per line.
        rows = []
        for line in raw_text.splitlines():
            rows.append(json.loads(line))
        return await self.process_data_columns(rows, path)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/jsonl.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/markitdown.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'TextFileReader' model."""
import logging
from io import BytesIO
from pathlib import Path
from markitdown import MarkItDown, StreamInfo
from graphrag_input.hashing import gen_sha512_hash
from graphrag_input.input_reader import InputReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
class MarkItDownFileReader(InputReader):
    """Reader implementation for any file type supported by markitdown.
    https://github.com/microsoft/markitdown
    """
    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a MarkItDown-supported file into a list of documents.

        The file is fetched as raw bytes and converted to markdown text by
        MarkItDown, which sniffs the format from the file extension.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - list containing a single TextDocument for the file.
        """
        # `raw_bytes` (not `bytes`) to avoid shadowing the builtin.
        raw_bytes = await self._storage.get(path, encoding=self._encoding, as_bytes=True)
        md = MarkItDown()
        result = md.convert_stream(
            BytesIO(raw_bytes), stream_info=StreamInfo(extension=Path(path).suffix)
        )
        text = result.markdown
        document = TextDocument(
            id=gen_sha512_hash({"text": text}, ["text"]),
            # Prefer the converter-extracted title; fall back to the file name.
            title=result.title if result.title else str(Path(path).name),
            text=text,
            creation_date=await self._storage.get_creation_date(path),
            raw_data=None,
        )
        return [document]
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/markitdown.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/structured_file_reader.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'StructuredFileReader' model."""
import logging
from typing import Any
from graphrag_input.get_property import get_property
from graphrag_input.hashing import gen_sha512_hash
from graphrag_input.input_reader import InputReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
class StructuredFileReader(InputReader):
    """Base reader implementation for structured files such as csv and json."""

    def __init__(
        self,
        id_column: str | None = None,
        title_column: str | None = None,
        text_column: str = "text",
        **kwargs,
    ):
        """Create the reader.

        Args
        ----
        - id_column - Optional column holding a unique document id; when absent
          the id is hashed from the text.
        - title_column - Optional column holding the document title; when absent
          the file path (plus row index) is used.
        - text_column - Column holding the document text (default: "text").
        """
        super().__init__(**kwargs)
        self._id_column = id_column
        self._title_column = title_column
        self._text_column = text_column

    async def process_data_columns(
        self,
        rows: list[dict[str, Any]],
        path: str,
    ) -> list[TextDocument]:
        """Process configured data columns from a list of loaded dicts."""
        if not rows:
            return []
        # The creation date is a property of the file, not of a row, so fetch
        # it once instead of once per row.
        creation_date = await self._storage.get_creation_date(path)
        multiple_rows = len(rows) > 1
        documents = []
        for index, row in enumerate(rows):
            # text column is required - harvest from dict
            text = get_property(row, self._text_column)
            # id is optional - harvest from dict or hash from text
            # (`doc_id`, not `id`, to avoid shadowing the builtin)
            doc_id = (
                get_property(row, self._id_column)
                if self._id_column
                else gen_sha512_hash({"text": text}, ["text"])
            )
            # title is optional - harvest from dict or use filename, suffixed
            # with the row index when the file yields more than one document
            suffix = f" ({index})" if multiple_rows else ""
            title = (
                get_property(row, self._title_column)
                if self._title_column
                else f"{path}{suffix}"
            )
            documents.append(
                TextDocument(
                    id=doc_id,
                    title=title,
                    text=text,
                    creation_date=creation_date,
                    raw_data=row,
                )
            )
        return documents
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/structured_file_reader.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/text.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'TextFileReader' model."""
import logging
from pathlib import Path
from graphrag_input.hashing import gen_sha512_hash
from graphrag_input.input_reader import InputReader
from graphrag_input.text_document import TextDocument
logger = logging.getLogger(__name__)
class TextFileReader(InputReader):
    """Reader implementation for text files."""

    def __init__(self, file_pattern: str | None = None, **kwargs):
        """Create the reader; defaults to matching files ending in `.txt`."""
        if file_pattern is None:
            file_pattern = ".*\\.txt$"
        super().__init__(file_pattern=file_pattern, **kwargs)

    async def read_file(self, path: str) -> list[TextDocument]:
        """Read a text file into a list of documents.

        Args:
        - path - The path to read the file from.

        Returns
        -------
        - output - list with a TextDocument for each row in the file.
        """
        content = await self._storage.get(path, encoding=self._encoding)
        # One plain-text file maps to exactly one document; the id is a
        # content hash and the title is the bare file name.
        document = TextDocument(
            id=gen_sha512_hash({"text": content}, ["text"]),
            title=str(Path(path).name),
            text=content,
            creation_date=await self._storage.get_creation_date(path),
            raw_data=None,
        )
        return [document]
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/text.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-input/graphrag_input/text_document.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""TextDocument dataclass."""
import logging
from dataclasses import dataclass
from typing import Any
from graphrag_input.get_property import get_property
logger = logging.getLogger(__name__)
@dataclass
class TextDocument:
"""The TextDocument holds relevant content for GraphRAG indexing."""
id: str
"""Unique identifier for the document."""
text: str
"""The main text content of the document."""
title: str
"""The title of the document."""
creation_date: str
"""The creation date of the document, ISO-8601 format."""
raw_data: dict[str, Any] | None = None
"""Raw data from source document."""
def get(self, field: str, default_value: Any = None) -> Any:
"""
Get a single field from the TextDocument.
Functions like the get method on a dictionary, returning default_value if the field is not found.
Supports nested fields using dot notation.
This takes a two step approach for flexibility:
1. If the field is one of the standard text document fields (id, title, text, creation_date), just grab it directly. This accommodates unstructured text for example, which just has the standard fields.
2. Otherwise. try to extract it from the raw_data dict. This allows users to specify any column from the original input file.
"""
if field in ["id", "title", "text", "creation_date"]:
return getattr(self, field)
raw = self.raw_data or {}
try:
return get_property(raw, field)
except KeyError:
return default_value
def collect(self, fields: list[str]) -> dict[str, Any]:
"""Extract data fields from a TextDocument into a dict."""
data = {}
for field in fields:
value = self.get(field)
if value is not None:
data[field] = value
return data
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-input/graphrag_input/text_document.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/cache/create_cache_key.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Create cache key."""
from typing import Any
from graphrag_cache import create_cache_key as default_create_cache_key
_CACHE_VERSION = 4
"""
If there's a breaking change in what we cache, we should increment this version number to invalidate existing caches.
fnllm was on cache version 2 and though we generate
similar cache keys, the objects stored in cache by fnllm and litellm are different.
Using litellm model providers will not be able to reuse caches generated by fnllm
thus we start with version 3 for litellm.
graphrag-llm package is now on version 4.
This is to account for changes to the ModelConfig that affect the cache key and
occurred when pulling this package out of graphrag.
graphrag-llm, now that it supports metrics, also caches metrics, which were not cached before.
"""
def create_cache_key(
    input_args: dict[str, Any],
) -> str:
    """Generate a cache key based on the input arguments of a model call.

    Transport/auth/streaming arguments are stripped first (see
    `_get_parameters`) so that equivalent requests share a cache entry.

    Args
    ----
    input_args: dict[str, Any]
        The input arguments for the model call.

    Returns
    -------
    str
        The cache key produced by the default creator from the filtered
        arguments.
    """
    cache_key_parameters = _get_parameters(
        input_args=input_args,
    )
    return default_create_cache_key(cache_key_parameters)
def _get_parameters(
# model_config: "ModelConfig",
input_args: dict[str, Any],
) -> dict[str, Any]:
"""Pluck out the parameters that define a cache key."""
excluded_keys = [
"metrics",
"stream",
"stream_options",
"mock_response",
"timeout",
"base_url",
"api_base",
"api_version",
"api_key",
"azure_ad_token_provider",
"drop_params",
]
parameters: dict[str, Any] = {
k: v for k, v in input_args.items() if k not in excluded_keys
}
return parameters
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/cache/create_cache_key.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/completion/completion.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Completion Abstract Base Class."""
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Unpack
from graphrag_llm.threading.completion_thread_runner import completion_thread_runner
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.threading.completion_thread_runner import (
ThreadedLLMCompletionFunction,
ThreadedLLMCompletionResponseHandler,
)
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
LLMCompletionArgs,
LLMCompletionChunk,
LLMCompletionResponse,
ResponseFormat,
)
class LLMCompletion(ABC):
    """Abstract base class for language model completions.

    Subclasses implement `completion` / `completion_async`; this base class
    layers thread-pool (`completion_thread_pool`) and batch
    (`completion_batch`) helpers on top of the synchronous `completion`.
    """

    @abstractmethod
    def __init__(
        self,
        *,
        model_id: str,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        metrics_processor: "MetricsProcessor | None" = None,
        rate_limiter: "RateLimiter | None" = None,
        retrier: "Retry | None" = None,
        cache: "Cache | None" = None,
        cache_key_creator: "CacheKeyCreator",
        **kwargs: Any,
    ):
        """Initialize the LLMCompletion.

        Args
        ----
        model_id: str
            The model ID, e.g., "openai/gpt-4o".
        model_config: ModelConfig
            The configuration for the language model.
        tokenizer: Tokenizer
            The tokenizer to use.
        metrics_store: MetricsStore
            The metrics store to use (required).
        metrics_processor: MetricsProcessor | None (default: None)
            The metrics processor to use.
        rate_limiter: RateLimiter | None (default: None)
            The rate limiter to use.
        retrier: Retry | None (default: None)
            The retry strategy to use.
        cache: Cache | None (default: None)
            Optional cache for completion responses.
        cache_key_creator: CacheKeyCreator
            Cache key creator function (required).
            (dict[str, Any]) -> str
        **kwargs: Any
            Additional keyword arguments.
        """
        raise NotImplementedError

    @abstractmethod
    def completion(
        self,
        /,
        **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
    ) -> "LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk]":
        """Sync completion method.

        Args
        ----
        messages: LLMCompletionMessagesParam
            The messages to send to the LLM.
            Can be str | list[dict[str, str]] | list[ChatCompletionMessageParam].
        response_format: BaseModel | None (default=None)
            The structured response format.
            Must extend pydantic BaseModel.
        stream: bool (default=False)
            Whether to stream the response.
            Streaming is not supported when using response_format.
        max_completion_tokens: int | None (default=None)
            The maximum number of tokens to generate in the completion.
        temperature: float | None (default=None)
            The temperature to control how deterministic vs. creative the responses are.
        top_p: float | None (default=None)
            top_p for nucleus sampling, where the model considers tokens with
            cumulative probabilities up to top_p. Values range from 0 to 1.
        n: int | None (default=None)
            The number of completions to generate for each prompt.
        tools: list[Tool] | None (default=None)
            Optional tools to use during completion.
            https://docs.litellm.ai/docs/completion/function_call
        **kwargs: Any
            Additional keyword arguments.

        Returns
        -------
        LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk]:
            The completion response or an iterator of completion chunks if streaming.
        """
        raise NotImplementedError

    @abstractmethod
    async def completion_async(
        self,
        /,
        **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
    ) -> "LLMCompletionResponse[ResponseFormat] | AsyncIterator[LLMCompletionChunk]":
        """Async completion method.

        Args
        ----
        messages: LLMCompletionMessagesParam
            The messages to send to the LLM.
            Can be str | list[dict[str, str]] | list[ChatCompletionMessageParam].
        response_format: BaseModel | None (default=None)
            The structured response format.
            Must extend pydantic BaseModel.
        stream: bool (default=False)
            Whether to stream the response.
            Streaming is not supported when using response_format.
        max_completion_tokens: int | None (default=None)
            The maximum number of tokens to generate in the completion.
        temperature: float | None (default=None)
            The temperature to control how deterministic vs. creative the responses are.
        top_p: float | None (default=None)
            top_p for nucleus sampling, where the model considers tokens with
            cumulative probabilities up to top_p. Values range from 0 to 1.
        n: int | None (default=None)
            The number of completions to generate for each prompt.
        tools: list[Tool] | None (default=None)
            Optional tools to use during completion.
            https://docs.litellm.ai/docs/completion/function_call
        **kwargs: Any
            Additional keyword arguments.

        Returns
        -------
        LLMCompletionResponse[ResponseFormat] | AsyncIterator[LLMCompletionChunk]:
            The completion response or an iterator of completion chunks if streaming.
        """
        raise NotImplementedError

    @contextmanager
    def completion_thread_pool(
        self,
        *,
        response_handler: "ThreadedLLMCompletionResponseHandler",
        concurrency: int,
        queue_limit: int = 0,
    ) -> "Iterator[ThreadedLLMCompletionFunction]":
        """Run a completion thread pool.

        Args
        ----
        response_handler: ThreadedLLMCompletionResponseHandler
            The callback function to handle completion responses.
            (request_id, response|exception) -> Awaitable[None] | None
        concurrency: int
            The number of threads to spin up in a thread pool.
        queue_limit: int (default=0)
            The maximum number of items allowed in the input queue.
            0 means unlimited.
            Set this to a value to create backpressure on the caller.

        Yields
        ------
        ThreadedLLMCompletionFunction:
            A function that can be used to submit completion requests to the thread pool.
            (messages, request_id, **kwargs) -> None
            The thread pool will process the requests and invoke the provided callback
            with the responses.
            Same signature as LLMCompletionFunction but requires a `request_id` parameter
            to identify the request and does not return anything.
        """
        # The pool drives the subclass's *sync* completion on worker threads.
        with completion_thread_runner(
            completion=self.completion,
            response_handler=response_handler,
            concurrency=concurrency,
            queue_limit=queue_limit,
            metrics_store=self.metrics_store,
        ) as completion:
            yield completion

    def completion_batch(
        self,
        completion_requests: list["LLMCompletionArgs[ResponseFormat]"],
        *,
        concurrency: int,
        queue_limit: int = 0,
    ) -> list[
        "LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk] | Exception"
    ]:
        """Process a batch of completion requests using a thread pool.

        Args
        ----
        completion_requests: list[LLMCompletionArgs]
            A list of completion request arguments to process in parallel.
        concurrency: int
            The number of threads to spin up in a thread pool.
        queue_limit: int (default=0)
            The maximum number of items allowed in the input queue.
            0 means unlimited.
            Set this to a value to create backpressure on the caller.

        Returns
        -------
        list[LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk] | Exception]:
            A list of completion responses or exceptions corresponding to all the requests,
            in the same order as `completion_requests`.
        """
        # Pre-size the results list; the request_id submitted to the pool is the
        # index into this list, so responses may arrive in any order.
        responses: list[
            LLMCompletionResponse[ResponseFormat]
            | Iterator[LLMCompletionChunk]
            | Exception
        ] = [None] * len(completion_requests)  # type: ignore

        def handle_response(
            request_id: str,
            resp: "LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk] | Exception",
        ):
            # Slot each response back into its original position.
            responses[int(request_id)] = resp

        with self.completion_thread_pool(
            response_handler=handle_response,
            concurrency=concurrency,
            queue_limit=queue_limit,
        ) as threaded_completion:
            for idx, request in enumerate(completion_requests):
                threaded_completion(request_id=str(idx), **request)
        # The context manager blocks until all submitted requests complete.
        return responses

    @property
    @abstractmethod
    def metrics_store(self) -> "MetricsStore":
        """Metrics store."""
        raise NotImplementedError

    @property
    @abstractmethod
    def tokenizer(self) -> "Tokenizer":
        """Tokenizer."""
        raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/completion/completion.py",
"license": "MIT License",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/completion/completion_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Completion factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from graphrag_common.factory import Factory
from graphrag_llm.cache import create_cache_key
from graphrag_llm.config.tokenizer_config import TokenizerConfig
from graphrag_llm.config.types import LLMProviderType
from graphrag_llm.metrics.noop_metrics_store import NoopMetricsStore
from graphrag_llm.tokenizer.tokenizer_factory import create_tokenizer
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_common.factory import ServiceScope
from graphrag_llm.completion.completion import LLMCompletion
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.tokenizer import Tokenizer
class CompletionFactory(Factory["LLMCompletion"]):
    """Factory for creating Completion instances."""


# Module-level singleton shared by register_completion/create_completion.
completion_factory = CompletionFactory()
def register_completion(
    completion_type: str,
    completion_initializer: Callable[..., "LLMCompletion"],
    scope: "ServiceScope" = "transient",
) -> None:
    """Register a custom completion implementation.

    Args
    ----
    completion_type: str
        The completion id to register.
    completion_initializer: Callable[..., LLMCompletion]
        The completion initializer to register.
    scope: ServiceScope (default: "transient")
        The service scope for the completion.
    """
    # Thin wrapper over the module-level factory singleton.
    completion_factory.register(completion_type, completion_initializer, scope)
def create_completion(
    model_config: "ModelConfig",
    *,
    cache: "Cache | None" = None,
    cache_key_creator: "CacheKeyCreator | None" = None,
    tokenizer: "Tokenizer | None" = None,
) -> "LLMCompletion":
    """Create a Completion instance based on the model configuration.

    Args
    ----
    model_config: ModelConfig
        The configuration for the model.
    cache: Cache | None (default: None)
        An optional cache instance.
    cache_key_creator: CacheKeyCreator | None (default: create_cache_key)
        An optional cache key creator function.
        (dict[str, Any]) -> str
    tokenizer: Tokenizer | None (default: litellm)
        An optional tokenizer instance.

    Returns
    -------
    LLMCompletion:
        An instance of a LLMCompletion subclass.

    Raises
    ------
    ValueError
        If `model_config.type` is not a built-in provider and has not been
        registered.
    """
    cache_key_creator = cache_key_creator or create_cache_key
    model_id = f"{model_config.model_provider}/{model_config.model}"
    strategy = model_config.type
    # model_extra carries passthrough config fields, forwarded to the initializer.
    extra: dict[str, Any] = model_config.model_extra or {}
    # Built-in providers are registered lazily on first use so their modules
    # are only imported when the corresponding provider is requested.
    if strategy not in completion_factory:
        match strategy:
            case LLMProviderType.LiteLLM:
                from graphrag_llm.completion.lite_llm_completion import (
                    LiteLLMCompletion,
                )
                register_completion(
                    completion_type=LLMProviderType.LiteLLM,
                    completion_initializer=LiteLLMCompletion,
                    scope="singleton",
                )
            case LLMProviderType.MockLLM:
                from graphrag_llm.completion.mock_llm_completion import (
                    MockLLMCompletion,
                )
                register_completion(
                    completion_type=LLMProviderType.MockLLM,
                    completion_initializer=MockLLMCompletion,
                )
            case _:
                msg = f"ModelConfig.type '{strategy}' is not registered in the CompletionFactory. Registered strategies: {', '.join(completion_factory.keys())}"
                raise ValueError(msg)
    tokenizer = tokenizer or create_tokenizer(TokenizerConfig(model_id=model_id))
    # Rate limiting / retry are optional and built only when configured.
    rate_limiter: RateLimiter | None = None
    if model_config.rate_limit:
        from graphrag_llm.rate_limit.rate_limit_factory import create_rate_limiter
        rate_limiter = create_rate_limiter(rate_limit_config=model_config.rate_limit)
    retrier: Retry | None = None
    if model_config.retry:
        from graphrag_llm.retry.retry_factory import create_retry
        retrier = create_retry(retry_config=model_config.retry)
    # A no-op store keeps the completion code path uniform when metrics are off.
    metrics_store: MetricsStore = NoopMetricsStore()
    metrics_processor: MetricsProcessor | None = None
    if model_config.metrics:
        from graphrag_llm.metrics import create_metrics_processor, create_metrics_store
        metrics_store = create_metrics_store(
            config=model_config.metrics,
            id=model_id,
        )
        metrics_processor = create_metrics_processor(model_config.metrics)
    return completion_factory.create(
        strategy=strategy,
        init_args={
            **extra,
            "model_id": model_id,
            "model_config": model_config,
            "tokenizer": tokenizer,
            "metrics_store": metrics_store,
            "metrics_processor": metrics_processor,
            "rate_limiter": rate_limiter,
            "retrier": retrier,
            "cache": cache,
            "cache_key_creator": cache_key_creator,
        },
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/completion/completion_factory.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""LLMCompletion based on litellm."""
from collections.abc import AsyncIterator, Iterator
from typing import TYPE_CHECKING, Any, Unpack
import litellm
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from litellm import ModelResponse # type: ignore
from graphrag_llm.completion.completion import LLMCompletion
from graphrag_llm.config.types import AuthMethod
from graphrag_llm.middleware import (
with_middleware_pipeline,
)
from graphrag_llm.types import LLMCompletionChunk, LLMCompletionResponse
from graphrag_llm.utils import (
structure_completion_response,
)
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
AsyncLLMCompletionFunction,
LLMCompletionArgs,
LLMCompletionFunction,
LLMCompletionMessagesParam,
Metrics,
ResponseFormat,
)
litellm.suppress_debug_info = True
class LiteLLMCompletion(LLMCompletion):
"""LLMCompletion based on litellm."""
_model_config: "ModelConfig"
_model_id: str
_track_metrics: bool = False
_metrics_store: "MetricsStore"
_metrics_processor: "MetricsProcessor | None"
_cache: "Cache | None"
_cache_key_creator: "CacheKeyCreator"
_tokenizer: "Tokenizer"
_rate_limiter: "RateLimiter | None"
_retrier: "Retry | None"
    def __init__(
        self,
        *,
        model_id: str,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        metrics_processor: "MetricsProcessor | None" = None,
        rate_limiter: "RateLimiter | None" = None,
        retrier: "Retry | None" = None,
        cache: "Cache | None" = None,
        cache_key_creator: "CacheKeyCreator",
        azure_cognitive_services_audience: str = "https://cognitiveservices.azure.com/.default",
        drop_unsupported_params: bool = True,
        **kwargs: Any,
    ) -> None:
        """Initialize LiteLLMCompletion.

        Args
        ----
        model_id: str
            The LiteLLM model ID, e.g., "openai/gpt-4o"
        model_config: ModelConfig
            The configuration for the model.
        tokenizer: Tokenizer
            The tokenizer to use.
        metrics_store: MetricsStore
            The metrics store to use (required).
        metrics_processor: MetricsProcessor | None (default: None)
            The metrics processor to use; metrics tracking is enabled only
            when one is provided.
        rate_limiter: RateLimiter | None (default: None)
            The rate limiter to use.
        retrier: Retry | None (default: None)
            The retry strategy to use.
        cache: Cache | None (default: None)
            An optional cache instance.
        cache_key_creator: CacheKeyCreator
            The cache key creator function (required).
            (dict[str, Any]) -> str
        azure_cognitive_services_audience: str (default: "https://cognitiveservices.azure.com/.default")
            The audience for Azure Cognitive Services when using Managed Identity.
        drop_unsupported_params: bool (default: True)
            Whether to drop unsupported parameters for the model provider.
        **kwargs: Any
            Additional keyword arguments (unused).
        """
        self._model_id = model_id
        self._model_config = model_config
        self._tokenizer = tokenizer
        self._metrics_store = metrics_store
        self._metrics_processor = metrics_processor
        self._cache = cache
        # Metrics are only tracked when a processor was supplied.
        self._track_metrics = metrics_processor is not None
        self._cache_key_creator = cache_key_creator
        self._rate_limiter = rate_limiter
        self._retrier = retrier
        # Base transport-level completion functions (sync + async).
        self._completion, self._completion_async = _create_base_completions(
            model_config=model_config,
            drop_unsupported_params=drop_unsupported_params,
            azure_cognitive_services_audience=azure_cognitive_services_audience,
        )
        # Wrap the base functions with the middleware pipeline
        # (cache, metrics, rate limiting, retry).
        self._completion, self._completion_async = with_middleware_pipeline(
            model_config=self._model_config,
            model_fn=self._completion,
            async_model_fn=self._completion_async,
            request_type="chat",
            cache=self._cache,
            cache_key_creator=self._cache_key_creator,
            tokenizer=self._tokenizer,
            metrics_processor=self._metrics_processor,
            rate_limiter=self._rate_limiter,
            retrier=self._retrier,
        )
def completion(
    self,
    /,
    **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
) -> "LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk]":
    """Run a synchronous chat completion through the middleware pipeline.

    Accepts either a plain prompt string or a list of chat messages.
    Raises ValueError when response_format is combined with streaming.
    """
    chat_messages: LLMCompletionMessagesParam = kwargs.pop("messages")
    fmt = kwargs.pop("response_format", None)
    if fmt is not None and (kwargs.get("stream") or False):
        raise ValueError("response_format is not supported for streaming completions.")
    # Per-request metrics are collected only when a processor is configured;
    # the key is always removed from kwargs either way.
    call_metrics: Metrics | None = None
    if self._track_metrics:
        call_metrics = kwargs.pop("metrics", None) or {}
    else:
        kwargs.pop("metrics", None)
    # Promote a bare prompt string into a single user message.
    if isinstance(chat_messages, str):
        chat_messages = [{"role": "user", "content": chat_messages}]
    try:
        result = self._completion(
            messages=chat_messages,
            metrics=call_metrics,
            response_format=fmt,
            **kwargs,  # type: ignore
        )
        if fmt is not None:
            result.formatted_response = structure_completion_response(
                result.content, fmt
            )
        return result
    finally:
        # Flush request-level metrics into the store even when the call fails.
        if call_metrics is not None:
            self._metrics_store.update_metrics(metrics=call_metrics)
async def completion_async(
    self,
    /,
    **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
) -> "LLMCompletionResponse[ResponseFormat] | AsyncIterator[LLMCompletionChunk]":
    """Run an asynchronous chat completion through the middleware pipeline.

    Accepts either a plain prompt string or a list of chat messages.
    Raises ValueError when response_format is combined with streaming.
    """
    chat_messages: LLMCompletionMessagesParam = kwargs.pop("messages")
    fmt = kwargs.pop("response_format", None)
    if fmt is not None and (kwargs.get("stream") or False):
        raise ValueError("response_format is not supported for streaming completions.")
    # Per-request metrics are collected only when a processor is configured;
    # the key is always removed from kwargs either way.
    call_metrics: Metrics | None = None
    if self._track_metrics:
        call_metrics = kwargs.pop("metrics", None) or {}
    else:
        kwargs.pop("metrics", None)
    # Promote a bare prompt string into a single user message.
    if isinstance(chat_messages, str):
        chat_messages = [{"role": "user", "content": chat_messages}]
    try:
        result = await self._completion_async(
            messages=chat_messages,
            metrics=call_metrics,
            response_format=fmt,
            **kwargs,  # type: ignore
        )
        if fmt is not None:
            result.formatted_response = structure_completion_response(
                result.content, fmt
            )
        return result
    finally:
        # Flush request-level metrics into the store even when the call fails.
        if call_metrics is not None:
            self._metrics_store.update_metrics(metrics=call_metrics)
@property
def metrics_store(self) -> "MetricsStore":
    """Return the MetricsStore that accumulates usage metrics for this model."""
    return self._metrics_store
@property
def tokenizer(self) -> "Tokenizer":
    """Return the Tokenizer associated with this model."""
    return self._tokenizer
def _create_base_completions(
    *,
    model_config: "ModelConfig",
    drop_unsupported_params: bool,
    azure_cognitive_services_audience: str,
) -> tuple["LLMCompletionFunction", "AsyncLLMCompletionFunction"]:
    """Create base completions for LiteLLM.

    Convert litellm completion functions to graphrag_llm LLMCompletionFunction.
    LLMCompletionFunction is close to the litellm completion function signature,
    but uses a few extra params such as metrics. Remove graphrag_llm
    LLMCompletionFunction specific params before calling litellm completion
    functions.

    Args
    ----
    model_config: ModelConfig
        The configuration for the model.
    drop_unsupported_params: bool
        Passed to litellm as drop_params so provider-unsupported params are dropped.
    azure_cognitive_services_audience: str
        Token audience used when authenticating with Azure Managed Identity.

    Returns
    -------
    tuple[LLMCompletionFunction, AsyncLLMCompletionFunction]
        The sync and async base completion functions.
    """
    model_provider = model_config.model_provider
    # Azure deployments are addressed by deployment name rather than model name.
    model = model_config.azure_deployment_name or model_config.model
    base_args: dict[str, Any] = {
        "drop_params": drop_unsupported_params,
        "model": f"{model_provider}/{model}",
        "api_key": model_config.api_key,
        "api_base": model_config.api_base,
        "api_version": model_config.api_version,
        # User-provided call_args deliberately override the defaults above.
        **model_config.call_args,
    }
    if model_config.auth_method == AuthMethod.AzureManagedIdentity:
        base_args["azure_ad_token_provider"] = get_bearer_token_provider(
            DefaultAzureCredential(), azure_cognitive_services_audience
        )

    def _base_completion(
        **kwargs: Any,
    ) -> LLMCompletionResponse | Iterator[LLMCompletionChunk]:
        # Strip graphrag_llm-specific params before handing off to litellm.
        kwargs.pop("metrics", None)
        mock_response: str | None = kwargs.pop("mock_response", None)
        json_object: bool | None = kwargs.pop("response_format_json_object", None)
        new_args: dict[str, Any] = {**base_args, **kwargs}
        # Mock responses are honored only when enabled in the model config.
        if model_config.mock_responses and mock_response is not None:
            new_args["mock_response"] = mock_response
        # Caller asked for JSON mode; don't clobber an explicit response_format.
        if json_object and "response_format" not in new_args:
            new_args["response_format"] = {"type": "json_object"}
        response = litellm.completion(
            **new_args,
        )
        if isinstance(response, ModelResponse):
            return LLMCompletionResponse(**response.model_dump())

        # Streaming case: lazily wrap litellm's chunk stream in our chunk type.
        def _run_iterator() -> Iterator[LLMCompletionChunk]:
            for chunk in response:
                yield LLMCompletionChunk(**chunk.model_dump())

        return _run_iterator()

    async def _base_completion_async(
        **kwargs: Any,
    ) -> LLMCompletionResponse | AsyncIterator[LLMCompletionChunk]:
        # Strip graphrag_llm-specific params before handing off to litellm.
        kwargs.pop("metrics", None)
        mock_response: str | None = kwargs.pop("mock_response", None)
        json_object: bool | None = kwargs.pop("response_format_json_object", None)
        new_args: dict[str, Any] = {**base_args, **kwargs}
        # Mock responses are honored only when enabled in the model config.
        if model_config.mock_responses and mock_response is not None:
            new_args["mock_response"] = mock_response
        # Caller asked for JSON mode; don't clobber an explicit response_format.
        if json_object and "response_format" not in new_args:
            new_args["response_format"] = {"type": "json_object"}
        response = await litellm.acompletion(
            **new_args,
        )
        if isinstance(response, ModelResponse):
            return LLMCompletionResponse(**response.model_dump())

        # Streaming case: lazily wrap litellm's async chunk stream.
        async def _run_iterator() -> AsyncIterator[LLMCompletionChunk]:
            async for chunk in response:
                yield LLMCompletionChunk(**chunk.model_dump())  # type: ignore

        return _run_iterator()

    return (_base_completion, _base_completion_async)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py",
"license": "MIT License",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Mock LLMCompletion."""
from typing import TYPE_CHECKING, Any, Unpack
import litellm
from graphrag_llm.completion.completion import LLMCompletion
from graphrag_llm.utils import (
create_completion_response,
structure_completion_response,
)
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsStore
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
LLMCompletionArgs,
LLMCompletionChunk,
LLMCompletionResponse,
ResponseFormat,
)
# Suppress litellm's extra debug info output to keep logs quiet.
litellm.suppress_debug_info = True
class MockLLMCompletion(LLMCompletion):
    """LLMCompletion that replays canned responses instead of calling a provider.

    Responses are served round-robin from ModelConfig.mock_responses,
    which makes this implementation useful for tests.
    """

    # Store exposed via the metrics_store property.
    _metrics_store: "MetricsStore"
    # Tokenizer exposed via the tokenizer property.
    _tokenizer: "Tokenizer"
    # Canned responses, validated to be a non-empty list of strings.
    _mock_responses: list[str]
    # Index of the next canned response to serve (advances per call).
    _mock_index: int = 0

    def __init__(
        self,
        *,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        **kwargs: Any,
    ) -> None:
        """Initialize MockLLMCompletion.

        Args
        ----
        model_config: ModelConfig
            The configuration for the model. mock_responses must be a
            non-empty list of strings.
        tokenizer: Tokenizer
            The tokenizer to use.
        metrics_store: MetricsStore
            The metrics store to use.
        **kwargs: Any
            Additional keyword arguments (accepted for factory compatibility,
            unused here).

        Raises
        ------
        ValueError
            If model_config.mock_responses is empty or contains non-strings.
        """
        self._tokenizer = tokenizer
        self._metrics_store = metrics_store
        mock_responses = model_config.mock_responses
        if not isinstance(mock_responses, list) or len(mock_responses) == 0:
            msg = "ModelConfig.mock_responses must be a non-empty list."
            raise ValueError(msg)
        if not all(isinstance(resp, str) for resp in mock_responses):
            msg = "Each item in ModelConfig.mock_responses must be a string."
            raise ValueError(msg)
        self._mock_responses = mock_responses  # type: ignore

    def completion(
        self,
        /,
        **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
    ) -> "LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk]":
        """Return the next canned response (round-robin); streaming unsupported."""
        response_format = kwargs.pop("response_format", None)
        is_streaming = kwargs.get("stream", False)
        if is_streaming:
            msg = "MockLLMCompletion does not support streaming completions."
            raise ValueError(msg)
        # Modulo keeps the index valid even after many calls.
        response = create_completion_response(
            self._mock_responses[self._mock_index % len(self._mock_responses)]
        )
        self._mock_index += 1
        if response_format is not None:
            structured_response = structure_completion_response(
                response.content, response_format
            )
            response.formatted_response = structured_response
        return response

    async def completion_async(
        self,
        /,
        **kwargs: Unpack["LLMCompletionArgs[ResponseFormat]"],
    ) -> "LLMCompletionResponse[ResponseFormat] | AsyncIterator[LLMCompletionChunk]":
        """Async wrapper; delegates to the synchronous completion method."""
        return self.completion(**kwargs)  # type: ignore

    @property
    def metrics_store(self) -> "MetricsStore":
        """Return the MetricsStore for this mock model."""
        return self._metrics_store

    @property
    def tokenizer(self) -> "Tokenizer":
        """Return the Tokenizer for this mock model."""
        return self._tokenizer
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/metrics_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics configuration."""
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.types import (
MetricsProcessorType,
MetricsStoreType,
MetricsWriterType,
)
class MetricsConfig(BaseModel):
    """Settings for how LLM usage metrics are processed, stored, and written."""

    model_config = ConfigDict(extra="allow")
    """Allow extra fields to support custom metrics implementations."""

    # Field names, defaults, and descriptions are part of the serialized
    # configuration contract and must not change.
    type: str = Field(
        default=MetricsProcessorType.Default,
        description="MetricsProcessor implementation to use.",
    )
    store: str = Field(
        default=MetricsStoreType.Memory,
        description="MetricsStore implementation to use. [memory] (default: memory).",
    )
    writer: str | None = Field(
        default=MetricsWriterType.Log,
        description="MetricsWriter implementation to use. [log, file] (default: log).",
    )
    log_level: int | None = Field(
        default=None,
        description="Log level to use when using the 'Log' metrics writer. (default: INFO)",
    )
    base_dir: str | None = Field(
        default=None,
        description="Base directory for file-based metrics writer. (default: ./metrics)",
    )

    def _validate_file_metrics_writer_config(self) -> None:
        """Reject a blank base_dir when the file writer is selected."""
        base_dir = self.base_dir
        if base_dir is not None and base_dir.strip() == "":
            raise ValueError(
                "base_dir must be specified for file-based metrics writer."
            )

    @model_validator(mode="after")
    def _validate_model(self):
        """Run writer-specific validation after field parsing."""
        if self.writer != MetricsWriterType.File:
            return self
        self._validate_file_metrics_writer_config()
        return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/metrics_config.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/model_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Language model configuration."""
import logging
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.metrics_config import MetricsConfig
from graphrag_llm.config.rate_limit_config import RateLimitConfig
from graphrag_llm.config.retry_config import RetryConfig
from graphrag_llm.config.types import AuthMethod, LLMProviderType
logger = logging.getLogger(__name__)
class ModelConfig(BaseModel):
    """Configuration for a language model.

    Extra fields are allowed so that custom LLM provider implementations
    can carry their own settings through this model.
    """

    model_config = ConfigDict(extra="allow")
    """Allow extra fields to support custom LLM provider implementations."""

    type: str = Field(
        default=LLMProviderType.LiteLLM,
        description="The type of LLM provider to use. (default: litellm)",
    )
    model_provider: str = Field(
        description="The provider of the model, e.g., 'openai', 'azure', etc.",
    )
    model: str = Field(
        description="The specific model to use, e.g., 'gpt-4o', 'gpt-3.5-turbo', etc.",
    )
    call_args: dict[str, Any] = Field(
        default_factory=dict,
        description="Base keyword arguments to pass to the model provider's API.",
    )
    api_base: str | None = Field(
        default=None,
        description="The base URL for the API, required for some providers like Azure.",
    )
    api_version: str | None = Field(
        default=None,
        description="The version of the API to use.",
    )
    api_key: str | None = Field(
        default=None,
        description="API key for authentication with the model provider.",
    )
    auth_method: AuthMethod = Field(
        default=AuthMethod.ApiKey,
        description="The authentication method to use. (default: api_key)",
    )
    azure_deployment_name: str | None = Field(
        default=None,
        description="The deployment name for Azure models.",
    )
    retry: RetryConfig | None = Field(
        default=None,
        description="Configuration for the retry strategy.",
    )
    rate_limit: RateLimitConfig | None = Field(
        default=None,
        description="Configuration for the rate limit behavior.",
    )
    metrics: MetricsConfig | None = Field(
        default_factory=MetricsConfig,
        description="Specify and configure the metric services.",
    )
    mock_responses: list[str] | list[float] = Field(
        default_factory=list,
        description="List of mock responses for testing.",
    )

    def _validate_lite_llm_config(self) -> None:
        """Validate LiteLLM specific configuration.

        Raises ValueError when azure-specific settings are missing/misused,
        or when the configured auth method and api_key are inconsistent.
        """
        if self.model_provider == "azure" and not self.api_base:
            msg = "api_base must be specified with the 'azure' model provider."
            raise ValueError(msg)
        if self.model_provider != "azure" and self.azure_deployment_name is not None:
            msg = "azure_deployment_name should not be specified for non-Azure model providers."
            raise ValueError(msg)
        # Managed identity and api_key auth are mutually exclusive.
        if self.auth_method == AuthMethod.AzureManagedIdentity:
            if self.api_key is not None:
                msg = "api_key should not be set when using Azure Managed Identity."
                raise ValueError(msg)
        elif not self.api_key:
            msg = "api_key must be set when auth_method=api_key."
            raise ValueError(msg)

    @model_validator(mode="after")
    def _validate_model(self):
        """Validate model configuration after initialization.

        Non-LiteLLM provider types are not validated here; custom providers
        are expected to validate their own extra fields.
        """
        if self.type == LLMProviderType.LiteLLM:
            self._validate_lite_llm_config()
        return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/model_config.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/rate_limit_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""RateLimit configuration."""
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.types import RateLimitType
class RateLimitConfig(BaseModel):
    """Settings governing request/token rate limiting for model calls."""

    model_config = ConfigDict(extra="allow")
    """Allow extra fields to support custom RateLimit implementations."""

    # Field names, defaults, and descriptions are part of the serialized
    # configuration contract and must not change.
    type: str = Field(
        default=RateLimitType.SlidingWindow,
        description="The type of rate limit strategy to use. [sliding_window] (default: sliding_window).",
    )
    period_in_seconds: int | None = Field(
        default=None,
        description="The period in seconds for the rate limit window. (default: 60).",
    )
    requests_per_period: int | None = Field(
        default=None,
        description="The maximum number of requests allowed per period. (default: None, no limit).",
    )
    tokens_per_period: int | None = Field(
        default=None,
        description="The maximum number of tokens allowed per period. (default: None, no limit).",
    )

    def _validate_sliding_window_config(self) -> None:
        """Check window size and limits; at least one limit must be set."""
        period = self.period_in_seconds
        if period is not None and period <= 0:
            raise ValueError(
                "period_in_seconds must be a positive integer for Sliding Window rate limit."
            )
        # At least one throttle dimension (requests or tokens) is required.
        if not (self.requests_per_period or self.tokens_per_period):
            raise ValueError(
                "At least one of requests_per_period or tokens_per_period must be specified for Sliding Window rate limit."
            )
        if self.requests_per_period is not None and self.requests_per_period <= 0:
            raise ValueError(
                "requests_per_period must be a positive integer for Sliding Window rate limit."
            )
        if self.tokens_per_period is not None and self.tokens_per_period <= 0:
            raise ValueError(
                "tokens_per_period must be a positive integer for Sliding Window rate limit."
            )

    @model_validator(mode="after")
    def _validate_model(self):
        """Run strategy-specific validation after field parsing."""
        if self.type == RateLimitType.SlidingWindow:
            self._validate_sliding_window_config()
        return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/rate_limit_config.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/retry_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Retry configuration."""
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.types import RetryType
class RetryConfig(BaseModel):
"""Configuration for retry behavior."""
model_config = ConfigDict(extra="allow")
"""Allow extra fields to support custom Retry implementations."""
type: str = Field(
default=RetryType.ExponentialBackoff,
description="The type of retry strategy to use. [exponential_backoff, immediate] (default: exponential_backoff).",
)
max_retries: int | None = Field(
default=None,
description="The maximum number of retry attempts.",
)
base_delay: float | None = Field(
default=None,
description="The base delay in seconds for exponential backoff.",
)
jitter: bool | None = Field(
default=None,
description="Whether to apply jitter to the delay intervals in exponential backoff.",
)
max_delay: float | None = Field(
default=None,
description="The maximum delay in seconds between retries.",
)
def _validate_exponential_backoff_config(self) -> None:
"""Validate Exponential Backoff retry configuration."""
if self.max_retries is not None and self.max_retries <= 1:
msg = "max_retries must be greater than 1 for Exponential Backoff retry."
raise ValueError(msg)
if self.base_delay is not None and self.base_delay <= 1.0:
msg = "base_delay must be greater than 1.0 for Exponential Backoff retry."
raise ValueError(msg)
if self.max_delay is not None and self.max_delay <= 1:
msg = "max_delay must be greater than 1 for Exponential Backoff retry."
raise ValueError(msg)
def _validate_immediate_config(self) -> None:
"""Validate Immediate retry configuration."""
if self.max_retries is not None and self.max_retries <= 1:
msg = "max_retries must be greater than 1 for Immediate retry."
raise ValueError(msg)
@model_validator(mode="after")
def _validate_model(self):
"""Validate the retry configuration based on its type."""
if self.type == RetryType.ExponentialBackoff:
self._validate_exponential_backoff_config()
elif self.type == RetryType.Immediate:
self._validate_immediate_config()
return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/retry_config.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/template_engine_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Template engine configuration."""
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.types import (
TemplateEngineType,
TemplateManagerType,
)
class TemplateEngineConfig(BaseModel):
    """Settings for the prompt template engine and its template manager."""

    model_config = ConfigDict(extra="allow")
    """Allow extra fields to support custom metrics implementations."""

    # Field names, defaults, and descriptions are part of the serialized
    # configuration contract and must not change.
    type: str = Field(
        default=TemplateEngineType.Jinja,
        description="The template engine to use. [jinja]",
    )
    template_manager: str = Field(
        default=TemplateManagerType.File,
        description="The template manager to use. [file, memory] (default: file)",
    )
    base_dir: str | None = Field(
        default=None,
        description="The base directory for file-based template managers.",
    )
    template_extension: str | None = Field(
        default=None,
        description="The file extension for locating templates in file-based template managers.",
    )
    encoding: str | None = Field(
        default=None,
        description="The file encoding for reading templates in file-based template managers.",
    )

    def _validate_file_template_manager_config(self) -> None:
        """Check file-manager settings and normalize the template extension."""
        if self.base_dir is not None and self.base_dir.strip() == "":
            raise ValueError(
                "base_dir must be specified for file-based template managers."
            )
        extension = self.template_extension
        if extension is not None:
            if extension.strip() == "":
                raise ValueError(
                    "template_extension cannot be an empty string for file-based template managers."
                )
            # Normalize e.g. "md" -> ".md" so lookups can rely on a dotted suffix.
            if not extension.startswith("."):
                self.template_extension = f".{extension}"

    @model_validator(mode="after")
    def _validate_model(self):
        """Run manager-specific validation after field parsing."""
        if self.template_manager == TemplateManagerType.File:
            self._validate_file_template_manager_config()
        return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/template_engine_config.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/tokenizer_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Tokenizer model configuration."""
from pydantic import BaseModel, ConfigDict, Field, model_validator
from graphrag_llm.config.types import TokenizerType
class TokenizerConfig(BaseModel):
"""Configuration for a tokenizer."""
model_config = ConfigDict(extra="allow")
"""Allow extra fields to support custom LLM provider implementations."""
type: str = Field(
default=TokenizerType.LiteLLM,
description="The type of tokenizer to use. [litellm] (default: litellm).",
)
model_id: str | None = Field(
default=None,
description="The identifier for the tokenizer model. Example: openai/gpt-4o. Used by the litellm tokenizer.",
)
encoding_name: str | None = Field(
default=None,
description="The encoding name for the tokenizer. Example: gpt-4o.",
)
def _validate_litellm_config(self) -> None:
"""Validate LiteLLM tokenizer configuration."""
if self.model_id is None or self.model_id.strip() == "":
msg = "model_id must be specified for LiteLLM tokenizer."
raise ValueError(msg)
def _validate_tiktoken_config(self) -> None:
"""Validate TikToken tokenizer configuration."""
if self.encoding_name is None or self.encoding_name.strip() == "":
msg = "encoding_name must be specified for TikToken tokenizer."
raise ValueError(msg)
@model_validator(mode="after")
def _validate_model(self):
"""Validate the tokenizer configuration based on its type."""
if self.type == TokenizerType.LiteLLM:
self._validate_litellm_config()
elif self.type == TokenizerType.Tiktoken:
self._validate_tiktoken_config()
return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/tokenizer_config.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/config/types.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""GraphRAG LLM configuration types."""
from enum import StrEnum
class LLMProviderType(StrEnum):
    """Enum for LLM provider types."""

    LiteLLM = "litellm"  # real provider backed by the litellm library
    MockLLM = "mock"  # canned-response provider for testing
class AuthMethod(StrEnum):
    """Enum for authentication methods."""

    ApiKey = "api_key"  # authenticate with a provider API key
    AzureManagedIdentity = "azure_managed_identity"  # Azure token-based auth
class MetricsProcessorType(StrEnum):
    """Enum for built-in MetricsProcessor types."""

    Default = "default"  # the only built-in metrics processor
class MetricsWriterType(StrEnum):
    """Enum for built-in MetricsWriter types."""

    Log = "log"  # write metrics via logging
    File = "file"  # write metrics under the configured base_dir
class MetricsStoreType(StrEnum):
    """Enum for built-in MetricsStore types."""

    Memory = "memory"  # in-memory metrics store
class RateLimitType(StrEnum):
    """Enum for built-in RateLimit types."""

    SlidingWindow = "sliding_window"  # windowed request/token throttling
class RetryType(StrEnum):
    """Enum for built-in Retry types."""

    ExponentialBackoff = "exponential_backoff"  # increasing delay between retries
    Immediate = "immediate"  # retry without delay
class TemplateEngineType(StrEnum):
    """Enum for built-in TemplateEngine types."""

    Jinja = "jinja"
class TemplateManagerType(StrEnum):
    """Enum for built-in TemplateManager types."""

    # NOTE(review): TemplateEngineConfig's docs mention a "memory" manager,
    # but no corresponding member exists here — confirm whether one is missing.
    File = "file"
class TokenizerType(StrEnum):
    """Enum for tokenizer types."""

    LiteLLM = "litellm"  # tokenizer resolved via a litellm model_id
    Tiktoken = "tiktoken"  # tokenizer resolved via an encoding_name
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/config/types.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/embedding/embedding.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Completion Abstract Base Class."""
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Unpack
from graphrag_llm.threading.embedding_thread_runner import embedding_thread_runner
if TYPE_CHECKING:
from collections.abc import Iterator
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.threading.embedding_thread_runner import (
ThreadedLLMEmbeddingFunction,
ThreadedLLMEmbeddingResponseHandler,
)
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import LLMEmbeddingArgs, LLMEmbeddingResponse
class LLMEmbedding(ABC):
    """Abstract base class for language model embeddings."""

    @abstractmethod
    def __init__(
        self,
        *,
        model_id: str,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        metrics_processor: "MetricsProcessor | None" = None,
        rate_limiter: "RateLimiter | None" = None,
        retrier: "Retry | None" = None,
        cache: "Cache | None" = None,
        cache_key_creator: "CacheKeyCreator",
        **kwargs: Any,
    ):
        """Initialize the LLMEmbedding.

        Args
        ----
        model_id: str
            The model ID, e.g., "openai/gpt-4o".
        model_config: ModelConfig
            The configuration for the language model.
        tokenizer: Tokenizer
            The tokenizer to use.
        metrics_store: MetricsStore
            The metrics store to use (required keyword).
        metrics_processor: MetricsProcessor | None (default: None)
            The metrics processor to use.
        rate_limiter: RateLimiter | None (default=None)
            The rate limiter to use.
        retrier: Retry | None (default=None)
            The retry strategy to use.
        cache: Cache | None (default=None)
            Optional cache for embeddings.
        cache_key_creator: CacheKeyCreator
            The cache key creator function (required keyword).
            (dict[str, Any]) -> str
        **kwargs: Any
            Additional keyword arguments.
        """
        raise NotImplementedError

    @abstractmethod
    def embedding(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Sync embedding method."""
        raise NotImplementedError

    @abstractmethod
    async def embedding_async(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Async embedding method."""
        raise NotImplementedError

    @contextmanager
    def embedding_thread_pool(
        self,
        *,
        response_handler: "ThreadedLLMEmbeddingResponseHandler",
        concurrency: int,
        queue_limit: int = 0,
    ) -> "Iterator[ThreadedLLMEmbeddingFunction]":
        """Run an embedding thread pool.

        Args
        ----
        response_handler: ThreadedLLMEmbeddingResponseHandler
            The callback function to handle embedding responses.
            (request_id, response|exception) -> Awaitable[None] | None
        concurrency: int
            The number of threads to spin up in a thread pool.
        queue_limit: int (default=0)
            The maximum number of items allowed in the input queue.
            0 means unlimited.
            Set this to a value to create backpressure on the caller.

        Yields
        ------
        ThreadedLLMEmbeddingFunction:
            A function that can be used to submit embedding requests to the thread pool.
            (input, request_id, **kwargs) -> None
            The thread pool will process the requests and invoke the provided callback
            with the responses.
            Same signature as LLMEmbeddingFunction but requires a `request_id` parameter
            to identify the request and does not return anything.
        """
        with embedding_thread_runner(
            embedding=self.embedding,
            response_handler=response_handler,
            concurrency=concurrency,
            queue_limit=queue_limit,
            metrics_store=self.metrics_store,
        ) as embedding:
            yield embedding

    def embedding_batch(
        self,
        embedding_requests: list["LLMEmbeddingArgs"],
        *,
        concurrency: int,
        queue_limit: int = 0,
    ) -> list["LLMEmbeddingResponse | Exception"]:
        """Process a batch of embedding requests using a thread pool.

        Args
        ----
        embedding_requests: list[LLMEmbeddingArgs]
            A list of embedding request arguments to process in parallel.
        concurrency: int
            The number of threads to spin up in a thread pool.
        queue_limit: int (default=0)
            The maximum number of items allowed in the input queue.
            0 means unlimited.
            Set this to a value to create backpressure on the caller.

        Returns
        -------
        list[LLMEmbeddingResponse | Exception]
            A list of embedding responses or exceptions, positionally aligned
            with embedding_requests.
        """
        # Preallocate so responses can be written by index as they arrive
        # (worker threads may complete out of order).
        results: list[LLMEmbeddingResponse | Exception] = [None] * len(
            embedding_requests
        )  # type: ignore

        def handle_response(
            request_id: str,
            response: "LLMEmbeddingResponse | Exception",
        ) -> None:
            # request_id is the stringified list index assigned below.
            index = int(request_id)
            results[index] = response

        with self.embedding_thread_pool(
            response_handler=handle_response,
            concurrency=concurrency,
            queue_limit=queue_limit,
        ) as embedding:
            for idx, embedding_request in enumerate(embedding_requests):
                embedding(request_id=str(idx), **embedding_request)
        return results

    @property
    @abstractmethod
    def metrics_store(self) -> "MetricsStore":
        """Metrics store."""
        raise NotImplementedError

    @property
    @abstractmethod
    def tokenizer(self) -> "Tokenizer":
        """Tokenizer."""
        raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/embedding/embedding.py",
"license": "MIT License",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/embedding/embedding_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Embedding factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from graphrag_common.factory import Factory
from graphrag_llm.cache import create_cache_key
from graphrag_llm.config.tokenizer_config import TokenizerConfig
from graphrag_llm.config.types import LLMProviderType
from graphrag_llm.metrics.noop_metrics_store import NoopMetricsStore
from graphrag_llm.tokenizer.tokenizer_factory import create_tokenizer
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_common.factory import ServiceScope
from graphrag_llm.config.model_config import ModelConfig
from graphrag_llm.embedding.embedding import LLMEmbedding
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.tokenizer import Tokenizer
class EmbeddingFactory(Factory["LLMEmbedding"]):
    """Factory for creating Embedding instances."""


# Module-level singleton factory shared by the register/create helpers below.
embedding_factory = EmbeddingFactory()
def register_embedding(
    embedding_type: str,
    embedding_initializer: Callable[..., "LLMEmbedding"],
    scope: "ServiceScope" = "transient",
) -> None:
    """Register a custom embedding implementation.

    Args
    ----
    embedding_type: str
        The embedding id to register.
    embedding_initializer: Callable[..., LLMEmbedding]
        The embedding initializer to register.
    scope: ServiceScope (default: "transient")
        The service scope for the embedding.
    """
    embedding_factory.register(embedding_type, embedding_initializer, scope)
def create_embedding(
model_config: "ModelConfig",
*,
cache: "Cache | None" = None,
cache_key_creator: "CacheKeyCreator | None" = None,
tokenizer: "Tokenizer | None" = None,
) -> "LLMEmbedding":
"""Create an Embedding instance based on the model configuration.
Args
----
model_config: ModelConfig
The configuration for the model.
cache: Cache | None (default: None)
An optional cache instance.
cache_key_creator: CacheKeyCreator | None (default: create_cache_key)
An optional cache key creator function.
tokenizer: Tokenizer | None (default: litellm)
An optional tokenizer instance.
Returns
-------
LLMEmbedding:
An instance of an Embedding subclass.
"""
cache_key_creator = cache_key_creator or create_cache_key
model_id = f"{model_config.model_provider}/{model_config.model}"
strategy = model_config.type
extra: dict[str, Any] = model_config.model_extra or {}
if strategy not in embedding_factory:
match strategy:
case LLMProviderType.LiteLLM:
from graphrag_llm.embedding.lite_llm_embedding import (
LiteLLMEmbedding,
)
register_embedding(
embedding_type=LLMProviderType.LiteLLM,
embedding_initializer=LiteLLMEmbedding,
scope="singleton",
)
case LLMProviderType.MockLLM:
from graphrag_llm.embedding.mock_llm_embedding import MockLLMEmbedding
register_embedding(
embedding_type=LLMProviderType.MockLLM,
embedding_initializer=MockLLMEmbedding,
)
case _:
msg = f"ModelConfig.type '{strategy}' is not registered in the CompletionFactory. Registered strategies: {', '.join(embedding_factory.keys())}"
raise ValueError(msg)
tokenizer = tokenizer or create_tokenizer(TokenizerConfig(model_id=model_id))
rate_limiter: RateLimiter | None = None
if model_config.rate_limit:
from graphrag_llm.rate_limit.rate_limit_factory import create_rate_limiter
rate_limiter = create_rate_limiter(rate_limit_config=model_config.rate_limit)
retrier: Retry | None = None
if model_config.retry:
from graphrag_llm.retry.retry_factory import create_retry
retrier = create_retry(retry_config=model_config.retry)
metrics_store: MetricsStore = NoopMetricsStore()
metrics_processor: MetricsProcessor | None = None
if model_config.metrics:
from graphrag_llm.metrics import (
create_metrics_processor,
create_metrics_store,
)
metrics_store = create_metrics_store(
config=model_config.metrics,
id=model_id,
)
metrics_processor = create_metrics_processor(model_config.metrics)
return embedding_factory.create(
strategy=strategy,
init_args={
**extra,
"model_id": model_id,
"model_config": model_config,
"tokenizer": tokenizer,
"metrics_store": metrics_store,
"metrics_processor": metrics_processor,
"rate_limiter": rate_limiter,
"retrier": retrier,
"cache": cache,
"cache_key_creator": cache_key_creator,
},
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/embedding/embedding_factory.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/embedding/lite_llm_embedding.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""LLMEmbedding based on litellm."""
from typing import TYPE_CHECKING, Any, Unpack
import litellm
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from graphrag_llm.config.types import AuthMethod
from graphrag_llm.embedding.embedding import LLMEmbedding
from graphrag_llm.middleware import with_middleware_pipeline
from graphrag_llm.types import LLMEmbeddingResponse
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor, MetricsStore
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
AsyncLLMEmbeddingFunction,
LLMEmbeddingArgs,
LLMEmbeddingFunction,
Metrics,
)
# Silence litellm's verbose debug banner for the whole process.
litellm.suppress_debug_info = True
class LiteLLMEmbedding(LLMEmbedding):
    """LLMEmbedding based on litellm.

    Wraps the raw litellm embedding calls in the shared middleware pipeline
    (caching, rate limiting, retries, metrics) configured from ModelConfig.
    """

    _model_config: "ModelConfig"
    _model_id: str
    # Metrics are only collected when a metrics processor was supplied.
    _track_metrics: bool = False
    _metrics_store: "MetricsStore"
    _metrics_processor: "MetricsProcessor | None"
    _cache: "Cache | None"
    _cache_key_creator: "CacheKeyCreator"
    _tokenizer: "Tokenizer"
    _rate_limiter: "RateLimiter | None"
    _retrier: "Retry | None"

    def __init__(
        self,
        *,
        model_id: str,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        metrics_processor: "MetricsProcessor | None" = None,
        rate_limiter: "RateLimiter | None" = None,
        retrier: "Retry | None" = None,
        cache: "Cache | None" = None,
        cache_key_creator: "CacheKeyCreator",
        azure_cognitive_services_audience: str = "https://cognitiveservices.azure.com/.default",
        drop_unsupported_params: bool = True,
        **kwargs: Any,
    ):
        """Initialize LiteLLMEmbedding.

        Args
        ----
        model_id: str
            The LiteLLM model ID, e.g., "openai/gpt-4o"
        model_config: ModelConfig
            The configuration for the model.
        tokenizer: Tokenizer
            The tokenizer to use.
        metrics_store: MetricsStore
            The metrics store to use.
        metrics_processor: MetricsProcessor | None (default: None)
            The metrics processor to use. Metrics tracking is enabled only
            when this is provided.
        rate_limiter: RateLimiter | None (default: None)
            The rate limiter to use.
        retrier: Retry | None (default: None)
            The retry strategy to use.
        cache: Cache | None (default: None)
            An optional cache instance.
        cache_key_creator: CacheKeyCreator
            Function used to derive cache keys for embedding requests.
        azure_cognitive_services_audience: str (default: "https://cognitiveservices.azure.com/.default")
            The audience for Azure Cognitive Services when using Managed Identity.
        drop_unsupported_params: bool (default: True)
            Whether to drop unsupported parameters for the model provider.
        """
        self._model_id = model_id
        self._model_config = model_config
        self._tokenizer = tokenizer
        self._metrics_store = metrics_store
        self._metrics_processor = metrics_processor
        self._track_metrics = metrics_processor is not None
        self._cache = cache
        self._cache_key_creator = cache_key_creator
        self._rate_limiter = rate_limiter
        self._retrier = retrier
        # Build the raw litellm callables first, then wrap them with the
        # middleware pipeline so cache/rate-limit/retry/metrics apply to
        # every request made through this instance.
        self._embedding, self._embedding_async = _create_base_embeddings(
            model_config=model_config,
            drop_unsupported_params=drop_unsupported_params,
            azure_cognitive_services_audience=azure_cognitive_services_audience,
        )
        self._embedding, self._embedding_async = with_middleware_pipeline(
            model_config=self._model_config,
            model_fn=self._embedding,
            async_model_fn=self._embedding_async,
            request_type="embedding",
            cache=self._cache,
            cache_key_creator=self._cache_key_creator,
            tokenizer=self._tokenizer,
            metrics_processor=self._metrics_processor,
            rate_limiter=self._rate_limiter,
            retrier=self._retrier,
        )

    def embedding(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Sync embedding method.

        Any caller-supplied `metrics` dict is routed through the middleware
        pipeline and merged into the metrics store afterwards; when metrics
        tracking is disabled, None is passed so collection is skipped.
        """
        request_metrics: Metrics | None = kwargs.pop("metrics", None) or {}
        if not self._track_metrics:
            request_metrics = None
        try:
            return self._embedding(metrics=request_metrics, **kwargs)
        finally:
            # Merge even when the call raised, so partial metrics are kept.
            if request_metrics:
                self._metrics_store.update_metrics(metrics=request_metrics)

    async def embedding_async(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Async embedding method.

        Mirrors `embedding` but awaits the async middleware pipeline.
        """
        request_metrics: Metrics | None = kwargs.pop("metrics", None) or {}
        if not self._track_metrics:
            request_metrics = None
        try:
            return await self._embedding_async(metrics=request_metrics, **kwargs)
        finally:
            # Merge even when the call raised, so partial metrics are kept.
            if request_metrics:
                self._metrics_store.update_metrics(metrics=request_metrics)

    @property
    def metrics_store(self) -> "MetricsStore":
        """Get metrics store."""
        return self._metrics_store

    @property
    def tokenizer(self) -> "Tokenizer":
        """Get tokenizer."""
        return self._tokenizer
def _create_base_embeddings(
    *,
    model_config: "ModelConfig",
    drop_unsupported_params: bool,
    azure_cognitive_services_audience: str,
) -> tuple["LLMEmbeddingFunction", "AsyncLLMEmbeddingFunction"]:
    """Build the raw sync/async litellm embedding callables for a model."""
    provider = model_config.model_provider
    deployment = model_config.azure_deployment_name or model_config.model
    shared_args: dict[str, Any] = {
        "drop_params": drop_unsupported_params,
        "model": f"{provider}/{deployment}",
        "api_key": model_config.api_key,
        "api_base": model_config.api_base,
        "api_version": model_config.api_version,
        **model_config.call_args,
    }
    if model_config.auth_method == AuthMethod.AzureManagedIdentity:
        # Managed identity: supply AAD bearer tokens instead of an API key.
        shared_args["azure_ad_token_provider"] = get_bearer_token_provider(
            DefaultAzureCredential(), azure_cognitive_services_audience
        )

    def _sync_call(**call_args: Any) -> LLMEmbeddingResponse:
        call_args.pop("metrics", None)  # middleware-only arg; not a litellm param
        response = litellm.embedding(**{**shared_args, **call_args})
        return LLMEmbeddingResponse(**response.model_dump())

    async def _async_call(**call_args: Any) -> LLMEmbeddingResponse:
        call_args.pop("metrics", None)  # middleware-only arg; not a litellm param
        response = await litellm.aembedding(**{**shared_args, **call_args})
        return LLMEmbeddingResponse(**response.model_dump())

    return _sync_call, _async_call
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/embedding/lite_llm_embedding.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/embedding/mock_llm_embedding.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""MockLLMEmbedding."""
from typing import TYPE_CHECKING, Any, Unpack
import litellm
from graphrag_llm.embedding.embedding import LLMEmbedding
from graphrag_llm.utils import create_embedding_response
if TYPE_CHECKING:
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsStore
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
LLMEmbeddingArgs,
LLMEmbeddingResponse,
)
# Silence litellm's verbose debug banner for the whole process.
litellm.suppress_debug_info = True
class MockLLMEmbedding(LLMEmbedding):
    """Mock embedding model that replays configured float responses (for tests)."""

    _metrics_store: "MetricsStore"
    _tokenizer: "Tokenizer"
    _mock_responses: list[float]
    # NOTE(review): advanced on every call but never read in this class —
    # looks like leftover state; confirm before removing.
    _mock_index: int = 0

    def __init__(
        self,
        *,
        model_config: "ModelConfig",
        tokenizer: "Tokenizer",
        metrics_store: "MetricsStore",
        **kwargs: Any,
    ):
        """Validate and store the configured mock embedding values."""
        self._tokenizer = tokenizer
        self._metrics_store = metrics_store
        configured = model_config.mock_responses
        if not isinstance(configured, list) or not configured:
            msg = "ModelConfig.mock_responses must be a non-empty list of embedding responses."
            raise ValueError(msg)
        if any(not isinstance(value, float) for value in configured):
            msg = "Each item in ModelConfig.mock_responses must be a float."
            raise ValueError(msg)
        self._mock_responses = configured  # type: ignore

    def embedding(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Return a canned embedding response sized to the input batch."""
        batch = kwargs.get("input")
        result = create_embedding_response(
            self._mock_responses, batch_size=len(batch)
        )
        self._mock_index += 1
        return result

    async def embedding_async(
        self, /, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> "LLMEmbeddingResponse":
        """Async variant; delegates to the sync implementation."""
        return self.embedding(**kwargs)

    @property
    def metrics_store(self) -> "MetricsStore":
        """Get metrics store."""
        return self._metrics_store

    @property
    def tokenizer(self) -> "Tokenizer":
        """Get tokenizer."""
        return self._tokenizer
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/embedding/mock_llm_embedding.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/default_metrics_processor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Default Metrics Processor."""
from typing import TYPE_CHECKING, Any
from graphrag_llm.metrics.metrics_processor import MetricsProcessor
from graphrag_llm.model_cost_registry import model_cost_registry
from graphrag_llm.types import LLMCompletionResponse, LLMEmbeddingResponse
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from graphrag_llm.config import ModelConfig
from graphrag_llm.types import (
LLMCompletionChunk,
Metrics,
)
class DefaultMetricsProcessor(MetricsProcessor):
    """Default metrics processor.

    Records token counts from completion/embedding responses and, when the
    model's per-token prices are known to the model cost registry, derives
    input/output/total cost metrics.
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize DefaultMetricsProcessor."""

    def process_metrics(
        self,
        *,
        model_config: "ModelConfig",
        metrics: "Metrics",
        input_args: dict[str, Any],
        response: "LLMCompletionResponse \
            | Iterator[LLMCompletionChunk] \
            | AsyncIterator[LLMCompletionChunk] \
            | LLMEmbeddingResponse",
    ) -> None:
        """Process metrics, updating the `metrics` dict in place.

        Streaming responses (iterators) are ignored: only concrete
        LLMCompletionResponse / LLMEmbeddingResponse instances are handled.
        """
        self._process_metrics_common(
            model_config=model_config,
            metrics=metrics,
            input_args=input_args,
            response=response,
        )

    def _process_metrics_common(
        self,
        *,
        model_config: "ModelConfig",
        metrics: "Metrics",
        input_args: dict[str, Any],
        response: "LLMCompletionResponse \
            | Iterator[LLMCompletionChunk] \
            | AsyncIterator[LLMCompletionChunk] \
            | LLMEmbeddingResponse",
    ) -> None:
        # Dispatch on the concrete response type; iterators fall through
        # untouched because token usage is not available until consumed.
        if isinstance(response, LLMCompletionResponse):
            self._process_lm_chat_completion(
                model_config=model_config,
                metrics=metrics,
                input_args=input_args,
                response=response,
            )
        elif isinstance(response, LLMEmbeddingResponse):
            self._process_lm_embedding_response(
                model_config=model_config,
                metrics=metrics,
                input_args=input_args,
                response=response,
            )

    def _process_lm_chat_completion(
        self,
        model_config: "ModelConfig",
        metrics: "Metrics",
        input_args: dict[str, Any],
        response: "LLMCompletionResponse",
    ) -> None:
        """Record token-usage and (when priced) cost metrics for a chat completion."""
        prompt_tokens = response.usage.prompt_tokens if response.usage else 0
        completion_tokens = response.usage.completion_tokens if response.usage else 0
        total_tokens = prompt_tokens + completion_tokens
        if total_tokens > 0:
            metrics["responses_with_tokens"] = 1
            metrics["prompt_tokens"] = prompt_tokens
            metrics["completion_tokens"] = completion_tokens
            metrics["total_tokens"] = total_tokens
            model_id = f"{model_config.model_provider}/{model_config.model}"
            model_costs = model_cost_registry.get_model_costs(model_id)
            if not model_costs:
                # No pricing known for this model: token metrics only.
                return
            input_cost = prompt_tokens * model_costs["input_cost_per_token"]
            output_cost = completion_tokens * model_costs["output_cost_per_token"]
            total_cost = input_cost + output_cost
            metrics["responses_with_cost"] = 1
            metrics["input_cost"] = input_cost
            metrics["output_cost"] = output_cost
            metrics["total_cost"] = total_cost

    def _process_lm_embedding_response(
        self,
        model_config: "ModelConfig",
        metrics: "Metrics",
        input_args: dict[str, Any],
        response: "LLMEmbeddingResponse",
    ) -> None:
        """Record token-usage and (when priced) cost metrics for an embedding response."""
        # Embeddings only consume prompt tokens; there is no completion side.
        prompt_tokens = response.usage.prompt_tokens if response.usage else 0
        if prompt_tokens > 0:
            metrics["responses_with_tokens"] = 1
            metrics["prompt_tokens"] = prompt_tokens
            metrics["total_tokens"] = prompt_tokens
            model_id = f"{model_config.model_provider}/{model_config.model}"
            model_costs = model_cost_registry.get_model_costs(model_id)
            if not model_costs:
                # No pricing known for this model: token metrics only.
                return
            input_cost = prompt_tokens * model_costs["input_cost_per_token"]
            metrics["responses_with_cost"] = 1
            metrics["input_cost"] = input_cost
            metrics["total_cost"] = input_cost
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/default_metrics_processor.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/file_metrics_writer.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""File metrics writer implementation."""
import json
from collections.abc import Callable
from datetime import datetime, timezone
from pathlib import Path
from typing import TYPE_CHECKING, Any
from graphrag_llm.metrics.metrics_writer import MetricsWriter
if TYPE_CHECKING:
from graphrag_llm.types import Metrics
class FileMetricsWriter(MetricsWriter):
    """Metrics writer that appends JSONL records to a timestamped file.

    NOTE: the unused `_log_method` class attribute (a copy-paste leftover
    from LogMetricsWriter) has been removed; this class never logs.
    """

    _base_dir: Path
    _file_path: Path

    def __init__(self, *, base_dir: str | None = None, **kwargs: Any) -> None:
        """Initialize FileMetricsWriter.

        Args
        ----
        base_dir: str | None (default: None)
            Directory to write the metrics file into. Defaults to the current
            working directory; created if it does not exist.
        """
        self._base_dir = Path(base_dir or Path.cwd()).resolve()
        # One file per writer instance, named by local time at creation.
        now = datetime.now(timezone.utc).astimezone().strftime("%Y%m%d_%H%M%S")
        self._file_path = self._base_dir / f"{now}.jsonl"
        self._base_dir.mkdir(parents=True, exist_ok=True)

    def write_metrics(self, *, id: str, metrics: "Metrics") -> None:
        """Append the given metrics as a single JSONL record."""
        record = json.dumps({"id": id, "metrics": metrics})
        with self._file_path.open("a", encoding="utf-8") as f:
            f.write(f"{record}\n")
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/file_metrics_writer.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/log_metrics_writer.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Log metrics writer implementation."""
import json
import logging
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from graphrag_llm.metrics.metrics_writer import MetricsWriter
if TYPE_CHECKING:
from graphrag_llm.types import Metrics
logger = logging.getLogger(__name__)

# Map standard logging levels to the corresponding bound logger methods so a
# configured numeric level can be translated directly into a log call.
_log_methods = {
    logging.DEBUG: logger.debug,
    logging.INFO: logger.info,
    logging.WARNING: logger.warning,
    logging.ERROR: logger.error,
    logging.CRITICAL: logger.critical,
}
class LogMetricsWriter(MetricsWriter):
    """Metrics writer that emits metrics through the module logger."""

    # Defaults to INFO; overridden per-instance when a known level is given.
    _log_method: Callable[..., None] = _log_methods[logging.INFO]

    def __init__(self, *, log_level: int | None = None, **kwargs: Any) -> None:
        """Initialize LogMetricsWriter with an optional logging level."""
        if log_level and log_level in _log_methods:
            self._log_method = _log_methods[log_level]

    def write_metrics(self, *, id: str, metrics: "Metrics") -> None:
        """Log the metrics as pretty-printed JSON."""
        payload = json.dumps(metrics, indent=2)
        self._log_method(f"Metrics for {id}: {payload}")
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/log_metrics_writer.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/memory_metrics_store.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Default metrics store."""
import atexit
import threading
from typing import TYPE_CHECKING, Any
from graphrag_llm.metrics.metrics_aggregator import metrics_aggregator
from graphrag_llm.metrics.metrics_store import MetricsStore
if TYPE_CHECKING:
from graphrag_llm.metrics.metrics_writer import MetricsWriter
from graphrag_llm.types import Metrics
# Preferred display order for reported metrics; keys not listed here are
# appended after these in insertion order (see MemoryMetricsStore._sort_metrics).
_default_sort_order: list[str] = [
    "attempted_request_count",
    "successful_response_count",
    "failed_response_count",
    "failure_rate",
    "requests_with_retries",
    "retries",
    "retry_rate",
    "compute_duration_seconds",
    "compute_duration_per_response_seconds",
    "runtime_duration_seconds",
    "cached_responses",
    "cache_hit_rate",
    "streaming_responses",
    "responses_with_tokens",
    "prompt_tokens",
    "completion_tokens",
    "total_tokens",
    "tokens_per_response",
    "responses_with_cost",
    "input_cost",
    "output_cost",
    "total_cost",
    "cost_per_response",
]
class MemoryMetricsStore(MetricsStore):
    """In-memory metrics store with optional write-out at interpreter exit.

    FIX: get_metrics/clear_metrics now take the same lock as update_metrics;
    previously they mutated/replaced `_metrics` unlocked while concurrent
    update_metrics calls could be writing it.
    """

    # Optional writer used to flush accumulated metrics at process exit.
    _metrics_writer: "MetricsWriter | None" = None
    _id: str
    _sort_order: list[str]
    _thread_lock: threading.Lock
    _metrics: "Metrics"

    def __init__(
        self,
        *,
        id: str,
        metrics_writer: "MetricsWriter | None" = None,
        sort_order: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize MemoryMetricsStore.

        Args
        ----
        id: str
            Identifier for this store (typically the model id).
        metrics_writer: MetricsWriter | None (default: None)
            Writer invoked with the final metrics at interpreter exit.
        sort_order: list[str] | None (default: None)
            Preferred key ordering for reported metrics.
        """
        self._id = id
        self._sort_order = sort_order or _default_sort_order
        self._thread_lock = threading.Lock()
        self._metrics = {}
        if metrics_writer:
            self._metrics_writer = metrics_writer
            # NOTE: atexit keeps a strong reference, so the store lives for
            # the remainder of the process once a writer is attached.
            atexit.register(self._on_exit_)

    def _on_exit_(self) -> None:
        # Flush the aggregated metrics through the configured writer.
        if self._metrics_writer:
            self._metrics_writer.write_metrics(id=self._id, metrics=self.get_metrics())

    @property
    def id(self) -> str:
        """Get the ID of the metrics store."""
        return self._id

    def update_metrics(self, *, metrics: "Metrics") -> None:
        """Merge the given metrics into the store, summing already-present keys."""
        with self._thread_lock:
            for name, value in metrics.items():
                if name in self._metrics:
                    self._metrics[name] += value
                else:
                    self._metrics[name] = value

    def _sort_metrics(self) -> "Metrics":
        """Return metrics ordered by the configured sort order, extras appended last."""
        sorted_metrics: Metrics = {}
        for key in self._sort_order:
            if key in self._metrics:
                sorted_metrics[key] = self._metrics[key]
        for key in self._metrics:
            if key not in sorted_metrics:
                sorted_metrics[key] = self._metrics[key]
        return sorted_metrics

    def get_metrics(self) -> "Metrics":
        """Get all metrics from the store, with derived metrics aggregated in.

        Holds the store lock while aggregating and sorting so concurrent
        update_metrics calls cannot interleave with the read.
        """
        with self._thread_lock:
            metrics_aggregator.aggregate(self._metrics)
            return self._sort_metrics()

    def clear_metrics(self) -> None:
        """Clear all metrics from the store.

        Returns
        -------
        None
        """
        with self._thread_lock:
            self._metrics = {}
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/memory_metrics_store.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_aggregator.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics aggregator module."""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, ClassVar
from typing_extensions import Self
if TYPE_CHECKING:
from graphrag_llm.types.types import Metrics
class MetricsAggregator:
"""Metrics Aggregator."""
_instance: ClassVar["Self | None"] = None
_aggregate_functions: dict[str, Callable[["Metrics"], None]]
def __new__(cls, *args: Any, **kwargs: Any) -> Self:
"""Create a new instance of MetricsAggregator if it does not exist."""
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
if not hasattr(self, "_initialized"):
self._initialized = True
self._aggregate_functions = {}
def register(self, name: str, func: Callable[["Metrics"], None]) -> None:
"""Register an aggregate function.
Args
----
name: str
The name of the aggregate function.
func: Callable[[Metrics], None]
The aggregate function to register. It should take a Metrics
dictionary as input and return None, modifying the Metrics in place.
"""
self._aggregate_functions[name] = func
def clear(self, name: str | None = None) -> None:
"""Clear registered aggregate functions.
Args
----
name: str | None
The name of the aggregate function to clear. If None, clears all
registered aggregate functions.
"""
if name:
self._aggregate_functions.pop(name, None)
else:
self._aggregate_functions.clear()
def aggregate(self, metrics: "Metrics") -> None:
"""Aggregate metrics using registered aggregate functions.
Args
----
metrics: Metrics
The metrics dictionary to aggregate.
"""
for func in self._aggregate_functions.values():
func(metrics)
def _failure_rate(metrics: "Metrics") -> None:
"""Calculate failure rate metric."""
attempted = metrics.get("attempted_request_count", 0)
failed = metrics.get("failed_response_count", 0)
if attempted > 0:
metrics["failure_rate"] = failed / attempted
else:
metrics["failure_rate"] = 0.0
def _retry_rate(metrics: "Metrics") -> None:
"""Calculate failure rate metric."""
attempted = metrics.get("attempted_request_count", 0)
retries = metrics.get("retries", 0)
if attempted > 0 and "retries" in metrics:
metrics["retry_rate"] = retries / (retries + attempted)
elif "retries" in metrics:
metrics["retry_rate"] = 0.0
def _tokens_per_response(metrics: "Metrics") -> None:
"""Calculate tokens per response metric."""
responses = metrics.get("responses_with_tokens", 0)
total_tokens = metrics.get("total_tokens", 0)
if responses > 0:
metrics["tokens_per_response"] = total_tokens / responses
else:
metrics["tokens_per_response"] = 0.0
def _cost_per_response(metrics: "Metrics") -> None:
"""Calculate cost per response metric."""
responses = metrics.get("responses_with_cost", 0)
total_cost = metrics.get("total_cost", 0)
if responses > 0:
metrics["cost_per_response"] = total_cost / responses
else:
metrics["cost_per_response"] = 0.0
def _compute_duration_per_response(metrics: "Metrics") -> None:
"""Calculate compute duration per response metric."""
responses = metrics.get("successful_response_count", 0)
streaming_responses = metrics.get("streaming_responses", 0)
responses = responses - streaming_responses
compute_duration = metrics.get("compute_duration_seconds", 0)
if responses > 0:
metrics["compute_duration_per_response_seconds"] = compute_duration / responses
else:
metrics["compute_duration_per_response_seconds"] = 0.0
def _cache_hit_rate(metrics: "Metrics") -> None:
"""Calculate cache hit rate metric."""
responses = metrics.get("successful_response_count", 0)
cached = metrics.get("cached_responses", 0)
if responses > 0:
metrics["cache_hit_rate"] = cached / responses
else:
metrics["cache_hit_rate"] = 0.0
# Process-wide aggregator instance with the built-in derived metrics attached.
metrics_aggregator = MetricsAggregator()
metrics_aggregator.register("failure_rate", _failure_rate)
metrics_aggregator.register("retry_rate", _retry_rate)
metrics_aggregator.register("tokens_per_response", _tokens_per_response)
metrics_aggregator.register("cost_per_response", _cost_per_response)
metrics_aggregator.register(
    "compute_duration_per_response", _compute_duration_per_response
)
metrics_aggregator.register("cache_hit_rate", _cache_hit_rate)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_aggregator.py",
"license": "MIT License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_processor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics processor abstract base class."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from graphrag_llm.config import ModelConfig
from graphrag_llm.types import (
LLMCompletionChunk,
LLMCompletionResponse,
LLMEmbeddingResponse,
Metrics,
)
class MetricsProcessor(ABC):
    """Abstract base class for metrics processors.

    Implementations inspect an LLM response and fold derived metrics into a
    caller-supplied Metrics dictionary in place.
    """

    @abstractmethod
    def __init__(self, **kwargs: Any):
        """Initialize MetricsProcessor."""
        raise NotImplementedError

    @abstractmethod
    def process_metrics(
        self,
        *,
        model_config: "ModelConfig",
        metrics: "Metrics",
        input_args: dict[str, Any],
        response: "LLMCompletionResponse | Iterator[LLMCompletionChunk] | AsyncIterator[LLMCompletionChunk] | LLMEmbeddingResponse",
    ) -> None:
        """Process metrics, updating the metrics dictionary in place.

        Args
        ----
        model_config: ModelConfig
            Configuration of the model that produced the response.
        metrics: Metrics
            The metrics to update.
        input_args: dict[str, Any]
            The input arguments passed to completion or embedding
            used to generate the response.
        response: LLMCompletionResponse | Iterator[LLMCompletionChunk] | AsyncIterator[LLMCompletionChunk] | LLMEmbeddingResponse
            Either a completion or embedding response from the LLM.

        Returns
        -------
        None
        """
        raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_processor.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_processor_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics processor factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.types import MetricsProcessorType
from graphrag_llm.metrics.metrics_processor import MetricsProcessor
if TYPE_CHECKING:
from graphrag_llm.config import MetricsConfig
class MetricsProcessorFactory(Factory[MetricsProcessor]):
    """Factory for creating MetricsProcessor instances."""


# Module-level singleton factory shared by the register/create helpers below.
metrics_processor_factory = MetricsProcessorFactory()
def register_metrics_processor(
    processor_type: str,
    processor_initializer: Callable[..., MetricsProcessor],
) -> None:
    """Register a custom metrics processor implementation.

    Args
    ----
    processor_type: str
        The metrics processor id to register.
    processor_initializer: Callable[..., MetricsProcessor]
        The metrics processor initializer to register.

    Returns
    -------
    None
    """
    metrics_processor_factory.register(processor_type, processor_initializer)
def create_metrics_processor(metrics_config: "MetricsConfig") -> MetricsProcessor:
"""Create a MetricsProcessor instance based on the configuration.
Args
----
metrics_config: MetricsConfig
The configuration for the metrics processor.
Returns
-------
MetricsProcessor:
An instance of a MetricsProcessor subclass.
"""
strategy = metrics_config.type
init_args = metrics_config.model_dump()
if strategy not in metrics_processor_factory:
match strategy:
case MetricsProcessorType.Default:
from graphrag_llm.metrics.default_metrics_processor import (
DefaultMetricsProcessor,
)
metrics_processor_factory.register(
strategy=MetricsProcessorType.Default,
initializer=DefaultMetricsProcessor,
scope="singleton",
)
case _:
msg = f"MetricsConfig.processor '{strategy}' is not registered in the MetricsProcessorFactory. Registered strategies: {', '.join(metrics_processor_factory.keys())}"
raise ValueError(msg)
return metrics_processor_factory.create(
strategy=strategy,
init_args={
**init_args,
"metrics_config": metrics_config,
},
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_processor_factory.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_store.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics Store."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.metrics.metrics_writer import MetricsWriter
from graphrag_llm.types import Metrics
class MetricsStore(ABC):
    """Abstract base class for metrics stores.

    A store accumulates metrics for a single identifier (typically one model)
    and can report or reset them on demand.
    """

    @abstractmethod
    def __init__(
        self,
        *,
        id: str,
        metrics_writer: "MetricsWriter | None" = None,
        **kwargs: Any,
    ) -> None:
        """Initialize MetricsStore.

        Args
        ----
        id: str
            The ID of the metrics store. One metric store is created per ID,
            so a good candidate is the model id (e.g., openai/gpt-4o) — that
            way one store tracks and aggregates the metrics per model.
        metrics_writer: MetricsWriter
            The metrics writer to use for writing metrics.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def id(self) -> str:
        """Get the ID of the metrics store."""
        raise NotImplementedError

    @abstractmethod
    def update_metrics(self, *, metrics: "Metrics") -> None:
        """Merge the given metrics into the store.

        Args
        ----
        metrics: Metrics
            The metrics to merge into the store.

        Returns
        -------
        None
        """
        raise NotImplementedError

    @abstractmethod
    def get_metrics(self) -> "Metrics":
        """Get all metrics from the store.

        Returns
        -------
        Metrics:
            All metrics stored in the store.
        """
        raise NotImplementedError

    @abstractmethod
    def clear_metrics(self) -> None:
        """Clear all metrics from the store.

        Returns
        -------
        None
        """
        raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_store.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_store_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics store factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from graphrag_common.factory import Factory
from graphrag_llm.config.types import MetricsStoreType
from graphrag_llm.metrics.metrics_store import MetricsStore
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
from graphrag_llm.config import MetricsConfig
from graphrag_llm.metrics.metrics_writer import MetricsWriter
class MetricsStoreFactory(Factory[MetricsStore]):
"""Factory for creating MetricsProcessor instances."""
metrics_store_factory = MetricsStoreFactory()
def register_metrics_store(
store_type: str,
store_initializer: Callable[..., MetricsStore],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom metrics store implementation.
Args
----
store_type: str
The metrics store id to register.
store_initializer: Callable[..., MetricsStore]
The metrics store initializer to register.
"""
metrics_store_factory.register(store_type, store_initializer, scope)
def create_metrics_store(config: "MetricsConfig", id: str) -> MetricsStore:
"""Create a MetricsStore instance based on the configuration.
Args
----
config: MetricsConfig
The configuration for the metrics store.
id: str
The identifier for the metrics store.
Example: openai/gpt-4o
Returns
-------
MetricsStore:
An instance of a MetricsStore subclass.
"""
strategy = config.store
metrics_writer: MetricsWriter | None = None
if config.writer:
from graphrag_llm.metrics.metrics_writer_factory import create_metrics_writer
metrics_writer = create_metrics_writer(config)
init_args: dict[str, Any] = config.model_dump()
if strategy not in metrics_store_factory:
match strategy:
case MetricsStoreType.Memory:
from graphrag_llm.metrics.memory_metrics_store import MemoryMetricsStore
register_metrics_store(
store_type=strategy,
store_initializer=MemoryMetricsStore,
scope="singleton",
)
case _:
msg = f"MetricsConfig.store '{strategy}' is not registered in the MetricsStoreFactory. Registered strategies: {', '.join(metrics_store_factory.keys())}"
raise ValueError(msg)
return metrics_store_factory.create(
strategy=strategy,
init_args={
**init_args,
"id": id,
"metrics_config": config,
"metrics_writer": metrics_writer,
},
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_store_factory.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_writer.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics writer abstract base class."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.types import Metrics
class MetricsWriter(ABC):
"""Abstract base class for metrics writers."""
@abstractmethod
def __init__(self, **kwargs: Any) -> None:
"""Initialize MetricsWriter."""
raise NotImplementedError
@abstractmethod
def write_metrics(self, *, id: str, metrics: "Metrics") -> None:
"""Write the given metrics.
Args
----
id : str
The identifier for the metrics.
metrics : Metrics
The metrics data to write.
"""
raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_writer.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/metrics_writer_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics writer factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.types import MetricsWriterType
from graphrag_llm.metrics.metrics_writer import MetricsWriter
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
from graphrag_llm.config import MetricsConfig
class MetricsWriterFactory(Factory[MetricsWriter]):
"""Metrics writer factory."""
metrics_writer_factory = MetricsWriterFactory()
def register_metrics_writer(
metrics_writer_type: str,
metrics_writer_initializer: Callable[..., MetricsWriter],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom metrics writer implementation.
Args
----
metrics_writer_type: str
The metrics writer id to register.
metrics_writer_initializer: Callable[..., MetricsWriter]
The metrics writer initializer to register.
scope: ServiceScope (default: "transient")
The service scope for the metrics writer.
"""
metrics_writer_factory.register(
metrics_writer_type, metrics_writer_initializer, scope
)
def create_metrics_writer(metrics_config: "MetricsConfig") -> MetricsWriter:
"""Create a MetricsWriter instance based on the configuration.
Args
----
metrics_config: MetricsConfig
The configuration for the metrics writer.
Returns
-------
MetricsWriter:
An instance of a MetricsWriter subclass.
"""
strategy = metrics_config.writer
if not strategy:
msg = "MetricsConfig.writer needs to be set to create a MetricsWriter."
raise ValueError(msg)
init_args = metrics_config.model_dump()
if strategy not in metrics_writer_factory:
match strategy:
case MetricsWriterType.Log:
from graphrag_llm.metrics.log_metrics_writer import LogMetricsWriter
metrics_writer_factory.register(
strategy=MetricsWriterType.Log,
initializer=LogMetricsWriter,
scope="singleton",
)
case MetricsWriterType.File:
from graphrag_llm.metrics.file_metrics_writer import FileMetricsWriter
metrics_writer_factory.register(
strategy=MetricsWriterType.File,
initializer=FileMetricsWriter,
scope="singleton",
)
case _:
msg = f"MetricsConfig.writer '{strategy}' is not registered in the MetricsWriterFactory. Registered strategies: {', '.join(metrics_writer_factory.keys())}"
raise ValueError(msg)
return metrics_writer_factory.create(strategy=strategy, init_args=init_args)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/metrics_writer_factory.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/metrics/noop_metrics_store.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Noop metrics store."""
from typing import Any
from graphrag_llm.metrics.metrics_store import MetricsStore
from graphrag_llm.types import Metrics
class NoopMetricsStore(MetricsStore):
"""Noop store for metrics."""
def __init__(
self,
**kwargs: Any,
) -> None:
"""Initialize NoopMetricsStore."""
@property
def id(self) -> str:
"""Get the ID of the metrics store."""
return ""
def update_metrics(self, *, metrics: Metrics) -> None:
"""Noop update."""
return
def get_metrics(self) -> Metrics:
"""Noop get all metrics from the store."""
return {}
def clear_metrics(self) -> None:
"""Clear all metrics from the store.
Returns
-------
None
"""
return
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/metrics/noop_metrics_store.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_cache.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Cache middleware."""
import asyncio
from typing import TYPE_CHECKING, Any, Literal
from graphrag_llm.types import LLMCompletionResponse, LLMEmbeddingResponse
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
Metrics,
)
def with_cache(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
request_type: Literal["chat", "embedding"],
cache: "Cache",
cache_key_creator: "CacheKeyCreator",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with cache middleware.
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
cache: Cache
The cache instance to use.
request_type: Literal["chat", "embedding"]
The type of request, either "chat" or "embedding".
cache_key_creator: CacheKeyCreator
The cache key creator to use.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions with caching.
"""
def _cache_middleware(
**kwargs: Any,
):
is_streaming = kwargs.get("stream") or False
is_mocked = kwargs.get("mock_response") or False
metrics: Metrics | None = kwargs.get("metrics")
if is_streaming or is_mocked:
# don't cache streaming or mocked responses
return sync_middleware(**kwargs)
cache_key = cache_key_creator(kwargs)
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
cached_response = event_loop.run_until_complete(cache.get(cache_key))
if (
cached_response is not None
and isinstance(cached_response, dict)
and "response" in cached_response
and cached_response["response"] is not None
and isinstance(cached_response["response"], dict)
):
try:
if (
metrics is not None
and "metrics" in cached_response
and cached_response["metrics"] is not None
and isinstance(cached_response["metrics"], dict)
):
metrics.update(cached_response["metrics"])
metrics["cached_responses"] = 1
if request_type == "chat":
return LLMCompletionResponse(**cached_response["response"])
return LLMEmbeddingResponse(**cached_response["response"])
except Exception: # noqa: BLE001
# Try to retrieve value from cache but if it fails, continue
# to make the request.
...
response = sync_middleware(**kwargs)
cache_value = {
"response": response.model_dump(), # type: ignore
"metrics": metrics if metrics is not None else {},
}
event_loop.run_until_complete(cache.set(cache_key, cache_value))
event_loop.close()
return response
async def _cache_middleware_async(
**kwargs: Any,
):
is_streaming = kwargs.get("stream") or False
is_mocked = kwargs.get("mock_response") or False
metrics: Metrics | None = kwargs.get("metrics")
if is_streaming or is_mocked:
# don't cache streaming or mocked responses
return await async_middleware(**kwargs)
cache_key = cache_key_creator(kwargs)
cached_response = await cache.get(cache_key)
if (
cached_response is not None
and isinstance(cached_response, dict)
and "response" in cached_response
and cached_response["response"] is not None
and isinstance(cached_response["response"], dict)
):
try:
if (
metrics is not None
and "metrics" in cached_response
and cached_response["metrics"] is not None
and isinstance(cached_response["metrics"], dict)
):
metrics.update(cached_response["metrics"])
metrics["cached_responses"] = 1
if request_type == "chat":
return LLMCompletionResponse(**cached_response["response"])
return LLMEmbeddingResponse(**cached_response["response"])
except Exception: # noqa: BLE001
# Try to retrieve value from cache but if it fails, continue
# to make the request.
...
response = await async_middleware(**kwargs)
cache_value = {
"response": response.model_dump(), # type: ignore
"metrics": metrics if metrics is not None else {},
}
await cache.set(cache_key, cache_value)
return response
return (_cache_middleware, _cache_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_cache.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_errors_for_testing.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Error testing middleware."""
import asyncio
import random
import time
from typing import TYPE_CHECKING, Any
import litellm.exceptions as exceptions
if TYPE_CHECKING:
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
)
def with_errors_for_testing(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
failure_rate: float = 0.0,
exception_type: str = "ValueError",
exception_args: list[Any] | None = None,
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with error testing middleware.
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
failure_rate: float
The failure rate for testing, between 0.0 and 1.0.
Defaults to 0.0 (no failures).
exception_type: str
The name of the exceptions class from litellm.exceptions to raise.
Defaults to "ValueError".
exception_args: list[Any] | None
The arguments to pass to the exception when raising it. Defaults to None,
which results in a default message.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with error testing middleware.
"""
def _errors_middleware(
**kwargs: Any,
):
if failure_rate > 0.0 and random.random() <= failure_rate: # noqa: S311
time.sleep(0.5)
exception_cls = exceptions.__dict__.get(exception_type, ValueError)
raise exception_cls(
*(exception_args or ["Simulated failure for debugging purposes."])
)
return sync_middleware(**kwargs)
async def _errors_middleware_async(
**kwargs: Any,
):
if failure_rate > 0.0 and random.random() <= failure_rate: # noqa: S311
await asyncio.sleep(0.5)
exception_cls = exceptions.__dict__.get(exception_type, ValueError)
raise exception_cls(
*(exception_args or ["Simulated failure for debugging purposes."])
)
return await async_middleware(**kwargs)
return (_errors_middleware, _errors_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_errors_for_testing.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_logging.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Request count middleware."""
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
Metrics,
)
logger = logging.getLogger(__name__)
def with_logging(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with logging middleware.
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with request count middleware.
"""
def _request_count_middleware(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
try:
return sync_middleware(**kwargs)
except Exception as e:
retries = metrics.get("retries", None) if metrics else None
retry_str = f" after {retries} retries" if retries else ""
logger.exception(
f"Request failed{retry_str} with exception={e}", # noqa: G004, TRY401
)
raise
async def _request_count_middleware_async(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
try:
return await async_middleware(**kwargs)
except Exception as e:
retries = metrics.get("retries", None) if metrics else None
retry_str = f" after {retries} retries" if retries else ""
logger.exception(
f"Async request failed{retry_str} with exception={e}", # noqa: G004, TRY401
)
raise
return (_request_count_middleware, _request_count_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_logging.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_metrics.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Metrics middleware to process metrics using a MetricsProcessor."""
import time
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
Metrics,
)
def with_metrics(
*,
model_config: "ModelConfig",
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
metrics_processor: "MetricsProcessor",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with metrics middleware.
Args
----
model_config: ModelConfig
The model configuration.
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
metrics_processor: MetricsProcessor
The metrics processor to use.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with metrics middleware.
"""
def _metrics_middleware(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
start_time = time.time()
response = sync_middleware(**kwargs)
end_time = time.time()
if metrics is not None:
metrics_processor.process_metrics(
model_config=model_config,
metrics=metrics,
input_args=kwargs,
response=response,
)
if kwargs.get("stream"):
metrics["compute_duration_seconds"] = 0
metrics["streaming_responses"] = 1
else:
metrics["compute_duration_seconds"] = end_time - start_time
metrics["streaming_responses"] = 0
return response
async def _metrics_middleware_async(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
start_time = time.time()
response = await async_middleware(**kwargs)
end_time = time.time()
if metrics is not None:
metrics_processor.process_metrics(
model_config=model_config,
metrics=metrics,
input_args=kwargs,
response=response,
)
if kwargs.get("stream"):
metrics["compute_duration_seconds"] = 0
metrics["streaming_responses"] = 1
else:
metrics["compute_duration_seconds"] = end_time - start_time
metrics["streaming_responses"] = 0
return response
return (_metrics_middleware, _metrics_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_metrics.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_middleware_pipeline.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Wraps model functions in middleware pipeline."""
from typing import TYPE_CHECKING, Literal
from graphrag_llm.middleware.with_cache import with_cache
from graphrag_llm.middleware.with_errors_for_testing import with_errors_for_testing
from graphrag_llm.middleware.with_logging import with_logging
from graphrag_llm.middleware.with_metrics import with_metrics
from graphrag_llm.middleware.with_rate_limiting import with_rate_limiting
from graphrag_llm.middleware.with_request_count import with_request_count
from graphrag_llm.middleware.with_retries import with_retries
if TYPE_CHECKING:
from graphrag_cache import Cache, CacheKeyCreator
from graphrag_llm.config import ModelConfig
from graphrag_llm.metrics import MetricsProcessor
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.retry import Retry
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
)
def with_middleware_pipeline(
*,
model_config: "ModelConfig",
model_fn: "LLMFunction",
async_model_fn: "AsyncLLMFunction",
metrics_processor: "MetricsProcessor | None",
cache: "Cache | None",
cache_key_creator: "CacheKeyCreator",
request_type: Literal["chat", "embedding"],
tokenizer: "Tokenizer",
rate_limiter: "RateLimiter | None",
retrier: "Retry | None",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions in middleware pipeline.
Full Pipeline Order:
- with_requests_counts: Counts incoming requests and
successes, and failures that bubble back up.
- with_cache: Returns cached responses when available
and caches new successful responses that bubble back up.
- with_retries: Retries failed requests.
Since the retry middleware occurs prior to rate limiting,
all retries get back in line for rate limiting. This is
to avoid threads that retry rapidly against an endpoint,
thus increasing the required cooldown.
- with_rate_limiting: Rate limits requests.
- with_metrics: Collects metrics about the request and responses.
- with_errors_for_testing: Raises errors for testing purposes.
Relies on ModelConfig.failure_rate_for_testing to determine
the failure rate. 'failure_rate_for_testing' is not an exposed
configuration option and is only intended for internal testing.
Args
----
model_config: ModelConfig
The model configuration.
model_fn: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_model_fn: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
metrics_processor: MetricsProcessor | None
The metrics processor to use. If None, metrics middleware is skipped.
cache: Cache | None
The cache instance to use. If None, caching middleware is skipped.
cache_key_creator: CacheKeyCreator
The cache key creator to use.
request_type: Literal["chat", "embedding"]
The type of request, either "chat" or "embedding".
The middleware pipeline is used for both completions and embeddings
and some of the steps need to know which type of request it is.
tokenizer: Tokenizer
The tokenizer to use for rate limiting.
rate_limiter: RateLimiter | None
The rate limiter to use. If None, rate limiting middleware is skipped.
retrier: Retry | None
The retrier to use. If None, retry middleware is skipped.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped in the middleware pipeline.
"""
extra_config = model_config.model_extra or {}
failure_rate_for_testing = extra_config.get("failure_rate_for_testing", 0.0)
if failure_rate_for_testing > 0.0:
model_fn, async_model_fn = with_errors_for_testing(
sync_middleware=model_fn,
async_middleware=async_model_fn,
failure_rate=failure_rate_for_testing,
exception_type=extra_config.get(
"failure_rate_for_testing_exception_type", "ValueError"
),
exception_args=extra_config.get("failure_rate_for_testing_exception_args"),
)
if metrics_processor:
model_fn, async_model_fn = with_metrics(
model_config=model_config,
sync_middleware=model_fn,
async_middleware=async_model_fn,
metrics_processor=metrics_processor,
)
if rate_limiter:
model_fn, async_model_fn = with_rate_limiting(
sync_middleware=model_fn,
async_middleware=async_model_fn,
tokenizer=tokenizer,
rate_limiter=rate_limiter,
)
if retrier:
model_fn, async_model_fn = with_retries(
sync_middleware=model_fn,
async_middleware=async_model_fn,
retrier=retrier,
)
if cache:
model_fn, async_model_fn = with_cache(
sync_middleware=model_fn,
async_middleware=async_model_fn,
request_type=request_type,
cache=cache,
cache_key_creator=cache_key_creator,
)
if metrics_processor:
model_fn, async_model_fn = with_request_count(
sync_middleware=model_fn,
async_middleware=async_model_fn,
)
model_fn, async_model_fn = with_logging(
sync_middleware=model_fn,
async_middleware=async_model_fn,
)
return (model_fn, async_model_fn)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_middleware_pipeline.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_rate_limiting.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Rate limit middleware."""
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.rate_limit import RateLimiter
from graphrag_llm.tokenizer import Tokenizer
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
)
def with_rate_limiting(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
rate_limiter: "RateLimiter",
tokenizer: "Tokenizer",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with rate limit middleware.
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
rate_limiter: RateLimiter
The rate limiter to use.
tokenizer: Tokenizer
The tokenizer to use for counting tokens.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with rate limit middleware.
"""
def _rate_limit_middleware(
**kwargs: Any,
):
token_count = int(
kwargs.get("max_tokens") or kwargs.get("max_completion_tokens") or 0
)
messages = kwargs.get("messages") # completion call
input: list[str] | None = kwargs.get("input") # embedding call
if messages:
token_count += tokenizer.num_prompt_tokens(messages=messages)
elif input:
token_count += sum(tokenizer.num_tokens(text) for text in input)
with rate_limiter.acquire(token_count):
return sync_middleware(**kwargs)
async def _rate_limit_middleware_async(
**kwargs: Any,
):
token_count = int(
kwargs.get("max_tokens") or kwargs.get("max_completion_tokens") or 0
)
messages = kwargs.get("messages") # completion call
input = kwargs.get("input") # embedding call
if messages:
token_count += tokenizer.num_prompt_tokens(messages=messages)
elif input:
token_count += sum(tokenizer.num_tokens(text) for text in input)
with rate_limiter.acquire(token_count):
return await async_middleware(**kwargs)
return (_rate_limit_middleware, _rate_limit_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_rate_limiting.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_request_count.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Request count middleware."""
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
Metrics,
)
def with_request_count(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with request count middleware.
This is the first step in the middleware pipeline.
It counts how many requests were made, how many succeeded, and how many failed
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with request count middleware.
"""
def _request_count_middleware(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
if metrics is not None:
metrics["attempted_request_count"] = 1
metrics["successful_response_count"] = 0
metrics["failed_response_count"] = 0
try:
result = sync_middleware(**kwargs)
if metrics is not None:
metrics["successful_response_count"] = 1
return result # noqa: TRY300
except Exception:
if metrics is not None:
metrics["failed_response_count"] = 1
raise
async def _request_count_middleware_async(
**kwargs: Any,
):
metrics: Metrics | None = kwargs.get("metrics")
if metrics is not None:
metrics["attempted_request_count"] = 1
metrics["successful_response_count"] = 0
metrics["failed_response_count"] = 0
try:
result = await async_middleware(**kwargs)
if metrics is not None:
metrics["successful_response_count"] = 1
return result # noqa: TRY300
except Exception:
if metrics is not None:
metrics["failed_response_count"] = 1
raise
return (_request_count_middleware, _request_count_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_request_count.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/middleware/with_retries.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Retry middleware."""
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.retry import Retry
from graphrag_llm.types import (
AsyncLLMFunction,
LLMFunction,
)
def with_retries(
*,
sync_middleware: "LLMFunction",
async_middleware: "AsyncLLMFunction",
retrier: "Retry",
) -> tuple[
"LLMFunction",
"AsyncLLMFunction",
]:
"""Wrap model functions with retry middleware.
Args
----
sync_middleware: LLMFunction
The synchronous model function to wrap.
Either a completion function or an embedding function.
async_middleware: AsyncLLMFunction
The asynchronous model function to wrap.
Either a completion function or an embedding function.
retrier: Retry
The retrier instance to use for retrying failed requests.
Returns
-------
tuple[LLMFunction, AsyncLLMFunction]
The synchronous and asynchronous model functions wrapped with retry middleware.
"""
def _retry_middleware(
**kwargs: Any,
):
return retrier.retry(
func=sync_middleware,
input_args=kwargs,
)
async def _retry_middleware_async(
**kwargs: Any,
):
return await retrier.retry_async(
func=async_middleware,
input_args=kwargs,
)
return (_retry_middleware, _retry_middleware_async) # type: ignore
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/middleware/with_retries.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/model_cost_registry/model_cost_registry.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Model cost registry module."""
from typing import Any, ClassVar, TypedDict
from litellm import model_cost
from typing_extensions import Self
class ModelCosts(TypedDict):
"""Model costs."""
input_cost_per_token: float
output_cost_per_token: float
class ModelCostRegistry:
"""Registry for model costs."""
_instance: ClassVar["Self | None"] = None
_model_costs: dict[str, ModelCosts]
def __new__(cls, *args: Any, **kwargs: Any) -> Self:
"""Create a new instance of ModelCostRegistry if it does not exist."""
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
if not hasattr(self, "_initialized"):
self._model_costs = model_cost
self._initialized = True
def register_model_costs(self, model: str, costs: ModelCosts) -> None:
"""Register the cost per unit for a given model.
Args
----
model: str
The model id, e.g., "openai/gpt-4o".
costs: ModelCosts
The costs associated with the model.
"""
self._model_costs[model] = costs
def get_model_costs(self, model: str) -> ModelCosts | None:
"""Retrieve the cost per unit for a given model.
Args
----
model: str
The model id, e.g., "openai/gpt-4o".
Returns
-------
ModelCosts | None
The costs associated with the model, or None if not found.
"""
return self._model_costs.get(model)
model_cost_registry = ModelCostRegistry()
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/model_cost_registry/model_cost_registry.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/rate_limit/rate_limit_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Rate limit factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config import RateLimitType
from graphrag_llm.rate_limit.rate_limiter import RateLimiter
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
from graphrag_llm.config import RateLimitConfig
class RateLimitFactory(Factory[RateLimiter]):
"""Factory to create RateLimiter instances."""
rate_limit_factory = RateLimitFactory()
def register_rate_limiter(
rate_limit_type: str,
rate_limiter_initializer: Callable[..., RateLimiter],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom RateLimiter implementation.
Args
----
rate_limit_type: str
The rate limit id to register.
rate_limiter_initializer: Callable[..., RateLimiter]
The rate limiter initializer to register.
scope: ServiceScope (default: "transient")
The service scope for the rate limiter instance.
"""
rate_limit_factory.register(
strategy=rate_limit_type,
initializer=rate_limiter_initializer,
scope=scope,
)
def create_rate_limiter(
rate_limit_config: "RateLimitConfig",
) -> RateLimiter:
"""Create a RateLimiter instance.
Args
----
rate_limit_config: RateLimitConfig
The configuration for the rate limit strategy.
Returns
-------
RateLimiter:
An instance of a RateLimiter subclass.
"""
strategy = rate_limit_config.type
init_args = rate_limit_config.model_dump()
if strategy not in rate_limit_factory:
match strategy:
case RateLimitType.SlidingWindow:
from graphrag_llm.rate_limit.sliding_window_rate_limiter import (
SlidingWindowRateLimiter,
)
register_rate_limiter(
rate_limit_type=RateLimitType.SlidingWindow,
rate_limiter_initializer=SlidingWindowRateLimiter,
)
case _:
msg = f"RateLimitConfig.type '{strategy}' is not registered in the RateLimitFactory. Registered strategies: {', '.join(rate_limit_factory.keys())}"
raise ValueError(msg)
return rate_limit_factory.create(strategy=strategy, init_args=init_args)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/rate_limit/rate_limit_factory.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/rate_limit/sliding_window_rate_limiter.py | # Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""LiteLLM Static Rate Limiter."""
import threading
import time
from collections import deque
from collections.abc import Iterator
from contextlib import contextmanager
from typing import Any
from graphrag_llm.rate_limit.rate_limiter import RateLimiter
class SlidingWindowRateLimiter(RateLimiter):
"""Sliding Window Rate Limiter implementation."""
_rpp: int | None = None
_tpp: int | None = None
_lock: threading.Lock
_rate_queue: deque[float]
_token_queue: deque[int]
_period_in_seconds: int
_last_time: float | None = None
_stagger: float = 0.0
def __init__(
self,
*,
period_in_seconds: int = 60,
requests_per_period: int | None = None,
tokens_per_period: int | None = None,
**kwargs: Any,
):
"""Initialize the Sliding Window Rate Limiter.
Args
----
period_in_seconds: int
The time period in seconds for rate limiting.
requests_per_period: int | None
The maximum number of requests allowed per time period. If None, request limiting is disabled.
tokens_per_period: int | None
The maximum number of tokens allowed per time period. If None, token limiting is disabled.
Raises
------
ValueError
If period_in_seconds is not a positive integer.
If requests_per_period or tokens_per_period are not positive integers.
"""
self._rpp = requests_per_period
self._tpp = tokens_per_period
self._lock = threading.Lock()
self._rate_queue: deque[float] = deque()
self._token_queue: deque[int] = deque()
self._period_in_seconds = period_in_seconds
self._last_time: float | None = None
if self._rpp is not None and self._rpp > 0:
self._stagger = self._period_in_seconds / self._rpp
@contextmanager
def acquire(self, token_count: int) -> Iterator[None]:
"""
Acquire Rate Limiter.
Args
----
token_count: The estimated number of tokens for the current request.
Yields
------
None: This context manager does not return any value.
"""
while True:
with self._lock:
current_time = time.time()
# Use two sliding windows to keep track of requests and tokens per period
# Drop old requests and tokens out of the sliding windows
while (
len(self._rate_queue) > 0
and self._rate_queue[0] < current_time - self._period_in_seconds
):
self._rate_queue.popleft()
self._token_queue.popleft()
# If sliding window still exceed request limit, wait again
# Waiting requires reacquiring the lock, allowing other threads
# to see if their request fits within the rate limiting windows
# Makes more sense for token limit than request limit
if (
self._rpp is not None
and self._rpp > 0
and len(self._rate_queue) >= self._rpp
):
continue
# Check if current token window exceeds token limit
# If it does, wait again
# This does not account for the tokens from the current request
# This is intentional, as we want to allow the current request
# to be processed if it is larger than the tpm but smaller than context window.
# tpm is a rate/soft limit and not the hard limit of context window limits.
if (
self._tpp is not None
and self._tpp > 0
and sum(self._token_queue) >= self._tpp
):
continue
# This check accounts for the current request token usage
# is within the token limits bound.
# If the current requests tokens exceeds the token limit,
# Then let it be processed.
if (
self._tpp is not None
and self._tpp > 0
and token_count <= self._tpp
and sum(self._token_queue) + token_count > self._tpp
):
continue
# If there was a previous call, check if we need to stagger
if (
self._stagger > 0
and (
self._last_time # is None if this is the first hit to the rate limiter
and current_time - self._last_time
< self._stagger # If more time has passed than the stagger time, we can proceed
)
):
time.sleep(self._stagger - (current_time - self._last_time))
current_time = time.time()
# Add the current request to the sliding window
self._rate_queue.append(current_time)
self._token_queue.append(token_count)
self._last_time = current_time
break
yield
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/rate_limit/sliding_window_rate_limiter.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/retry/exceptions_to_skip.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""List of exception names to skip for retries."""
_default_exceptions_to_skip = [
"BadRequestError",
"UnsupportedParamsError",
"ContextWindowExceededError",
"ContentPolicyViolationError",
"ImageFetchError",
"InvalidRequestError",
"AuthenticationError",
"PermissionDeniedError",
"NotFoundError",
"UnprocessableEntityError",
"APIConnectionError",
"APIError",
"ServiceUnavailableError",
"APIResponseValidationError",
"BudgetExceededError",
]
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/retry/exceptions_to_skip.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/retry/exponential_retry.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Exponential backoff retry implementation."""
import asyncio
import random
import time
from collections.abc import Awaitable, Callable
from typing import TYPE_CHECKING, Any
from graphrag_llm.retry.exceptions_to_skip import _default_exceptions_to_skip
from graphrag_llm.retry.retry import Retry
if TYPE_CHECKING:
from graphrag_llm.types import Metrics
class ExponentialRetry(Retry):
"""Exponential backoff retry implementation."""
_base_delay: float
_jitter: bool
_max_retries: int
_max_delay: float
_exceptions_to_skip: list[str]
def __init__(
self,
*,
max_retries: int = 7, # 2^7 = 128 second max delay with default settings
base_delay: float = 2.0,
jitter: bool = True,
max_delay: float | None = None,
exceptions_to_skip: list[str] | None = None,
**kwargs: dict,
) -> None:
"""Initialize ExponentialRetry.
Args
----
max_retries: int (default=7, 2^7 = 128 second max delay with default settings)
The maximum number of retries to attempt.
base_delay: float
The base delay multiplier for exponential backoff.
jitter: bool
Whether to apply jitter to the delay intervals.
max_delay: float | None
The maximum delay between retries. If None, there is no limit.
Raises
------
ValueError
If max_retries is less than or equal to 0.
If base_delay is less than or equal to 1.0.
"""
self._base_delay = base_delay
self._jitter = jitter
self._max_retries = max_retries
self._max_delay = max_delay or float("inf")
self._exceptions_to_skip = exceptions_to_skip or _default_exceptions_to_skip
def retry(self, *, func: Callable[..., Any], input_args: dict[str, Any]) -> Any:
"""Retry a synchronous function."""
retries: int = 0
delay = 1.0
metrics: Metrics | None = input_args.get("metrics")
while True:
try:
return func(**input_args)
except Exception as e:
if e.__class__.__name__ in self._exceptions_to_skip:
raise
if retries >= self._max_retries:
raise
retries += 1
delay *= self._base_delay
sleep_delay = min(
self._max_delay,
delay + (self._jitter * random.uniform(0, 1)), # noqa: S311
)
time.sleep(sleep_delay)
finally:
if metrics is not None:
metrics["retries"] = retries
metrics["requests_with_retries"] = 1 if retries > 0 else 0
async def retry_async(
self,
*,
func: Callable[..., Awaitable[Any]],
input_args: dict[str, Any],
) -> Any:
"""Retry an asynchronous function."""
retries: int = 0
delay = 1.0
metrics: Metrics | None = input_args.get("metrics")
while True:
try:
return await func(**input_args)
except Exception as e:
if e.__class__.__name__ in self._exceptions_to_skip:
raise
if retries >= self._max_retries:
raise
retries += 1
delay *= self._base_delay
sleep_delay = min(
self._max_delay,
delay + (self._jitter * random.uniform(0, 1)), # noqa: S311
)
await asyncio.sleep(sleep_delay)
finally:
if metrics is not None:
metrics["retries"] = retries
metrics["requests_with_retries"] = 1 if retries > 0 else 0
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/retry/exponential_retry.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/retry/immediate_retry.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Native (immediate) retry implementation."""
from collections.abc import Awaitable, Callable
from typing import TYPE_CHECKING, Any
from graphrag_llm.retry.exceptions_to_skip import _default_exceptions_to_skip
from graphrag_llm.retry.retry import Retry
if TYPE_CHECKING:
from graphrag_llm.types import Metrics
class ImmediateRetry(Retry):
"""Immediate retry implementation."""
_max_retries: int
_exceptions_to_skip: list[str]
def __init__(
self,
*,
max_retries: int = 7,
exceptions_to_skip: list[str] | None = None,
**kwargs: dict,
) -> None:
"""Initialize ImmediateRetry.
Args
----
max_retries: int (default=7)
The maximum number of retries to attempt.
Raises
------
ValueError
If max_retries is less than or equal to 0.
"""
self._max_retries = max_retries
self._exceptions_to_skip = exceptions_to_skip or _default_exceptions_to_skip
def retry(self, *, func: Callable[..., Any], input_args: dict[str, Any]) -> Any:
"""Retry a synchronous function."""
retries: int = 0
metrics: Metrics | None = input_args.get("metrics")
while True:
try:
return func(**input_args)
except Exception as e:
if e.__class__.__name__ in self._exceptions_to_skip:
raise
if retries >= self._max_retries:
raise
retries += 1
finally:
if metrics is not None:
metrics["retries"] = retries
metrics["requests_with_retries"] = 1 if retries > 0 else 0
async def retry_async(
self,
*,
func: Callable[..., Awaitable[Any]],
input_args: dict[str, Any],
) -> Any:
"""Retry an asynchronous function."""
retries: int = 0
metrics: Metrics | None = input_args.get("metrics")
while True:
try:
return await func(**input_args)
except Exception as e:
if e.__class__.__name__ in self._exceptions_to_skip:
raise
if retries >= self._max_retries:
raise
retries += 1
finally:
if metrics is not None:
metrics["retries"] = retries
metrics["requests_with_retries"] = 1 if retries > 0 else 0
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/retry/immediate_retry.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/retry/retry.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Retry abstract base class."""
from abc import ABC, abstractmethod
from collections.abc import Awaitable, Callable
from typing import Any
class Retry(ABC):
"""Retry Abstract Base Class."""
@abstractmethod
def __init__(self, /, **kwargs: Any):
"""Initialize Retry."""
raise NotImplementedError
@abstractmethod
def retry(self, *, func: Callable[..., Any], input_args: dict[str, Any]) -> Any:
"""Retry a synchronous function."""
raise NotImplementedError
@abstractmethod
async def retry_async(
self,
*,
func: Callable[..., Awaitable[Any]],
input_args: dict[str, Any],
) -> Any:
"""Retry an asynchronous function."""
raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/retry/retry.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/retry/retry_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Retry factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.types import RetryType
from graphrag_llm.retry.retry import Retry
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
from graphrag_llm.config.retry_config import RetryConfig
class RetryFactory(Factory[Retry]):
"""Factory to create Retry instances."""
retry_factory = RetryFactory()
def register_retry(
retry_type: str,
retry_initializer: Callable[..., Retry],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom Retry implementation.
Args
----
retry_type: str
The retry id to register.
retry_initializer: Callable[..., Retry]
The retry initializer to register.
"""
retry_factory.register(
strategy=retry_type,
initializer=retry_initializer,
scope=scope,
)
def create_retry(
retry_config: "RetryConfig",
) -> Retry:
"""Create a Retry instance.
Args
----
retry_config: RetryConfig
The configuration for the retry strategy.
Returns
-------
Retry:
An instance of a Retry subclass.
"""
strategy = retry_config.type
init_args = retry_config.model_dump()
if strategy not in retry_factory:
match strategy:
case RetryType.ExponentialBackoff:
from graphrag_llm.retry.exponential_retry import ExponentialRetry
retry_factory.register(
strategy=RetryType.ExponentialBackoff,
initializer=ExponentialRetry,
)
case RetryType.Immediate:
from graphrag_llm.retry.immediate_retry import ImmediateRetry
retry_factory.register(
strategy=RetryType.Immediate,
initializer=ImmediateRetry,
)
case _:
msg = f"RetryConfig.type '{strategy}' is not registered in the RetryFactory. Registered strategies: {', '.join(retry_factory.keys())}"
raise ValueError(msg)
return retry_factory.create(strategy=strategy, init_args=init_args)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/retry/retry_factory.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/file_template_manager.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""In-memory template manager implementation."""
from pathlib import Path
from typing import Any
from graphrag_llm.templating.template_manager import TemplateManager
class FileTemplateManager(TemplateManager):
"""Abstract base class for template managers."""
_encoding: str
_templates_extension: str
_templates_dir: Path
def __init__(
self,
base_dir: str = "templates",
template_extension: str = ".jinja",
encoding: str = "utf-8",
**kwargs: Any,
) -> None:
"""Initialize the template manager.
Args
----
base_dir: str (default="./templates")
The base directory where templates are stored.
template_extension: str (default=".jinja")
The file extension for template files.
encoding: str (default="utf-8")
The encoding used to read template files.
Raises
------
ValueError
If the base directory does not exist or is not a directory.
If the template_extension is an empty string.
"""
self._templates = {}
self._encoding = encoding
self._templates_extension = template_extension
self._templates_dir = Path(base_dir).resolve()
if not self._templates_dir.exists() or not self._templates_dir.is_dir():
msg = f"Templates directory '{base_dir}' does not exist or is not a directory."
raise ValueError(msg)
def get(self, template_name: str) -> str | None:
"""Retrieve a template by its name."""
template_file = (
self._templates_dir / f"{template_name}{self._templates_extension}"
)
if template_file.exists() and template_file.is_file():
return template_file.read_text(encoding=self._encoding)
return None
def register(self, template_name: str, template: str) -> None:
"""Register a new template."""
self._templates[template_name] = template
template_path = (
self._templates_dir / f"{template_name}{self._templates_extension}"
)
template_path.write_text(template, encoding=self._encoding)
def keys(self) -> list[str]:
"""List all registered template names."""
return list(self._templates.keys())
def __contains__(self, template_name: str) -> bool:
"""Check if a template is registered."""
return template_name in self._templates
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/file_template_manager.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/jinja_template_engine.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Jinja template engine."""
from typing import TYPE_CHECKING, Any
from jinja2 import StrictUndefined, Template, UndefinedError
from graphrag_llm.templating.template_engine import TemplateEngine
if TYPE_CHECKING:
from graphrag_llm.templating.template_manager import TemplateManager
class JinjaTemplateEngine(TemplateEngine):
"""Jinja template engine."""
_templates: dict[str, Template]
_template_manager: "TemplateManager"
def __init__(self, *, template_manager: "TemplateManager", **kwargs: Any) -> None:
"""Initialize the template engine.
Args
----
template_manager: TemplateManager
The template manager to use for loading templates.
"""
self._templates = {}
self._template_manager = template_manager
def render(self, template_name: str, context: dict[str, Any]) -> str:
"""Render a template with the given context."""
jinja_template = self._templates.get(template_name)
if jinja_template is None:
template_contents = self._template_manager.get(template_name)
if template_contents is None:
msg = f"Template '{template_name}' not found."
raise KeyError(msg)
jinja_template = Template(template_contents, undefined=StrictUndefined)
self._templates[template_name] = jinja_template
try:
return jinja_template.render(**context)
except UndefinedError as e:
msg = f"Missing key in context for template '{template_name}': {e.message}"
raise KeyError(msg) from e
except Exception as e:
msg = f"Error rendering template '{template_name}': {e!s}"
raise RuntimeError(msg) from e
@property
def template_manager(self) -> "TemplateManager":
"""Template manager associated with this engine."""
return self._template_manager
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/jinja_template_engine.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/template_engine.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Abstract base class for template engines."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.templating.template_manager import TemplateManager
class TemplateEngine(ABC):
"""Abstract base class for template engines."""
@abstractmethod
def __init__(self, *, template_manager: "TemplateManager", **kwargs: Any) -> None:
"""Initialize the template engine.
Args
----
template_manager: TemplateManager
The template manager to use for loading templates.
"""
raise NotImplementedError
@abstractmethod
def render(self, template_name: str, context: dict[str, Any]) -> str:
"""Render a template with the given context.
Args
----
template_name: str
The name of the template to render.
context: dict[str, str]
The context to use for rendering the template.
Returns
-------
str: The rendered template.
Raises
------
KeyError: If the template is not found or a required key is missing in the context.
"""
raise NotImplementedError
@property
@abstractmethod
def template_manager(self) -> "TemplateManager":
"""Template manager associated with this engine."""
raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/template_engine.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/template_engine_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Template engine factory implementation."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.template_engine_config import TemplateEngineConfig
from graphrag_llm.config.types import TemplateEngineType
from graphrag_llm.templating.template_engine import TemplateEngine
from graphrag_llm.templating.template_manager_factory import create_template_manager
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
class TemplateEngineFactory(Factory[TemplateEngine]):
"""Factory for creating template engine instances."""
template_engine_factory = TemplateEngineFactory()
def register_template_engine(
template_engine_type: str,
template_engine_initializer: Callable[..., TemplateEngine],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom template engine implementation.
Args
----
template_engine_type: str
The template engine id to register.
template_engine_initializer: Callable[..., TemplateEngine]
The template engine initializer to register.
scope: ServiceScope (default: "transient")
The service scope for the template engine instance.
"""
template_engine_factory.register(
strategy=template_engine_type,
initializer=template_engine_initializer,
scope=scope,
)
def create_template_engine(
template_engine_config: TemplateEngineConfig | None = None,
) -> TemplateEngine:
"""Create a TemplateEngine instance.
Args
----
template_engine_config: TemplateEngineConfig | None
The configuration for the template engine. If None, defaults will be used.
Returns
-------
TemplateEngine:
An instance of a TemplateEngine subclass.
"""
template_engine_config = template_engine_config or TemplateEngineConfig()
strategy = template_engine_config.type
template_manager = create_template_manager(
template_engine_config=template_engine_config
)
init_args = template_engine_config.model_dump()
if strategy not in template_engine_factory:
match strategy:
case TemplateEngineType.Jinja:
from graphrag_llm.templating.jinja_template_engine import (
JinjaTemplateEngine,
)
template_engine_factory.register(
strategy=TemplateEngineType.Jinja,
initializer=JinjaTemplateEngine,
scope="singleton",
)
case _:
msg = f"TemplateEngineConfig.type '{strategy}' is not registered in the TemplateEngineFactory. Registered strategies: {', '.join(template_engine_factory.keys())}"
raise ValueError(msg)
return template_engine_factory.create(
strategy=strategy,
init_args={
**init_args,
"template_manager": template_manager,
},
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/template_engine_factory.py",
"license": "MIT License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/template_manager.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Abstract base class for template managers."""
from abc import ABC, abstractmethod
from typing import Any
class TemplateManager(ABC):
"""Abstract base class for template managers."""
@abstractmethod
def __init__(self, **kwargs: Any) -> None:
"""Initialize the template manager."""
raise NotImplementedError
@abstractmethod
def get(self, template_name: str) -> str | None:
"""Retrieve a template by its name.
Args
----
template_name: str
The name of the template to retrieve.
Returns
-------
str | None: The content of the template, if found.
"""
raise NotImplementedError
@abstractmethod
def register(self, template_name: str, template: str) -> None:
"""Register a new template.
Args
----
template_name: str
The name of the template.
template: str
The content of the template.
"""
raise NotImplementedError
@abstractmethod
def keys(self) -> list[str]:
"""List all registered template names.
Returns
-------
list[str]: A list of registered template names.
"""
raise NotImplementedError
@abstractmethod
def __contains__(self, template_name: str) -> bool:
"""Check if a template is registered.
Args
----
template_name: str
The name of the template to check.
"""
raise NotImplementedError
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/template_manager.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/templating/template_manager_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Template manager factory implementation."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.template_engine_config import TemplateEngineConfig
from graphrag_llm.config.types import TemplateManagerType
from graphrag_llm.templating.template_manager import TemplateManager
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
class TemplateManagerFactory(Factory[TemplateManager]):
"""Factory for creating template manager instances."""
template_manager_factory = TemplateManagerFactory()
def register_template_manager(
template_manager_type: str,
template_manager_initializer: Callable[..., TemplateManager],
scope: "ServiceScope" = "transient",
) -> None:
"""Register a custom template manager implementation.
Args
----
- template_manager_type: str
The template manager id to register.
- template_manager_initializer: Callable[..., TemplateManager]
The template manager initializer to register.
"""
template_manager_factory.register(
strategy=template_manager_type,
initializer=template_manager_initializer,
scope=scope,
)
def create_template_manager(
template_engine_config: TemplateEngineConfig | None = None,
) -> TemplateManager:
"""Create a TemplateManager instance.
Args
----
template_engine_config: TemplateEngineConfig
The configuration for the template engine.
Returns
-------
TemplateManager:
An instance of a TemplateManager subclass.
"""
template_engine_config = template_engine_config or TemplateEngineConfig()
strategy = template_engine_config.template_manager
init_args = template_engine_config.model_dump()
if strategy not in template_manager_factory:
match strategy:
case TemplateManagerType.File:
from graphrag_llm.templating.file_template_manager import (
FileTemplateManager,
)
template_manager_factory.register(
strategy=TemplateManagerType.File,
initializer=FileTemplateManager,
scope="singleton",
)
case _:
msg = f"TemplateEngineConfig.template_manager '{strategy}' is not registered in the TemplateManagerFactory. Registered strategies: {', '.join(template_manager_factory.keys())}"
raise ValueError(msg)
return template_manager_factory.create(strategy=strategy, init_args=init_args)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/templating/template_manager_factory.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/threading/completion_thread.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Completion Thread."""
import threading
from queue import Empty, Queue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterator
from graphrag_llm.types import (
LLMCompletionArgs,
LLMCompletionChunk,
LLMCompletionFunction,
LLMCompletionResponse,
)
LLMCompletionRequestQueue = Queue[tuple[str, "LLMCompletionArgs"] | None]
"""Input queue for LLM completions.
A queue for tracking requests to be made to a completion endpoint.
Each item in the queue is a tuple containing a request ID and a dictionary of
completion arguments. A `None` value indicates that the thread should terminate.
Queue Item Type:
tuple[request_id, completion_args_dict] | None
Items in the queue are processed by a thread pool in which the results are placed
into an output queue to be handled by a response handler.
"""
LLMCompletionResponseQueue = Queue[
tuple[
str,
"LLMCompletionResponse | Iterator[LLMCompletionChunk] | Exception",
]
| None
]
"""Output queue for LLM completion responses.
A queue for tracking responses from completion requests. Each item in the queue is a tuple
containing the request ID and the corresponding response, which can be a full response,
a stream of chunks, or an exception if the request failed. A `None` value indicates that the
thread should terminate.
Queue Item Type:
tuple[request_id, response | exception] | None
Items in the queue are produced by a thread pool that processes completion requests
from an input queue. This output queue is then consumed by a response handler provided
by the user.
"""
class CompletionThread(threading.Thread):
    """Worker thread that services LLM completion requests from a queue.

    Pulls ``(request_id, completion_args)`` tuples from the input queue,
    invokes the completion function, and places ``(request_id, result)``
    tuples on the output queue. Exceptions raised by the completion
    function are forwarded as the result instead of crashing the thread.
    """

    def __init__(
        self,
        *,
        quit_process_event: threading.Event,
        input_queue: "LLMCompletionRequestQueue",
        output_queue: "LLMCompletionResponseQueue",
        completion: "LLMCompletionFunction",
    ) -> None:
        """Create a completion worker thread.

        Args
        ----
        quit_process_event: threading.Event
            Event used to signal the thread to stop processing.
        input_queue: LLMCompletionRequestQueue
            Queue of pending requests; a `None` item terminates the thread.
        output_queue: LLMCompletionResponseQueue
            Queue on which responses (or exceptions) are placed.
        completion: LLMCompletionFunction
            The completion function invoked for each request.
        """
        # NOTE: the queue annotations are string forward references (matching
        # the style already used for `completion`) so the class body does not
        # evaluate the module-level aliases at class-definition time.
        super().__init__()
        self._quit_process_event = quit_process_event
        self._input_queue = input_queue
        self._output_queue = output_queue
        self._completion = completion

    def run(self) -> None:
        """Process requests until a quit signal or the `None` sentinel is seen."""
        # `while not ...` replaces the redundant `while True and not ...`;
        # behavior is identical and matches EmbeddingThread.
        while not self._quit_process_event.is_set():
            try:
                # Short timeout so the quit event is re-checked periodically.
                input_data = self._input_queue.get(timeout=1)
            except Empty:
                continue
            if input_data is None:
                # Sentinel: terminate this worker.
                break
            request_id, data = input_data
            try:
                response = self._completion(**data)
                self._output_queue.put((request_id, response))
            except Exception as e:  # noqa: BLE001
                # Forward failures to the consumer instead of dying silently.
                self._output_queue.put((request_id, e))
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/threading/completion_thread.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/threading/completion_thread_runner.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Completion Thread Runner."""
import asyncio
import sys
import threading
import time
from collections.abc import Awaitable, Iterator
from contextlib import contextmanager
from queue import Empty, Queue
from typing import TYPE_CHECKING, Protocol, Unpack, runtime_checkable
from graphrag_llm.threading.completion_thread import CompletionThread
if TYPE_CHECKING:
from graphrag_llm.metrics import MetricsStore
from graphrag_llm.threading.completion_thread import (
LLMCompletionRequestQueue,
LLMCompletionResponseQueue,
)
from graphrag_llm.types import (
LLMCompletionArgs,
LLMCompletionChunk,
LLMCompletionFunction,
LLMCompletionResponse,
)
@runtime_checkable
class ThreadedLLMCompletionResponseHandler(Protocol):
"""Threaded completion response handler.
This function is used to handle responses from the threaded completion runner.
Args
----
request_id: str
The request ID associated with the completion request.
resp: LLMCompletionResponse | Iterator[LLMCompletionChunk] | Exception
The completion response, which can be a full response, a stream of chunks,
or an exception if the request failed.
Returns
-------
Awaitable[None] | None
The callback can be asynchronous or synchronous.
"""
def __call__(
self,
request_id: str,
response: "LLMCompletionResponse | Iterator[LLMCompletionChunk] | Exception",
/,
) -> Awaitable[None] | None:
"""Threaded completion response handler."""
...
@runtime_checkable
class ThreadedLLMCompletionFunction(Protocol):
"""Threaded completion function.
This function is used to submit requests to a thread pool for processing.
The thread pool will process the requests and invoke the provided callback
with the responses.
same signature as LLMCompletionFunction but requires a `request_id` parameter
to identify the request and does not return anything.
Args
----
messages: LLMCompletionMessagesParam
The messages to send to the LLM.
Can be str | list[dict[str, str]] | list[ChatCompletionMessageParam].
request_id: str
The request ID to associate with the completion request.
response_format: BaseModel | None (default=None)
The structured response format.
Must extend pydantic BaseModel.
stream: bool (default=False)
Whether to stream the response.
streaming is not supported when using response_format.
max_completion_tokens: int | None (default=None)
The maximum number of tokens to generate in the completion.
temperature: float | None (default=None)
The temperature to control how deterministic vs. creative the responses are.
top_p: float | None (default=None)
top_p for nucleus sampling, where the model considers tokens with
cumulative probabilities up to top_p. Values range from 0 to 1.
n: int | None (default=None)
The number of completions to generate for each prompt.
tools: list[Tool] | None (default=None)
Optional tools to use during completion.
https://docs.litellm.ai/docs/completion/function_call
**kwargs: Any
Additional keyword arguments.
Returns
-------
None
"""
def __call__(
self,
/,
request_id: str,
**kwargs: Unpack["LLMCompletionArgs"],
) -> None:
"""Threaded Chat completion function."""
...
def _start_completion_thread_pool(
    *,
    completion: "LLMCompletionFunction",
    quit_process_event: threading.Event,
    concurrency: int,
    queue_limit: int,
) -> tuple[
    list[CompletionThread],
    "LLMCompletionRequestQueue",
    "LLMCompletionResponseQueue",
]:
    # One bounded request queue (0 = unbounded) shared by all workers,
    # plus an unbounded response queue drained by the response handler thread.
    request_queue: LLMCompletionRequestQueue = Queue(queue_limit)
    response_queue: LLMCompletionResponseQueue = Queue()
    workers = [
        CompletionThread(
            quit_process_event=quit_process_event,
            input_queue=request_queue,
            output_queue=response_queue,
            completion=completion,
        )
        for _ in range(concurrency)
    ]
    for worker in workers:
        worker.start()
    return workers, request_queue, response_queue
@contextmanager
def completion_thread_runner(
    *,
    completion: "LLMCompletionFunction",
    response_handler: ThreadedLLMCompletionResponseHandler,
    concurrency: int,
    queue_limit: int = 0,
    metrics_store: "MetricsStore | None" = None,
) -> Iterator[ThreadedLLMCompletionFunction]:
    """Run a completion thread pool.

    Args
    ----
    completion: LLMCompletion
        The LLMCompletion instance to use for processing requests.
    response_handler: ThreadedLLMCompletionResponseHandler
        The callback function to handle completion responses.
        (request_id, response|exception) -> Awaitable[None] | None
    concurrency: int
        The number of threads to spin up in a thread pool.
    queue_limit: int (default=0)
        The maximum number of items allowed in the input queue.
        0 means unlimited.
        Set this to a value to create backpressure on the caller.
    metrics_store: MetricsStore | None (default=None)
        Optional metrics store to record runtime duration.

    Yields
    ------
    ThreadedLLMCompletionFunction:
        A function that can be used to submit completion requests to the thread pool.
        (messages, request_id, **kwargs) -> None
        The thread pool will process the requests and invoke the provided callback
        with the responses.
        Same signature as LLMCompletionFunction but requires a `request_id` parameter
        to identify the request and does not return anything.
    """
    quit_process_event = threading.Event()
    threads, input_queue, output_queue = _start_completion_thread_pool(
        completion=completion,
        quit_process_event=quit_process_event,
        concurrency=concurrency,
        queue_limit=queue_limit,
    )

    def _process_output(
        quit_process_event: threading.Event,
        output_queue: "LLMCompletionResponseQueue",
        callback: ThreadedLLMCompletionResponseHandler,
    ) -> None:
        # Drain responses until shutdown is signalled or the `None` sentinel arrives.
        while not quit_process_event.is_set():
            try:
                data = output_queue.get(timeout=1)
            except Empty:
                continue
            if data is None:
                break
            request_id, response = data
            # Bind the handler result to its own name instead of rebinding
            # `response`, which made the coroutine check hard to read.
            result = callback(request_id, response)
            if asyncio.iscoroutine(result):
                # Support asynchronous handlers by running them to completion.
                asyncio.run(result)

    def _process_input(request_id: str, **kwargs: Unpack["LLMCompletionArgs"]) -> None:
        # Enqueue one request; blocks for backpressure when the queue is bounded.
        if not request_id:
            msg = "request_id needs to be passed as a keyword argument"
            raise ValueError(msg)
        input_queue.put((request_id, kwargs))

    handle_response_thread = threading.Thread(
        target=_process_output,
        args=(quit_process_event, output_queue, response_handler),
    )
    handle_response_thread.start()

    def _cleanup() -> None:
        # One sentinel per worker, then wait for workers and the response thread.
        for _ in threads:
            input_queue.put(None)
        for thread in threads:
            while thread.is_alive():
                thread.join(timeout=1)
        output_queue.put(None)
        while handle_response_thread.is_alive():
            handle_response_thread.join(timeout=1)

    start_time = time.time()
    try:
        yield _process_input
        _cleanup()
    except KeyboardInterrupt:
        quit_process_event.set()
        sys.exit(1)
    except BaseException:
        # BUGFIX: previously any other exception raised inside the caller's
        # `with` block skipped _cleanup(), leaving non-daemon worker threads
        # alive and hanging the process. Signal shutdown before re-raising.
        quit_process_event.set()
        raise
    finally:
        end_time = time.time()
        runtime = end_time - start_time
        if metrics_store:
            metrics_store.update_metrics(metrics={"runtime_duration_seconds": runtime})
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/threading/completion_thread_runner.py",
"license": "MIT License",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/threading/embedding_thread.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Embedding Thread."""
import threading
from queue import Empty, Queue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from graphrag_llm.types import (
LLMEmbeddingArgs,
LLMEmbeddingFunction,
LLMEmbeddingResponse,
)
LLMEmbeddingRequestQueue = Queue[tuple[str, "LLMEmbeddingArgs"] | None]
"""Input queue for LLM embeddings.
A queue for tracking requests to be made to an embedding endpoint.
Each item in the queue is a tuple containing a request ID and a dictionary of
embedding arguments. A `None` value indicates that the thread should terminate.
Queue Item Type:
tuple[request_id, embedding_args_dict] | None
Items in the queue are processed by a thread pool in which the results are placed
into an output queue to be handled by a response handler.
"""
LLMEmbeddingResponseQueue = Queue[
tuple[
str,
"LLMEmbeddingResponse | Exception",
]
| None
]
"""Output queue for LLM embedding responses.
A queue for tracking responses from embedding requests. Each item in the queue is a tuple
containing the request ID and the corresponding response, which can be a full response
or an exception if the request failed. A `None` value indicates that the
thread should terminate.
Queue Item Type:
tuple[request_id, response | exception] | None
Items in the queue are produced by a thread pool that processes embedding requests
from an input queue. This output queue is then consumed by a response handler provided
by the user.
"""
class EmbeddingThread(threading.Thread):
    """Worker thread that services LLM embedding requests from a queue.

    Pulls ``(request_id, embedding_args)`` tuples from the input queue,
    invokes the embedding function, and places ``(request_id, result)``
    tuples on the output queue. Exceptions raised by the embedding function
    are forwarded as the result instead of crashing the thread.
    """

    def __init__(
        self,
        *,
        quit_process_event: threading.Event,
        input_queue: "LLMEmbeddingRequestQueue",
        output_queue: "LLMEmbeddingResponseQueue",
        embedding: "LLMEmbeddingFunction",
    ) -> None:
        """Create an embedding worker thread.

        Args
        ----
        quit_process_event: threading.Event
            Event used to signal the thread to stop processing.
        input_queue: LLMEmbeddingRequestQueue
            Queue of pending requests; a `None` item terminates the thread.
        output_queue: LLMEmbeddingResponseQueue
            Queue on which responses (or exceptions) are placed.
        embedding: LLMEmbeddingFunction
            The embedding function invoked for each request.
        """
        # NOTE: the queue annotations are string forward references (matching
        # the style already used for `embedding`) so the class body does not
        # evaluate the module-level aliases at class-definition time.
        super().__init__()
        self._quit_process_event = quit_process_event
        self._input_queue = input_queue
        self._output_queue = output_queue
        self._embedding = embedding

    def run(self) -> None:
        """Process requests until a quit signal or the `None` sentinel is seen."""
        while not self._quit_process_event.is_set():
            try:
                # Short timeout so the quit event is re-checked periodically.
                input_data = self._input_queue.get(timeout=0.1)
            except Empty:
                continue
            if input_data is None:
                # Sentinel: terminate this worker.
                break
            request_id, data = input_data
            try:
                response = self._embedding(**data)
                self._output_queue.put((request_id, response))
            except Exception as e:  # noqa: BLE001
                # Forward failures to the consumer instead of dying silently.
                self._output_queue.put((request_id, e))
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/threading/embedding_thread.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/threading/embedding_thread_runner.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Embedding Thread Runner."""
import asyncio
import sys
import threading
import time
from collections.abc import Awaitable, Iterator
from contextlib import contextmanager
from queue import Empty, Queue
from typing import TYPE_CHECKING, Protocol, Unpack, runtime_checkable
from graphrag_llm.threading.embedding_thread import EmbeddingThread
if TYPE_CHECKING:
from graphrag_llm.metrics import MetricsStore
from graphrag_llm.threading.embedding_thread import (
LLMEmbeddingRequestQueue,
LLMEmbeddingResponseQueue,
)
from graphrag_llm.types import (
LLMEmbeddingArgs,
LLMEmbeddingFunction,
LLMEmbeddingResponse,
)
@runtime_checkable
class ThreadedLLMEmbeddingResponseHandler(Protocol):
"""Threaded embedding response handler.
This function is used to handle responses from the threaded embedding runner.
Args
----
request_id: str
The request ID associated with the embedding request.
resp: LLMEmbeddingResponse | Exception
The embedding response, which can be a full response or
an exception if the request failed.
Returns
-------
Awaitable[None] | None
The callback can be asynchronous or synchronous.
"""
def __call__(
self,
request_id: str,
response: "LLMEmbeddingResponse | Exception",
/,
) -> Awaitable[None] | None:
"""Threaded embedding response handler."""
...
@runtime_checkable
class ThreadedLLMEmbeddingFunction(Protocol):
    """Threaded embedding function.

    This function is used to submit embedding requests in a threaded context.
    Results are not returned directly; they are delivered to the response
    handler registered with the runner, keyed by `request_id`.

    Args
    ----
    request_id: str
        The request ID associated with the embedding request.
    input: list[str]
        The input texts to be embedded.
    **kwargs: Any
        Additional keyword arguments.

    Returns
    -------
    None
        Unlike LLMEmbeddingFunction, this returns nothing; the response is
        dispatched asynchronously to the registered response handler.
    """

    def __call__(
        self, /, request_id: str, **kwargs: Unpack["LLMEmbeddingArgs"]
    ) -> None:
        """Threaded embedding function."""
        ...
def _start_embedding_thread_pool(
    *,
    embedding: "LLMEmbeddingFunction",
    quit_process_event: threading.Event,
    concurrency: int,
    queue_limit: int,
) -> tuple[
    list["EmbeddingThread"],
    "LLMEmbeddingRequestQueue",
    "LLMEmbeddingResponseQueue",
]:
    # One bounded request queue (0 = unbounded) shared by all workers,
    # plus an unbounded response queue drained by the response handler thread.
    request_queue: LLMEmbeddingRequestQueue = Queue(queue_limit)
    response_queue: LLMEmbeddingResponseQueue = Queue()
    workers = [
        EmbeddingThread(
            quit_process_event=quit_process_event,
            input_queue=request_queue,
            output_queue=response_queue,
            embedding=embedding,
        )
        for _ in range(concurrency)
    ]
    for worker in workers:
        worker.start()
    return workers, request_queue, response_queue
@contextmanager
def embedding_thread_runner(
    *,
    embedding: "LLMEmbeddingFunction",
    response_handler: ThreadedLLMEmbeddingResponseHandler,
    concurrency: int,
    queue_limit: int = 0,
    metrics_store: "MetricsStore | None" = None,
) -> Iterator[ThreadedLLMEmbeddingFunction]:
    """Run an embedding thread pool.

    Args
    ----
    embedding: LLMEmbeddingFunction
        The LLMEmbeddingFunction instance to use for processing requests.
    response_handler: ThreadedLLMEmbeddingResponseHandler
        The callback function to handle embedding responses.
        (request_id, response|exception) -> Awaitable[None] | None
    concurrency: int
        The number of threads to spin up in a thread pool.
    queue_limit: int (default=0)
        The maximum number of items allowed in the input queue.
        0 means unlimited.
        Set this to a value to create backpressure on the caller.
    metrics_store: MetricsStore | None (default=None)
        Optional metrics store to record runtime duration.

    Yields
    ------
    ThreadedLLMEmbeddingFunction:
        A function that can be used to submit embedding requests to the thread pool.
        (input, request_id, **kwargs) -> None
        The thread pool will process the requests and invoke the provided callback
        with the responses.
        Same signature as LLMEmbeddingFunction but requires a `request_id` parameter
        to identify the request and does not return anything.
    """
    quit_process_event = threading.Event()
    threads, input_queue, output_queue = _start_embedding_thread_pool(
        embedding=embedding,
        quit_process_event=quit_process_event,
        concurrency=concurrency,
        queue_limit=queue_limit,
    )

    def _process_output(
        quit_process_event: threading.Event,
        output_queue: "LLMEmbeddingResponseQueue",
        callback: ThreadedLLMEmbeddingResponseHandler,
    ) -> None:
        # Drain responses until shutdown is signalled or the `None` sentinel arrives.
        while not quit_process_event.is_set():
            try:
                data = output_queue.get(timeout=1)
            except Empty:
                continue
            if data is None:
                break
            request_id, response = data
            # Bind the handler result to its own name instead of rebinding
            # `response`, which made the coroutine check hard to read.
            result = callback(request_id, response)
            if asyncio.iscoroutine(result):
                # Support asynchronous handlers by running them to completion.
                asyncio.run(result)

    def _process_input(request_id: str, **kwargs: Unpack["LLMEmbeddingArgs"]) -> None:
        # Enqueue one request; blocks for backpressure when the queue is bounded.
        if not request_id:
            msg = "request_id needs to be passed as a keyword argument"
            raise ValueError(msg)
        input_queue.put((request_id, kwargs))

    handle_response_thread = threading.Thread(
        target=_process_output,
        args=(quit_process_event, output_queue, response_handler),
    )
    handle_response_thread.start()

    def _cleanup() -> None:
        # One sentinel per worker, then wait for workers and the response thread.
        for _ in threads:
            input_queue.put(None)
        for thread in threads:
            while thread.is_alive():
                thread.join(timeout=1)
        output_queue.put(None)
        while handle_response_thread.is_alive():
            handle_response_thread.join(timeout=1)

    start_time = time.time()
    try:
        yield _process_input
        _cleanup()
    except KeyboardInterrupt:
        quit_process_event.set()
        sys.exit(1)
    except BaseException:
        # BUGFIX: previously any other exception raised inside the caller's
        # `with` block skipped _cleanup(), leaving non-daemon worker threads
        # alive and hanging the process. Signal shutdown before re-raising.
        quit_process_event.set()
        raise
    finally:
        end_time = time.time()
        runtime = end_time - start_time
        if metrics_store:
            metrics_store.update_metrics(metrics={"runtime_duration_seconds": runtime})
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/threading/embedding_thread_runner.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/tokenizer/tokenizer.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Tokenizer Abstract Base Class."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from graphrag_llm.types import LLMCompletionMessagesParam
class Tokenizer(ABC):
    """Abstract base class for text tokenizers.

    Subclasses supply `encode`/`decode`; token-counting helpers are provided
    on top of them.
    """

    @abstractmethod
    def __init__(self, **kwargs: Any) -> None:
        """Initialize the LiteLLM Tokenizer."""

    @abstractmethod
    def encode(self, text: str) -> list[int]:
        """Encode the given text into a list of tokens.

        Args
        ----
        text: str
            The input text to encode.

        Returns
        -------
        list[int]: A list of tokens representing the encoded text.
        """
        raise NotImplementedError

    @abstractmethod
    def decode(self, tokens: list[int]) -> str:
        """Decode a list of tokens back into a string.

        Args
        ----
        tokens: list[int]
            A list of tokens to decode.

        Returns
        -------
        str: The decoded string from the list of tokens.
        """
        raise NotImplementedError

    def num_prompt_tokens(
        self,
        messages: "LLMCompletionMessagesParam",
    ) -> int:
        """Count the number of tokens in a prompt for a given model.

        Counts the tokens used for roles, names, and content in the messages,
        plus the fixed per-message and reply overheads.
        Modeled after: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

        Args
        ----
        messages: LLMCompletionMessagesParam
            The messages comprising the prompt. Can either be a string or a list of message dicts.

        Returns
        -------
        int: The number of tokens in the prompt.
        """
        reply_overhead = 3  # overhead for reply
        per_message_overhead = 3  # fixed overhead per message
        per_name_overhead = 1  # fixed overhead per name field
        # A bare string is treated as a single message (with name overhead).
        if isinstance(messages, str):
            return (
                self.num_tokens(messages)
                + reply_overhead
                + per_message_overhead
                + per_name_overhead
            )
        count = reply_overhead
        for entry in messages:
            count += per_message_overhead
            # Pydantic message objects are normalized to plain dicts.
            record = entry if isinstance(entry, dict) else entry.model_dump()
            for field, value in record.items():
                if field == "content":
                    if isinstance(value, str):
                        count += self.num_tokens(value)
                    elif isinstance(value, list):
                        # Multi-part content: count only the text parts.
                        count += sum(
                            self.num_tokens(part["text"])
                            for part in value
                            if isinstance(part, dict) and "text" in part
                        )
                elif field == "role":
                    count += self.num_tokens(str(value))
                elif field == "name":
                    count += self.num_tokens(str(value)) + per_name_overhead
        return count

    def num_tokens(self, text: str) -> int:
        """Return the number of tokens in the given text.

        Args
        ----
        text: str
            The input text to analyze.

        Returns
        -------
        int: The number of tokens in the input text.
        """
        return len(self.encode(text))
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/tokenizer/tokenizer.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/tokenizer/tokenizer_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Tokenizer factory."""
from collections.abc import Callable
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory
from graphrag_llm.config.types import TokenizerType
from graphrag_llm.tokenizer.tokenizer import Tokenizer
if TYPE_CHECKING:
from graphrag_common.factory import ServiceScope
from graphrag_llm.config.tokenizer_config import TokenizerConfig
class TokenizerFactory(Factory[Tokenizer]):
    """Factory for creating Tokenizer instances."""


# Module-level singleton factory shared by register_tokenizer/create_tokenizer.
tokenizer_factory = TokenizerFactory()
def register_tokenizer(
    tokenizer_type: str,
    tokenizer_initializer: Callable[..., Tokenizer],
    scope: "ServiceScope" = "transient",
) -> None:
    """Register a custom tokenizer implementation.

    Args
    ----
    tokenizer_type: str
        The tokenizer id to register.
    tokenizer_initializer: Callable[..., Tokenizer]
        The tokenizer initializer to register.
    scope: ServiceScope (default="transient")
        The factory service scope for created instances
        (e.g. "transient" or "singleton").
    """
    tokenizer_factory.register(tokenizer_type, tokenizer_initializer, scope)
def create_tokenizer(tokenizer_config: "TokenizerConfig") -> Tokenizer:
    """Create a Tokenizer instance based on the configuration.

    Args
    ----
    tokenizer_config: TokenizerConfig
        The configuration for the tokenizer.

    Returns
    -------
    Tokenizer:
        An instance of a Tokenizer subclass.
    """
    tokenizer_type = tokenizer_config.type
    constructor_args = tokenizer_config.model_dump()
    if tokenizer_type not in tokenizer_factory:
        # Built-in implementations are imported and registered lazily on first use.
        if tokenizer_type == TokenizerType.LiteLLM:
            from graphrag_llm.tokenizer.lite_llm_tokenizer import (
                LiteLLMTokenizer,
            )

            register_tokenizer(
                TokenizerType.LiteLLM,
                LiteLLMTokenizer,
                scope="singleton",
            )
        elif tokenizer_type == TokenizerType.Tiktoken:
            from graphrag_llm.tokenizer.tiktoken_tokenizer import (
                TiktokenTokenizer,
            )

            register_tokenizer(
                TokenizerType.Tiktoken,
                TiktokenTokenizer,
                scope="singleton",
            )
        else:
            msg = f"TokenizerConfig.type '{tokenizer_type}' is not registered in the TokenizerFactory. Registered strategies: {', '.join(tokenizer_factory.keys())}"
            raise ValueError(msg)
    return tokenizer_factory.create(
        strategy=tokenizer_type,
        init_args=constructor_args,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/tokenizer/tokenizer_factory.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/types/types.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Types for graphrag-llm."""
from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
from typing import (
Any,
Generic,
Literal,
Protocol,
Required,
TypeVar,
Unpack,
runtime_checkable,
)
from litellm import (
AnthropicThinkingParam,
ChatCompletionAudioParam,
ChatCompletionModality,
ChatCompletionPredictionContentParam,
OpenAIWebSearchOptions,
)
from openai.types.chat.chat_completion import (
ChatCompletion,
Choice,
)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta
from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice
from openai.types.chat.chat_completion_function_tool_param import (
ChatCompletionFunctionToolParam,
)
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from openai.types.completion_usage import (
CompletionTokensDetails,
CompletionUsage,
PromptTokensDetails,
)
from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
from openai.types.embedding import Embedding
from pydantic import BaseModel, computed_field
from typing_extensions import TypedDict
LLMCompletionMessagesParam = str | Sequence[ChatCompletionMessageParam | dict[str, Any]]
LLMChoice = Choice
LLMCompletionMessage = ChatCompletionMessage
LLMCompletionChunk = ChatCompletionChunk
LLMChoiceChunk = ChunkChoice
LLMChoiceDelta = ChoiceDelta
LLMCompletionUsage = CompletionUsage
LLMPromptTokensDetails = PromptTokensDetails
LLMCompletionTokensDetails = CompletionTokensDetails
LLMEmbedding = Embedding
LLMEmbeddingUsage = Usage
LLMCompletionFunctionToolParam = ChatCompletionFunctionToolParam
Metrics = dict[str, float]
"""Represents single request metrics and aggregated metrics for an entire model.
example: {
"duration_ms": 123.45,
"successful_requests": 1,
}
On the individual request level, successful_requests will be either 0 or 1.
On the aggregated model level, successful_requests will be the sum of all
successful requests.
"""
ResponseFormat = TypeVar(
"ResponseFormat",
bound=BaseModel,
)
"""Generic type variable for structured response format."""
class LLMCompletionResponse(ChatCompletion, Generic[ResponseFormat]):
    """LLM Completion Response extending OpenAI ChatCompletion.

    The response type returned by graphrag-llm LLMCompletionFunction.
    graphrag-llm automatically handles structured response parsing based on the
    provided ResponseFormat model.
    """

    # Parsed structured output; None when no response_format was requested.
    formatted_response: ResponseFormat | None = None  # type: ignore
    """Formatted response according to the specified response_format json schema."""

    @computed_field
    @property
    def content(self) -> str:
        """Get the content of the first choice message (empty string if absent)."""
        # Convenience accessor: callers typically only need the first choice.
        return self.choices[0].message.content or ""
class LLMCompletionArgs(
    TypedDict, Generic[ResponseFormat], total=False, extra_items=Any
):
    """Arguments for LLMCompletionFunction.

    Same signature as litellm.completion but without the `model` parameter
    as this is already set in the model configuration.

    Only `messages` is required; every other key is optional (total=False),
    and unknown keys are permitted (extra_items=Any) and forwarded as-is.
    """

    # Required prompt payload.
    messages: Required[LLMCompletionMessagesParam]
    # Structured-output schema (pydantic model class).
    response_format: type[ResponseFormat] | None
    timeout: float | None
    # Sampling controls.
    temperature: float | None
    top_p: float | None
    n: int | None
    # Streaming controls.
    stream: bool | None
    stream_options: dict | None
    stop: None
    # Token limits.
    max_completion_tokens: int | None
    max_tokens: int | None
    modalities: list[ChatCompletionModality] | None
    prediction: ChatCompletionPredictionContentParam | None
    audio: ChatCompletionAudioParam | None
    presence_penalty: float | None
    frequency_penalty: float | None
    logit_bias: dict | None
    user: str | None
    reasoning_effort: (
        Literal["none", "minimal", "low", "medium", "high", "default"] | None
    )
    seed: int | None
    # Tool / function calling.
    tools: list | None
    tool_choice: str | dict | None
    logprobs: bool | None
    top_logprobs: int | None
    parallel_tool_calls: bool | None
    web_search_options: OpenAIWebSearchOptions | None
    deployment_id: Any
    extra_headers: dict | None
    safety_identifier: str | None
    # Legacy function-calling fields.
    functions: list | None
    function_call: str | None
    # Provider-specific (Anthropic) extended-thinking parameter.
    thinking: AnthropicThinkingParam | None
@runtime_checkable
class LLMCompletionFunction(Protocol):
    """Synchronous completion function.

    Same signature as litellm.completion but without the `model` parameter
    as this is already set in the model configuration.

    Returns either a full response or, when streaming, an iterator of chunks.
    """

    def __call__(
        self, /, **kwargs: Unpack[LLMCompletionArgs[ResponseFormat]]
    ) -> LLMCompletionResponse[ResponseFormat] | Iterator[LLMCompletionChunk]:
        """Completion function."""
        ...
@runtime_checkable
class AsyncLLMCompletionFunction(Protocol):
    """Asynchronous completion function.

    Same signature as litellm.completion but without the `model` parameter
    as this is already set in the model configuration.

    Awaiting the result yields a full response or, when streaming, an async
    iterator of chunks.
    """

    def __call__(
        self, /, **kwargs: Unpack[LLMCompletionArgs[ResponseFormat]]
    ) -> Awaitable[
        LLMCompletionResponse[ResponseFormat] | AsyncIterator[LLMCompletionChunk]
    ]:
        """Completion function."""
        ...
class LLMEmbeddingResponse(CreateEmbeddingResponse):
    """LLM Embedding Response extending OpenAI CreateEmbeddingResponse.

    The response type returned by graphrag-llm LLMEmbeddingFunction.
    Adds utilities for accessing embeddings.
    """

    @computed_field
    @property
    def embeddings(self) -> list[list[float]]:
        """Get the embeddings as a list of lists of floats."""
        # Flatten the per-item `data` entries into raw vectors.
        return [data.embedding for data in self.data]

    @computed_field
    @property
    def first_embedding(self) -> list[float]:
        """Get the first embedding (empty list when there are none)."""
        return self.embeddings[0] if self.embeddings else []
class LLMEmbeddingArgs(TypedDict, total=False, extra_items=Any):
    """Arguments for embedding functions.

    Same signature as litellm.embedding but without the `model` parameter
    as this is already set in the model configuration.

    Only `input` is required; every other key is optional (total=False),
    and unknown keys are permitted (extra_items=Any) and forwarded as-is.
    """

    # Required: the texts to embed.
    input: Required[list[str]]
    dimensions: int | None
    encoding_format: str | None
    timeout: int
    user: str | None
@runtime_checkable
class LLMEmbeddingFunction(Protocol):
    """Synchronous embedding function.

    Same signature as litellm.embedding but without the `model` parameter
    as this is already set in the model configuration.
    """

    def __call__(
        self,
        /,
        **kwargs: Unpack[LLMEmbeddingArgs],
    ) -> LLMEmbeddingResponse:
        """Embedding function."""
        ...
@runtime_checkable
class AsyncLLMEmbeddingFunction(Protocol):
    """Asynchronous embedding function.

    Same signature as litellm.aembedding but without the `model` parameter
    as this is already set in the model configuration.
    """

    async def __call__(
        self,
        /,
        **kwargs: Unpack[LLMEmbeddingArgs],
    ) -> LLMEmbeddingResponse:
        """Embedding function."""
        ...
LLMFunction = TypeVar("LLMFunction", LLMCompletionFunction, LLMEmbeddingFunction)
"""Generic representation of completion and embedding functions.
This type is used in the middleware pipeline as the pipeline can handle both
completion and embedding functions. That way services such as retries, caching,
and rate limiting can be reused for both completions and embeddings.
"""
AsyncLLMFunction = TypeVar(
"AsyncLLMFunction", AsyncLLMCompletionFunction, AsyncLLMEmbeddingFunction
)
"""Generic representation of asynchronous completion and embedding functions.
This type is used in the middleware pipeline as the pipeline can handle both
completion and embedding functions. That way services such as retries, caching,
and rate limiting can be reused for both completions and embeddings.
"""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/types/types.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/completion_messages_builder.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""ChatCompletionMessageParamBuilder class."""
from collections.abc import Iterable
from typing import TYPE_CHECKING, Literal
from openai.types.chat.chat_completion_assistant_message_param import (
ChatCompletionAssistantMessageParam,
)
from openai.types.chat.chat_completion_content_part_image_param import (
ChatCompletionContentPartImageParam,
ImageURL,
)
from openai.types.chat.chat_completion_content_part_input_audio_param import (
ChatCompletionContentPartInputAudioParam,
InputAudio,
)
from openai.types.chat.chat_completion_content_part_param import (
ChatCompletionContentPartParam,
)
from openai.types.chat.chat_completion_content_part_text_param import (
ChatCompletionContentPartTextParam,
)
from openai.types.chat.chat_completion_developer_message_param import (
ChatCompletionDeveloperMessageParam,
)
from openai.types.chat.chat_completion_function_message_param import (
ChatCompletionFunctionMessageParam,
)
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.chat.chat_completion_system_message_param import (
ChatCompletionSystemMessageParam,
)
from openai.types.chat.chat_completion_tool_message_param import (
ChatCompletionToolMessageParam,
)
from openai.types.chat.chat_completion_user_message_param import (
ChatCompletionUserMessageParam,
)
if TYPE_CHECKING:
from openai.types.chat.chat_completion_message_param import (
ChatCompletionMessageParam,
)
from graphrag_llm.types import LLMCompletionMessagesParam
class CompletionMessagesBuilder:
"""CompletionMessagesBuilder class."""
def __init__(self) -> None:
"""Initialize CompletionMessagesBuilder."""
self._messages: list[ChatCompletionMessageParam] = []
def add_system_message(
self,
content: str | Iterable[ChatCompletionContentPartTextParam],
name: str | None = None,
) -> "CompletionMessagesBuilder":
"""Add system message.
Parameters
----------
content : str | Iterable[ChatCompletionContentPartTextParam]
Content of the system message.
If passing in Iterable[ChatCompletionContentPartTextParam], may use
`CompletionContentPartBuilder` to build the content.
name : str | None
Optional name for the participant.
Returns
-------
None
"""
if name:
self._messages.append(
ChatCompletionSystemMessageParam(
role="system", content=content, name=name
)
)
else:
self._messages.append(
ChatCompletionSystemMessageParam(role="system", content=content)
)
return self
def add_developer_message(
self,
content: str | Iterable[ChatCompletionContentPartTextParam],
name: str | None = None,
) -> "CompletionMessagesBuilder":
"""Add developer message.
Parameters
----------
content : str | Iterable[ChatCompletionContentPartTextParam]
Content of the developer message.
If passing in Iterable[ChatCompletionContentPartTextParam], may use
`CompletionContentPartBuilder` to build the content.
name : str | None
Optional name for the participant.
Returns
-------
None
"""
if name:
self._messages.append(
ChatCompletionDeveloperMessageParam(
role="developer", content=content, name=name
)
)
else:
self._messages.append(
ChatCompletionDeveloperMessageParam(role="developer", content=content)
)
return self
def add_tool_message(
self,
content: str | Iterable[ChatCompletionContentPartTextParam],
tool_call_id: str,
) -> "CompletionMessagesBuilder":
"""Add developer message.
Parameters
----------
content : str | Iterable[ChatCompletionContentPartTextParam]
Content of the developer message.
If passing in Iterable[ChatCompletionContentPartTextParam], may use
`CompletionContentPartBuilder` to build the content.
tool_call_id : str
ID of the tool call that this message is responding to.
Returns
-------
None
"""
self._messages.append(
ChatCompletionToolMessageParam(
role="tool", content=content, tool_call_id=tool_call_id
)
)
return self
def add_function_message(
self,
function_name: str,
content: str | None = None,
) -> "CompletionMessagesBuilder":
"""Add function message.
Parameters
----------
function_name : str
Name of the function to call.
content : str | None
Content of the function message.
Returns
-------
None
"""
self._messages.append(
ChatCompletionFunctionMessageParam(
role="function", content=content, name=function_name
)
)
return self
def add_user_message(
self,
content: str | Iterable[ChatCompletionContentPartParam],
name: str | None = None,
) -> "CompletionMessagesBuilder":
"""Add user message.
Parameters
----------
content : str | Iterable[ChatCompletionContentPartParam]
Content of the user message.
If passing in Iterable[ChatCompletionContentPartParam], may use
`CompletionContentPartBuilder` to build the content.
name : str | None
Optional name for the participant.
Returns
-------
None
"""
if name:
self._messages.append(
ChatCompletionUserMessageParam(role="user", content=content, name=name)
)
else:
self._messages.append(
ChatCompletionUserMessageParam(role="user", content=content)
)
return self
def add_assistant_message(
self,
message: str | ChatCompletionMessage,
name: str | None = None,
) -> "CompletionMessagesBuilder":
"""Add assistant message.
Parameters
----------
message : ChatCompletionMessage
Previous response message.
name : str | None
Optional name for the participant.
Returns
-------
None
"""
args = {
"role": "assistant",
"content": message if isinstance(message, str) else message.content,
"refusal": None if isinstance(message, str) else message.refusal,
}
if name:
args["name"] = name
if not isinstance(message, str):
if message.function_call:
args["function_call"] = message.function_call
if message.tool_calls:
args["tool_calls"] = message.tool_calls
if message.audio:
args["audio"] = message.audio
self._messages.append(ChatCompletionAssistantMessageParam(**args))
return self
def build(self) -> "LLMCompletionMessagesParam":
"""Get messages."""
return self._messages
class CompletionContentPartBuilder:
"""CompletionContentPartBuilder class."""
def __init__(self) -> None:
"""Initialize CompletionContentPartBuilder."""
self._content_parts: list[ChatCompletionContentPartParam] = []
def add_text_part(self, text: str) -> "CompletionContentPartBuilder":
"""Add text part.
Parameters
----------
text : str
Text content.
Returns
-------
None
"""
self._content_parts.append(
ChatCompletionContentPartTextParam(text=text, type="text")
)
return self
def add_image_part(
self, url: str, detail: Literal["auto", "low", "high"]
) -> "CompletionContentPartBuilder":
"""Add image part.
Parameters
----------
url : str
Either an URL of the image or the base64 encoded image data.
detail : Literal["auto", "low", "high"]
Specifies the detail level of the image.
Returns
-------
None
"""
self._content_parts.append(
ChatCompletionContentPartImageParam(
image_url=ImageURL(url=url, detail=detail), type="image_url"
)
)
return self
def add_audio_part(
self, data: str, _format: Literal["wav", "mp3"]
) -> "CompletionContentPartBuilder":
"""Add audio part.
Parameters
----------
data : str
Base64 encoded audio data.
_format : Literal["wav", "mp3"]
The format of the encoded audio data. Currently supports "wav" and "mp3".
Returns
-------
None
"""
self._content_parts.append(
ChatCompletionContentPartInputAudioParam(
input_audio=InputAudio(data=data, format=_format), type="input_audio"
)
)
return self
def build(self) -> list[ChatCompletionContentPartParam]:
"""Get content parts.
Returns
-------
list[ChatCompletionContentPartParam]
List of content parts.
"""
return self._content_parts
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/completion_messages_builder.py",
"license": "MIT License",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/create_completion_response.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Create completion response."""
from graphrag_llm.types import (
LLMChoice,
LLMCompletionMessage,
LLMCompletionResponse,
LLMCompletionUsage,
)
def create_completion_response(response: str) -> LLMCompletionResponse:
"""Create a completion response object.
Args:
response: The completion response string.
Returns
-------
LLMCompletionResponse: The completion response object.
"""
return LLMCompletionResponse(
id="completion-id",
object="chat.completion",
created=0,
model="mock-model",
choices=[
LLMChoice(
index=0,
message=LLMCompletionMessage(
role="assistant",
content=response,
),
finish_reason="stop",
)
],
usage=LLMCompletionUsage(
prompt_tokens=0,
completion_tokens=0,
total_tokens=0,
),
formatted_response=None,
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/create_completion_response.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/create_embedding_response.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Create embedding response utilities."""
from graphrag_llm.types import LLMEmbedding, LLMEmbeddingResponse, LLMEmbeddingUsage
def create_embedding_response(
embeddings: list[float], batch_size: int = 1
) -> LLMEmbeddingResponse:
"""Create a CreateEmbeddingResponse object.
Args:
embeddings: List of embedding vectors.
model: The model used to create the embeddings.
Returns
-------
An LLMEmbeddingResponse object.
"""
embeddings_objects = [
LLMEmbedding(
object="embedding",
embedding=embeddings,
index=index,
)
for index in range(batch_size)
]
return LLMEmbeddingResponse(
object="list",
data=embeddings_objects,
model="mock-model",
usage=LLMEmbeddingUsage(
prompt_tokens=0,
total_tokens=0,
),
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/create_embedding_response.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/function_tool_manager.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Function tool manager."""
import json
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from openai import pydantic_function_tool
from pydantic import BaseModel
from typing_extensions import TypedDict
if TYPE_CHECKING:
from graphrag_llm.types import LLMCompletionFunctionToolParam, LLMCompletionResponse
FunctionArgumentModel = TypeVar(
"FunctionArgumentModel", bound=BaseModel, covariant=True
)
class FunctionDefinition(TypedDict, Generic[FunctionArgumentModel]):
"""Function definition."""
name: str
description: str
input_model: type[FunctionArgumentModel]
function: Callable[[FunctionArgumentModel], str]
class ToolMessage(TypedDict):
"""Function tool response message to be added to message history."""
content: str
tool_call_id: str
class FunctionToolManager:
"""Function tool manager."""
_tools: dict[str, FunctionDefinition[Any]]
def __init__(self) -> None:
"""Initialize FunctionToolManager."""
self._tools = {}
def register_function_tool(
self,
*,
name: str,
description: str,
input_model: type[FunctionArgumentModel],
function: Callable[[FunctionArgumentModel], str],
) -> None:
"""Register function tool.
Args
----
name: str
The name of the function tool.
description: str
The description of the function tool.
input_model: type[T]
The pydantic model type for the function tool input.
function: Callable[[T], str]
The function to call for the function tool.
"""
self._tools[name] = {
"name": name,
"description": description,
"input_model": input_model,
"function": function,
}
def definitions(self) -> list["LLMCompletionFunctionToolParam"]:
"""Get function tool definitions.
Returns
-------
list[LLMCompletionFunctionToolParam]
List of function tool definitions.
"""
return [
pydantic_function_tool(
tool_def["input_model"],
name=tool_def["name"],
description=tool_def["description"],
)
for tool_def in self._tools.values()
]
def call_functions(self, response: "LLMCompletionResponse") -> list[ToolMessage]:
"""Call functions based on the response.
Args
----
response: LLMCompletionResponse
The LLM completion response.
Returns
-------
list[ToolMessage]
The list of tool response messages to be added to the message history.
"""
if not response.choices[0].message.tool_calls:
return []
tool_messages: list[ToolMessage] = []
for tool_call in response.choices[0].message.tool_calls:
if tool_call.type != "function":
continue
tool_id = tool_call.id
function_name = tool_call.function.name
function_args = tool_call.function.arguments
if function_name not in self._tools:
msg = f"Function '{function_name}' not registered."
raise ValueError(msg)
tool_def = self._tools[function_name]
input_model = tool_def["input_model"]
function = tool_def["function"]
try:
parsed_args_dict = json.loads(function_args)
input_model_instance = input_model(**parsed_args_dict)
except Exception as e:
msg = f"Failed to parse arguments for function '{function_name}': {e}"
raise ValueError(msg) from e
result = function(input_model_instance)
tool_messages.append({
"content": result,
"tool_call_id": tool_id,
})
return tool_messages
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/function_tool_manager.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/gather_completion_response.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Gather Completion Response Utility."""
from collections.abc import AsyncIterator, Iterator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from graphrag_llm.types import (
LLMCompletionChunk,
LLMCompletionResponse,
)
def gather_completion_response(
response: "LLMCompletionResponse | Iterator[LLMCompletionChunk]",
) -> str:
"""Gather completion response from an iterator of response chunks.
Args
----
response: LMChatCompletion | Iterator[LLMChatCompletionChunk]
The completion response or an iterator of response chunks.
Returns
-------
The gathered response as a single string.
"""
if isinstance(response, Iterator):
return "".join(chunk.choices[0].delta.content or "" for chunk in response)
return response.choices[0].message.content or ""
async def gather_completion_response_async(
response: "LLMCompletionResponse | AsyncIterator[LLMCompletionChunk]",
) -> str:
"""Gather completion response from an iterator of response chunks.
Args
----
response: LMChatCompletion | AsyncIterator[LLMChatCompletionChunk]
The completion response or an iterator of response chunks.
Returns
-------
The gathered response as a single string.
"""
if isinstance(response, AsyncIterator):
gathered_content = ""
async for chunk in response:
gathered_content += chunk.choices[0].delta.content or ""
return gathered_content
return response.choices[0].message.content or ""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/gather_completion_response.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-llm/graphrag_llm/utils/structure_response.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Structure response as pydantic base model."""
import json
from typing import Any, TypeVar
from pydantic import BaseModel
T = TypeVar("T", bound=BaseModel, covariant=True)
def structure_completion_response(response: str, model: type[T]) -> T:
"""Structure completion response as pydantic base model.
Args
----
response: str
The completion response as a JSON string.
model: type[T]
The pydantic base model type to structure the response into.
Returns
-------
The structured response as a pydantic base model.
"""
parsed_dict: dict[str, Any] = json.loads(response)
return model(**parsed_dict)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-llm/graphrag_llm/utils/structure_response.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-storage/graphrag_storage/storage.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Abstract base class for storage."""
import re
from abc import ABC, abstractmethod
from collections.abc import Iterator
from datetime import datetime
from typing import Any
class Storage(ABC):
"""Provide a storage interface."""
@abstractmethod
def __init__(self, **kwargs: Any) -> None:
"""Create a storage instance."""
@abstractmethod
def find(
self,
file_pattern: re.Pattern[str],
) -> Iterator[str]:
"""Find files in the storage using a file pattern.
Args
----
- file_pattern: re.Pattern[str]
The file pattern to use for finding files.
Returns
-------
Iterator[str]:
An iterator over the found file keys.
"""
@abstractmethod
async def get(
self, key: str, as_bytes: bool | None = None, encoding: str | None = None
) -> Any:
"""Get the value for the given key.
Args
----
- key: str
The key to get the value for.
- as_bytes: bool | None, optional (default=None)
Whether or not to return the value as bytes.
- encoding: str | None, optional (default=None)
The encoding to use when decoding the value.
Returns
-------
Any:
The value for the given key.
"""
@abstractmethod
async def set(self, key: str, value: Any, encoding: str | None = None) -> None:
"""Set the value for the given key.
Args
----
- key: str
The key to set the value for.
- value: Any
The value to set.
"""
@abstractmethod
async def has(self, key: str) -> bool:
"""Return True if the given key exists in the storage.
Args
----
- key: str
The key to check for.
Returns
-------
bool:
True if the key exists in the storage, False otherwise.
"""
@abstractmethod
async def delete(self, key: str) -> None:
"""Delete the given key from the storage.
Args
----
- key: str
The key to delete.
"""
@abstractmethod
async def clear(self) -> None:
"""Clear the storage."""
@abstractmethod
def child(self, name: str | None) -> "Storage":
"""Create a child storage instance.
Args
----
- name: str | None
The name of the child storage.
Returns
-------
Storage
The child storage instance.
"""
@abstractmethod
def keys(self) -> list[str]:
"""List all keys in the storage."""
@abstractmethod
async def get_creation_date(self, key: str) -> str:
"""Get the creation date for the given key.
Args
----
- key: str
The key to get the creation date for.
Returns
-------
str:
The creation date for the given key.
"""
def get_timestamp_formatted_with_local_tz(timestamp: datetime) -> str:
"""Get the formatted timestamp with the local time zone."""
creation_time_local = timestamp.astimezone()
return creation_time_local.strftime("%Y-%m-%d %H:%M:%S %z")
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-storage/graphrag_storage/storage.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-storage/graphrag_storage/storage_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Storage configuration model."""
from pydantic import BaseModel, ConfigDict, Field
from graphrag_storage.storage_type import StorageType
class StorageConfig(BaseModel):
"""The default configuration section for storage."""
model_config = ConfigDict(extra="allow")
"""Allow extra fields to support custom storage implementations."""
type: str = Field(
description="The storage type to use. Builtin types include 'File', 'AzureBlob', and 'AzureCosmos'.",
default=StorageType.File,
)
encoding: str | None = Field(
description="The encoding to use for file storage.",
default=None,
)
base_dir: str | None = Field(
description="The base directory for the output when using file or AzureBlob storage.",
default=None,
)
connection_string: str | None = Field(
description="The connection string for remote services.",
default=None,
)
container_name: str | None = Field(
description="The Azure Blob Storage container name or CosmosDB container name to use.",
default=None,
)
account_url: str | None = Field(
description="The account url for Azure services.",
default=None,
)
database_name: str | None = Field(
description="The database name to use.",
default=None,
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-storage/graphrag_storage/storage_config.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-storage/graphrag_storage/storage_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Storage factory implementation."""
from collections.abc import Callable
from graphrag_common.factory import Factory, ServiceScope
from graphrag_storage.storage import Storage
from graphrag_storage.storage_config import StorageConfig
from graphrag_storage.storage_type import StorageType
class StorageFactory(Factory[Storage]):
"""A factory class for storage implementations."""
storage_factory = StorageFactory()
def register_storage(
storage_type: str,
storage_initializer: Callable[..., Storage],
scope: ServiceScope = "transient",
) -> None:
"""Register a custom storage implementation.
Args
----
- storage_type: str
The storage id to register.
- storage_initializer: Callable[..., Storage]
The storage initializer to register.
"""
storage_factory.register(storage_type, storage_initializer, scope)
def create_storage(config: StorageConfig) -> Storage:
"""Create a storage implementation based on the given configuration.
Args
----
- config: StorageConfig
The storage configuration to use.
Returns
-------
Storage
The created storage implementation.
"""
config_model = config.model_dump()
storage_strategy = config.type
if storage_strategy not in storage_factory:
match storage_strategy:
case StorageType.File:
from graphrag_storage.file_storage import FileStorage
register_storage(StorageType.File, FileStorage)
case StorageType.Memory:
from graphrag_storage.memory_storage import MemoryStorage
register_storage(StorageType.Memory, MemoryStorage)
case StorageType.AzureBlob:
from graphrag_storage.azure_blob_storage import AzureBlobStorage
register_storage(StorageType.AzureBlob, AzureBlobStorage)
case StorageType.AzureCosmos:
from graphrag_storage.azure_cosmos_storage import AzureCosmosStorage
register_storage(StorageType.AzureCosmos, AzureCosmosStorage)
case _:
msg = f"StorageConfig.type '{storage_strategy}' is not registered in the StorageFactory. Registered types: {', '.join(storage_factory.keys())}."
raise ValueError(msg)
return storage_factory.create(storage_strategy, config_model)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-storage/graphrag_storage/storage_factory.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-storage/graphrag_storage/storage_type.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Builtin storage implementation types."""
from enum import StrEnum
class StorageType(StrEnum):
"""Enum for storage types."""
File = "file"
Memory = "memory"
AzureBlob = "blob"
AzureCosmos = "cosmosdb"
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-storage/graphrag_storage/storage_type.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/azure_ai_search.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A package containing the Azure AI Search vector store implementation."""
from typing import Any
from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
HnswAlgorithmConfiguration,
HnswParameters,
SearchField,
SearchFieldDataType,
SearchIndex,
SimpleField,
VectorSearch,
VectorSearchAlgorithmMetric,
VectorSearchProfile,
)
from azure.search.documents.models import VectorizedQuery
from graphrag_vectors.filtering import (
AndExpr,
Condition,
FilterExpr,
NotExpr,
Operator,
OrExpr,
)
from graphrag_vectors.vector_store import (
VectorStore,
VectorStoreDocument,
VectorStoreSearchResult,
)
# Mapping from field type strings to Azure AI Search data types
FIELD_TYPE_MAPPING: dict[str, SearchFieldDataType] = {
"str": SearchFieldDataType.String,
"int": SearchFieldDataType.Int64,
"float": SearchFieldDataType.Double,
"bool": SearchFieldDataType.Boolean,
}
class AzureAISearchVectorStore(VectorStore):
"""Azure AI Search vector storage implementation."""
index_client: SearchIndexClient
def __init__(
self,
url: str,
api_key: str | None = None,
audience: str | None = None,
vector_search_profile_name: str = "vectorSearchProfile",
**kwargs: Any,
):
super().__init__(**kwargs)
if not url:
msg = "url must be provided for Azure AI Search."
raise ValueError(msg)
self.url = url
self.api_key = api_key
self.audience = audience
self.vector_search_profile_name = vector_search_profile_name
def connect(self) -> Any:
"""Connect to AI search vector storage."""
audience_arg = (
{"audience": self.audience} if self.audience and not self.api_key else {}
)
self.db_connection = SearchClient(
endpoint=self.url,
index_name=self.index_name,
credential=(
AzureKeyCredential(self.api_key)
if self.api_key
else DefaultAzureCredential()
),
**audience_arg,
)
self.index_client = SearchIndexClient(
endpoint=self.url,
credential=(
AzureKeyCredential(self.api_key)
if self.api_key
else DefaultAzureCredential()
),
**audience_arg,
)
def create_index(self) -> None:
"""Load documents into an Azure AI Search index."""
if (
self.index_name is not None
and self.index_name in self.index_client.list_index_names()
):
self.index_client.delete_index(self.index_name)
# Configure vector search profile
vector_search = VectorSearch(
algorithms=[
HnswAlgorithmConfiguration(
name="HnswAlg",
parameters=HnswParameters(
metric=VectorSearchAlgorithmMetric.COSINE
),
)
],
profiles=[
VectorSearchProfile(
name=self.vector_search_profile_name,
algorithm_configuration_name="HnswAlg",
)
],
)
# Build the list of fields
fields = [
SimpleField(
name=self.id_field,
type=SearchFieldDataType.String,
key=True,
),
SearchField(
name=self.vector_field,
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True,
hidden=False, # DRIFT needs to return the vector for client-side similarity
vector_search_dimensions=self.vector_size,
vector_search_profile_name=self.vector_search_profile_name,
),
SimpleField(
name=self.create_date_field,
type=SearchFieldDataType.String,
filterable=True,
),
SimpleField(
name=self.update_date_field,
type=SearchFieldDataType.String,
filterable=True,
),
]
# Add additional fields from the fields dictionary
for field_name, field_type in self.fields.items():
fields.append(
SimpleField(
name=field_name,
type=FIELD_TYPE_MAPPING[field_type],
filterable=True,
)
)
# Configure the index
index = SearchIndex(
name=self.index_name,
fields=fields,
vector_search=vector_search,
)
self.index_client.create_or_update_index(
index,
)
def load_documents(self, documents: list[VectorStoreDocument]) -> None:
"""Load documents into Azure AI Search as a single batch upload."""
batch: list[dict[str, Any]] = []
for document in documents:
self._prepare_document(document)
if document.vector is None:
continue
doc_dict: dict[str, Any] = {
self.id_field: document.id,
self.vector_field: document.vector,
self.create_date_field: document.create_date,
self.update_date_field: document.update_date,
}
if document.data:
for field_name in self.fields:
if field_name in document.data:
doc_dict[field_name] = document.data[field_name]
batch.append(doc_dict)
if batch:
self.db_connection.upload_documents(batch)
def _compile_filter(self, expr: FilterExpr) -> str:
"""Compile a FilterExpr into an Azure AI Search OData filter string."""
match expr:
case Condition():
return self._compile_condition(expr)
case AndExpr():
parts = [self._compile_filter(e) for e in expr.and_]
return " and ".join(f"({p})" for p in parts)
case OrExpr():
parts = [self._compile_filter(e) for e in expr.or_]
return " or ".join(f"({p})" for p in parts)
case NotExpr():
inner = self._compile_filter(expr.not_)
return f"not ({inner})"
case _:
msg = f"Unsupported filter expression type: {type(expr)}"
raise ValueError(msg)
def _compile_condition(self, cond: Condition) -> str:
"""Compile a single Condition to OData filter syntax."""
field = cond.field
value = cond.value
def quote(v: Any) -> str:
return (
f"'{v}'"
if isinstance(v, str)
else str(v).lower()
if isinstance(v, bool)
else str(v)
)
match cond.operator:
case Operator.eq:
return f"{field} eq {quote(value)}"
case Operator.ne:
return f"{field} ne {quote(value)}"
case Operator.gt:
return f"{field} gt {quote(value)}"
case Operator.gte:
return f"{field} ge {quote(value)}"
case Operator.lt:
return f"{field} lt {quote(value)}"
case Operator.lte:
return f"{field} le {quote(value)}"
case Operator.in_:
items = " or ".join(f"{field} eq {quote(v)}" for v in value)
return f"({items})"
case Operator.not_in:
items = " and ".join(f"{field} ne {quote(v)}" for v in value)
return f"({items})"
case Operator.contains:
return f"search.ismatch('{value}', '{field}')"
case Operator.startswith:
return f"search.ismatch('{value}*', '{field}')"
case Operator.endswith:
return f"search.ismatch('*{value}', '{field}')"
case Operator.exists:
return f"{field} ne null" if value else f"{field} eq null"
case _:
msg = f"Unsupported operator for Azure AI Search: {cond.operator}"
raise ValueError(msg)
def _extract_data(
self, doc: dict[str, Any], select: list[str] | None = None
) -> dict[str, Any]:
"""Extract additional field data from a document response."""
fields_to_extract = select if select is not None else list(self.fields.keys())
return {
field_name: doc[field_name]
for field_name in fields_to_extract
if field_name in doc
}
def similarity_search_by_vector(
self,
query_embedding: list[float],
k: int = 10,
select: list[str] | None = None,
filters: FilterExpr | None = None,
include_vectors: bool = True,
) -> list[VectorStoreSearchResult]:
"""Perform a vector-based similarity search."""
vectorized_query = VectorizedQuery(
vector=query_embedding,
k_nearest_neighbors=k,
fields=self.vector_field,
)
# Build the list of fields to select - always include id, vector, and timestamps
fields_to_select = [
self.id_field,
self.create_date_field,
self.update_date_field,
]
if include_vectors:
fields_to_select.append(self.vector_field)
if select is not None:
fields_to_select.extend(select)
else:
fields_to_select.extend(self.fields.keys())
# Build OData filter string
filter_str = self._compile_filter(filters) if filters is not None else None
response = self.db_connection.search(
vector_queries=[vectorized_query],
select=fields_to_select,
filter=filter_str,
)
return [
VectorStoreSearchResult(
document=VectorStoreDocument(
id=doc.get(self.id_field, ""),
vector=doc.get(self.vector_field, []) if include_vectors else None,
data=self._extract_data(doc, select),
create_date=doc.get(self.create_date_field),
update_date=doc.get(self.update_date_field),
),
# Cosine similarity between 0.333 and 1.000
# https://learn.microsoft.com/en-us/azure/search/hybrid-search-ranking#scores-in-a-hybrid-search-results
score=doc["@search.score"],
)
for doc in response
]
def search_by_id(
self,
id: str,
select: list[str] | None = None,
include_vectors: bool = True,
) -> VectorStoreDocument:
"""Search for a document by id."""
# Build the list of fields to select - always include id, vector, and timestamps
fields_to_select = [
self.id_field,
self.create_date_field,
self.update_date_field,
]
if include_vectors:
fields_to_select.append(self.vector_field)
if select is not None:
fields_to_select.extend(select)
else:
fields_to_select.extend(self.fields.keys())
response = self.db_connection.get_document(id, selected_fields=fields_to_select)
return VectorStoreDocument(
id=response[self.id_field],
vector=response.get(self.vector_field, []) if include_vectors else None,
data=self._extract_data(response, select),
create_date=response.get(self.create_date_field),
update_date=response.get(self.update_date_field),
)
def count(self) -> int:
"""Return the total number of documents in the store."""
return self.db_connection.get_document_count()
def remove(self, ids: list[str]) -> None:
"""Remove documents by their IDs."""
batch = [{"@search.action": "delete", self.id_field: id} for id in ids]
self.db_connection.upload_documents(batch)
def update(self, document: VectorStoreDocument) -> None:
"""Update an existing document in the store."""
self._prepare_update(document)
doc: dict[str, Any] = {
"@search.action": "merge",
self.id_field: document.id,
self.update_date_field: document.update_date,
}
if document.vector is not None:
doc[self.vector_field] = document.vector
if document.data:
for field_name in self.fields:
if field_name in document.data:
doc[field_name] = document.data[field_name]
self.db_connection.upload_documents([doc])
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/azure_ai_search.py",
"license": "MIT License",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/lancedb.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""The LanceDB vector storage implementation package."""
from typing import Any
import lancedb
import numpy as np
import pyarrow as pa
from graphrag_vectors.filtering import (
AndExpr,
Condition,
FilterExpr,
NotExpr,
Operator,
OrExpr,
)
from graphrag_vectors.vector_store import (
VectorStore,
VectorStoreDocument,
VectorStoreSearchResult,
)
class LanceDBVectorStore(VectorStore):
    """LanceDB vector storage implementation.

    Persists documents to a single LanceDB table whose schema is seeded in
    create_index() from the configured id/vector/timestamp fields plus the
    user-defined `fields` mapping inherited from VectorStore.
    """
    def __init__(self, db_uri: str = "lancedb", **kwargs: Any):
        """Create the store; `db_uri` is the LanceDB database location."""
        super().__init__(**kwargs)
        # URI (typically a local directory) passed to lancedb.connect().
        self.db_uri = db_uri
    def connect(self) -> Any:
        """Connect to the vector storage and open the table if it exists."""
        self.db_connection = lancedb.connect(self.db_uri)
        # If the table does not exist yet it is created by create_index().
        if self.index_name and self.index_name in self.db_connection.table_names():
            self.document_collection = self.db_connection.open_table(self.index_name)
    def create_index(self) -> None:
        """Create index."""
        # LanceDB derives the table schema from data, so a single dummy row
        # is written to establish the schema and deleted again at the end.
        dummy_vector = np.zeros(self.vector_size, dtype=np.float32)
        flat_array = pa.array(dummy_vector, type=pa.float32())
        vector_column = pa.FixedSizeListArray.from_arrays(flat_array, self.vector_size)
        # Supported field type names -> (pyarrow type factory, dummy value).
        types = {
            "str": (pa.string, "___DUMMY___"),
            "int": (pa.int64, 1),
            "float": (pa.float32, 1.0),
            "bool": (pa.bool_, True),
        }
        others = {}
        for field_name, field_type in self.fields.items():
            pa_type, dummy_value = types[field_type]
            others[field_name] = pa.array([dummy_value], type=pa_type())
        data = pa.table({
            self.id_field: pa.array(["__DUMMY__"], type=pa.string()),
            self.vector_field: vector_column,
            self.create_date_field: pa.array(["___DUMMY___"], type=pa.string()),
            self.update_date_field: pa.array(["___DUMMY___"], type=pa.string()),
            **others,
        })
        # mode="overwrite" drops any existing table with the same name.
        self.document_collection = self.db_connection.create_table(
            self.index_name if self.index_name else "",
            data=data,
            mode="overwrite",
            schema=data.schema,
        )
        # Create index now that schema exists
        self.document_collection.create_index(
            vector_column_name=self.vector_field, index_type="IVF_FLAT"
        )
        # Remove the dummy document used to set up the schema
        self.document_collection.delete(f"{self.id_field} = '__DUMMY__'")
    def load_documents(self, documents: list[VectorStoreDocument]) -> None:
        """Load documents into LanceDB as a single batch write.

        Documents without a vector are skipped. Timestamps are populated and
        exploded via _prepare_document() before writing.
        """
        ids: list[str] = []
        vectors: list[np.ndarray] = []
        create_dates: list[str | None] = []
        update_dates: list[str | None] = []
        field_columns: dict[str, list[Any]] = {name: [] for name in self.fields}
        for document in documents:
            self._prepare_document(document)
            # A document with no embedding cannot go into the vector column.
            if document.vector is None:
                continue
            ids.append(str(document.id))
            vectors.append(np.array(document.vector, dtype=np.float32))
            create_dates.append(document.create_date)
            update_dates.append(document.update_date)
            for field_name in self.fields:
                value = document.data.get(field_name) if document.data else None
                field_columns[field_name].append(value)
        if not ids:
            return
        # Flatten all vectors into one contiguous buffer for the
        # fixed-size-list arrow column.
        flat_vector = np.concatenate(vectors).astype(np.float32)
        flat_array = pa.array(flat_vector, type=pa.float32())
        vector_column = pa.FixedSizeListArray.from_arrays(flat_array, self.vector_size)
        data = pa.table({
            self.id_field: pa.array(ids, type=pa.string()),
            self.vector_field: vector_column,
            self.create_date_field: pa.array(create_dates, type=pa.string()),
            self.update_date_field: pa.array(update_dates, type=pa.string()),
            **{name: pa.array(values) for name, values in field_columns.items()},
        })
        self.document_collection.add(data)
    def _extract_data(
        self, doc: dict[str, Any], select: list[str] | None = None
    ) -> dict[str, Any]:
        """Extract additional field data from a document response.

        Returns only the requested fields (all registered fields when
        `select` is None) that are actually present in the response.
        """
        fields_to_extract = select if select is not None else list(self.fields.keys())
        return {
            field_name: doc[field_name]
            for field_name in fields_to_extract
            if field_name in doc
        }
    def _compile_filter(self, expr: FilterExpr) -> str:
        """Compile a FilterExpr into a LanceDB SQL WHERE clause.

        Recursively compiles AND/OR/NOT composites; leaf conditions are
        handled by _compile_condition().
        """
        match expr:
            case Condition():
                return self._compile_condition(expr)
            case AndExpr():
                parts = [self._compile_filter(e) for e in expr.and_]
                return " AND ".join(f"({p})" for p in parts)
            case OrExpr():
                parts = [self._compile_filter(e) for e in expr.or_]
                return " OR ".join(f"({p})" for p in parts)
            case NotExpr():
                inner = self._compile_filter(expr.not_)
                return f"NOT ({inner})"
            case _:
                msg = f"Unsupported filter expression type: {type(expr)}"
                raise ValueError(msg)
    def _compile_condition(self, cond: Condition) -> str:
        """Compile a single Condition to LanceDB SQL syntax.

        NOTE(review): values are interpolated without escaping, so a string
        value containing a single quote (or %/_ for the LIKE operators)
        produces a broken or overly-broad clause — confirm filter values are
        trusted/sanitized upstream.
        """
        field = cond.field
        value = cond.value
        def quote(v: Any) -> str:
            # Strings are single-quoted; all other values rendered via str().
            return f"'{v}'" if isinstance(v, str) else str(v)
        match cond.operator:
            case Operator.eq:
                return f"{field} = {quote(value)}"
            case Operator.ne:
                return f"{field} != {quote(value)}"
            case Operator.gt:
                return f"{field} > {quote(value)}"
            case Operator.gte:
                return f"{field} >= {quote(value)}"
            case Operator.lt:
                return f"{field} < {quote(value)}"
            case Operator.lte:
                return f"{field} <= {quote(value)}"
            case Operator.in_:
                items = ", ".join(quote(v) for v in value)
                return f"{field} IN ({items})"
            case Operator.not_in:
                items = ", ".join(quote(v) for v in value)
                return f"{field} NOT IN ({items})"
            case Operator.contains:
                return f"{field} LIKE '%{value}%'"
            case Operator.startswith:
                return f"{field} LIKE '{value}%'"
            case Operator.endswith:
                return f"{field} LIKE '%{value}'"
            case Operator.exists:
                return f"{field} IS NOT NULL" if value else f"{field} IS NULL"
            case _:
                msg = f"Unsupported operator for LanceDB: {cond.operator}"
                raise ValueError(msg)
    def similarity_search_by_vector(
        self,
        query_embedding: list[float] | np.ndarray,
        k: int = 10,
        select: list[str] | None = None,
        filters: FilterExpr | None = None,
        include_vectors: bool = True,
    ) -> list[VectorStoreSearchResult]:
        """Perform a vector-based similarity search.

        Filters (when given) are applied as a prefilter, so the k results are
        chosen from the already-filtered candidate set.
        """
        query_embedding = np.array(query_embedding, dtype=np.float32)
        query = self.document_collection.search(
            query=query_embedding, vector_column_name=self.vector_field
        )
        if filters is not None:
            query = query.where(self._compile_filter(filters), prefilter=True)
        docs = query.limit(k).to_list()
        return [
            VectorStoreSearchResult(
                document=VectorStoreDocument(
                    id=doc[self.id_field],
                    vector=doc[self.vector_field] if include_vectors else None,
                    data=self._extract_data(doc, select),
                    create_date=doc.get(self.create_date_field),
                    update_date=doc.get(self.update_date_field),
                ),
                # NOTE(review): maps LanceDB's `_distance` onto a similarity
                # score; this assumes distances stay within [0, 2] (e.g.
                # cosine distance) so the score lands in [-1, 1] — confirm
                # the configured distance metric.
                score=1 - abs(float(doc["_distance"])),
            )
            for doc in docs
        ]
    def search_by_id(
        self,
        id: str,
        select: list[str] | None = None,
        include_vectors: bool = True,
    ) -> VectorStoreDocument:
        """Search for a document by id.

        Raises IndexError if no document with the given id exists.
        """
        result = (
            self.document_collection
            .search()
            .where(f"{self.id_field} == '{id}'", prefilter=True)
            .to_list()
        )
        if result is None or len(result) == 0:
            msg = f"Document with id '{id}' not found."
            raise IndexError(msg)
        doc = result[0]
        return VectorStoreDocument(
            id=doc[self.id_field],
            vector=doc[self.vector_field] if include_vectors else None,
            data=self._extract_data(doc, select),
            create_date=doc.get(self.create_date_field),
            update_date=doc.get(self.update_date_field),
        )
    def count(self) -> int:
        """Return the total number of documents in the store."""
        return self.document_collection.count_rows()
    def remove(self, ids: list[str]) -> None:
        """Remove documents by their IDs."""
        # Single SQL delete covering all requested ids.
        id_list = ", ".join(f"'{id}'" for id in ids)
        self.document_collection.delete(f"{self.id_field} IN ({id_list})")
    def update(self, document: VectorStoreDocument) -> None:
        """Update an existing document in the store.

        Refreshes update_date (via _prepare_update), then overwrites the
        vector and any registered fields present in document.data.
        """
        self._prepare_update(document)
        # Build update values
        updates: dict[str, Any] = {
            self.update_date_field: document.update_date,
        }
        if document.vector is not None:
            updates[self.vector_field] = np.array(document.vector, dtype=np.float32)
        if document.data:
            for field_name in self.fields:
                if field_name in document.data:
                    updates[field_name] = document.data[field_name]
        self.document_collection.update(
            where=f"{self.id_field} = '{document.id}'",
            values=updates,
        )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/lancedb.py",
"license": "MIT License",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/types.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Common types for vector stores."""
from collections.abc import Callable
# A function that embeds a piece of text, returning its embedding vector.
TextEmbedder = Callable[[str], list[float]]
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/types.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/vector_store.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Base classes for vector stores."""
from abc import ABC, abstractmethod
from collections.abc import Callable
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any
from graphrag_vectors.filtering import FilterExpr
from graphrag_vectors.timestamp import (
TIMESTAMP_FIELDS,
_timestamp_fields_for,
explode_timestamp,
)
from graphrag_vectors.types import TextEmbedder
# Signature for a function that explodes an ISO 8601 timestamp into
# a dict of filterable component fields keyed by "{prefix}_{suffix}".
# Called as exploder(timestamp, field_prefix) -> str/int component values.
TimestampExploder = Callable[[str, str], dict[str, str | int]]
@dataclass
class VectorStoreDocument:
"""A document that is stored in vector storage."""
id: str | int
"""unique id for the document"""
vector: list[float] | None
"""the vector embedding for the document"""
data: dict[str, Any] = field(default_factory=dict)
"""additional data associated with the document"""
create_date: str | None = None
"""optional ISO 8601 timestamp for when the document was created"""
update_date: str | None = None
"""optional ISO 8601 timestamp for when the document was last updated"""
@dataclass
class VectorStoreSearchResult:
    """A single hit returned from a vector-store search.

    Pairs the matched document with its similarity score; the score falls
    between -1 and 1, and larger values indicate closer matches.
    """

    document: VectorStoreDocument
    score: float
class VectorStore(ABC):
    """The base class for vector storage data-access classes.

    Manages the common field layout (id, vector, create/update timestamps,
    plus user-defined fields) and the expansion of ISO 8601 timestamps into
    filterable component fields. Concrete subclasses implement connection,
    indexing, search, and mutation against a specific backend.
    """

    def __init__(
        self,
        index_name: str = "vector_index",
        id_field: str = "id",
        vector_field: str = "vector",
        create_date_field: str = "create_date",
        update_date_field: str = "update_date",
        vector_size: int = 3072,
        fields: dict[str, str] | None = None,
        timestamp_exploder: TimestampExploder = explode_timestamp,
        **kwargs: Any,
    ):
        """Create a vector store.

        Args
        ----
        index_name: Name of the index (table/collection) to read and write.
        id_field: Name of the unique-id field.
        vector_field: Name of the embedding field.
        create_date_field: Name of the creation-timestamp field.
        update_date_field: Name of the last-update-timestamp field.
        vector_size: Dimensionality of the stored embeddings.
        fields: Mapping of user-defined field names to type names
            ("str", "int", "float", "bool", or "date").
        timestamp_exploder: Function used to expand an ISO 8601 timestamp
            into filterable component fields.
        **kwargs: Extra configuration accepted for subclass flexibility.
        """
        self.index_name = index_name
        self.id_field = id_field
        self.vector_field = vector_field
        self.create_date_field = create_date_field
        self.update_date_field = update_date_field
        self.vector_size = vector_size
        # Copy the caller's mapping: it is mutated below (date fields are
        # re-typed to "str" and timestamp component fields are registered),
        # and aliasing would leak those mutations back into the caller's
        # config dict.
        self.fields: dict[str, str] = dict(fields) if fields else {}
        self.timestamp_exploder = timestamp_exploder
        # Detect user-defined date fields, store raw value as str,
        # and register their exploded component fields.
        self.date_fields: list[str] = [
            name for name, ftype in self.fields.items() if ftype == "date"
        ]
        for name in self.date_fields:
            self.fields[name] = "str"
            self.fields.update(_timestamp_fields_for(name))
        # Auto-register built-in timestamp component fields
        self.fields.update(TIMESTAMP_FIELDS)

    @staticmethod
    def _now_iso() -> str:
        """Return the current UTC time as an ISO 8601 string."""
        return datetime.now(timezone.utc).isoformat()

    def _prepare_document(self, document: VectorStoreDocument) -> None:
        """Enrich a document's data dict with exploded timestamp fields.

        Automatically sets create_date to now if not already provided.
        Explodes any user-defined date fields found in document.data.
        Call this during insert before extracting field values.
        """
        if document.data is None:
            document.data = {}
        if not document.create_date:
            document.create_date = self._now_iso()
        document.data.update(
            self.timestamp_exploder(document.create_date, "create_date")
        )
        if document.update_date:
            document.data.update(
                self.timestamp_exploder(document.update_date, "update_date")
            )
        # Explode user-defined date fields
        for name in self.date_fields:
            value = document.data.get(name)
            if value:
                document.data.update(self.timestamp_exploder(value, name))

    def _prepare_update(self, document: VectorStoreDocument) -> None:
        """Set update_date to now and explode its timestamp fields.

        Call this during update before persisting changes.
        """
        if document.data is None:
            document.data = {}
        if not document.update_date:
            document.update_date = self._now_iso()
        document.data.update(
            self.timestamp_exploder(document.update_date, "update_date")
        )

    @abstractmethod
    def connect(self) -> None:
        """Connect to vector storage."""

    @abstractmethod
    def create_index(self) -> None:
        """Create index."""

    @abstractmethod
    def load_documents(self, documents: list[VectorStoreDocument]) -> None:
        """Load documents into the vector-store."""

    def insert(self, document: VectorStoreDocument) -> None:
        """Insert a single document by delegating to load_documents."""
        self.load_documents([document])

    @abstractmethod
    def similarity_search_by_vector(
        self,
        query_embedding: list[float],
        k: int = 10,
        select: list[str] | None = None,
        filters: FilterExpr | None = None,
        include_vectors: bool = True,
    ) -> list[VectorStoreSearchResult]:
        """Perform ANN search by vector.

        Parameters
        ----------
        query_embedding : list[float]
            The query vector.
        k : int
            Number of results to return.
        select : list[str] | None
            Fields to include in results.
        filters : FilterExpr | None
            Optional filter expression to pre-filter candidates before search.
        include_vectors : bool
            Whether to include vector embeddings in results.
        """

    def similarity_search_by_text(
        self,
        text: str,
        text_embedder: TextEmbedder,
        k: int = 10,
        select: list[str] | None = None,
        filters: FilterExpr | None = None,
        include_vectors: bool = True,
    ) -> list[VectorStoreSearchResult]:
        """Perform a text-based similarity search.

        Embeds the text with `text_embedder` and delegates to
        similarity_search_by_vector; returns [] when embedding is falsy.
        """
        query_embedding = text_embedder(text)
        if query_embedding:
            return self.similarity_search_by_vector(
                query_embedding=query_embedding,
                k=k,
                select=select,
                filters=filters,
                include_vectors=include_vectors,
            )
        return []

    @abstractmethod
    def search_by_id(
        self,
        id: str,
        select: list[str] | None = None,
        include_vectors: bool = True,
    ) -> VectorStoreDocument:
        """Search for a document by id."""

    @abstractmethod
    def count(self) -> int:
        """Return the total number of documents in the store."""

    @abstractmethod
    def remove(self, ids: list[str]) -> None:
        """Remove documents by id."""

    @abstractmethod
    def update(self, document: VectorStoreDocument) -> None:
        """Update a document in the store."""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/vector_store.py",
"license": "MIT License",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/vector_store_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Parameterization settings for the default configuration."""
from pydantic import BaseModel, ConfigDict, Field
from graphrag_vectors.index_schema import DEFAULT_VECTOR_SIZE, IndexSchema
from graphrag_vectors.vector_store_type import VectorStoreType
class VectorStoreConfig(BaseModel):
    """The default configuration section for Vector Store.

    Connection-related settings are optional and apply only to the store
    types named in their descriptions.
    """

    model_config = ConfigDict(extra="allow")
    """Allow extra fields to support custom vector implementations."""

    type: str = Field(
        description="The vector store type to use.",
        default=VectorStoreType.LanceDB,
    )
    db_uri: str | None = Field(
        description="The database URI to use (only used by lancedb for built-in stores).",
        default=None,
    )
    url: str | None = Field(
        description="The database URL when type == azure_ai_search or cosmosdb.",
        default=None,
    )
    api_key: str | None = Field(
        description="The database API key when type == azure_ai_search.",
        default=None,
    )
    audience: str | None = Field(
        description="The database audience when type == azure_ai_search.",
        default=None,
    )
    connection_string: str | None = Field(
        description="The connection string when type == cosmosdb.",
        default=None,
    )
    database_name: str | None = Field(
        description="The database name to use when type == cosmosdb.",
        default=None,
    )
    vector_size: int = Field(
        description="Default vector size for all index schemas. Individual index schemas can override this value.",
        default=DEFAULT_VECTOR_SIZE,
    )
    # Use Field + default_factory (instead of a bare `{}` default) for
    # consistency with the other fields and to document the setting.
    index_schema: dict[str, IndexSchema] = Field(
        description="Per-index schema configurations, keyed by index name.",
        default_factory=dict,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/vector_store_config.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/vector_store_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Factory functions for creating a vector store."""
from __future__ import annotations
from typing import TYPE_CHECKING
from graphrag_common.factory import Factory, ServiceScope
from graphrag_vectors.vector_store import VectorStore
from graphrag_vectors.vector_store_type import VectorStoreType
if TYPE_CHECKING:
from collections.abc import Callable
from graphrag_vectors.index_schema import IndexSchema
from graphrag_vectors.vector_store_config import VectorStoreConfig
class VectorStoreFactory(Factory[VectorStore]):
    """A factory for vector stores.

    Includes a method for users to register a custom vector store implementation.
    Configuration arguments are passed to each vector store implementation as kwargs
    for individual enforcement of required/optional arguments.
    """
# Shared module-level factory instance used by register_vector_store()
# and create_vector_store() below.
vector_store_factory = VectorStoreFactory()
def register_vector_store(
    vector_store_type: str,
    vector_store_initializer: Callable[..., VectorStore],
    scope: ServiceScope = "transient",
) -> None:
    """Register a custom vector store implementation with the shared factory.

    Parameters
    ----------
    vector_store_type : str
        Identifier under which the implementation is registered.
    vector_store_initializer : Callable[..., VectorStore]
        Callable that constructs the vector store instance.
    scope : ServiceScope
        Lifetime of the registered service (defaults to "transient").
    """
    vector_store_factory.register(vector_store_type, vector_store_initializer, scope)
def create_vector_store(
    config: VectorStoreConfig, index_schema: IndexSchema
) -> VectorStore:
    """Instantiate a vector store from configuration.

    Built-in implementations are imported lazily and registered with the
    shared factory on first use.

    Parameters
    ----------
    config : VectorStoreConfig
        The base vector store configuration.
    index_schema : IndexSchema
        Schema configuration for the specific index (table) being accessed.

    Returns
    -------
    VectorStore
        The created vector store implementation.

    Raises
    ------
    ValueError
        If the configured type is neither built-in nor registered.
    """
    strategy = config.type
    if strategy not in vector_store_factory:
        # Lazily import and register the built-in implementation on demand.
        if strategy == VectorStoreType.LanceDB:
            from graphrag_vectors.lancedb import LanceDBVectorStore

            register_vector_store(VectorStoreType.LanceDB, LanceDBVectorStore)
        elif strategy == VectorStoreType.AzureAISearch:
            from graphrag_vectors.azure_ai_search import AzureAISearchVectorStore

            register_vector_store(
                VectorStoreType.AzureAISearch, AzureAISearchVectorStore
            )
        elif strategy == VectorStoreType.CosmosDB:
            from graphrag_vectors.cosmosdb import CosmosDBVectorStore

            register_vector_store(VectorStoreType.CosmosDB, CosmosDBVectorStore)
        else:
            msg = f"Vector store type '{strategy}' is not registered in the VectorStoreFactory. Registered types: {', '.join(vector_store_factory.keys())}."
            raise ValueError(msg)
    # Flatten the base config and the index-specific schema into a single
    # kwargs dict for the implementation's initializer.
    init_args = {**config.model_dump(), **index_schema.model_dump()}
    return vector_store_factory.create(strategy, init_args=init_args)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/vector_store_factory.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag-vectors/graphrag_vectors/vector_store_type.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Vector store type enum."""
from enum import StrEnum
class VectorStoreType(StrEnum):
    """The supported vector store types."""
    # Embedded LanceDB database (local, file-based).
    LanceDB = "lancedb"
    # Azure AI Search service.
    AzureAISearch = "azure_ai_search"
    # Azure Cosmos DB.
    CosmosDB = "cosmosdb"
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag-vectors/graphrag_vectors/vector_store_type.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/api/query.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Query Engine API.
This API provides access to the query engine of graphrag, allowing external applications
to hook into graphrag and run queries over a knowledge graph generated by graphrag.
Contains the following functions:
- global_search: Perform a global search.
- global_search_streaming: Perform a global search and stream results back.
- local_search: Perform a local search.
- local_search_streaming: Perform a local search and stream results back.
WARNING: This API is under development and may undergo changes in future releases.
Backwards compatibility is not guaranteed at this time.
"""
import logging
from collections.abc import AsyncGenerator
from typing import Any
import pandas as pd
from pydantic import validate_call
from graphrag.callbacks.noop_query_callbacks import NoopQueryCallbacks
from graphrag.callbacks.query_callbacks import QueryCallbacks
from graphrag.config.embeddings import (
community_full_content_embedding,
entity_description_embedding,
text_unit_text_embedding,
)
from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.logger.standard_logging import init_loggers
from graphrag.query.factory import (
get_basic_search_engine,
get_drift_search_engine,
get_global_search_engine,
get_local_search_engine,
)
from graphrag.query.indexer_adapters import (
read_indexer_communities,
read_indexer_covariates,
read_indexer_entities,
read_indexer_relationships,
read_indexer_report_embeddings,
read_indexer_reports,
read_indexer_text_units,
)
from graphrag.utils.api import (
get_embedding_store,
load_search_prompt,
truncate,
)
from graphrag.utils.cli import redact
# Initialize standard logger
logger = logging.getLogger(__name__)
@validate_call(config={"arbitrary_types_allowed": True})
async def global_search(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    community_level: int | None,
    dynamic_community_selection: bool,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> tuple[
    str | dict[str, Any] | list[dict[str, Any]],
    str | list[pd.DataFrame] | dict[str, pd.DataFrame],
]:
    """Run a global search and collect the full response and context data.

    Drives the streaming variant to completion, concatenating the streamed
    chunks into one response string and capturing the final context data
    through a callback.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - community_level (int | None): The community level to search at.
    - dynamic_community_selection (bool): Enable dynamic community selection instead of using all community reports at a fixed level. community_level still caps the maximum level to search.
    - response_type (str): The type of response to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose (debug) logging.

    Returns
    -------
    A (response, context_data) tuple.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    callbacks = callbacks or []
    context_data: Any = {}

    def _capture_context(context: Any) -> None:
        nonlocal context_data
        context_data = context

    capture = NoopQueryCallbacks()
    capture.on_context = _capture_context
    callbacks.append(capture)
    logger.debug("Executing global search query: %s", query)
    chunks: list[str] = []
    async for chunk in global_search_streaming(
        config=config,
        entities=entities,
        communities=communities,
        community_reports=community_reports,
        community_level=community_level,
        dynamic_community_selection=dynamic_community_selection,
        response_type=response_type,
        query=query,
        callbacks=callbacks,
    ):
        chunks.append(chunk)
    full_response = "".join(chunks)
    logger.debug("Query response: %s", truncate(full_response, 400))
    return full_response, context_data
@validate_call(config={"arbitrary_types_allowed": True})
def global_search_streaming(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    community_level: int | None,
    dynamic_community_selection: bool,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> AsyncGenerator:
    """Perform a global search and return the context data and response via a generator.

    Context data is returned as a dictionary of lists, with one list entry for each record.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - community_level (int | None): The community level to search at.
    - dynamic_community_selection (bool): Enable dynamic community selection instead of using all community reports at a fixed level. Note that you can still provide community_level to cap the maximum level to search.
    - response_type (str): The type of response to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose (debug) logging.

    Returns
    -------
    An async generator yielding response chunks as they are produced.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    # Convert the raw dataframes into the query engine's model objects.
    communities_ = read_indexer_communities(communities, community_reports)
    reports = read_indexer_reports(
        community_reports,
        communities,
        community_level=community_level,
        dynamic_community_selection=dynamic_community_selection,
    )
    entities_ = read_indexer_entities(
        entities, communities, community_level=community_level
    )
    # Load the (possibly user-overridden) prompts for the map/reduce phases.
    map_prompt = load_search_prompt(config.global_search.map_prompt)
    reduce_prompt = load_search_prompt(config.global_search.reduce_prompt)
    knowledge_prompt = load_search_prompt(config.global_search.knowledge_prompt)
    logger.debug("Executing streaming global search query: %s", query)
    search_engine = get_global_search_engine(
        config,
        reports=reports,
        entities=entities_,
        communities=communities_,
        response_type=response_type,
        dynamic_community_selection=dynamic_community_selection,
        map_system_prompt=map_prompt,
        reduce_system_prompt=reduce_prompt,
        general_knowledge_inclusion_prompt=knowledge_prompt,
        callbacks=callbacks,
    )
    return search_engine.stream_search(query=query)
@validate_call(config={"arbitrary_types_allowed": True})
async def local_search(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    text_units: pd.DataFrame,
    relationships: pd.DataFrame,
    covariates: pd.DataFrame | None,
    community_level: int,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> tuple[
    str | dict[str, Any] | list[dict[str, Any]],
    str | list[pd.DataFrame] | dict[str, pd.DataFrame],
]:
    """Perform a local search and return the context data and response.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - relationships (pd.DataFrame): A DataFrame containing the final relationships (from relationships.parquet)
    - covariates (pd.DataFrame | None): A DataFrame containing the final covariates (from covariates.parquet)
    - community_level (int): The community level to search at.
    - response_type (str): The response type to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose (debug) logging.

    Returns
    -------
    TODO: Document the search response type and format.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    callbacks = callbacks or []
    full_response = ""
    context_data = {}
    # The streaming generator only yields response text; the final context
    # is captured through this callback instead.
    def on_context(context: Any) -> None:
        nonlocal context_data
        context_data = context
    local_callbacks = NoopQueryCallbacks()
    local_callbacks.on_context = on_context
    callbacks.append(local_callbacks)
    logger.debug("Executing local search query: %s", query)
    # Accumulate the streamed chunks into the full response string.
    async for chunk in local_search_streaming(
        config=config,
        entities=entities,
        communities=communities,
        community_reports=community_reports,
        text_units=text_units,
        relationships=relationships,
        covariates=covariates,
        community_level=community_level,
        response_type=response_type,
        query=query,
        callbacks=callbacks,
    ):
        full_response += chunk
    logger.debug("Query response: %s", truncate(full_response, 400))
    return full_response, context_data
@validate_call(config={"arbitrary_types_allowed": True})
def local_search_streaming(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    text_units: pd.DataFrame,
    relationships: pd.DataFrame,
    covariates: pd.DataFrame | None,
    community_level: int,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> AsyncGenerator:
    """Perform a local search and return the context data and response via a generator.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - relationships (pd.DataFrame): A DataFrame containing the final relationships (from relationships.parquet)
    - covariates (pd.DataFrame | None): A DataFrame containing the final covariates (from covariates.parquet)
    - community_level (int): The community level to search at.
    - response_type (str): The response type to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose (debug) logging.

    Returns
    -------
    An async generator yielding response chunks as they are produced.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    msg = f"Vector Store Args: {redact(config.vector_store.model_dump())}"
    logger.debug(msg)
    # Entity-description embeddings are looked up from the configured store.
    description_embedding_store = get_embedding_store(
        config=config.vector_store,
        embedding_name=entity_description_embedding,
    )
    entities_ = read_indexer_entities(entities, communities, community_level)
    # Covariates (claims) are optional; default to an empty list when absent.
    covariates_ = read_indexer_covariates(covariates) if covariates is not None else []
    prompt = load_search_prompt(config.local_search.prompt)
    logger.debug("Executing streaming local search query: %s", query)
    search_engine = get_local_search_engine(
        config=config,
        reports=read_indexer_reports(community_reports, communities, community_level),
        text_units=read_indexer_text_units(text_units),
        entities=entities_,
        relationships=read_indexer_relationships(relationships),
        covariates={"claims": covariates_},
        description_embedding_store=description_embedding_store,
        response_type=response_type,
        system_prompt=prompt,
        callbacks=callbacks,
    )
    return search_engine.stream_search(query=query)
@validate_call(config={"arbitrary_types_allowed": True})
async def drift_search(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    text_units: pd.DataFrame,
    relationships: pd.DataFrame,
    community_level: int,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> tuple[
    str | dict[str, Any] | list[dict[str, Any]],
    str | list[pd.DataFrame] | dict[str, pd.DataFrame],
]:
    """Perform a DRIFT search and return the context data and response.

    Collects the streamed chunks from ``drift_search_streaming`` into a single
    response string and captures the final context data via a callback.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - relationships (pd.DataFrame): A DataFrame containing the final relationships (from relationships.parquet)
    - community_level (int): The community level to search at.
    - response_type (str): The response type to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose logging.

    Returns
    -------
    TODO: Document the search response type and format.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")

    # Copy the caller's callback list (if any) so that appending our internal
    # context-capturing callback does not mutate a list the caller owns.
    callbacks = list(callbacks) if callbacks else []
    full_response = ""
    context_data = {}

    def on_context(context: Any) -> None:
        # Capture the most recent context emitted by the search engine.
        nonlocal context_data
        context_data = context

    local_callbacks = NoopQueryCallbacks()
    local_callbacks.on_context = on_context
    callbacks.append(local_callbacks)

    logger.debug("Executing drift search query: %s", query)
    async for chunk in drift_search_streaming(
        config=config,
        entities=entities,
        communities=communities,
        community_reports=community_reports,
        text_units=text_units,
        relationships=relationships,
        community_level=community_level,
        response_type=response_type,
        query=query,
        callbacks=callbacks,
        # Propagate verbosity so the delegate's logger setup matches ours
        # (previously the flag was silently dropped here).
        verbose=verbose,
    ):
        full_response += chunk
    logger.debug("Query response: %s", truncate(full_response, 400))
    return full_response, context_data
@validate_call(config={"arbitrary_types_allowed": True})
def drift_search_streaming(
    config: GraphRagConfig,
    entities: pd.DataFrame,
    communities: pd.DataFrame,
    community_reports: pd.DataFrame,
    text_units: pd.DataFrame,
    relationships: pd.DataFrame,
    community_level: int,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> AsyncGenerator:
    """Perform a DRIFT search, returning response chunks via an async generator.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - entities (pd.DataFrame): A DataFrame containing the final entities (from entities.parquet)
    - communities (pd.DataFrame): A DataFrame containing the final communities (from communities.parquet)
    - community_reports (pd.DataFrame): A DataFrame containing the final community reports (from community_reports.parquet)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - relationships (pd.DataFrame): A DataFrame containing the final relationships (from relationships.parquet)
    - community_level (int): The community level to search at.
    - response_type (str): The response type to return.
    - query (str): The user query to search for.

    Returns
    -------
    TODO: Document the search response type and format.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    logger.debug(f"Vector Store Args: {redact(config.vector_store.model_dump())}")

    # DRIFT needs two embedding spaces: entity descriptions and full-content
    # community reports.
    entity_store = get_embedding_store(
        config=config.vector_store,
        embedding_name=entity_description_embedding,
    )
    report_store = get_embedding_store(
        config=config.vector_store,
        embedding_name=community_full_content_embedding,
    )

    indexed_entities = read_indexer_entities(entities, communities, community_level)
    indexed_reports = read_indexer_reports(
        community_reports, communities, community_level
    )
    read_indexer_report_embeddings(indexed_reports, report_store)

    local_prompt = load_search_prompt(config.drift_search.prompt)
    reduction_prompt = load_search_prompt(config.drift_search.reduce_prompt)

    logger.debug("Executing streaming drift search query: %s", query)
    engine = get_drift_search_engine(
        config=config,
        reports=indexed_reports,
        text_units=read_indexer_text_units(text_units),
        entities=indexed_entities,
        relationships=read_indexer_relationships(relationships),
        description_embedding_store=entity_store,
        local_system_prompt=local_prompt,
        reduce_system_prompt=reduction_prompt,
        response_type=response_type,
        callbacks=callbacks,
    )
    return engine.stream_search(query=query)
@validate_call(config={"arbitrary_types_allowed": True})
async def basic_search(
    config: GraphRagConfig,
    text_units: pd.DataFrame,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> tuple[
    str | dict[str, Any] | list[dict[str, Any]],
    str | list[pd.DataFrame] | dict[str, pd.DataFrame],
]:
    """Perform a basic search and return the context data and response.

    Collects the streamed chunks from ``basic_search_streaming`` into a single
    response string and captures the final context data via a callback.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - response_type (str): The response type to return.
    - query (str): The user query to search for.
    - callbacks (list[QueryCallbacks] | None): Optional callbacks invoked during the search.
    - verbose (bool): Enable verbose logging.

    Returns
    -------
    TODO: Document the search response type and format.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")

    # Copy the caller's callback list (if any) so that appending our internal
    # context-capturing callback does not mutate a list the caller owns.
    callbacks = list(callbacks) if callbacks else []
    full_response = ""
    context_data = {}

    def on_context(context: Any) -> None:
        # Capture the most recent context emitted by the search engine.
        nonlocal context_data
        context_data = context

    local_callbacks = NoopQueryCallbacks()
    local_callbacks.on_context = on_context
    callbacks.append(local_callbacks)

    logger.debug("Executing basic search query: %s", query)
    async for chunk in basic_search_streaming(
        config=config,
        text_units=text_units,
        response_type=response_type,
        query=query,
        callbacks=callbacks,
        # Propagate verbosity so the delegate's logger setup matches ours
        # (previously the flag was silently dropped here).
        verbose=verbose,
    ):
        full_response += chunk
    logger.debug("Query response: %s", truncate(full_response, 400))
    return full_response, context_data
@validate_call(config={"arbitrary_types_allowed": True})
def basic_search_streaming(
    config: GraphRagConfig,
    text_units: pd.DataFrame,
    response_type: str,
    query: str,
    callbacks: list[QueryCallbacks] | None = None,
    verbose: bool = False,
) -> AsyncGenerator:
    """Perform a basic search, returning response chunks via an async generator.

    Parameters
    ----------
    - config (GraphRagConfig): A graphrag configuration (from settings.yaml)
    - text_units (pd.DataFrame): A DataFrame containing the final text units (from text_units.parquet)
    - response_type (str): The response type to return.
    - query (str): The user query to search for.

    Returns
    -------
    TODO: Document the search response type and format.
    """
    init_loggers(config=config, verbose=verbose, filename="query.log")
    logger.debug(f"Vector Store Args: {redact(config.vector_store.model_dump())}")

    # Basic search retrieves directly against the text-unit embedding space.
    text_unit_store = get_embedding_store(
        config=config.vector_store,
        embedding_name=text_unit_text_embedding,
    )
    system_prompt = load_search_prompt(config.basic_search.prompt)

    logger.debug("Executing streaming basic search query: %s", query)
    engine = get_basic_search_engine(
        config=config,
        text_units=read_indexer_text_units(text_units),
        text_unit_embeddings=text_unit_store,
        response_type=response_type,
        system_prompt=system_prompt,
        callbacks=callbacks,
    )
    return engine.stream_search(query=query)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/api/query.py",
"license": "MIT License",
"lines": 474,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.