sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
microsoft/graphrag:packages/graphrag/graphrag/cache/cache_key_creator.py | # Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""Cache key creation for Graphrag."""
from typing import Any
from graphrag_llm.cache import create_cache_key
_CACHE_VERSION = 4
"""
If there's a breaking change in what we cache, we should increment this version number to invalidate existing caches.
fnllm was on cache version 2 and though we generate
similar cache keys, the objects stored in cache by fnllm and litellm are different.
Using litellm model providers will not be able to reuse caches generated by fnllm
thus we start with version 3 for litellm.
graphrag-llm package is now on version 4.
This is to account for changes to the ModelConfig that affect the cache key and
occurred when pulling this package out of graphrag.
graphrag-llm, now that is supports metrics, also caches metrics which were not cached before.
"""
def cache_key_creator(
    input_args: dict[str, Any],
) -> str:
    """Generate a cache key based on input arguments.

    Delegates hashing of the arguments to ``create_cache_key`` and appends
    the module-level ``_CACHE_VERSION`` so bumping the version invalidates
    all previously written cache entries.

    Args
    ----
    input_args: dict[str, Any]
        The input arguments for the model call.

    Returns
    -------
    str
        The generated cache key in the format `{data_hash}_v{version}`.
    """
    base_key = create_cache_key(input_args)
    return f"{base_key}_v{_CACHE_VERSION}"
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/cache/cache_key_creator.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/cli/query.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""CLI implementation of the query subcommand."""
import asyncio
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any
from graphrag_storage import create_storage
from graphrag_storage.tables.table_provider_factory import create_table_provider
import graphrag.api as api
from graphrag.callbacks.noop_query_callbacks import NoopQueryCallbacks
from graphrag.config.load_config import load_config
from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.data_model.data_reader import DataReader
if TYPE_CHECKING:
import pandas as pd
# ruff: noqa: T201
def run_global_search(
    data_dir: Path | None,
    root_dir: Path,
    community_level: int | None,
    dynamic_community_selection: bool,
    response_type: str,
    streaming: bool,
    query: str,
    verbose: bool,
):
    """Perform a global search with a given query.

    Loads index files required for global search and calls the Query API.
    Prints the response to stdout and returns ``(response, context_data)``,
    where ``context_data`` is the context captured via the query callbacks.
    """
    cli_overrides: dict[str, Any] = {}
    # Allow the CLI data directory flag to override the configured output storage.
    if data_dir:
        cli_overrides["output_storage"] = {"base_dir": str(data_dir)}
    config = load_config(
        root_dir=root_dir,
        cli_overrides=cli_overrides,
    )
    dataframe_dict = _resolve_output_files(
        config=config,
        output_list=[
            "entities",
            "communities",
            "community_reports",
        ],
        optional_list=[],
    )
    entities: pd.DataFrame = dataframe_dict["entities"]
    communities: pd.DataFrame = dataframe_dict["communities"]
    community_reports: pd.DataFrame = dataframe_dict["community_reports"]

    if streaming:

        async def run_streaming_search():
            full_response = ""
            context_data = {}

            def on_context(context: Any) -> None:
                # Capture the final context payload reported by the search.
                nonlocal context_data
                context_data = context

            callbacks = NoopQueryCallbacks()
            callbacks.on_context = on_context
            async for stream_chunk in api.global_search_streaming(
                config=config,
                entities=entities,
                communities=communities,
                community_reports=community_reports,
                community_level=community_level,
                dynamic_community_selection=dynamic_community_selection,
                response_type=response_type,
                query=query,
                callbacks=[callbacks],
                verbose=verbose,
            ):
                full_response += stream_chunk
                # Echo each chunk immediately for responsive CLI output.
                print(stream_chunk, end="")
                sys.stdout.flush()
            print()
            return full_response, context_data

        return asyncio.run(run_streaming_search())

    # not streaming
    response, context_data = asyncio.run(
        api.global_search(
            config=config,
            entities=entities,
            communities=communities,
            community_reports=community_reports,
            community_level=community_level,
            dynamic_community_selection=dynamic_community_selection,
            response_type=response_type,
            query=query,
            verbose=verbose,
        )
    )
    print(response)
    return response, context_data
def run_local_search(
    data_dir: Path | None,
    root_dir: Path,
    community_level: int,
    response_type: str,
    streaming: bool,
    query: str,
    verbose: bool,
):
    """Perform a local search with a given query.

    Loads index files required for local search and calls the Query API.
    Prints the response to stdout and returns ``(response, context_data)``.
    """
    cli_overrides: dict[str, Any] = {}
    # Allow the CLI data directory flag to override the configured output storage.
    if data_dir:
        cli_overrides["output_storage"] = {"base_dir": str(data_dir)}
    config = load_config(
        root_dir=root_dir,
        cli_overrides=cli_overrides,
    )
    dataframe_dict = _resolve_output_files(
        config=config,
        output_list=[
            "communities",
            "community_reports",
            "text_units",
            "relationships",
            "entities",
        ],
        # covariates are produced only when claim extraction is enabled,
        # so they are resolved as optional (None when absent).
        optional_list=[
            "covariates",
        ],
    )
    communities: pd.DataFrame = dataframe_dict["communities"]
    community_reports: pd.DataFrame = dataframe_dict["community_reports"]
    text_units: pd.DataFrame = dataframe_dict["text_units"]
    relationships: pd.DataFrame = dataframe_dict["relationships"]
    entities: pd.DataFrame = dataframe_dict["entities"]
    covariates: pd.DataFrame | None = dataframe_dict["covariates"]

    if streaming:

        async def run_streaming_search():
            full_response = ""
            context_data = {}

            def on_context(context: Any) -> None:
                # Capture the final context payload reported by the search.
                nonlocal context_data
                context_data = context

            callbacks = NoopQueryCallbacks()
            callbacks.on_context = on_context
            async for stream_chunk in api.local_search_streaming(
                config=config,
                entities=entities,
                communities=communities,
                community_reports=community_reports,
                text_units=text_units,
                relationships=relationships,
                covariates=covariates,
                community_level=community_level,
                response_type=response_type,
                query=query,
                callbacks=[callbacks],
                verbose=verbose,
            ):
                full_response += stream_chunk
                # Echo each chunk immediately for responsive CLI output.
                print(stream_chunk, end="")
                sys.stdout.flush()
            print()
            return full_response, context_data

        return asyncio.run(run_streaming_search())

    # not streaming
    response, context_data = asyncio.run(
        api.local_search(
            config=config,
            entities=entities,
            communities=communities,
            community_reports=community_reports,
            text_units=text_units,
            relationships=relationships,
            covariates=covariates,
            community_level=community_level,
            response_type=response_type,
            query=query,
            verbose=verbose,
        )
    )
    print(response)
    return response, context_data
def run_drift_search(
    data_dir: Path | None,
    root_dir: Path,
    community_level: int,
    response_type: str,
    streaming: bool,
    query: str,
    verbose: bool,
):
    """Perform a DRIFT search with a given query.

    Loads index files required for DRIFT search and calls the Query API.
    Prints the response to stdout and returns ``(response, context_data)``.
    """
    cli_overrides: dict[str, Any] = {}
    # Allow the CLI data directory flag to override the configured output storage.
    if data_dir:
        cli_overrides["output_storage"] = {"base_dir": str(data_dir)}
    config = load_config(
        root_dir=root_dir,
        cli_overrides=cli_overrides,
    )
    dataframe_dict = _resolve_output_files(
        config=config,
        output_list=[
            "communities",
            "community_reports",
            "text_units",
            "relationships",
            "entities",
        ],
    )
    communities: pd.DataFrame = dataframe_dict["communities"]
    community_reports: pd.DataFrame = dataframe_dict["community_reports"]
    text_units: pd.DataFrame = dataframe_dict["text_units"]
    relationships: pd.DataFrame = dataframe_dict["relationships"]
    entities: pd.DataFrame = dataframe_dict["entities"]

    if streaming:

        async def run_streaming_search():
            full_response = ""
            context_data = {}

            def on_context(context: Any) -> None:
                # Capture the final context payload reported by the search.
                nonlocal context_data
                context_data = context

            callbacks = NoopQueryCallbacks()
            callbacks.on_context = on_context
            async for stream_chunk in api.drift_search_streaming(
                config=config,
                entities=entities,
                communities=communities,
                community_reports=community_reports,
                text_units=text_units,
                relationships=relationships,
                community_level=community_level,
                response_type=response_type,
                query=query,
                callbacks=[callbacks],
                verbose=verbose,
            ):
                full_response += stream_chunk
                # Echo each chunk immediately for responsive CLI output.
                print(stream_chunk, end="")
                sys.stdout.flush()
            print()
            return full_response, context_data

        return asyncio.run(run_streaming_search())

    # not streaming
    response, context_data = asyncio.run(
        api.drift_search(
            config=config,
            entities=entities,
            communities=communities,
            community_reports=community_reports,
            text_units=text_units,
            relationships=relationships,
            community_level=community_level,
            response_type=response_type,
            query=query,
            verbose=verbose,
        )
    )
    print(response)
    return response, context_data
def run_basic_search(
    data_dir: Path | None,
    root_dir: Path,
    response_type: str,
    streaming: bool,
    query: str,
    verbose: bool,
):
    """Perform a basic search with a given query.

    Loads index files required for basic search and calls the Query API.
    Prints the response to stdout and returns ``(response, context_data)``.
    """
    cli_overrides: dict[str, Any] = {}
    # Allow the CLI data directory flag to override the configured output storage.
    if data_dir:
        cli_overrides["output_storage"] = {"base_dir": str(data_dir)}
    config = load_config(
        root_dir=root_dir,
        cli_overrides=cli_overrides,
    )
    # Basic search only needs the text units (raw chunk embeddings search).
    dataframe_dict = _resolve_output_files(
        config=config,
        output_list=[
            "text_units",
        ],
    )
    text_units: pd.DataFrame = dataframe_dict["text_units"]

    if streaming:

        async def run_streaming_search():
            full_response = ""
            context_data = {}

            def on_context(context: Any) -> None:
                # Capture the final context payload reported by the search.
                nonlocal context_data
                context_data = context

            callbacks = NoopQueryCallbacks()
            callbacks.on_context = on_context
            async for stream_chunk in api.basic_search_streaming(
                config=config,
                text_units=text_units,
                response_type=response_type,
                query=query,
                callbacks=[callbacks],
                verbose=verbose,
            ):
                full_response += stream_chunk
                # Echo each chunk immediately for responsive CLI output.
                print(stream_chunk, end="")
                sys.stdout.flush()
            print()
            return full_response, context_data

        return asyncio.run(run_streaming_search())

    # not streaming
    response, context_data = asyncio.run(
        api.basic_search(
            config=config,
            text_units=text_units,
            response_type=response_type,
            query=query,
            verbose=verbose,
        )
    )
    print(response)
    return response, context_data
def _resolve_output_files(
    config: GraphRagConfig,
    output_list: list[str],
    optional_list: list[str] | None = None,
) -> dict[str, Any]:
    """Read indexing output files to a dataframe dict, with correct column types.

    Parameters
    ----------
    config : GraphRagConfig
        The loaded project configuration; its output/table settings determine
        where tables are read from.
    output_list : list[str]
        Names of required outputs; each must match a ``DataReader`` accessor.
    optional_list : list[str] | None
        Names of optional outputs; missing ones map to ``None`` instead of
        raising.

    Returns
    -------
    dict[str, Any]
        Mapping of output name to dataframe (or ``None`` for absent optionals).
    """
    storage_obj = create_storage(config.output_storage)
    table_provider = create_table_provider(config.table_provider, storage=storage_obj)
    reader = DataReader(table_provider)

    async def _load_all() -> dict[str, Any]:
        # One coroutine loading everything sequentially, so a single event
        # loop is used for all reads.
        dataframe_dict: dict[str, Any] = {}
        for name in output_list:
            dataframe_dict[name] = await getattr(reader, name)()
        # for optional output files, set the dict entry to None instead of
        # erroring out if it does not exist
        for optional_file in optional_list or []:
            if await table_provider.has(optional_file):
                dataframe_dict[optional_file] = await getattr(reader, optional_file)()
            else:
                dataframe_dict[optional_file] = None
        return dataframe_dict

    # Previously each table read called asyncio.run individually, creating and
    # destroying an event loop per table; run everything under one loop.
    return asyncio.run(_load_all())
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/cli/query.py",
"license": "MIT License",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/config/embeddings.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing embeddings values."""
# Canonical names for each built-in embedding target.
entity_description_embedding = "entity_description"
community_full_content_embedding = "community_full_content"
text_unit_text_embedding = "text_unit_text"

# The embeddings performed by default, in their canonical order.
default_embeddings: list[str] = [
    entity_description_embedding,
    community_full_content_embedding,
    text_unit_text_embedding,
]

# Every known embedding name (same members as the default list).
all_embeddings: set[str] = set(default_embeddings)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/config/embeddings.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/config/enums.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing config enums."""
from __future__ import annotations
from enum import Enum
class ReportingType(str, Enum):
    """Where pipeline reporting output is written."""

    file = "file"
    """Report to the local filesystem."""

    blob = "blob"
    """Report to blob storage."""

    def __repr__(self):
        """Represent the member as its double-quoted value."""
        return f'"{self.value}"'
class AsyncType(str, Enum):
    """The flavor of asynchronous execution to use."""

    AsyncIO = "asyncio"
    """asyncio-based execution."""

    Threaded = "threaded"
    """thread-based execution."""
class SearchMethod(Enum):
    """The kind of search to execute against the index."""

    LOCAL = "local"
    GLOBAL = "global"
    DRIFT = "drift"
    BASIC = "basic"

    def __str__(self):
        """Render the member as its plain value string."""
        return self.value
class IndexingMethod(str, Enum):
    """The indexing strategy to run."""

    Standard = "standard"
    """Traditional GraphRAG indexing: graph construction and summarization are both performed by a language model."""

    Fast = "fast"
    """Fast indexing: NLP-based graph construction, language model summarization."""

    StandardUpdate = "standard-update"
    """Incremental update using the standard indexing strategy."""

    FastUpdate = "fast-update"
    """Incremental update using the fast indexing strategy."""
class NounPhraseExtractorType(str, Enum):
    """Available noun phrase extractor implementations."""

    RegexEnglish = "regex_english"
    """Regex-based extractor; the fastest option but English-only."""

    Syntactic = "syntactic_parser"
    """SpaCy-based extractor using dependency parsing and NER."""

    CFG = "cfg"
    """Extractor combining CFG-based noun-chunk extraction with NER."""
class ModularityMetric(str, Enum):
    """Enum for the modularity metric to use."""

    Graph = "graph"
    """Graph modularity metric."""

    # FIX: LCC previously had no member docstring, unlike every sibling member.
    LCC = "lcc"
    """LCC (largest connected component) modularity metric."""

    WeightedComponents = "weighted_components"
    """Weighted components modularity metric."""
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/config/enums.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/config/load_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Default method for loading config."""
from pathlib import Path
from typing import Any
from graphrag_common.config import load_config as lc
from graphrag.config.models.graph_rag_config import GraphRagConfig
def load_config(
    root_dir: str | Path,
    cli_overrides: dict[str, Any] | None = None,
) -> GraphRagConfig:
    """Load a GraphRagConfig from a project directory.

    Delegates to the shared loader, which searches *root_dir* for a
    settings.[yaml|yml|json] file and layers *cli_overrides* on top.

    Parameters
    ----------
    root_dir : str | Path
        The root directory of the project.
    cli_overrides : dict[str, Any] | None
        A nested dictionary of cli overrides.
        Example: {'output': {'base_dir': 'override_value'}}

    Returns
    -------
    GraphRagConfig
        The loaded configuration.

    Raises
    ------
    FileNotFoundError
        If the config file is not found.
    ConfigParsingError
        If there was an error parsing the config file or its environment variables.
    ValidationError
        If there are pydantic validation errors when instantiating the config.
    """
    return lc(
        config_path=root_dir,
        config_initializer=GraphRagConfig,
        overrides=cli_overrides,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/config/load_config.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/config/models/embed_text_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Parameterization settings for the default configuration."""
from pydantic import BaseModel, Field
from graphrag.config.defaults import graphrag_config_defaults
class EmbedTextConfig(BaseModel):
    """Configuration section for text embeddings."""

    # ID of the embedding model to use; must match an entry in the project's
    # embedding model configurations.
    embedding_model_id: str = Field(
        description="The model ID to use for text embeddings.",
        default=graphrag_config_defaults.embed_text.embedding_model_id,
    )
    # Singleton instance name; per the description, primarily affects cache
    # storage partitioning.
    model_instance_name: str = Field(
        description="The model singleton instance name. This primarily affects the cache storage partitioning.",
        default=graphrag_config_defaults.embed_text.model_instance_name,
    )
    # Number of texts sent per embedding batch.
    batch_size: int = Field(
        description="The batch size to use.",
        default=graphrag_config_defaults.embed_text.batch_size,
    )
    # Token budget per batch.
    batch_max_tokens: int = Field(
        description="The batch max tokens to use.",
        default=graphrag_config_defaults.embed_text.batch_max_tokens,
    )
    # Which named embeddings to perform (see graphrag.config.embeddings).
    names: list[str] = Field(
        description="The specific embeddings to perform.",
        default=graphrag_config_defaults.embed_text.names,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/config/models/embed_text_config.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/config/models/graph_rag_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Parameterization settings for the default configuration."""
from dataclasses import asdict
from pathlib import Path
from devtools import pformat
from graphrag_cache import CacheConfig
from graphrag_chunking.chunking_config import ChunkingConfig
from graphrag_input import InputConfig
from graphrag_llm.config import ModelConfig
from graphrag_storage import StorageConfig, StorageType
from graphrag_storage.tables.table_provider_config import TableProviderConfig
from graphrag_vectors import IndexSchema, VectorStoreConfig, VectorStoreType
from pydantic import BaseModel, Field, model_validator
from graphrag.config.defaults import graphrag_config_defaults
from graphrag.config.embeddings import all_embeddings
from graphrag.config.enums import AsyncType, ReportingType
from graphrag.config.models.basic_search_config import BasicSearchConfig
from graphrag.config.models.cluster_graph_config import ClusterGraphConfig
from graphrag.config.models.community_reports_config import CommunityReportsConfig
from graphrag.config.models.drift_search_config import DRIFTSearchConfig
from graphrag.config.models.embed_text_config import EmbedTextConfig
from graphrag.config.models.extract_claims_config import ExtractClaimsConfig
from graphrag.config.models.extract_graph_config import ExtractGraphConfig
from graphrag.config.models.extract_graph_nlp_config import ExtractGraphNLPConfig
from graphrag.config.models.global_search_config import GlobalSearchConfig
from graphrag.config.models.local_search_config import LocalSearchConfig
from graphrag.config.models.prune_graph_config import PruneGraphConfig
from graphrag.config.models.reporting_config import ReportingConfig
from graphrag.config.models.snapshots_config import SnapshotsConfig
from graphrag.config.models.summarize_descriptions_config import (
SummarizeDescriptionsConfig,
)
class GraphRagConfig(BaseModel):
    """Base class for the Default-Configuration parameterization settings."""

    def __repr__(self) -> str:
        """Get a string representation."""
        return pformat(self, highlight=False)

    def __str__(self):
        """Get a string representation."""
        return self.model_dump_json(indent=4)

    completion_models: dict[str, ModelConfig] = Field(
        description="Available completion model configurations.",
        default=graphrag_config_defaults.completion_models,
    )
    embedding_models: dict[str, ModelConfig] = Field(
        description="Available embedding model configurations.",
        default=graphrag_config_defaults.embedding_models,
    )
    concurrent_requests: int = Field(
        description="The default number of concurrent requests to make to language models.",
        default=graphrag_config_defaults.concurrent_requests,
    )
    async_mode: AsyncType = Field(
        description="The default asynchronous mode to use for language model requests.",
        default=graphrag_config_defaults.async_mode,
    )
    input: InputConfig = Field(
        description="The input configuration.", default=InputConfig()
    )
    """The input configuration."""
    input_storage: StorageConfig = Field(
        description="The input storage configuration.",
        default=StorageConfig(
            base_dir=graphrag_config_defaults.input_storage.base_dir,
        ),
    )
    """The input storage configuration."""

    def _validate_input_base_dir(self) -> None:
        """Validate the input base directory."""
        if self.input_storage.type == StorageType.File:
            if not self.input_storage.base_dir:
                msg = "input storage base directory is required for file input storage. Please rerun `graphrag init` and set the input storage configuration."
                raise ValueError(msg)
            self.input_storage.base_dir = str(
                Path(self.input_storage.base_dir).resolve()
            )

    chunking: ChunkingConfig = Field(
        description="The chunking configuration to use.",
        default=ChunkingConfig(
            type=graphrag_config_defaults.chunking.type,
            size=graphrag_config_defaults.chunking.size,
            overlap=graphrag_config_defaults.chunking.overlap,
            encoding_model=graphrag_config_defaults.chunking.encoding_model,
            prepend_metadata=graphrag_config_defaults.chunking.prepend_metadata,
        ),
    )
    """The chunking configuration to use."""
    output_storage: StorageConfig = Field(
        description="The output configuration.",
        default=StorageConfig(
            base_dir=graphrag_config_defaults.output_storage.base_dir,
        ),
    )
    """The output configuration."""

    def _validate_output_base_dir(self) -> None:
        """Validate the output base directory."""
        if self.output_storage.type == StorageType.File:
            if not self.output_storage.base_dir:
                msg = "output base directory is required for file output. Please rerun `graphrag init` and set the output configuration."
                raise ValueError(msg)
            self.output_storage.base_dir = str(
                Path(self.output_storage.base_dir).resolve()
            )

    update_output_storage: StorageConfig = Field(
        description="The output configuration for the updated index.",
        default=StorageConfig(
            base_dir=graphrag_config_defaults.update_output_storage.base_dir,
        ),
    )
    """The output configuration for the updated index."""

    def _validate_update_output_storage_base_dir(self) -> None:
        """Validate the update output base directory."""
        if self.update_output_storage.type == StorageType.File:
            if not self.update_output_storage.base_dir:
                msg = "update_output_storage base directory is required for file output. Please rerun `graphrag init` and set the update_output_storage configuration."
                raise ValueError(msg)
            self.update_output_storage.base_dir = str(
                Path(self.update_output_storage.base_dir).resolve()
            )

    table_provider: TableProviderConfig = Field(
        description="The table provider configuration.", default=TableProviderConfig()
    )
    """The table provider configuration. By default we read/write parquet to disk. You can register custom output table storage."""
    cache: CacheConfig = Field(
        description="The cache configuration.",
        default=CacheConfig(**asdict(graphrag_config_defaults.cache)),
    )
    """The cache configuration."""
    reporting: ReportingConfig = Field(
        description="The reporting configuration.", default=ReportingConfig()
    )
    """The reporting configuration."""

    def _validate_reporting_base_dir(self) -> None:
        """Validate the reporting base directory."""
        if self.reporting.type == ReportingType.file:
            if self.reporting.base_dir.strip() == "":
                msg = "Reporting base directory is required for file reporting. Please rerun `graphrag init` and set the reporting configuration."
                raise ValueError(msg)
            self.reporting.base_dir = str(Path(self.reporting.base_dir).resolve())

    vector_store: VectorStoreConfig = Field(
        description="The vector store configuration.", default=VectorStoreConfig()
    )
    """The vector store configuration."""
    workflows: list[str] | None = Field(
        description="List of workflows to run, in execution order. This always overrides any built-in workflow methods.",
        default=graphrag_config_defaults.workflows,
    )
    """List of workflows to run, in execution order."""
    embed_text: EmbedTextConfig = Field(
        description="Text embedding configuration.",
        default=EmbedTextConfig(),
    )
    """Text embedding configuration."""
    extract_graph: ExtractGraphConfig = Field(
        description="The entity extraction configuration to use.",
        default=ExtractGraphConfig(),
    )
    """The entity extraction configuration to use."""
    summarize_descriptions: SummarizeDescriptionsConfig = Field(
        description="The description summarization configuration to use.",
        default=SummarizeDescriptionsConfig(),
    )
    """The description summarization configuration to use."""
    extract_graph_nlp: ExtractGraphNLPConfig = Field(
        description="The NLP-based graph extraction configuration to use.",
        default=ExtractGraphNLPConfig(),
    )
    """The NLP-based graph extraction configuration to use."""
    prune_graph: PruneGraphConfig = Field(
        description="The graph pruning configuration to use.",
        default=PruneGraphConfig(),
    )
    """The graph pruning configuration to use."""
    cluster_graph: ClusterGraphConfig = Field(
        description="The cluster graph configuration to use.",
        default=ClusterGraphConfig(),
    )
    """The cluster graph configuration to use."""
    extract_claims: ExtractClaimsConfig = Field(
        description="The claim extraction configuration to use.",
        default=ExtractClaimsConfig(
            enabled=graphrag_config_defaults.extract_claims.enabled,
        ),
    )
    """The claim extraction configuration to use."""
    community_reports: CommunityReportsConfig = Field(
        description="The community reports configuration to use.",
        default=CommunityReportsConfig(),
    )
    """The community reports configuration to use."""
    snapshots: SnapshotsConfig = Field(
        description="The snapshots configuration to use.",
        default=SnapshotsConfig(),
    )
    """The snapshots configuration to use."""
    local_search: LocalSearchConfig = Field(
        description="The local search configuration.", default=LocalSearchConfig()
    )
    """The local search configuration."""
    global_search: GlobalSearchConfig = Field(
        description="The global search configuration.", default=GlobalSearchConfig()
    )
    """The global search configuration."""
    drift_search: DRIFTSearchConfig = Field(
        description="The drift search configuration.", default=DRIFTSearchConfig()
    )
    """The drift search configuration."""
    basic_search: BasicSearchConfig = Field(
        description="The basic search configuration.", default=BasicSearchConfig()
    )
    """The basic search configuration."""

    def _validate_vector_store(self) -> None:
        """Validate the vector store configuration specifically in the GraphRAG context. This checks and sets required dynamic defaults for the embeddings we require."""
        self._validate_vector_store_db_uri()
        # check and insert/overlay schemas for all of the core embeddings
        # note that this does not require that they are used, only that they have a schema
        # the embed_text block has the list of actual embeddings
        if not self.vector_store.index_schema:
            self.vector_store.index_schema = {}
        default_vector_size = self.vector_store.vector_size
        for embedding in all_embeddings:
            if embedding not in self.vector_store.index_schema:
                self.vector_store.index_schema[embedding] = IndexSchema(
                    index_name=embedding,
                    vector_size=default_vector_size,
                )

    def _validate_vector_store_db_uri(self) -> None:
        """Validate the vector store configuration."""
        store = self.vector_store
        if store.type == VectorStoreType.LanceDB:
            # BUG FIX: this previously read `store.db_uri.strip == ""`, which
            # compares the bound `strip` method (not its result) to "" and is
            # always False — a whitespace-only URI slipped past the fallback.
            if not store.db_uri or store.db_uri.strip() == "":
                store.db_uri = graphrag_config_defaults.vector_store.db_uri
            store.db_uri = str(Path(store.db_uri).resolve())

    def get_completion_model_config(self, model_id: str) -> ModelConfig:
        """Get a completion model configuration by ID.

        Parameters
        ----------
        model_id : str
            The ID of the model to get. Should match an ID in the completion_models list.

        Returns
        -------
        ModelConfig
            The model configuration if found.

        Raises
        ------
        ValueError
            If the model ID is not found in the configuration.
        """
        if model_id not in self.completion_models:
            err_msg = f"Model ID {model_id} not found in completion_models. Please rerun `graphrag init` and set the completion_models configuration."
            raise ValueError(err_msg)
        return self.completion_models[model_id]

    def get_embedding_model_config(self, model_id: str) -> ModelConfig:
        """Get an embedding model configuration by ID.

        Parameters
        ----------
        model_id : str
            The ID of the model to get. Should match an ID in the embedding_models list.

        Returns
        -------
        ModelConfig
            The model configuration if found.

        Raises
        ------
        ValueError
            If the model ID is not found in the configuration.
        """
        if model_id not in self.embedding_models:
            err_msg = f"Model ID {model_id} not found in embedding_models. Please rerun `graphrag init` and set the embedding_models configuration."
            raise ValueError(err_msg)
        return self.embedding_models[model_id]

    @model_validator(mode="after")
    def _validate_model(self):
        """Validate the model configuration."""
        self._validate_input_base_dir()
        self._validate_reporting_base_dir()
        self._validate_output_base_dir()
        self._validate_update_output_storage_base_dir()
        self._validate_vector_store()
        return self
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/config/models/graph_rag_config.py",
"license": "MIT License",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/operations/embed_text/embed_text.py | # Copyright (C) 2026 Microsoft
# Licensed under the MIT License
"""Streaming text embedding operation."""
import logging
from typing import TYPE_CHECKING, Any
import numpy as np
from graphrag_llm.tokenizer import Tokenizer
from graphrag_storage.tables.table import Table
from graphrag_vectors import VectorStore, VectorStoreDocument
from graphrag.callbacks.workflow_callbacks import WorkflowCallbacks
from graphrag.index.operations.embed_text.run_embed_text import run_embed_text
if TYPE_CHECKING:
from graphrag_llm.embedding import LLMEmbedding
logger = logging.getLogger(__name__)
async def embed_text(
    input_table: Table,
    callbacks: WorkflowCallbacks,
    model: "LLMEmbedding",
    tokenizer: Tokenizer,
    embed_column: str,
    batch_size: int,
    batch_max_tokens: int,
    num_threads: int,
    vector_store: VectorStore,
    id_column: str = "id",
    output_table: Table | None = None,
) -> int:
    """Embed text from a streaming Table into a vector store.

    Rows accumulate in a pending list until enough are queued to keep
    ``num_threads`` concurrent API batches of ``batch_size`` busy, at which
    point the list is flushed through ``run_embed_text``. Any remainder is
    flushed at the end. Returns the total number of rows processed.
    """
    vector_store.create_index()

    async def _flush(rows: list[dict[str, Any]]) -> int:
        # Thin wrapper so both flush sites share the same argument plumbing.
        return await _flush_embedding_buffer(
            rows,
            embed_column,
            id_column,
            callbacks,
            model,
            tokenizer,
            batch_size,
            batch_max_tokens,
            num_threads,
            vector_store,
            output_table,
        )

    pending: list[dict[str, Any]] = []
    processed = 0
    # Size each flush so it yields enough batches to saturate the concurrency limit.
    flush_threshold = batch_size * num_threads
    async for row in input_table:
        value = row.get(embed_column)
        pending.append({
            id_column: row[id_column],
            embed_column: "" if value is None else value,
        })
        if len(pending) >= flush_threshold:
            processed += await _flush(pending)
            pending.clear()
    if pending:
        processed += await _flush(pending)
    return processed
async def _flush_embedding_buffer(
    buffer: list[dict[str, Any]],
    embed_column: str,
    id_column: str,
    callbacks: WorkflowCallbacks,
    model: "LLMEmbedding",
    tokenizer: Tokenizer,
    batch_size: int,
    batch_max_tokens: int,
    num_threads: int,
    vector_store: VectorStore,
    output_table: Table | None,
) -> int:
    """Embed a buffer of rows and load results into the vector store.

    Rows whose embedding comes back as ``None`` are skipped (with a warning).
    If *output_table* is provided, each successful (id, embedding) pair is
    also written there after the vector store load. Returns the number of
    rows in the buffer (including skipped ones).
    """
    texts: list[str] = [row[embed_column] for row in buffer]
    ids: list[str] = [row[id_column] for row in buffer]
    result = await run_embed_text(
        texts,
        callbacks,
        model,
        tokenizer,
        batch_size,
        batch_max_tokens,
        num_threads,
    )
    vectors = result.embeddings or []
    # Single pass pairing ids with vectors: previously this zip + ndarray
    # conversion was duplicated for the vector store and the output table.
    # isinstance replaces the `type(x) is np.ndarray` anti-idiom so ndarray
    # subclasses are converted too.
    skipped = 0
    embedded: list[tuple[str, Any]] = []
    for doc_id, doc_vector in zip(ids, vectors, strict=True):
        if doc_vector is None:
            skipped += 1
            continue
        if isinstance(doc_vector, np.ndarray):
            doc_vector = doc_vector.tolist()
        embedded.append((doc_id, doc_vector))
    vector_store.load_documents([
        VectorStoreDocument(id=doc_id, vector=doc_vector)
        for doc_id, doc_vector in embedded
    ])
    if skipped > 0:
        logger.warning(
            "Skipped %d rows with None embeddings out of %d",
            skipped,
            len(buffer),
        )
    if output_table is not None:
        for doc_id, doc_vector in embedded:
            await output_table.write({"id": doc_id, "embedding": doc_vector})
    return len(buffer)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/operations/embed_text/embed_text.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/operations/extract_covariates/claim_extractor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'ClaimExtractorResult' and 'ClaimExtractor' models."""
import logging
import traceback
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from graphrag_llm.utils import (
CompletionMessagesBuilder,
)
from graphrag.config.defaults import graphrag_config_defaults
from graphrag.index.typing.error_handler import ErrorHandlerFn
from graphrag.prompts.index.extract_claims import (
CONTINUE_PROMPT,
LOOP_PROMPT,
)
if TYPE_CHECKING:
from graphrag_llm.completion import LLMCompletion
from graphrag_llm.types import LLMCompletionResponse
# Template keys expected by the claim-extraction prompt (see _process_document).
INPUT_TEXT_KEY = "input_text"
INPUT_ENTITY_SPEC_KEY = "entity_specs"
INPUT_CLAIM_DESCRIPTION_KEY = "claim_description"
INPUT_RESOLVED_ENTITIES_KEY = "resolved_entities"
RECORD_DELIMITER_KEY = "record_delimiter"
COMPLETION_DELIMITER_KEY = "completion_delimiter"
# Delimiters the model is instructed to emit between tuple fields, between
# records, and at the end of its output.
TUPLE_DELIMITER = "<|>"
RECORD_DELIMITER = "##"
COMPLETION_DELIMITER = "<|COMPLETE|>"

logger = logging.getLogger(__name__)
@dataclass
class ClaimExtractorResult:
    """Claim extractor result class definition."""

    # Parsed claim records, one dict per extracted claim tuple.
    output: list[dict]
    # Map of synthetic document id ("d0", "d1", ...) to the source text it came from.
    source_docs: dict[str, Any]
class ClaimExtractor:
    """Claim extractor class definition.

    Drives an LLM through an extraction prompt (plus optional "gleaning"
    follow-up rounds) and parses the tuple-formatted response into claim
    records.
    """

    _model: "LLMCompletion"
    _extraction_prompt: str
    _max_gleanings: int
    _on_error: ErrorHandlerFn

    def __init__(
        self,
        model: "LLMCompletion",
        extraction_prompt: str,
        max_gleanings: int | None = None,
        on_error: ErrorHandlerFn | None = None,
    ):
        """Init method definition.

        Args
        ----
        model: LLMCompletion
            Chat-completion model used for extraction.
        extraction_prompt: str
            Prompt template with input_text/claim_description/entity_specs slots.
        max_gleanings: int | None
            Number of follow-up passes; defaults to the configured value.
        on_error: ErrorHandlerFn | None
            Callback invoked with (exception, stack, details); defaults to a no-op.
        """
        self._model = model
        self._extraction_prompt = extraction_prompt
        self._max_gleanings = (
            max_gleanings
            if max_gleanings is not None
            else graphrag_config_defaults.extract_claims.max_gleanings
        )
        # Default error handler is a no-op.
        self._on_error = on_error or (lambda _e, _s, _d: None)

    async def __call__(
        self,
        texts,
        entity_spec,
        resolved_entities,
        claim_description,
    ) -> ClaimExtractorResult:
        """Extract claims from each text; per-document failures are reported and skipped."""
        source_doc_map = {}
        all_claims: list[dict] = []
        for doc_index, text in enumerate(texts):
            # Synthetic id ("d0", "d1", ...) ties each claim back to its source text.
            document_id = f"d{doc_index}"
            try:
                claims = await self._process_document(
                    text, claim_description, entity_spec
                )
                all_claims += [
                    self._clean_claim(c, document_id, resolved_entities) for c in claims
                ]
                source_doc_map[document_id] = text
            except Exception as e:
                logger.exception("error extracting claim")
                self._on_error(
                    e,
                    traceback.format_exc(),
                    {"doc_index": doc_index, "text": text},
                )
                continue
        return ClaimExtractorResult(
            output=all_claims,
            source_docs=source_doc_map,
        )

    def _clean_claim(
        self, claim: dict, document_id: str, resolved_entities: dict
    ) -> dict:
        """Normalize a claim's subject/object ids, substituting resolved entity names."""
        # clean the parsed claims to remove any claims with status = False
        obj = claim.get("object_id", claim.get("object"))
        subject = claim.get("subject_id", claim.get("subject"))
        # If subject or object in resolved entities, then replace with resolved entity
        obj = resolved_entities.get(obj, obj)
        subject = resolved_entities.get(subject, subject)
        claim["object_id"] = obj
        claim["subject_id"] = subject
        return claim

    async def _process_document(
        self, text: str, claim_description: str, entity_spec: dict
    ) -> list[dict]:
        """Run the extraction prompt plus gleaning rounds; parse all rounds' claims."""
        messages_builder = CompletionMessagesBuilder().add_user_message(
            self._extraction_prompt.format(**{
                INPUT_TEXT_KEY: text,
                INPUT_CLAIM_DESCRIPTION_KEY: claim_description,
                INPUT_ENTITY_SPEC_KEY: entity_spec,
            })
        )
        response: LLMCompletionResponse = await self._model.completion_async(
            messages=messages_builder.build(),
        )  # type: ignore
        results = response.content
        messages_builder.add_assistant_message(results)
        claims = results.strip().removesuffix(COMPLETION_DELIMITER)
        # if gleanings are specified, enter a loop to extract more claims
        # there are two exit criteria: (a) we hit the configured max, (b) the model says there are no more claims
        if self._max_gleanings > 0:
            for i in range(self._max_gleanings):
                messages_builder.add_user_message(CONTINUE_PROMPT)
                response: LLMCompletionResponse = await self._model.completion_async(
                    messages=messages_builder.build(),
                )  # type: ignore
                extension = response.content
                messages_builder.add_assistant_message(extension)
                claims += RECORD_DELIMITER + extension.strip().removesuffix(
                    COMPLETION_DELIMITER
                )
                # If this isn't the last loop, check to see if we should continue
                if i >= self._max_gleanings - 1:
                    break
                messages_builder.add_user_message(LOOP_PROMPT)
                response: LLMCompletionResponse = await self._model.completion_async(
                    messages=messages_builder.build(),
                )  # type: ignore
                if response.content != "Y":
                    break
        # BUG FIX: parse the accumulated `claims` text (first response plus all
        # gleaned extensions); the original parsed `results`, silently dropping
        # every claim produced by the gleaning loop.
        return self._parse_claim_tuples(claims)

    def _parse_claim_tuples(self, claims: str) -> list[dict[str, Any]]:
        """Parse claim tuples."""

        def pull_field(index: int, fields: list[str]) -> str | None:
            # Missing trailing fields yield None instead of raising IndexError.
            return fields[index].strip() if len(fields) > index else None

        result: list[dict[str, Any]] = []
        claims_values = (
            claims.strip().removesuffix(COMPLETION_DELIMITER).split(RECORD_DELIMITER)
        )
        for claim in claims_values:
            claim = claim.strip().removeprefix("(").removesuffix(")")
            # Ignore the completion delimiter
            if claim == COMPLETION_DELIMITER:
                continue
            claim_fields = claim.split(TUPLE_DELIMITER)
            result.append({
                "subject_id": pull_field(0, claim_fields),
                "object_id": pull_field(1, claim_fields),
                "type": pull_field(2, claim_fields),
                "status": pull_field(3, claim_fields),
                "start_date": pull_field(4, claim_fields),
                "end_date": pull_field(5, claim_fields),
                "description": pull_field(6, claim_fields),
                "source_text": pull_field(7, claim_fields),
            })
        return result
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/operations/extract_covariates/claim_extractor.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/operations/extract_graph/graph_extractor.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Graph extraction helpers that return tabular data."""
import logging
import re
import traceback
from typing import TYPE_CHECKING, Any
import pandas as pd
from graphrag_llm.utils import (
CompletionMessagesBuilder,
)
from graphrag.index.typing.error_handler import ErrorHandlerFn
from graphrag.index.utils.string import clean_str
from graphrag.prompts.index.extract_graph import (
CONTINUE_PROMPT,
LOOP_PROMPT,
)
if TYPE_CHECKING:
from graphrag_llm.completion import LLMCompletion
from graphrag_llm.types import LLMCompletionResponse
# Template keys expected by the graph-extraction prompt (see _process_document).
INPUT_TEXT_KEY = "input_text"
RECORD_DELIMITER_KEY = "record_delimiter"
COMPLETION_DELIMITER_KEY = "completion_delimiter"
ENTITY_TYPES_KEY = "entity_types"
# Delimiters the model is instructed to emit between tuple fields, between
# records, and at the end of its output.
TUPLE_DELIMITER = "<|>"
RECORD_DELIMITER = "##"
COMPLETION_DELIMITER = "<|COMPLETE|>"

logger = logging.getLogger(__name__)
class GraphExtractor:
    """Unipartite graph extractor class definition.

    Drives an LLM through an extraction prompt (plus optional gleaning
    follow-up rounds) and parses the tuple-formatted output into entity and
    relationship data frames.
    """

    _model: "LLMCompletion"
    _extraction_prompt: str
    _max_gleanings: int
    _on_error: ErrorHandlerFn

    def __init__(
        self,
        model: "LLMCompletion",
        prompt: str,
        max_gleanings: int,
        on_error: ErrorHandlerFn | None = None,
    ):
        """Init method definition."""
        self._model = model
        self._extraction_prompt = prompt
        self._max_gleanings = max_gleanings
        # Default error handler is a no-op.
        self._on_error = on_error or (lambda _e, _s, _d: None)

    async def __call__(
        self, text: str, entity_types: list[str], source_id: str
    ) -> tuple[pd.DataFrame, pd.DataFrame]:
        """Extract entities and relationships from the supplied text.

        On failure the error handler is invoked and empty frames are returned.
        """
        try:
            # Invoke the entity extraction
            result = await self._process_document(text, entity_types)
        except Exception as e:  # pragma: no cover - defensive logging
            logger.exception("error extracting graph")
            self._on_error(
                e,
                traceback.format_exc(),
                {
                    "source_id": source_id,
                    "text": text,
                },
            )
            return _empty_entities_df(), _empty_relationships_df()
        return self._process_result(
            result,
            source_id,
            TUPLE_DELIMITER,
            RECORD_DELIMITER,
        )

    async def _process_document(self, text: str, entity_types: list[str]) -> str:
        """Run the extraction prompt plus gleaning rounds; return the raw tuple text."""
        messages_builder = CompletionMessagesBuilder().add_user_message(
            self._extraction_prompt.format(**{
                INPUT_TEXT_KEY: text,
                ENTITY_TYPES_KEY: ",".join(entity_types),
            })
        )
        response: LLMCompletionResponse = await self._model.completion_async(
            messages=messages_builder.build(),
        )  # type: ignore
        results = response.content
        messages_builder.add_assistant_message(results)
        # BUG FIX: strip the completion delimiter before accumulating so a
        # trailing "<|COMPLETE|>" cannot fuse with the next round's first record.
        results = results.strip().removesuffix(COMPLETION_DELIMITER)
        # if gleanings are specified, enter a loop to extract more entities
        # there are two exit criteria: (a) we hit the configured max, (b) the model says there are no more entities
        if self._max_gleanings > 0:
            for i in range(self._max_gleanings):
                messages_builder.add_user_message(CONTINUE_PROMPT)
                response: LLMCompletionResponse = await self._model.completion_async(
                    messages=messages_builder.build(),
                )  # type: ignore
                response_text = response.content
                messages_builder.add_assistant_message(response_text)
                # BUG FIX: join rounds with the record delimiter (mirroring
                # ClaimExtractor); plain `+=` fused the last record of one
                # round with the first record of the next, corrupting both.
                results += RECORD_DELIMITER + response_text.strip().removesuffix(
                    COMPLETION_DELIMITER
                )
                # if this is the final glean, don't bother updating the continuation flag
                if i >= self._max_gleanings - 1:
                    break
                messages_builder.add_user_message(LOOP_PROMPT)
                response: LLMCompletionResponse = await self._model.completion_async(
                    messages=messages_builder.build(),
                )  # type: ignore
                if response.content != "Y":
                    break
        return results

    def _process_result(
        self,
        result: str,
        source_id: str,
        tuple_delimiter: str,
        record_delimiter: str,
    ) -> tuple[pd.DataFrame, pd.DataFrame]:
        """Parse the result string into entity and relationship data frames."""
        entities: list[dict[str, Any]] = []
        relationships: list[dict[str, Any]] = []
        records = [r.strip() for r in result.split(record_delimiter)]
        for raw_record in records:
            # Strip the surrounding parentheses the prompt asks the model to emit.
            record = re.sub(r"^\(|\)$", "", raw_record.strip())
            if not record or record == COMPLETION_DELIMITER:
                continue
            record_attributes = record.split(tuple_delimiter)
            record_type = record_attributes[0]
            if record_type == '"entity"' and len(record_attributes) >= 4:
                entity_name = clean_str(record_attributes[1].upper())
                entity_type = clean_str(record_attributes[2].upper())
                entity_description = clean_str(record_attributes[3])
                entities.append({
                    "title": entity_name,
                    "type": entity_type,
                    "description": entity_description,
                    "source_id": source_id,
                })
            if record_type == '"relationship"' and len(record_attributes) >= 5:
                source = clean_str(record_attributes[1].upper())
                target = clean_str(record_attributes[2].upper())
                edge_description = clean_str(record_attributes[3])
                # The strength score is the final field; fall back to a neutral
                # weight when the model emits something non-numeric.
                try:
                    weight = float(record_attributes[-1])
                except ValueError:
                    weight = 1.0
                relationships.append({
                    "source": source,
                    "target": target,
                    "description": edge_description,
                    "source_id": source_id,
                    "weight": weight,
                })
        entities_df = pd.DataFrame(entities) if entities else _empty_entities_df()
        relationships_df = (
            pd.DataFrame(relationships) if relationships else _empty_relationships_df()
        )
        return entities_df, relationships_df
def _empty_entities_df() -> pd.DataFrame:
return pd.DataFrame(columns=["title", "type", "description", "source_id"])
def _empty_relationships_df() -> pd.DataFrame:
return pd.DataFrame(
columns=["source", "target", "weight", "description", "source_id"]
)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/operations/extract_graph/graph_extractor.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/operations/summarize_descriptions/typing.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'SummarizedDescriptionResult' model."""
from dataclasses import dataclass
from typing import Any, NamedTuple
@dataclass
class SummarizedDescriptionResult:
    """Entity summarization result class definition."""

    # Key of the summarized item: a single string, or a (source, target)
    # pair — presumably a relationship's endpoints; confirm with callers.
    id: str | tuple[str, str]
    # The summarized description text.
    description: str
class DescriptionSummarizeRow(NamedTuple):
    """DescriptionSummarizeRow class definition."""

    # Graph payload carried through summarization; typed Any here — see
    # callers for the concrete graph type.
    graph: Any
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/operations/summarize_descriptions/typing.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/text_splitting/text_splitting.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing 'TokenTextSplitter' class and 'split_single_text_on_tokens' function."""
import logging
from abc import ABC
from collections.abc import Callable
from typing import cast
import pandas as pd
from graphrag_llm.tokenizer import Tokenizer
from graphrag.tokenizer.get_tokenizer import get_tokenizer
# Type aliases for the tokenizer plumbing used by the splitter functions.
EncodedText = list[int]
DecodeFn = Callable[[EncodedText], str]
EncodeFn = Callable[[str], EncodedText]
LengthFn = Callable[[str], int]

logger = logging.getLogger(__name__)
class TokenTextSplitter(ABC):
    """Split strings into chunks whose size is measured in tokenizer tokens."""

    _chunk_size: int
    _chunk_overlap: int
    _length_function: LengthFn
    _keep_separator: bool
    _add_start_index: bool
    _strip_whitespace: bool

    def __init__(
        self,
        # based on OpenAI embedding chunk size limits
        # https://devblogs.microsoft.com/azure-sql/embedding-models-and-dimensions-optimizing-the-performance-resource-usage-ratio/
        chunk_size: int = 8191,
        chunk_overlap: int = 100,
        length_function: LengthFn = len,
        keep_separator: bool = False,
        add_start_index: bool = False,
        strip_whitespace: bool = True,
        tokenizer: Tokenizer | None = None,
    ):
        """Record the chunking options; fall back to the default tokenizer when none is given."""
        self._chunk_size = chunk_size
        self._chunk_overlap = chunk_overlap
        self._length_function = length_function
        self._keep_separator = keep_separator
        self._add_start_index = add_start_index
        self._strip_whitespace = strip_whitespace
        self._tokenizer = tokenizer or get_tokenizer()

    def num_tokens(self, text: str) -> int:
        """Count the tokens the configured tokenizer produces for ``text``."""
        return self._tokenizer.num_tokens(text)

    def split_text(self, text: str | list[str]) -> list[str]:
        """Split ``text`` into token-bounded chunks; a list input is space-joined first."""
        if isinstance(text, list):
            text = " ".join(text)
        elif cast("bool", pd.isna(text)) or text == "":
            # Nothing to split for missing or empty values.
            return []
        if not isinstance(text, str):
            msg = f"Attempting to split a non-string value, actual is {type(text)}"
            raise TypeError(msg)
        return split_single_text_on_tokens(
            text,
            chunk_overlap=self._chunk_overlap,
            tokens_per_chunk=self._chunk_size,
            decode=self._tokenizer.decode,
            encode=self._tokenizer.encode,
        )
def split_single_text_on_tokens(
    text: str,
    tokens_per_chunk: int,
    chunk_overlap: int,
    encode: Callable[[str], list[int]],
    decode: Callable[[list[int]], str],
) -> list[str]:
    """Split a single text and return chunks using the tokenizer.

    Args
    ----
    text: str
        The text to split.
    tokens_per_chunk: int
        Maximum tokens per chunk; must be positive.
    chunk_overlap: int
        Tokens shared between consecutive chunks; must be smaller than
        ``tokens_per_chunk``.
    encode / decode:
        Tokenizer callbacks mapping text to token ids and back.

    Raises
    ------
    ValueError
        If the parameters would cause the window to never advance
        (previously this produced an infinite loop).
    """
    if tokens_per_chunk <= 0:
        msg = "tokens_per_chunk must be positive"
        raise ValueError(msg)
    if chunk_overlap >= tokens_per_chunk:
        msg = "chunk_overlap must be smaller than tokens_per_chunk"
        raise ValueError(msg)
    input_ids = encode(text)
    result: list[str] = []
    start_idx = 0
    # Hoisted loop invariant: how far the window advances each iteration.
    step = tokens_per_chunk - chunk_overlap
    while start_idx < len(input_ids):
        cur_idx = min(start_idx + tokens_per_chunk, len(input_ids))
        result.append(decode(list(input_ids[start_idx:cur_idx])))
        if cur_idx == len(input_ids):
            break
        start_idx += step
    return result
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/text_splitting/text_splitting.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/validate_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing validate_config_names definition."""
import asyncio
import logging
import sys
from graphrag_llm.completion import create_completion
from graphrag_llm.embedding import create_embedding
from graphrag.config.models.graph_rag_config import GraphRagConfig
logger = logging.getLogger(__name__)
def validate_config_names(parameters: GraphRagConfig) -> None:
    """Validate config file for model deployment name typos, by running a quick test message for each.

    Exits the process with status 1 on the first model that fails to respond.
    """
    # `model_id` avoids shadowing the `id` builtin (the original used `for id, ...`).
    for model_id, config in parameters.completion_models.items():
        llm = create_completion(config)
        try:
            llm.completion(messages="This is an LLM connectivity test. Say Hello World")
            logger.info("LLM Config Params Validated")
        except Exception as e:  # noqa: BLE001
            # lazy %-style args are the logging-module convention
            logger.error("LLM configuration error detected.\n%s", e)  # noqa
            print(f"Failed to validate language model ({model_id}) params", e)  # noqa: T201
            sys.exit(1)
    for model_id, config in parameters.embedding_models.items():
        embed_llm = create_embedding(config)
        try:
            asyncio.run(
                embed_llm.embedding_async(
                    input=["This is an LLM Embedding Test String"]
                )
            )
            logger.info("Embedding LLM Config Params Validated")
        except Exception as e:  # noqa: BLE001
            logger.error("Embedding configuration error detected.\n%s", e)  # noqa
            print(f"Failed to validate embedding model ({model_id}) params", e)  # noqa: T201
            sys.exit(1)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/validate_config.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/index/workflows/create_base_text_units.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A module containing run_workflow method definition."""
import logging
from typing import Any
from graphrag_chunking.chunker import Chunker
from graphrag_chunking.chunker_factory import create_chunker
from graphrag_chunking.transformers import add_metadata
from graphrag_input import TextDocument
from graphrag_llm.tokenizer import Tokenizer
from graphrag_storage.tables.table import Table
from graphrag.callbacks.workflow_callbacks import WorkflowCallbacks
from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.index.typing.context import PipelineRunContext
from graphrag.index.typing.workflow import WorkflowFunctionOutput
from graphrag.index.utils.hashing import gen_sha512_hash
from graphrag.logger.progress import progress_ticker
from graphrag.tokenizer.get_tokenizer import get_tokenizer
logger = logging.getLogger(__name__)
async def run_workflow(
    config: GraphRagConfig,
    context: PipelineRunContext,
) -> WorkflowFunctionOutput:
    """All the steps to transform base text_units.

    Opens the documents and text_units output tables, chunks every document
    with the configured chunker, and returns a small sample of the rows
    produced (see create_base_text_units).
    """
    logger.info("Workflow started: create_base_text_units")
    # Tokenizer and chunker are both driven by the chunking section of the config.
    tokenizer = get_tokenizer(encoding_model=config.chunking.encoding_model)
    chunker = create_chunker(config.chunking, tokenizer.encode, tokenizer.decode)
    async with (
        context.output_table_provider.open("documents") as documents_table,
        context.output_table_provider.open("text_units") as text_units_table,
    ):
        # Row count is only used for progress reporting.
        total_rows = await documents_table.length()
        sample_rows = await create_base_text_units(
            documents_table,
            text_units_table,
            total_rows,
            context.callbacks,
            tokenizer=tokenizer,
            chunker=chunker,
            prepend_metadata=config.chunking.prepend_metadata,
        )
    logger.info("Workflow completed: create_base_text_units")
    return WorkflowFunctionOutput(result=sample_rows)
async def create_base_text_units(
    documents_table: Table,
    text_units_table: Table,
    total_rows: int,
    callbacks: WorkflowCallbacks,
    tokenizer: Tokenizer,
    chunker: Chunker,
    prepend_metadata: list[str] | None = None,
) -> list[dict[str, Any]]:
    """Transform documents into chunked text units via streaming read/write.

    Reads documents row-by-row from an async iterable and writes text units
    directly to the output table, avoiding loading all data into memory.

    Args
    ----
    documents_table: Table
        Table instance for reading documents. Supports async iteration.
    text_units_table: Table
        Table instance for writing text units row by row.
    total_rows: int
        Total number of documents for progress reporting.
    callbacks: WorkflowCallbacks
        Callbacks for progress reporting.
    tokenizer: Tokenizer
        Tokenizer for measuring chunk token counts.
    chunker: Chunker
        Chunker instance for splitting document text.
    prepend_metadata: list[str] | None
        Optional list of metadata fields to prepend to
        each chunk.

    Returns
    -------
    list[dict[str, Any]]
        The first few (up to 5) text-unit rows written, for reporting.
    """
    tick = progress_ticker(callbacks.progress, total_rows)
    logger.info(
        "Starting chunking process for %d documents",
        total_rows,
    )
    doc_index = 0
    # Keep a small sample of written rows to return to the caller.
    sample_rows: list[dict[str, Any]] = []
    sample_size = 5
    async for doc in documents_table:
        chunks = chunk_document(doc, chunker, prepend_metadata)
        for chunk_text in chunks:
            if chunk_text is None:
                continue
            row = {
                "id": "",
                "document_id": doc["id"],
                "text": chunk_text,
                "n_tokens": len(tokenizer.encode(chunk_text)),
            }
            # Content-addressed id: a hash computed from the chunk text only.
            row["id"] = gen_sha512_hash(row, ["text"])
            await text_units_table.write(row)
            if len(sample_rows) < sample_size:
                sample_rows.append(row)
        doc_index += 1
        tick()
        logger.info(
            "chunker progress: %d/%d",
            doc_index,
            total_rows,
        )
    return sample_rows
def chunk_document(
    doc: dict[str, Any],
    chunker: Chunker,
    prepend_metadata: list[str] | None = None,
) -> list[str]:
    """Split one document row into its chunk text strings.

    Args
    ----
    doc: dict[str, Any]
        A single document row as a dictionary.
    chunker: Chunker
        Chunker instance for splitting text.
    prepend_metadata: list[str] | None
        Optional metadata fields to prepend.

    Returns
    -------
    list[str]:
        List of chunk text strings.
    """
    metadata_transform = None
    if prepend_metadata:
        # Wrap the row in a TextDocument so the requested metadata fields can
        # be collected and prepended to every chunk.
        source_document = TextDocument(
            id=doc["id"],
            title=doc.get("title", ""),
            text=doc["text"],
            creation_date=doc.get("creation_date", ""),
            raw_data=doc.get("raw_data"),
        )
        collected = source_document.collect(prepend_metadata)
        metadata_transform = add_metadata(metadata=collected, line_delimiter=".\n")
    chunked = chunker.chunk(doc["text"], transform=metadata_transform)
    return [chunk.text for chunk in chunked]
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/index/workflows/create_base_text_units.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/logger/factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Factory functions for creating a logger."""
from __future__ import annotations
import logging
from pathlib import Path
from graphrag_common.factory import Factory
from graphrag.config.enums import ReportingType
# %-style layout for emitted log records (timestamp, level, logger name, message).
LOG_FORMAT = "%(asctime)s.%(msecs)04d - %(levelname)s - %(name)s - %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


class LoggerFactory(Factory[logging.Handler]):
    """A factory class for logger implementations.

    Includes a method for users to register a custom logger implementation.

    Configuration arguments are passed to each logger implementation as kwargs
    for individual enforcement of required/optional arguments.

    Note that because we rely on the built-in Python logging architecture, this factory does not return an instance,
    it merely configures the logger to your specified storage location.
    """
# --- register built-in logger implementations ---
def create_file_logger(**kwargs) -> logging.Handler:
    """Create a file-based logger.

    Requires ``base_dir`` and ``filename`` kwargs; the directory is created
    if missing and log records are appended to the file.
    """
    target_dir = Path(kwargs["base_dir"])
    target_dir.mkdir(parents=True, exist_ok=True)
    handler = logging.FileHandler(str(target_dir / kwargs["filename"]), mode="a")
    handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT))
    return handler
def create_blob_logger(**kwargs) -> logging.Handler:
    """Create a blob storage-based logger from the supplied connection kwargs."""
    # imported lazily so the Azure dependency is only loaded when blob
    # reporting is actually configured
    from graphrag.logger.blob_workflow_logger import BlobWorkflowLogger

    logger_args = {
        "connection_string": kwargs["connection_string"],
        "container_name": kwargs["container_name"],
        "base_dir": kwargs["base_dir"],
        "account_url": kwargs["account_url"],
    }
    return BlobWorkflowLogger(**logger_args)
# --- register built-in implementations ---
# Module-level singleton: importers share this factory and may register
# additional handler types on it.
logger_factory = LoggerFactory()
logger_factory.register(ReportingType.file.value, create_file_logger)
logger_factory.register(ReportingType.blob.value, create_blob_logger)
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/logger/factory.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/prompts/index/extract_graph.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""A file containing prompts definition."""
# BUG FIX: Example 3 previously contained a doubled "##" delimiter (a stray
# empty record between the QUINTARA and TIRUZIA entities); one has been removed
# so the example output is well-formed.
GRAPH_EXTRACTION_PROMPT = """
-Goal-
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
-Steps-
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, capitalized
- entity_type: One of the following types: [{entity_types}]
- entity_description: Comprehensive description of the entity's attributes and activities
Format each entity as ("entity"<|><entity_name><|><entity_type><|><entity_description>)
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
Format each relationship as ("relationship"<|><source_entity><|><target_entity><|><relationship_description><|><relationship_strength>)
3. Return output in English as a single list of all the entities and relationships identified in steps 1 and 2. Use **##** as the list delimiter.
4. When finished, output <|COMPLETE|>
######################
-Examples-
######################
Example 1:
Entity_types: ORGANIZATION,PERSON
Text:
The Verdantis's Central Institution is scheduled to meet on Monday and Thursday, with the institution planning to release its latest policy decision on Thursday at 1:30 p.m. PDT, followed by a press conference where Central Institution Chair Martin Smith will take questions. Investors expect the Market Strategy Committee to hold its benchmark interest rate steady in a range of 3.5%-3.75%.
######################
Output:
("entity"<|>CENTRAL INSTITUTION<|>ORGANIZATION<|>The Central Institution is the Federal Reserve of Verdantis, which is setting interest rates on Monday and Thursday)
##
("entity"<|>MARTIN SMITH<|>PERSON<|>Martin Smith is the chair of the Central Institution)
##
("entity"<|>MARKET STRATEGY COMMITTEE<|>ORGANIZATION<|>The Central Institution committee makes key decisions about interest rates and the growth of Verdantis's money supply)
##
("relationship"<|>MARTIN SMITH<|>CENTRAL INSTITUTION<|>Martin Smith is the Chair of the Central Institution and will answer questions at a press conference<|>9)
<|COMPLETE|>
######################
Example 2:
Entity_types: ORGANIZATION
Text:
TechGlobal's (TG) stock skyrocketed in its opening day on the Global Exchange Thursday. But IPO experts warn that the semiconductor corporation's debut on the public markets isn't indicative of how other newly listed companies may perform.
TechGlobal, a formerly public company, was taken private by Vision Holdings in 2014. The well-established chip designer says it powers 85% of premium smartphones.
######################
Output:
("entity"<|>TECHGLOBAL<|>ORGANIZATION<|>TechGlobal is a stock now listed on the Global Exchange which powers 85% of premium smartphones)
##
("entity"<|>VISION HOLDINGS<|>ORGANIZATION<|>Vision Holdings is a firm that previously owned TechGlobal)
##
("relationship"<|>TECHGLOBAL<|>VISION HOLDINGS<|>Vision Holdings formerly owned TechGlobal from 2014 until present<|>5)
<|COMPLETE|>
######################
Example 3:
Entity_types: ORGANIZATION,GEO,PERSON
Text:
Five Aurelians jailed for 8 years in Firuzabad and widely regarded as hostages are on their way home to Aurelia.
The swap orchestrated by Quintara was finalized when $8bn of Firuzi funds were transferred to financial institutions in Krohaara, the capital of Quintara.
The exchange initiated in Firuzabad's capital, Tiruzia, led to the four men and one woman, who are also Firuzi nationals, boarding a chartered flight to Krohaara.
They were welcomed by senior Aurelian officials and are now on their way to Aurelia's capital, Cashion.
The Aurelians include 39-year-old businessman Samuel Namara, who has been held in Tiruzia's Alhamia Prison, as well as journalist Durke Bataglani, 59, and environmentalist Meggie Tazbah, 53, who also holds Bratinas nationality.
######################
Output:
("entity"<|>FIRUZABAD<|>GEO<|>Firuzabad held Aurelians as hostages)
##
("entity"<|>AURELIA<|>GEO<|>Country seeking to release hostages)
##
("entity"<|>QUINTARA<|>GEO<|>Country that negotiated a swap of money in exchange for hostages)
##
("entity"<|>TIRUZIA<|>GEO<|>Capital of Firuzabad where the Aurelians were being held)
##
("entity"<|>KROHAARA<|>GEO<|>Capital city in Quintara)
##
("entity"<|>CASHION<|>GEO<|>Capital city in Aurelia)
##
("entity"<|>SAMUEL NAMARA<|>PERSON<|>Aurelian who spent time in Tiruzia's Alhamia Prison)
##
("entity"<|>ALHAMIA PRISON<|>GEO<|>Prison in Tiruzia)
##
("entity"<|>DURKE BATAGLANI<|>PERSON<|>Aurelian journalist who was held hostage)
##
("entity"<|>MEGGIE TAZBAH<|>PERSON<|>Bratinas national and environmentalist who was held hostage)
##
("relationship"<|>FIRUZABAD<|>AURELIA<|>Firuzabad negotiated a hostage exchange with Aurelia<|>2)
##
("relationship"<|>QUINTARA<|>AURELIA<|>Quintara brokered the hostage exchange between Firuzabad and Aurelia<|>2)
##
("relationship"<|>QUINTARA<|>FIRUZABAD<|>Quintara brokered the hostage exchange between Firuzabad and Aurelia<|>2)
##
("relationship"<|>SAMUEL NAMARA<|>ALHAMIA PRISON<|>Samuel Namara was a prisoner at Alhamia prison<|>8)
##
("relationship"<|>SAMUEL NAMARA<|>MEGGIE TAZBAH<|>Samuel Namara and Meggie Tazbah were exchanged in the same hostage release<|>2)
##
("relationship"<|>SAMUEL NAMARA<|>DURKE BATAGLANI<|>Samuel Namara and Durke Bataglani were exchanged in the same hostage release<|>2)
##
("relationship"<|>MEGGIE TAZBAH<|>DURKE BATAGLANI<|>Meggie Tazbah and Durke Bataglani were exchanged in the same hostage release<|>2)
##
("relationship"<|>SAMUEL NAMARA<|>FIRUZABAD<|>Samuel Namara was a hostage in Firuzabad<|>2)
##
("relationship"<|>MEGGIE TAZBAH<|>FIRUZABAD<|>Meggie Tazbah was a hostage in Firuzabad<|>2)
##
("relationship"<|>DURKE BATAGLANI<|>FIRUZABAD<|>Durke Bataglani was a hostage in Firuzabad<|>2)
<|COMPLETE|>
######################
-Real Data-
######################
Entity_types: {entity_types}
Text: {input_text}
######################
Output:"""
# Gleaning follow-up: asks the model to emit any entities/relationships it missed.
CONTINUE_PROMPT = "MANY entities and relationships were missed in the last extraction. Remember to ONLY emit entities that match any of the previously extracted types. Add them below using the same format:\n"
# Loop gate: the model must answer a single Y/N on whether more items remain.
LOOP_PROMPT = "It appears some entities and relationships may have still been missed. Answer Y if there are still entities or relationships that need to be added, or N if there are none. Please answer with a single letter Y or N.\n"
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/prompts/index/extract_graph.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:packages/graphrag/graphrag/utils/api.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""API functions for the GraphRAG module."""
from pathlib import Path
from graphrag_vectors import (
VectorStore,
VectorStoreConfig,
create_vector_store,
)
def get_embedding_store(
    config: VectorStoreConfig,
    embedding_name: str,
) -> VectorStore:
    """Create, connect, and return the vector store for ``embedding_name``."""
    schema = config.index_schema[embedding_name]
    store = create_vector_store(config, schema)
    store.connect()
    return store
def reformat_context_data(context_data: dict) -> dict:
    """
    Reformats context_data for all query responses.

    Reformats a dictionary of dataframes into a dictionary of lists.
    One list entry for each record. Records are grouped by original
    dictionary keys.

    Note: depending on which query algorithm is used, the context_data may not
    contain the same information (keys). In this case, the default behavior will be to
    set these keys as empty lists to preserve a standard output format.
    """
    final_format = {
        "reports": [],
        "entities": [],
        "relationships": [],
        "claims": [],
        "sources": [],
    }
    for key in context_data:
        # Dataframe-like values are flattened to a list of record dicts;
        # plain dicts (and None) are passed through unchanged.
        records = (
            context_data[key].to_dict(orient="records")
            if context_data[key] is not None and not isinstance(context_data[key], dict)
            else context_data[key]
        )
        # Truthiness check instead of `len(records) < 1`: the old check raised
        # TypeError when a context entry was None (len(None) is invalid).
        if not records:
            continue
        final_format[key] = records
    return final_format
def load_search_prompt(prompt_config: str | None) -> str | None:
"""
Load the search prompt from disk if configured.
If not, leave it empty - the search functions will load their defaults.
"""
if prompt_config:
prompt_file = Path(prompt_config).resolve()
if prompt_file.exists():
return prompt_file.read_bytes().decode(encoding="utf-8")
return None
def truncate(text: str, max_length: int) -> str:
    """Truncate a string to a maximum length, marking the cut point."""
    if len(text) > max_length:
        return f"{text[:max_length]}...[truncated]"
    return text
| {
"repo_id": "microsoft/graphrag",
"file_path": "packages/graphrag/graphrag/utils/api.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:scripts/copy_build_assets.py | # Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""Copy root build assets to package directories."""
import shutil
from pathlib import Path
def copy_build_assets():
    """Copy root build assets to package build directories so files are included in pypi distributions."""
    repo_root = Path(__file__).parent.parent
    assets = ["LICENSE"]
    for package_dir in (p for p in repo_root.glob("packages/*") if p.is_dir()):
        for asset_name in assets:
            source = repo_root / asset_name
            if source.exists():
                shutil.copy(source, package_dir / asset_name)
# Allow direct invocation: `python scripts/copy_build_assets.py`.
if __name__ == "__main__":
    copy_build_assets()
| {
"repo_id": "microsoft/graphrag",
"file_path": "scripts/copy_build_assets.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:scripts/update_workspace_dependency_versions.py | # Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""Update workspace dependency versions."""
import os
import re
import subprocess # noqa: S404
from pathlib import Path
def _get_version() -> str:
    """Query semversioner (via uv) for the workspace's current version string."""
    command = ["uv", "run", "semversioner", "current-version"]
    result = subprocess.run(command, env=os.environ, capture_output=True, text=True)  # noqa: S603
    if result.returncode != 0:
        msg = f"Failed to get current version with return code: {result.returncode}"
        raise RuntimeError(msg)
    return result.stdout.strip()
def _get_package_paths() -> list[Path]:
root_dir = Path(__file__).parent.parent
return [p.resolve() for p in root_dir.glob("packages/*") if p.is_dir()]
def update_workspace_dependency_versions():
    """Update dependency versions across workspace packages.

    Iterate through all the workspace packages and update cross-package
    dependency versions to match the current version of the workspace.
    """
    version = _get_version()
    package_paths = _get_package_paths()
    for package_path in package_paths:
        current_package_name = package_path.name
        toml_path = package_path / "pyproject.toml"
        if not toml_path.exists() or not toml_path.is_file():
            continue
        toml_contents = toml_path.read_text(encoding="utf-8")
        # Rewrite pins on every *other* workspace package; a package never pins itself.
        for other_package_path in package_paths:
            other_package_name = other_package_path.name
            if other_package_name == current_package_name:
                continue
            # Matches e.g. "graphrag-llm == 1.2.3" with optional whitespace around "==".
            # Note: the replacement normalizes to "name==version" (no spaces).
            dep_pattern = rf"{other_package_name}\s*==\s*\d+\.\d+\.\d+"
            if re.search(dep_pattern, toml_contents):
                toml_contents = re.sub(
                    dep_pattern,
                    f"{other_package_name}=={version}",
                    toml_contents,
                )
        # Write once per package, with Unix newlines for cross-platform consistency.
        toml_path.write_text(toml_contents, encoding="utf-8", newline="\n")
# Allow direct invocation: `python scripts/update_workspace_dependency_versions.py`.
if __name__ == "__main__":
    update_workspace_dependency_versions()
| {
"repo_id": "microsoft/graphrag",
"file_path": "scripts/update_workspace_dependency_versions.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/graphrag:tests/integration/language_model/test_retries.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test LiteLLM Retries."""
import time
from typing import Any
import httpx
import litellm.exceptions as exceptions
import pytest
from graphrag_llm.config import RetryConfig, RetryType
from graphrag_llm.retry import create_retry
@pytest.mark.parametrize(
    ("config", "max_retries", "expected_time"),
    [
        (
            RetryConfig(
                type=RetryType.ExponentialBackoff,
                max_retries=3,
                base_delay=2.0,
                jitter=False,
            ),
            3,
            2 + 4 + 8,  # No jitter, so exact times
        ),
        (
            RetryConfig(
                type=RetryType.Immediate,
                max_retries=3,
            ),
            3,
            0,  # Immediate retry, so no delay
        ),
    ],
)
def test_retries(config: RetryConfig, max_retries: int, expected_time: float) -> None:
    """
    Test various retry strategies with various configurations.
    """
    retry_service = create_retry(config)
    # start at -1 because the first call is not a retry
    retries = -1

    def mock_func():
        nonlocal retries
        retries += 1
        msg = "Mock error for testing retries"
        raise ValueError(msg)

    start_time = time.time()
    with pytest.raises(ValueError, match="Mock error for testing retries"):
        retry_service.retry(func=mock_func, input_args={})
    elapsed_time = time.time() - start_time
    assert retries == max_retries, f"Expected {max_retries} retries, got {retries}"
    # Lower bound only: backoff sleeps guarantee at least this much wall-clock
    # time, but scheduling overhead can add more, so >= rather than ==.
    assert elapsed_time >= expected_time, (
        f"Expected elapsed time >= {expected_time}, got {elapsed_time}"
    )
@pytest.mark.parametrize(
    ("config", "max_retries", "expected_time"),
    [
        (
            RetryConfig(
                type=RetryType.ExponentialBackoff,
                max_retries=3,
                base_delay=2.0,
                jitter=False,
            ),
            3,
            2 + 4 + 8,  # No jitter, so exact times
        ),
        (
            RetryConfig(
                type=RetryType.Immediate,
                max_retries=3,
            ),
            3,
            0,  # Immediate retry, so no delay
        ),
    ],
)
async def test_retries_async(
    config: RetryConfig, max_retries: int, expected_time: float
) -> None:
    """
    Test various retry strategies with various configurations.
    """
    # NOTE(review): no @pytest.mark.asyncio here - presumably the project
    # configures asyncio auto mode; confirm against pyproject/pytest config.
    retry_service = create_retry(config)
    # start at -1 because the first call is not a retry
    retries = -1

    def mock_func():
        nonlocal retries
        retries += 1
        msg = "Mock error for testing retries"
        raise ValueError(msg)

    start_time = time.time()
    with pytest.raises(ValueError, match="Mock error for testing retries"):
        await retry_service.retry_async(func=mock_func, input_args={})
    elapsed_time = time.time() - start_time
    assert retries == max_retries, f"Expected {max_retries} retries, got {retries}"
    # Lower bound only: sleeps guarantee at least the configured backoff total.
    assert elapsed_time >= expected_time, (
        f"Expected elapsed time >= {expected_time}, got {elapsed_time}"
    )
@pytest.mark.parametrize(
    "config",
    [
        (
            RetryConfig(
                type=RetryType.ExponentialBackoff,
                max_retries=3,
                base_delay=2.0,
                jitter=False,
            )
        ),
        (
            RetryConfig(
                type=RetryType.Immediate,
                max_retries=3,
            )
        ),
    ],
)
# Each entry is (litellm exception class name, positional args to construct it).
# These are all non-transient errors: retrying them would never succeed, so the
# retry service is expected to re-raise immediately.
@pytest.mark.parametrize(
    ("exception", "exception_args"),
    [
        (
            "BadRequestError",
            ["Oh no!", "", ""],
        ),
        (
            "UnsupportedParamsError",
            ["Oh no!", "", ""],
        ),
        (
            "ContextWindowExceededError",
            ["Oh no!", "", ""],
        ),
        (
            "ContentPolicyViolationError",
            ["Oh no!", "", ""],
        ),
        (
            "ImageFetchError",
            ["Oh no!", "", ""],
        ),
        (
            "InvalidRequestError",
            ["Oh no!", "", ""],
        ),
        (
            "AuthenticationError",
            ["Oh no!", "", ""],
        ),
        (
            "PermissionDeniedError",
            [
                "Oh no!",
                "",
                "",
                httpx.Response(
                    status_code=403,
                    request=httpx.Request(
                        method="GET", url="https://litellm.ai"
                    ),  # mock request object
                ),
            ],
        ),
        (
            "NotFoundError",
            ["Oh no!", "", ""],
        ),
        (
            "UnprocessableEntityError",
            [
                "Oh no!",
                "",
                "",
                httpx.Response(
                    status_code=403,
                    request=httpx.Request(
                        method="GET", url="https://litellm.ai"
                    ),  # mock request object
                ),
            ],
        ),
        (
            "APIConnectionError",
            ["Oh no!", "", ""],
        ),
        (
            "APIError",
            [500, "Oh no!", "", ""],
        ),
        (
            "ServiceUnavailableError",
            ["Oh no!", "", ""],
        ),
        (
            "APIResponseValidationError",
            ["Oh no!", "", ""],
        ),
        (
            "BudgetExceededError",
            ["Oh no!", "", ""],
        ),
    ],
)
def test_exponential_backoff_skipping_exceptions(
    config: RetryConfig, exception: str, exception_args: list[Any]
) -> None:
    """
    Test skipping retries for exceptions that should not cause a retry.
    """
    retry_service = create_retry(config)
    # start at -1 because the first call is not a retry
    retries = -1
    # Look up the exception class by name in the litellm.exceptions module.
    exception_cls = exceptions.__dict__[exception]

    def mock_func():
        nonlocal retries
        retries += 1
        raise exception_cls(*exception_args)

    with pytest.raises(exception_cls, match="Oh no!"):
        retry_service.retry(func=mock_func, input_args={})
    # retries stays 0 because only the initial call ran (counter started at -1).
    assert retries == 0, (
        f"Expected not to retry for '{exception}' exception. Got {retries} retries."
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/integration/language_model/test_retries.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/chunking/test_chunker.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from typing import Any
from unittest.mock import Mock, patch
from graphrag.tokenizer.get_tokenizer import get_tokenizer
from graphrag_chunking.bootstrap_nltk import bootstrap
from graphrag_chunking.chunk_strategy_type import ChunkerType
from graphrag_chunking.chunker_factory import create_chunker
from graphrag_chunking.chunking_config import ChunkingConfig
from graphrag_chunking.token_chunker import (
split_text_on_tokens,
)
from graphrag_llm.tokenizer import Tokenizer
class MockTokenizer(Tokenizer):
    """Character-level tokenizer for tests: every character is one token (its ordinal)."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the mock tokenizer; all keyword arguments are ignored."""
        # Previous docstring said "Initialize the LiteLLM Tokenizer" - a copy-paste
        # error; this class is a test double, not the LiteLLM tokenizer.

    def encode(self, text: str) -> list[int]:
        """Encode text as a list of character ordinals."""
        return [ord(char) for char in text]

    def decode(self, tokens: list[int]) -> str:
        """Decode a list of character ordinals back into a string."""
        # `token_id` instead of `id` to avoid shadowing the builtin.
        return "".join(chr(token_id) for token_id in tokens)
class TestRunSentences:
    """Tests for the sentence-based chunker."""

    def setup_method(self, method):
        # Ensure the NLTK resources used by the sentence splitter are available.
        bootstrap()

    def test_basic_functionality(self):
        """Test basic sentence splitting"""
        # `text` instead of `input` to avoid shadowing the builtin.
        text = "This is a test. Another sentence. And a third one!"
        chunker = create_chunker(ChunkingConfig(type=ChunkerType.Sentence))
        chunks = chunker.chunk(text)
        assert len(chunks) == 3
        assert chunks[0].text == "This is a test."
        assert chunks[0].index == 0
        assert chunks[0].start_char == 0
        assert chunks[0].end_char == 14
        assert chunks[1].text == "Another sentence."
        assert chunks[1].index == 1
        assert chunks[1].start_char == 16
        assert chunks[1].end_char == 32
        assert chunks[2].text == "And a third one!"
        assert chunks[2].index == 2
        assert chunks[2].start_char == 34
        assert chunks[2].end_char == 49

    def test_mixed_whitespace_handling(self):
        """Test input with irregular whitespace"""
        # Three leading spaces, one space between sentences, trailing spaces;
        # offsets below are relative to the raw (untrimmed) string.
        text = "   Sentence with spaces. Another one!   "
        chunker = create_chunker(ChunkingConfig(type=ChunkerType.Sentence))
        chunks = chunker.chunk(text)
        assert len(chunks) == 2
        assert chunks[0].text == "Sentence with spaces."
        assert chunks[0].index == 0
        assert chunks[0].start_char == 3
        assert chunks[0].end_char == 23
        assert chunks[1].text == "Another one!"
        assert chunks[1].index == 1
        assert chunks[1].start_char == 25
        assert chunks[1].end_char == 36
class TestRunTokens:
    """Tests for the token-based chunker."""

    @patch("tiktoken.get_encoding")
    def test_basic_functionality(self, mock_get_encoding):
        # Byte-level fake encoder so the test never downloads a real encoding.
        mock_encoder = Mock()
        mock_encoder.encode.side_effect = lambda x: list(x.encode())
        mock_encoder.decode.side_effect = lambda x: bytes(x).decode()
        mock_get_encoding.return_value = mock_encoder
        # `text` instead of `input` to avoid shadowing the builtin.
        text = "Marley was dead: to begin with. There is no doubt whatever about that. The register of his burial was signed by the clergyman, the clerk, the undertaker, and the chief mourner. Scrooge signed it. And Scrooge's name was good upon 'Change, for anything he chose to put his hand to."
        config = ChunkingConfig(
            size=5,
            overlap=1,
            encoding_model="fake-encoding",
            type=ChunkerType.Tokens,
        )
        chunker = create_chunker(config, mock_encoder.encode, mock_encoder.decode)
        chunks = chunker.chunk(text)
        assert len(chunks) > 0
def test_split_text_str_empty():
    """Splitting an empty string must yield no chunks."""
    tokenizer = get_tokenizer()
    chunks = split_text_on_tokens(
        "",
        chunk_size=5,
        chunk_overlap=2,
        encode=tokenizer.encode,
        decode=tokenizer.decode,
    )
    assert chunks == []
def test_split_text_on_tokens():
    """Chunk size 10 with overlap 5 yields 10-char windows advancing 5 chars each step."""
    text = "This is a test text, meaning to be taken seriously by this test only."
    mocked_tokenizer = MockTokenizer()
    expected_splits = [
        "This is a ",
        "is a test ",
        "test text,",
        "text, mean",
        " meaning t",
        "ing to be ",
        "o be taken",
        "taken seri",  # cspell:disable-line
        " seriously",
        "ously by t",  # cspell:disable-line
        " by this t",
        "his test o",
        "est only.",
    ]
    result = split_text_on_tokens(
        text=text,
        chunk_overlap=5,
        chunk_size=10,
        decode=mocked_tokenizer.decode,
        # Pass the bound method directly; the previous lambda wrapper added nothing.
        encode=mocked_tokenizer.encode,
    )
    assert result == expected_splits
def test_split_text_on_tokens_one_overlap():
    """Chunk size 2 tokens with 1-token overlap against the real o200k_base encoding."""
    text = "This is a test text, meaning to be taken seriously by this test only."
    tokenizer = get_tokenizer(encoding_model="o200k_base")
    # Expected values are tied to o200k_base token boundaries; if tiktoken's
    # encoding ever changes, this table must be regenerated.
    expected_splits = [
        "This is",
        " is a",
        " a test",
        " test text",
        " text,",
        ", meaning",
        " meaning to",
        " to be",
        " be taken",
        " taken seriously",
        " seriously by",
        " by this",
        " this test",
        " test only",
        " only.",
    ]
    result = split_text_on_tokens(
        text=text,
        chunk_size=2,
        chunk_overlap=1,
        decode=tokenizer.decode,
        encode=tokenizer.encode,
    )
    assert result == expected_splits
def test_split_text_on_tokens_no_overlap():
    """Chunk size 3 tokens with no overlap: chunks partition the text exactly."""
    text = "This is a test text, meaning to be taken seriously by this test only."
    tokenizer = get_tokenizer(encoding_model="o200k_base")
    # Expected values are tied to o200k_base token boundaries.
    expected_splits = [
        "This is a",
        " test text,",
        " meaning to be",
        " taken seriously by",
        " this test only",
        ".",
    ]
    result = split_text_on_tokens(
        text=text,
        chunk_size=3,
        chunk_overlap=0,
        decode=tokenizer.decode,
        encode=tokenizer.encode,
    )
    assert result == expected_splits
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/chunking/test_chunker.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/chunking/test_prepend_metadata.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag_chunking.transformers import add_metadata
def test_add_metadata_one_row():
    """A single metadata entry is prepended to every chunk by default."""
    texts = ["This is a test.", "Another sentence."]
    transform = add_metadata({"message": "hello"})
    outputs = list(map(transform, texts))
    assert outputs[0] == "message: hello\nThis is a test."
    assert outputs[1] == "message: hello\nAnother sentence."
def test_add_metadata_one_row_append():
    """With append=True, metadata lands after the chunk text instead of before."""
    texts = ["This is a test.", "Another sentence."]
    transform = add_metadata({"message": "hello"}, append=True)
    outputs = list(map(transform, texts))
    assert outputs[0] == "This is a test.message: hello\n"
    assert outputs[1] == "Another sentence.message: hello\n"
def test_add_metadata_multiple_rows():
    """Multiple metadata entries are emitted one per line, in dict order."""
    texts = ["This is a test.", "Another sentence."]
    transform = add_metadata({"message": "hello", "tag": "first"})
    outputs = list(map(transform, texts))
    assert outputs[0] == "message: hello\ntag: first\nThis is a test."
    assert outputs[1] == "message: hello\ntag: first\nAnother sentence."
def test_add_metadata_custom_delimiters():
    """Custom key/value and line delimiters replace the defaults."""
    texts = ["This is a test.", "Another sentence."]
    transform = add_metadata(
        {"message": "hello", "tag": "first"}, delimiter="-", line_delimiter="_"
    )
    outputs = list(map(transform, texts))
    assert outputs[0] == "message-hello_tag-first_This is a test."
    assert outputs[1] == "message-hello_tag-first_Another sentence."
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/chunking/test_prepend_metadata.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_metrics_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test metrics configuration loading."""
import pytest
from graphrag_llm.config import (
MetricsConfig,
MetricsWriterType,
)
def test_file_metrics_writer_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    # A whitespace-only base_dir counts as unset; match strings are regexes,
    # hence the escaped trailing dot.
    with pytest.raises(
        ValueError,
        match="base_dir must be specified for file-based metrics writer\\.",
    ):
        _ = MetricsConfig(
            writer=MetricsWriterType.File,
            base_dir=" ",
        )
    # passes validation
    _ = MetricsConfig(
        writer=MetricsWriterType.File,
        base_dir="./metrics",
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_metrics_config.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_model_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test model configuration loading."""
import pytest
from graphrag_llm.config import AuthMethod, LLMProviderType, ModelConfig
from pydantic import ValidationError
def test_litellm_provider_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    # model must be non-empty
    with pytest.raises(ValidationError):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="openai",
            model="",
        )
    # model_provider must be non-empty
    with pytest.raises(ValidationError):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="",
            model="gpt-4o",
        )
    # default auth_method is api_key, so omitting api_key fails
    with pytest.raises(
        ValueError,
        match="api_key must be set when auth_method=api_key\\.",
    ):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="openai",
            model="gpt-4o",
        )
    # azure_deployment_name is an Azure-only field
    with pytest.raises(
        ValueError,
        match="azure_deployment_name should not be specified for non-Azure model providers\\.",
    ):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="openai",
            model="gpt-4o",
            azure_deployment_name="some-deployment",
        )
    with pytest.raises(
        ValueError,
        match="api_base must be specified with the 'azure' model provider\\.",
    ):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="azure",
            model="gpt-4o",
        )
    # managed identity and api_key are mutually exclusive
    with pytest.raises(
        ValueError,
        match="api_key should not be set when using Azure Managed Identity\\.",
    ):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="azure",
            model="gpt-4o",
            azure_deployment_name="gpt-4o",
            api_base="https://my-azure-endpoint/",
            api_version="2024-06-01",
            auth_method=AuthMethod.AzureManagedIdentity,
            api_key="some-api-key",
        )
    with pytest.raises(
        ValueError,
        match="api_key must be set when auth_method=api_key\\.",
    ):
        _ = ModelConfig(
            type=LLMProviderType.LiteLLM,
            model_provider="azure",
            azure_deployment_name="gpt-4o",
            api_base="https://my-azure-endpoint/",
            api_version="2024-06-01",
            model="gpt-4o",
        )
    # pass validation
    _ = ModelConfig(
        type=LLMProviderType.LiteLLM,
        model_provider="openai",
        model="gpt-4o",
        api_key="NOT_A_REAL_API_KEY",
    )
    _ = ModelConfig(
        type=LLMProviderType.LiteLLM,
        model_provider="azure",
        model="gpt-4o",
        azure_deployment_name="gpt-4o",
        api_base="https://my-azure-endpoint/",
        api_key="NOT_A_REAL_API_KEY",
    )
    _ = ModelConfig(
        type=LLMProviderType.LiteLLM,
        model_provider="azure",
        model="gpt-4o",
        azure_deployment_name="gpt-4o",
        api_base="https://my-azure-endpoint/",
        api_version="2024-06-01",
        auth_method=AuthMethod.AzureManagedIdentity,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_model_config.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_rate_limit_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test rate limit configuration loading."""
import pytest
from graphrag_llm.config import RateLimitConfig, RateLimitType
def test_sliding_window_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    # match strings are regexes, hence the escaped trailing dots
    with pytest.raises(
        ValueError,
        match="period_in_seconds must be a positive integer for Sliding Window rate limit\\.",
    ):
        _ = RateLimitConfig(
            type=RateLimitType.SlidingWindow,
            period_in_seconds=0,
            requests_per_period=100,
            tokens_per_period=1000,
        )
    # at least one limit (requests or tokens) must be configured
    with pytest.raises(
        ValueError,
        match="At least one of requests_per_period or tokens_per_period must be specified for Sliding Window rate limit\\.",
    ):
        _ = RateLimitConfig(
            type=RateLimitType.SlidingWindow,
        )
    with pytest.raises(
        ValueError,
        match="requests_per_period must be a positive integer for Sliding Window rate limit\\.",
    ):
        _ = RateLimitConfig(
            type=RateLimitType.SlidingWindow,
            period_in_seconds=60,
            requests_per_period=-10,
        )
    with pytest.raises(
        ValueError,
        match="tokens_per_period must be a positive integer for Sliding Window rate limit\\.",
    ):
        _ = RateLimitConfig(
            type=RateLimitType.SlidingWindow,
            period_in_seconds=60,
            tokens_per_period=-10,
        )
    # passes validation
    _ = RateLimitConfig(
        type=RateLimitType.SlidingWindow,
        requests_per_period=100,
    )
    _ = RateLimitConfig(
        type=RateLimitType.SlidingWindow,
        tokens_per_period=1000,
    )
    _ = RateLimitConfig(
        type=RateLimitType.SlidingWindow,
        period_in_seconds=60,
        requests_per_period=100,
        tokens_per_period=1000,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_rate_limit_config.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_retry_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test retry configuration loading."""
import pytest
from graphrag_llm.config import RetryConfig, RetryType
def test_exponential_backoff_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    # match strings are regexes, hence the escaped dots
    with pytest.raises(
        ValueError,
        match="max_retries must be greater than 1 for Exponential Backoff retry\\.",
    ):
        _ = RetryConfig(
            type=RetryType.ExponentialBackoff,
            max_retries=0,
        )
    with pytest.raises(
        ValueError,
        match="base_delay must be greater than 1\\.0 for Exponential Backoff retry\\.",
    ):
        _ = RetryConfig(
            type=RetryType.ExponentialBackoff,
            base_delay=0.5,
        )
    with pytest.raises(
        ValueError,
        match="max_delay must be greater than 1 for Exponential Backoff retry\\.",
    ):
        _ = RetryConfig(
            type=RetryType.ExponentialBackoff,
            max_delay=0.5,
        )
    # passes validation (defaults and explicit values)
    _ = RetryConfig(type=RetryType.ExponentialBackoff)
    _ = RetryConfig(
        type=RetryType.ExponentialBackoff,
        max_retries=5,
        base_delay=2.0,
        max_delay=30,
    )
def test_immediate_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    with pytest.raises(
        ValueError,
        match="max_retries must be greater than 1 for Immediate retry\\.",
    ):
        _ = RetryConfig(
            type=RetryType.Immediate,
            max_retries=0,
        )
    # passes validation (defaults and explicit values)
    _ = RetryConfig(type=RetryType.Immediate)
    _ = RetryConfig(
        type=RetryType.Immediate,
        max_retries=3,
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_retry_config.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_template_engine_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test metrics configuration loading."""
import pytest
from graphrag_llm.config import (
TemplateEngineConfig,
TemplateEngineType,
TemplateManagerType,
)
def test_template_engine_config_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    # whitespace-only base_dir counts as unset for file-based managers
    with pytest.raises(
        ValueError,
        match="base_dir must be specified for file-based template managers\\.",
    ):
        _ = TemplateEngineConfig(
            type=TemplateEngineType.Jinja,
            template_manager=TemplateManagerType.File,
            base_dir=" ",
        )
    # whitespace-only extension is rejected as well
    with pytest.raises(
        ValueError,
        match="template_extension cannot be an empty string for file-based template managers\\.",
    ):
        _ = TemplateEngineConfig(
            type=TemplateEngineType.Jinja,
            template_manager=TemplateManagerType.File,
            base_dir="./templates",
            template_extension=" ",
        )
    # passes validation
    _ = TemplateEngineConfig(
        type=TemplateEngineType.Jinja,
        template_manager=TemplateManagerType.File,
        base_dir="./templates",
        template_extension=".jinja",
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_template_engine_config.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/config/test_tokenizer_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test tokenizer configuration loading."""
import pytest
from graphrag_llm.config import TokenizerConfig, TokenizerType
def test_litellm_tokenizer_validation() -> None:
    """Test that missing required parameters raise validation errors."""
    with pytest.raises(
        ValueError,
        match="model_id must be specified for LiteLLM tokenizer\\.",
    ):
        _ = TokenizerConfig(
            type=TokenizerType.LiteLLM,
            model_id="",
        )
    with pytest.raises(
        ValueError,
        match="encoding_name must be specified for TikToken tokenizer\\.",
    ):
        _ = TokenizerConfig(
            type=TokenizerType.Tiktoken,
            encoding_name="",
        )
    # passes validation
    _ = TokenizerConfig(
        type=TokenizerType.LiteLLM,
        model_id="openai/gpt-4o",
    )
    _ = TokenizerConfig(
        type=TokenizerType.Tiktoken,
        encoding_name="o200k-base",
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/config/test_tokenizer_config.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/graphrag_factory/test_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Unit tests for graphrag_factory package."""
from abc import ABC, abstractmethod
from graphrag_common.factory import Factory
class TestABC(ABC):
"""Test abstract base class."""
@abstractmethod
def get_value(self) -> str:
"""
Get a string value.
Returns
-------
str: A string value.
"""
msg = "Subclasses must implement the get_value method."
raise NotImplementedError(msg)
class ConcreteTestClass(TestABC):
    """Concrete implementation of TestABC."""

    def __init__(self, value: str):
        """Capture the string that get_value will return."""
        self._stored_value = value

    def get_value(self) -> str:
        """Get a string value.

        Returns
        -------
            str: A string value.
        """
        return self._stored_value
def test_factory() -> None:
    """Test the factory behavior."""

    class TestFactory(Factory[TestABC]):
        """Test factory for TestABC implementations."""

    factory = TestFactory()
    # default (transient) scope builds a fresh instance per create() call;
    # singleton scope reuses the first instance it builds.
    factory.register("transient_strategy", ConcreteTestClass)
    factory.register("singleton_strategy", ConcreteTestClass, scope="singleton")
    trans1 = factory.create("transient_strategy", {"value": "test1"})
    trans2 = factory.create("transient_strategy", {"value": "test2"})
    # distinct objects, each carrying its own constructor arguments
    assert trans1 is not trans2
    assert trans1.get_value() == "test1"
    assert trans2.get_value() == "test2"
    single1 = factory.create("singleton_strategy", {"value": "singleton"})
    single2 = factory.create("singleton_strategy", {"value": "singleton"})
    # identity check: singleton scope must hand back the very same object
    assert single1 is single2
    assert single1.get_value() == "singleton"
    assert single2.get_value() == "singleton"
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/graphrag_factory/test_factory.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/hasher/test_hasher.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Test hasher"""
from graphrag_common.hasher import hash_data
def test_hash_data() -> None:
    """Test hash data function."""
    # Test different types of data
    class TestClass:  # noqa: B903
        """Test hasher class."""

        def __init__(self, value: str) -> None:
            self.value = value

    def _test_func():
        pass

    # All should work and not raise exceptions
    _ = hash_data("test string")
    _ = hash_data(12345)
    _ = hash_data(12.345)
    _ = hash_data([1, 2, 3, 4, 5])
    _ = hash_data({"key": "value", "number": 42})
    _ = hash_data((1, "two", 3.0))
    _ = hash_data({1, 2, 3, 4, 5})
    _ = hash_data(None)
    _ = hash_data(True)
    _ = hash_data(b"bytes data")
    _ = hash_data({"nested": {"list": [1, 2, 3], "dict": {"a": "b"}}})
    _ = hash_data(range(10))
    _ = hash_data(frozenset([1, 2, 3]))
    _ = hash_data(complex(1, 2))
    _ = hash_data(bytearray(b"byte array data"))
    _ = hash_data(memoryview(b"memory view data"))
    _ = hash_data(Exception("test exception"))
    _ = hash_data(TestClass)
    _ = hash_data(TestClass("instance value"))
    _ = hash_data(lambda x: x * 2)
    _ = hash_data(_test_func)
    # Test that equivalent data structures produce the same hash
    data1 = {
        "bool": True,
        "int": 42,
        "float": 3.14,
        "str": "hello, world",
        "list": [1, 2, 3],
        "dict": {"key": "value"},
        "nested": {
            "list_of_dicts": [{"a": 1}, {"b": 2}],
            "dict_of_lists": {"numbers": [1, 2, 3]},
        },
        "tuple": (1, 2, 3),
        "set": {1, 2, 3},
        "class": TestClass,
        "function": _test_func,
        "instance": TestClass("instance value"),
    }
    # Same data but different order (key insertion order and set literal order
    # differ; the hash is expected to be order-insensitive for dicts/sets)
    data2 = {
        "bool": True,
        "list": [1, 2, 3],
        "float": 3.14,
        "str": "hello, world",
        "int": 42,
        "nested": {
            "dict_of_lists": {"numbers": [1, 2, 3]},
            "list_of_dicts": [{"a": 1}, {"b": 2}],
        },
        "dict": {"key": "value"},
        "tuple": (1, 2, 3),
        "class": TestClass,
        "set": {1, 3, 2},
        "instance": TestClass("instance value"),
        "function": _test_func,
    }
    hash1 = hash_data(data1)
    hash2 = hash_data(data2)
    assert hash1 == hash2, "Hashes should be the same for equivalent data structures"
    data3 = {"key1": "value1", "key2": 124, "key3": [1, 2, 3]}  # Different value
    hash3 = hash_data(data3)
    assert hash1 != hash3, "Hashes should be different for different data structures"
    # Test classes: equality is by instance state, not identity
    instance1 = TestClass("value1")
    instance2 = TestClass("value1")
    instance3 = TestClass("value2")
    hash_instance1 = hash_data(instance1)
    hash_instance2 = hash_data(instance2)
    hash_instance3 = hash_data(instance3)
    assert hash_instance1 == hash_instance2, (
        "Hashes should be the same for equivalent class instances"
    )
    assert hash_instance1 != hash_instance3, (
        "Hashes should be different for different class instances"
    )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/hasher/test_hasher.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/indexing/input/test_jsonl_loader.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag_input import InputConfig, InputType, create_input_reader
from graphrag_storage import StorageConfig, create_storage
async def test_jsonl_loader_one_file_multiple_objects():
    """Every line of a JSONL file becomes its own document with a generated title."""
    input_config = InputConfig(
        type=InputType.JsonLines,
        file_pattern=".*\\.jsonl$",
    )
    file_storage = create_storage(
        StorageConfig(base_dir="tests/unit/indexing/input/data/one-jsonl")
    )
    loader = create_input_reader(input_config, file_storage)
    docs = await loader.read_files()
    assert len(docs) == 3
    assert docs[0].title == "input.jsonl (0)"
    assert docs[0].raw_data == {
        "title": "Hello",
        "text": "Hi how are you today?",
    }
    assert docs[1].title == "input.jsonl (1)"
async def test_jsonl_loader_one_file_with_title():
    """When title_column is set, document titles come from that JSON field."""
    docs = await create_input_reader(
        InputConfig(type=InputType.JsonLines, title_column="title"),
        create_storage(
            StorageConfig(base_dir="tests/unit/indexing/input/data/one-jsonl")
        ),
    ).read_files()

    assert len(docs) == 3
    # The first line's "title" field, not the file name, becomes the title.
    assert docs[0].title == "Hello"
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/indexing/input/test_jsonl_loader.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/indexing/input/test_markitdown_loader.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag_input import InputConfig, InputType, create_input_reader
from graphrag_storage import StorageConfig, create_storage
# these tests just confirm we can load files with MarkItDown,
# and use html specifically because it requires no additional dependency installation
async def test_markitdown_loader_one_file():
    """MarkItDown extracts a cleaned title and body text from one HTML file."""
    reader = create_input_reader(
        InputConfig(type=InputType.MarkItDown, file_pattern=".*\\.html$"),
        create_storage(
            StorageConfig(base_dir="tests/unit/indexing/input/data/one-html")
        ),
    )

    docs = await reader.read_files()

    assert len(docs) == 1
    (doc,) = docs
    # markitdown will extract the title and body from the HTML if present and clean them
    assert doc.title == "Test"
    assert doc.text == "Hi how are you today?"
    assert doc.raw_data is None
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/indexing/input/test_markitdown_loader.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/indexing/input/test_text_document.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
import pytest
from graphrag_input import get_property
def test_get_property_single_level():
    """A top-level key resolves directly."""
    assert get_property({"foo": "bar"}, "foo") == "bar"


def test_get_property_two_levels():
    """A dotted path traverses one level of nesting."""
    assert get_property({"foo": {"bar": "baz"}}, "foo.bar") == "baz"


def test_get_property_three_levels():
    """A dotted path traverses two levels of nesting."""
    assert get_property({"a": {"b": {"c": "value"}}}, "a.b.c") == "value"


def test_get_property_returns_dict():
    """A path ending at a dict returns that dict, not a leaf."""
    assert get_property({"foo": {"bar": {"baz": "qux"}}}, "foo.bar") == {"baz": "qux"}


def test_get_property_missing_key_raises():
    """A missing top-level key raises KeyError."""
    with pytest.raises(KeyError):
        get_property({"foo": "bar"}, "missing")


def test_get_property_missing_nested_key_raises():
    """A missing nested key raises KeyError."""
    with pytest.raises(KeyError):
        get_property({"foo": {"bar": "baz"}}, "foo.missing")


def test_get_property_non_dict_intermediate_raises():
    """Descending through a non-dict value raises KeyError."""
    with pytest.raises(KeyError):
        get_property({"foo": "bar"}, "foo.bar")


def test_get_property_empty_dict_raises():
    """Any lookup on an empty dict raises KeyError."""
    with pytest.raises(KeyError):
        get_property({}, "foo")


def test_get_property_with_none_value():
    """None is a legal stored value and is returned as-is."""
    assert get_property({"foo": None}, "foo") is None


def test_get_property_with_list_value():
    """Lists are legal leaf values."""
    assert get_property({"foo": [1, 2, 3]}, "foo") == [1, 2, 3]


def test_get_property_list_intermediate_raises():
    """Descending into a list (rather than a dict) raises KeyError."""
    with pytest.raises(KeyError):
        get_property({"foo": [{"bar": "baz"}]}, "foo.bar")


def test_get_property_numeric_value():
    """Numeric leaf values are returned unchanged."""
    assert get_property({"count": 42}, "count") == 42


def test_get_property_boolean_value():
    """Boolean leaf values are returned unchanged."""
    assert get_property({"enabled": True}, "enabled") is True
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/indexing/input/test_text_document.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/indexing/input/test_text_loader.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag_input import InputConfig, InputType, create_input_reader
from graphrag_storage import StorageConfig, create_storage
async def test_text_loader_one_file():
    """A directory with one .txt file produces exactly one document."""
    reader = create_input_reader(
        InputConfig(type=InputType.Text, file_pattern=".*\\.txt$"),
        create_storage(
            StorageConfig(base_dir="tests/unit/indexing/input/data/one-txt")
        ),
    )

    docs = await reader.read_files()

    assert len(docs) == 1
    # Plain-text documents are titled by file name and carry no raw payload.
    assert docs[0].title == "input.txt"
    assert docs[0].raw_data is None
async def test_text_loader_multiple_files():
    """The default file pattern picks up every text file in the directory."""
    docs = await create_input_reader(
        InputConfig(type=InputType.Text),
        create_storage(
            StorageConfig(base_dir="tests/unit/indexing/input/data/multiple-txts")
        ),
    ).read_files()

    assert len(docs) == 2
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/indexing/input/test_text_loader.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/load_config/config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Config models for load_config unit tests."""
from pydantic import BaseModel, ConfigDict, Field
class TestNestedModel(BaseModel):
    """Test nested model."""

    model_config = ConfigDict(extra="forbid")
    # Tell pytest not to collect this Test*-prefixed class as a test class,
    # matching the sibling TestConfigModel below.
    __test__ = False  # type: ignore
    nested_str: str = Field(description="A nested field.")
    nested_int: int = Field(description="Another nested field.")
class TestConfigModel(BaseModel):
    """Test configuration model."""

    # Reject unknown keys so config typos surface as validation errors.
    model_config = ConfigDict(extra="forbid")
    # Tell pytest not to collect this Test*-prefixed class as a test class.
    __test__ = False  # type: ignore
    name: str = Field(description="Name field.")
    value: int = Field(description="Value field.")
    nested: TestNestedModel = Field(description="Nested model field.")
    nested_list: list[TestNestedModel] = Field(description="List of nested models.")
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/load_config/config.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/load_config/test_load_config.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Unit tests for graphrag-config.load_config."""
import os
from pathlib import Path
import pytest
from graphrag_common.config import ConfigParsingError, load_config
from pydantic import ValidationError
from .config import TestConfigModel
def test_load_config_validation():
    """load_config surfaces missing files, bad syntax, and schema violations."""
    fixtures_dir = Path(__file__).parent / "fixtures"
    bad_format_path = fixtures_dir / "invalid_config_format.yaml"

    # A config path that does not exist fails before anything is parsed.
    with pytest.raises(FileNotFoundError):
        _ = load_config(TestConfigModel, "non_existent_config.yaml")

    # A missing .env file is reported even when the config file exists.
    with pytest.raises(FileNotFoundError):
        _ = load_config(
            config_initializer=TestConfigModel,
            config_path=bad_format_path,
            dot_env_path="non_existent.env",
        )

    # Using yaml to parse invalid json formatting
    with pytest.raises(ConfigParsingError):
        _ = load_config(TestConfigModel, bad_format_path)

    # Test validation error from config model
    with pytest.raises(ValidationError):
        _ = load_config(
            config_initializer=TestConfigModel,
            config_path=fixtures_dir / "invalid_config.yaml",
            set_cwd=False,
        )
def test_load_config():
    """Test loading configuration from a directory, a file, with overrides, and with env vars.

    The cwd-changing calls are wrapped in try/finally so the working directory
    is restored even when an assertion fails; otherwise every later test in the
    session would run from the fixtures directory.
    """
    config_directory = Path(__file__).parent / "fixtures"
    config_path = config_directory / "settings.yaml"

    def _assert_settings(config: TestConfigModel) -> None:
        # Expected values shared by every load of fixtures/settings.yaml.
        assert config.name == "test_name"
        assert config.value == 100
        assert config.nested.nested_str == "nested_value"
        assert config.nested.nested_int == 42
        assert len(config.nested_list) == 2
        assert config.nested_list[0].nested_str == "list_value_1"
        assert config.nested_list[0].nested_int == 7
        assert config.nested_list[1].nested_str == "list_value_2"
        assert config.nested_list[1].nested_int == 8

    # Load from dir
    config = load_config(
        config_initializer=TestConfigModel, config_path=config_directory, set_cwd=False
    )
    _assert_settings(config)
    # Should not have changed directories
    root_repo_dir = Path(__file__).parent.parent.parent.parent.resolve()
    assert Path.cwd().resolve() == root_repo_dir

    # Load from an explicit file path
    config = load_config(
        config_initializer=TestConfigModel,
        config_path=config_path,
        set_cwd=False,
    )
    _assert_settings(config)

    overrides = {
        "value": 65537,
        "nested": {"nested_int": 84},
        "nested_list": [
            {"nested_str": "overridden_list_value_1", "nested_int": 23},
        ],
    }
    cwd = Path.cwd()
    try:
        config_with_overrides = load_config(
            config_initializer=TestConfigModel,
            config_path=config_path,
            overrides=overrides,
        )
        # Should have changed directories to the config file location
        assert Path.cwd() == config_directory
        assert (
            Path("some/new/path").resolve()
            == (config_directory / "some/new/path").resolve()
        )
    finally:
        # Reset cwd even if an assertion above failed.
        os.chdir(cwd)
    assert config_with_overrides.name == "test_name"
    assert config_with_overrides.value == 65537
    assert config_with_overrides.nested.nested_str == "nested_value"
    assert config_with_overrides.nested.nested_int == 84
    assert len(config_with_overrides.nested_list) == 1
    assert config_with_overrides.nested_list[0].nested_str == "overridden_list_value_1"
    assert config_with_overrides.nested_list[0].nested_int == 23

    config_with_env_vars_path = config_directory / "config_with_env.yaml"
    # Config contains env vars that do not exist
    # and no .env file is provided
    with pytest.raises(
        ConfigParsingError,
    ):
        _ = load_config(
            config_initializer=TestConfigModel,
            config_path=config_with_env_vars_path,
            load_dot_env_file=False,
            set_cwd=False,
        )
    env_path = config_directory / "test.env"
    try:
        config_with_env_vars = load_config(
            config_initializer=TestConfigModel,
            config_path=config_with_env_vars_path,
            dot_env_path=env_path,
        )
    finally:
        # This call leaves set_cwd at its default, so restore the cwd here too.
        os.chdir(cwd)
    assert config_with_env_vars.name == "env_name"
    assert config_with_env_vars.value == 100
    assert config_with_env_vars.nested.nested_str == "nested_value"
    assert config_with_env_vars.nested.nested_int == 42
    assert len(config_with_env_vars.nested_list) == 2
    assert config_with_env_vars.nested_list[0].nested_str == "list_value_1"
    assert config_with_env_vars.nested_list[0].nested_int == 7
    assert config_with_env_vars.nested_list[1].nested_str == "list_value_2"
    assert config_with_env_vars.nested_list[1].nested_int == 8
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/load_config/test_load_config.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/query/context_builder/dynamic_community_selection.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""Tests for dynamic community selection with type handling."""
from unittest.mock import MagicMock
from graphrag.data_model.community import Community
from graphrag.data_model.community_report import CommunityReport
from graphrag.query.context_builder.dynamic_community_selection import (
DynamicCommunitySelection,
)
def create_mock_tokenizer() -> MagicMock:
    """Build a tokenizer stub whose encode() always yields three token ids."""
    mock_tokenizer = MagicMock()
    mock_tokenizer.encode.return_value = [1, 2, 3]
    return mock_tokenizer
def create_mock_model() -> MagicMock:
    """Build a stand-in chat model; no behavior is configured on it."""
    return MagicMock()
def test_dynamic_community_selection_handles_int_children():
    """Test that DynamicCommunitySelection correctly handles children IDs as integers.

    This tests the fix for issue #2004 where children IDs could be integers
    while self.reports keys are strings, causing child communities to be skipped.
    """

    def _report(short_id: str, label: str) -> CommunityReport:
        # One report per community; community_id is always the (string) short id.
        return CommunityReport(
            id=f"report-{short_id}",
            short_id=short_id,
            title=f"Report {short_id}",
            community_id=short_id,
            summary=f"{label} summary",
            full_content=f"{label} full content",
            rank=1.0,
        )

    # Create communities with integer children (simulating the bug scenario)
    # Note: Even though the type annotation says list[str], actual data may have ints
    communities = [
        Community(
            id="comm-0",
            short_id="0",
            title="Root Community",
            level="0",
            parent="",
            children=[1, 2],  # type: ignore[list-item] # Integer children - testing bug fix
        ),
        Community(
            id="comm-1",
            short_id="1",
            title="Child Community 1",
            level="1",
            parent="0",
            children=[],
        ),
        Community(
            id="comm-2",
            short_id="2",
            title="Child Community 2",
            level="1",
            parent="0",
            children=[],
        ),
    ]
    # Community reports are keyed by string community_id.
    reports = [
        _report("0", "Root community"),
        _report("1", "Child 1"),
        _report("2", "Child 2"),
    ]

    selector = DynamicCommunitySelection(
        community_reports=reports,
        communities=communities,
        model=create_mock_model(),
        tokenizer=create_mock_tokenizer(),
        threshold=1,
        keep_parent=False,
        max_level=2,
    )

    # Reports and communities are both keyed by string ids.
    for key in ("0", "1", "2"):
        assert key in selector.reports
        assert key in selector.communities

    # Verify that the children are properly accessible.
    # Before the fix, int children would fail the `in self.reports` check.
    root_community = selector.communities["0"]
    for child in root_community.children:
        child_id = str(child)
        assert child_id in selector.reports, (
            f"Child {child} (as '{child_id}') should be found in reports"
        )
def test_dynamic_community_selection_handles_str_children():
    """Test that DynamicCommunitySelection works correctly with string children IDs."""
    communities = [
        Community(
            id="comm-0",
            short_id="0",
            title="Root Community",
            level="0",
            parent="",
            children=["1", "2"],  # String children - expected type
        ),
    ]
    # Leaf communities under the root, built from their short ids.
    for child_id in ("1", "2"):
        communities.append(
            Community(
                id=f"comm-{child_id}",
                short_id=child_id,
                title=f"Child Community {child_id}",
                level="1",
                parent="0",
                children=[],
            )
        )

    reports = [
        CommunityReport(
            id="report-0",
            short_id="0",
            title="Report 0",
            community_id="0",
            summary="Root community summary",
            full_content="Root community full content",
            rank=1.0,
        )
    ]
    # One report per child community, keyed by its string id.
    for child_id in ("1", "2"):
        reports.append(
            CommunityReport(
                id=f"report-{child_id}",
                short_id=child_id,
                title=f"Report {child_id}",
                community_id=child_id,
                summary=f"Child {child_id} summary",
                full_content=f"Child {child_id} full content",
                rank=1.0,
            )
        )

    selector = DynamicCommunitySelection(
        community_reports=reports,
        communities=communities,
        model=create_mock_model(),
        tokenizer=create_mock_tokenizer(),
        threshold=1,
        keep_parent=False,
        max_level=2,
    )

    # Every string child id of the root must resolve to a report.
    root = selector.communities["0"]
    for child in root.children:
        child_id = str(child)
        assert child_id in selector.reports, (
            f"Child {child} (as '{child_id}') should be found in reports"
        )
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/query/context_builder/dynamic_community_selection.py",
"license": "MIT License",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/unit/utils/test_encoding.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag.tokenizer.get_tokenizer import get_tokenizer
def test_encode_basic():
    """The default tokenizer turns a short string into the expected token ids."""
    result = get_tokenizer().encode("abc def")
    assert result == [26682, 1056], (
        f"Encoding failed to return expected tokens, sent {result}"
    )
def test_num_tokens_empty_input():
    """Encoding the empty string yields zero tokens."""
    token_count = len(get_tokenizer().encode(""))
    assert token_count == 0, "Token count for empty input should be 0"
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/unit/utils/test_encoding.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/integration/cache/test_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""CacheFactory Tests.
These tests will test the CacheFactory() class and the creation of each cache type that is natively supported.
"""
import sys
import pytest
from graphrag_cache import Cache, CacheConfig, CacheType, create_cache, register_cache
from graphrag_cache.cache_factory import cache_factory
from graphrag_cache.json_cache import JsonCache
from graphrag_cache.memory_cache import MemoryCache
from graphrag_cache.noop_cache import NoopCache
from graphrag_storage import StorageConfig, StorageType, create_storage
# cspell:disable-next-line well-known-key
WELL_KNOWN_BLOB_STORAGE_KEY = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
# cspell:disable-next-line well-known-key
WELL_KNOWN_COSMOS_CONNECTION_STRING = "AccountEndpoint=https://127.0.0.1:8081/;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw=="
def test_create_noop_cache():
    """A Noop-typed config produces a NoopCache."""
    assert isinstance(create_cache(CacheConfig(type=CacheType.Noop)), NoopCache)


def test_create_memory_cache():
    """A Memory-typed config produces a MemoryCache."""
    assert isinstance(create_cache(CacheConfig(type=CacheType.Memory)), MemoryCache)
def test_create_file_cache():
    """A Json-typed config backed by in-memory storage produces a JsonCache."""
    memory_storage = create_storage(StorageConfig(type=StorageType.Memory))
    json_cache = create_cache(
        CacheConfig(type=CacheType.Json),
        storage=memory_storage,
    )
    assert isinstance(json_cache, JsonCache)
def test_create_blob_cache():
    """A Json-typed config backed by Azure Blob storage produces a JsonCache."""
    blob_storage = create_storage(
        StorageConfig(
            type=StorageType.AzureBlob,
            connection_string=WELL_KNOWN_BLOB_STORAGE_KEY,
            container_name="testcontainer",
            base_dir="testcache",
        )
    )
    blob_cache = create_cache(CacheConfig(type=CacheType.Json), storage=blob_storage)
    assert isinstance(blob_cache, JsonCache)
@pytest.mark.skipif(
    not sys.platform.startswith("win"),
    reason="cosmosdb emulator is only available on windows runners at this time",
)
def test_create_cosmosdb_cache():
    """A Json-typed config backed by Azure Cosmos storage produces a JsonCache."""
    cosmos_storage = create_storage(
        StorageConfig(
            type=StorageType.AzureCosmos,
            connection_string=WELL_KNOWN_COSMOS_CONNECTION_STRING,
            database_name="testdatabase",
            container_name="testcontainer",
        )
    )
    cosmos_cache = create_cache(
        CacheConfig(type=CacheType.Json),
        storage=cosmos_storage,
    )
    assert isinstance(cosmos_cache, JsonCache)
def test_register_and_create_custom_cache():
    """Registering a custom factory callable makes its type creatable."""
    from unittest.mock import MagicMock

    # A factory double that satisfies the Cache interface.
    mock_factory = MagicMock(spec=Cache)
    mock_instance = MagicMock()
    mock_instance.initialized = True
    mock_factory.return_value = mock_instance

    register_cache("custom", lambda **kwargs: mock_factory(**kwargs))
    created = create_cache(CacheConfig(type="custom"))

    assert mock_factory.called
    assert created is mock_instance
    # Attribute only exists on our mock
    assert created.initialized is True  # type: ignore
    # The new type shows up in the factory registry.
    assert "custom" in cache_factory
def test_create_unknown_cache():
    """Creating an unregistered cache type raises a descriptive ValueError."""
    expected = "CacheConfig\\.type 'unknown' is not registered in the CacheFactory\\."
    with pytest.raises(ValueError, match=expected):
        create_cache(CacheConfig(type="unknown"))
def test_register_class_directly_works():
    """Registering a Cache subclass itself (not a factory callable) works."""

    class CustomCache(Cache):
        # Minimal concrete Cache: every operation is a no-op.
        def __init__(self, **kwargs):
            pass

        async def get(self, key: str):
            return None

        async def set(self, key: str, value, debug_data=None):
            pass

        async def has(self, key: str):
            return False

        async def delete(self, key: str):
            pass

        async def clear(self):
            pass

        def child(self, name: str):
            return self

    # The factory accepts a class directly, with no TypeError.
    register_cache("custom_class", CustomCache)
    assert "custom_class" in cache_factory

    # Creating the type instantiates the registered class.
    assert isinstance(create_cache(CacheConfig(type="custom_class")), CustomCache)
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/integration/cache/test_factory.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/integration/logging/test_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""LoggerFactory Tests.
These tests will test the LoggerFactory class and the creation of each reporting type that is natively supported.
"""
import logging
import pytest
from graphrag.config.enums import ReportingType
from graphrag.logger.blob_workflow_logger import BlobWorkflowLogger
from graphrag.logger.factory import LoggerFactory
# cspell:disable-next-line well-known-key
WELL_KNOWN_BLOB_STORAGE_KEY = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
# cspell:disable-next-line well-known-key
WELL_KNOWN_COSMOS_CONNECTION_STRING = "AccountEndpoint=https://127.0.0.1:8081/;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw=="
@pytest.mark.skip(reason="Blob storage emulator is not available in this environment")
def test_create_blob_logger():
    """Creating a blob reporting type yields a BlobWorkflowLogger."""
    blob_kwargs = {
        "type": "blob",
        "connection_string": WELL_KNOWN_BLOB_STORAGE_KEY,
        "base_dir": "testbasedir",
        "container_name": "testcontainer",
    }
    created = LoggerFactory().create(ReportingType.blob.value, blob_kwargs)
    assert isinstance(created, BlobWorkflowLogger)
def test_register_and_create_custom_logger():
    """Registering a custom factory callable makes its logger type creatable."""
    from unittest.mock import MagicMock

    # A factory double that satisfies the logging.Handler interface.
    handler_factory = MagicMock(spec=logging.Handler)
    handler_instance = MagicMock()
    handler_instance.initialized = True
    handler_factory.return_value = handler_instance

    LoggerFactory().register("custom", lambda **kwargs: handler_factory(**kwargs))
    created = LoggerFactory().create("custom")

    assert handler_factory.called
    assert created is handler_instance
    # Attribute only exists on our mock
    assert created.initialized is True  # type: ignore
    # The new type shows up in the factory registry.
    assert "custom" in LoggerFactory()
def test_get_logger_types():
    """Built-in reporting types are pre-registered with the factory."""
    factory = LoggerFactory()
    assert ReportingType.file.value in factory
    assert ReportingType.blob.value in factory


def test_create_unknown_logger():
    """Creating an unregistered logger type raises a ValueError."""
    with pytest.raises(ValueError, match="Strategy 'unknown' is not registered\\."):
        LoggerFactory().create("unknown")
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/integration/logging/test_factory.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/graphrag:tests/integration/vector_stores/test_factory.py | # Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""VectorStoreFactory Tests.
These tests will test the VectorStoreFactory class and the creation of each vector store type that is natively supported.
"""
import pytest
from graphrag_vectors import (
VectorStore,
VectorStoreFactory,
VectorStoreType,
)
from graphrag_vectors.azure_ai_search import AzureAISearchVectorStore
from graphrag_vectors.cosmosdb import CosmosDBVectorStore
from graphrag_vectors.lancedb import LanceDBVectorStore
# Register the built-in store types up front: the factory registers them
# lazily, and these tests rely on them being present before any create() call.
VectorStoreFactory().register(VectorStoreType.LanceDB, LanceDBVectorStore)
VectorStoreFactory().register(VectorStoreType.AzureAISearch, AzureAISearchVectorStore)
VectorStoreFactory().register(VectorStoreType.CosmosDB, CosmosDBVectorStore)
def test_create_lancedb_vector_store():
    """A LanceDB store is created with the default index name."""
    store = VectorStoreFactory().create(
        VectorStoreType.LanceDB,
        {"db_uri": "/tmp/lancedb"},
    )
    assert isinstance(store, LanceDBVectorStore)
    assert store.index_name == "vector_index"
@pytest.mark.skip(reason="Azure AI Search requires credentials and setup")
def test_create_azure_ai_search_vector_store():
    """An AzureAISearch store is created from url/key/index kwargs."""
    store = VectorStoreFactory().create(
        VectorStoreType.AzureAISearch,
        {
            "url": "https://test.search.windows.net",
            "api_key": "test_key",
            "index_name": "test_collection",
        },
    )
    assert isinstance(store, AzureAISearchVectorStore)
@pytest.mark.skip(reason="CosmosDB requires credentials and setup")
def test_create_cosmosdb_vector_store():
    """A CosmosDB store is created from connection/database/index kwargs."""
    store = VectorStoreFactory().create(
        VectorStoreType.CosmosDB,
        {
            "connection_string": "AccountEndpoint=https://test.documents.azure.com:443/;AccountKey=test_key==",
            "database_name": "test_db",
            "index_name": "test_collection",
        },
    )
    assert isinstance(store, CosmosDBVectorStore)
def test_register_and_create_custom_vector_store():
    """Registering a custom factory callable makes its store type creatable."""
    from unittest.mock import MagicMock

    # A factory double that satisfies the VectorStore interface.
    store_factory = MagicMock(spec=VectorStore)
    store_instance = MagicMock()
    store_instance.initialized = True
    store_factory.return_value = store_instance

    VectorStoreFactory().register("custom", lambda **kwargs: store_factory(**kwargs))
    created = VectorStoreFactory().create("custom", {})

    assert store_factory.called
    assert created is store_instance
    # Attribute only exists on our mock
    assert created.initialized is True  # type: ignore
    # The new type shows up in the factory registry.
    assert "custom" in VectorStoreFactory()
def test_create_unknown_vector_store():
    """Creating an unregistered store type raises a ValueError."""
    with pytest.raises(ValueError, match="Strategy 'unknown' is not registered\\."):
        VectorStoreFactory().create("unknown")


def test_is_supported_type():
    """Membership checks accept built-in types and reject unknown ones."""
    factory = VectorStoreFactory()
    assert VectorStoreType.LanceDB in factory
    assert VectorStoreType.AzureAISearch in factory
    assert VectorStoreType.CosmosDB in factory
    assert "unknown" not in factory
def test_register_class_directly_works():
    """Test that registering a class directly works."""
    from graphrag_vectors import VectorStore
    # Minimal concrete VectorStore: every abstract member is stubbed so the
    # factory can instantiate the class; none of the stubs do real work.
    class CustomVectorStore(VectorStore):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
        def connect(self, **kwargs):
            pass
        def create_index(self, **kwargs):
            pass
        def load_documents(self, documents):
            pass
        def insert(self, document):
            pass
        def similarity_search_by_vector(
            self,
            query_embedding,
            k=10,
            select=None,
            filters=None,
            include_vectors=True,
        ):
            # No stored vectors, so every search comes back empty.
            return []
        def similarity_search_by_text(
            self,
            text,
            text_embedder,
            k=10,
            select=None,
            filters=None,
            include_vectors=True,
        ):
            return []
        def search_by_id(self, id, select=None, include_vectors=True):
            from graphrag_vectors import VectorStoreDocument
            # Echo back an empty document for the requested id.
            return VectorStoreDocument(id=id, vector=None)
        def count(self):
            return 0
        def remove(self, ids):
            pass
        def update(self, document):
            pass
    # VectorStoreFactory() allows registering classes directly (no TypeError)
    VectorStoreFactory().register("custom_class", CustomVectorStore)
    # Verify it was registered
    assert "custom_class" in VectorStoreFactory()
    # Test creating an instance
    vector_store = VectorStoreFactory().create(
        "custom_class",
        {},
    )
    assert isinstance(vector_store, CustomVectorStore)
| {
"repo_id": "microsoft/graphrag",
"file_path": "tests/integration/vector_stores/test_factory.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/markitdown:packages/markitdown/tests/test_pdf_masterformat.py | #!/usr/bin/env python3 -m pytest
"""Tests for MasterFormat-style partial numbering in PDF conversion."""
import os
import re
import pytest
from markitdown import MarkItDown
from markitdown.converters._pdf_converter import PARTIAL_NUMBERING_PATTERN
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
class TestMasterFormatPartialNumbering:
"""Test handling of MasterFormat-style partial numbering (.1, .2, etc.)."""
def test_partial_numbering_pattern_regex(self):
"""Test that the partial numbering regex pattern correctly matches."""
# Should match partial numbering patterns
assert PARTIAL_NUMBERING_PATTERN.match(".1") is not None
assert PARTIAL_NUMBERING_PATTERN.match(".2") is not None
assert PARTIAL_NUMBERING_PATTERN.match(".10") is not None
assert PARTIAL_NUMBERING_PATTERN.match(".99") is not None
# Should NOT match other patterns
assert PARTIAL_NUMBERING_PATTERN.match("1.") is None
assert PARTIAL_NUMBERING_PATTERN.match("1.2") is None
assert PARTIAL_NUMBERING_PATTERN.match(".1.2") is None
assert PARTIAL_NUMBERING_PATTERN.match("text") is None
assert PARTIAL_NUMBERING_PATTERN.match(".a") is None
assert PARTIAL_NUMBERING_PATTERN.match("") is None
def test_masterformat_partial_numbering_not_split(self):
"""Test that MasterFormat partial numbering stays with associated text.
MasterFormat documents use partial numbering like:
.1 The intent of this Request for Proposal...
.2 Available information relative to...
These should NOT be split into separate table columns, but kept
as coherent text lines with the number followed by its description.
"""
pdf_path = os.path.join(TEST_FILES_DIR, "masterformat_partial_numbering.pdf")
markitdown = MarkItDown()
result = markitdown.convert(pdf_path)
text_content = result.text_content
# Partial numberings should NOT appear isolated on their own lines
# If they're isolated, it means the parser incorrectly split them from their text
lines = text_content.split("\n")
isolated_numberings = []
for line in lines:
stripped = line.strip()
# Check if line contains ONLY a partial numbering (with possible whitespace/pipes)
cleaned = stripped.replace("|", "").strip()
if cleaned in [".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8", ".9", ".10"]:
isolated_numberings.append(stripped)
assert len(isolated_numberings) == 0, (
f"Partial numberings should not be isolated from their text. "
f"Found isolated: {isolated_numberings}"
)
# Verify that partial numberings appear WITH following text on the same line
# Look for patterns like ".1 The intent" or ".1 Some text"
partial_with_text = re.findall(r"\.\d+\s+\w+", text_content)
assert (
len(partial_with_text) > 0
), "Expected to find partial numberings followed by text on the same line"
def test_masterformat_content_preserved(self):
"""Test that MasterFormat document content is fully preserved."""
pdf_path = os.path.join(TEST_FILES_DIR, "masterformat_partial_numbering.pdf")
markitdown = MarkItDown()
result = markitdown.convert(pdf_path)
text_content = result.text_content
# Verify key content from the MasterFormat document is preserved
expected_content = [
"RFP for Construction Management Services",
"Section 00 00 43",
"Instructions to Respondents",
"Ken Sargent House",
"INTENT",
"Request for Proposal",
"KEN SARGENT HOUSE",
"GRANDE PRAIRIE, ALBERTA",
"Section 00 00 45",
]
for content in expected_content:
assert (
content in text_content
), f"Expected content '{content}' not found in extracted text"
# Verify partial numbering is followed by text on the same line
# .1 should be followed by "The intent" on the same line
assert re.search(
r"\.1\s+The intent", text_content
), "Partial numbering .1 should be followed by 'The intent' text"
# .2 should be followed by "Available information" on the same line
assert re.search(
r"\.2\s+Available information", text_content
), "Partial numbering .2 should be followed by 'Available information' text"
# Ensure text content is not empty and has reasonable length
assert (
len(text_content.strip()) > 100
), "MasterFormat document should have substantial text content"
def test_merge_partial_numbering_with_empty_lines_between(self):
    """Test that partial numberings merge correctly even with empty lines between.

    When PDF extractors produce output like:

        .1

        The intent of this Request...

    The merge logic should still combine them properly.
    """
    pdf_path = os.path.join(TEST_FILES_DIR, "masterformat_partial_numbering.pdf")
    markitdown = MarkItDown()
    result = markitdown.convert(pdf_path)
    text_content = result.text_content
    # The merged result should have .1 and .2 followed by text
    # Check that we don't have patterns like ".1\n\nThe intent" (unmerged)
    lines = text_content.split("\n")
    for i, line in enumerate(lines):
        stripped = line.strip()
        # If we find an isolated partial numbering, the merge failed
        if stripped in [".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8"]:
            # Check if next non-empty line exists and wasn't merged; only the
            # first non-empty line within the next two matters.
            for j in range(i + 1, min(i + 3, len(lines))):
                if lines[j].strip():
                    pytest.fail(
                        f"Partial numbering '{stripped}' on line {i} was not "
                        f"merged with following text '{lines[j].strip()[:30]}...'"
                    )
                break
def test_multiple_partial_numberings_all_merged(self):
    """Test that all partial numberings in a document are properly merged.

    Counts merged occurrences (".N <text>") versus isolated ones (".N"
    alone on a line) and requires at least two merged and zero isolated.
    """
    pdf_path = os.path.join(TEST_FILES_DIR, "masterformat_partial_numbering.pdf")
    markitdown = MarkItDown()
    result = markitdown.convert(pdf_path)
    text_content = result.text_content
    # Count occurrences of merged partial numberings (number followed by text)
    merged_count = len(re.findall(r"\.\d+\s+[A-Za-z]", text_content))
    # Count isolated partial numberings (number alone on a line)
    isolated_count = 0
    for line in text_content.split("\n"):
        stripped = line.strip()
        if re.match(r"^\.\d+$", stripped):
            isolated_count += 1
    assert (
        merged_count >= 2
    ), f"Expected at least 2 merged partial numberings, found {merged_count}"
    assert (
        isolated_count == 0
    ), f"Found {isolated_count} isolated partial numberings that weren't merged"
| {
"repo_id": "microsoft/markitdown",
"file_path": "packages/markitdown/tests/test_pdf_masterformat.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/markitdown:packages/markitdown/tests/test_docintel_html.py | import io
from markitdown.converters._doc_intel_converter import (
DocumentIntelligenceConverter,
DocumentIntelligenceFileType,
)
from markitdown._stream_info import StreamInfo
def _make_converter(file_types):
    """Build a DocumentIntelligenceConverter without running __init__.

    Bypassing __init__ avoids constructing an Azure client; only the
    `_file_types` attribute needed by `accepts` is set.
    """
    converter = DocumentIntelligenceConverter.__new__(DocumentIntelligenceConverter)
    converter._file_types = file_types
    return converter
def test_docintel_accepts_html_extension():
    """A stream flagged only by its .html extension must be accepted."""
    converter = _make_converter([DocumentIntelligenceFileType.HTML])
    info = StreamInfo(mimetype=None, extension=".html")
    assert converter.accepts(io.BytesIO(b""), info)
def test_docintel_accepts_html_mimetype():
    """Both HTML mimetypes must be accepted even without an extension."""
    converter = _make_converter([DocumentIntelligenceFileType.HTML])
    for mimetype in ("text/html", "application/xhtml+xml"):
        info = StreamInfo(mimetype=mimetype, extension=None)
        assert converter.accepts(io.BytesIO(b""), info)
| {
"repo_id": "microsoft/markitdown",
"file_path": "packages/markitdown/tests/test_docintel_html.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/qlib:tests/backtest/test_soft_topk_strategy.py | import pandas as pd
import pytest
from qlib.contrib.strategy.cost_control import SoftTopkStrategy
class MockPosition:
    """Minimal stand-in for a qlib Position holding a fixed weight mapping."""

    def __init__(self, weights):
        self.weights = weights

    def get_stock_weight_dict(self, only_stock=True):
        # `only_stock` is accepted for interface compatibility and ignored.
        return self.weights
def test_soft_topk_logic():
    """Exercise SoftTopkStrategy rebalancing with and without an impact limit."""
    # Initial: A=0.8, B=0.2 (Total=1.0). Target Risk=0.95.
    # Scores: A and B are low, C and D are topk.
    scores = pd.Series({"C": 0.9, "D": 0.8, "A": 0.1, "B": 0.1})
    current_pos = MockPosition({"A": 0.8, "B": 0.2})
    topk = 2
    risk_degree = 0.95
    impact_limit = 0.1  # Max change per step

    def create_test_strategy(impact_limit_value):
        # Bypass __init__ so no qlib environment is required; set only the
        # attributes that generate_target_weight_position reads.
        strat = SoftTopkStrategy.__new__(SoftTopkStrategy)
        strat.topk = topk
        strat.risk_degree = risk_degree
        strat.trade_impact_limit = impact_limit_value
        return strat

    # 1. With impact limit: Expect deterministic sell and limited buy
    strat_i = create_test_strategy(impact_limit)
    res_i = strat_i.generate_target_weight_position(scores, current_pos, None, None)
    # A should be exactly 0.8 - 0.1 = 0.7
    assert abs(res_i["A"] - 0.7) < 1e-8
    # B should be exactly 0.2 - 0.1 = 0.1
    assert abs(res_i["B"] - 0.1) < 1e-8
    # Total sells = 0.2 released. New budget = 0.2 + (0.95 - 1.0) = 0.15.
    # C and D share 0.15 -> 0.075 each.
    assert abs(res_i["C"] - 0.075) < 1e-8
    assert abs(res_i["D"] - 0.075) < 1e-8

    # 2. Without impact limit: Expect full liquidation and full target fill
    strat_c = create_test_strategy(1.0)
    res_c = strat_c.generate_target_weight_position(scores, current_pos, None, None)
    # A, B not in topk -> Liquidated
    assert "A" not in res_c and "B" not in res_c
    # C, D should reach ideal_per_stock (0.95/2 = 0.475)
    assert abs(res_c["C"] - 0.475) < 1e-8
    assert abs(res_c["D"] - 0.475) < 1e-8
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "microsoft/qlib",
"file_path": "tests/backtest/test_soft_topk_strategy.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/qlib:tests/backtest/test_soft_topk_strategy_cold_start.py | import pandas as pd
import pytest
from qlib.contrib.strategy.cost_control import SoftTopkStrategy
class MockPosition:
    """Fake position object exposing only the weight-dict accessor."""

    def __init__(self, weights):
        self.weights = weights

    def get_stock_weight_dict(self, only_stock=True):
        """Return the stored stock->weight mapping (`only_stock` ignored)."""
        return self.weights
def create_test_strategy(topk, risk_degree, impact_limit):
    """Build a SoftTopkStrategy without running its __init__ (no qlib setup).

    Only the attributes read by generate_target_weight_position are assigned.
    """
    strategy = SoftTopkStrategy.__new__(SoftTopkStrategy)
    strategy.topk = topk
    strategy.risk_degree = risk_degree
    strategy.trade_impact_limit = impact_limit
    return strategy
@pytest.mark.parametrize(
    ("impact_limit", "expected_fill"),
    [
        (0.1, 0.1),
        (1.0, 0.475),
    ],
)
def test_soft_topk_cold_start_impact_limit(impact_limit, expected_fill):
    """Cold start (empty position): buys are capped by the impact limit.

    With impact_limit=0.1 each new holding fills only 0.1 per step; with no
    effective limit (1.0) each fills the ideal per-stock weight 0.95/2 = 0.475.
    """
    scores = pd.Series({"C": 0.9, "D": 0.8, "A": 0.1, "B": 0.1})
    current_pos = MockPosition({})
    strat = create_test_strategy(topk=2, risk_degree=0.95, impact_limit=impact_limit)
    res = strat.generate_target_weight_position(scores, current_pos, None, None)
    assert abs(res["C"] - expected_fill) < 1e-8
    assert abs(res["D"] - expected_fill) < 1e-8
| {
"repo_id": "microsoft/qlib",
"file_path": "tests/backtest/test_soft_topk_strategy_cold_start.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
microsoft/qlib:qlib/utils/pickle_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Secure pickle utilities to prevent arbitrary code execution through deserialization.
This module provides a secure alternative to pickle.load() and pickle.loads()
that restricts deserialization to a whitelist of safe classes.
"""
import io
import pickle
from typing import Any, BinaryIO, Set, Tuple
# Whitelist of safe classes that are allowed to be unpickled
# These are common data types used in qlib that should be safe to deserialize
# Whitelist of (module, name) pairs that are allowed to be unpickled.
# These are common data types used in qlib that should be safe to deserialize.
SAFE_PICKLE_CLASSES: Set[Tuple[str, str]] = {
    # python builtins
    ("builtins", "slice"),
    ("builtins", "range"),
    ("builtins", "dict"),
    ("builtins", "list"),
    ("builtins", "tuple"),
    ("builtins", "set"),
    ("builtins", "frozenset"),
    ("builtins", "bytearray"),
    ("builtins", "bytes"),
    ("builtins", "str"),
    ("builtins", "int"),
    ("builtins", "float"),
    ("builtins", "bool"),
    ("builtins", "complex"),
    ("builtins", "type"),
    ("builtins", "property"),
    # common utility classes
    ("datetime", "datetime"),
    ("datetime", "date"),
    ("datetime", "time"),
    ("datetime", "timedelta"),
    ("datetime", "timezone"),
    ("decimal", "Decimal"),
    ("collections", "OrderedDict"),
    ("collections", "defaultdict"),
    ("collections", "Counter"),
    ("collections", "namedtuple"),
    ("enum", "Enum"),
    ("pathlib", "Path"),
    ("pathlib", "PosixPath"),
    ("pathlib", "WindowsPath"),
    ("qlib.data.dataset.handler", "DataHandler"),
    ("qlib.data.dataset.handler", "DataHandlerLP"),
    ("qlib.data.dataset.loader", "StaticDataLoader"),
}

# Top-level packages trusted wholesale: any class under these packages may be
# unpickled. Matching is on whole package names, never raw string prefixes.
TRUSTED_MODULE_PREFIXES = (
    "pandas",
    "numpy",
)


class RestrictedUnpickler(pickle.Unpickler):
    """Custom unpickler that only allows safe classes to be deserialized.

    This prevents arbitrary code execution through malicious pickle files by
    restricting deserialization to a whitelist of safe classes.

    Example:
        >>> with open("data.pkl", "rb") as f:
        ...     data = RestrictedUnpickler(f).load()
    """

    def find_class(self, module: str, name: str):
        """Override find_class to restrict allowed classes.

        Args:
            module: Module name of the class
            name: Class name

        Returns:
            The class object if it's in the whitelist

        Raises:
            pickle.UnpicklingError: If the class is not in the whitelist
        """
        # 1. trusted third-party packages. Match the whole package segment
        #    ("pandas" itself or "pandas.<submodule>") rather than a raw
        #    prefix: a naive startswith("pandas") would also admit modules
        #    like "pandas_malicious", defeating the whitelist.
        for prefix in TRUSTED_MODULE_PREFIXES:
            if module == prefix or module.startswith(prefix + "."):
                return super().find_class(module, name)
        # 2. explicit whitelist (qlib internal)
        if (module, name) in SAFE_PICKLE_CLASSES:
            return super().find_class(module, name)
        raise pickle.UnpicklingError(
            f"Forbidden class: {module}.{name}. "
            f"Only whitelisted classes are allowed for security reasons. "
            f"This is to prevent arbitrary code execution through pickle deserialization."
        )
def restricted_pickle_load(file: BinaryIO) -> Any:
    """Drop-in replacement for pickle.load() restricted to whitelisted classes.

    Args:
        file: Binary file object positioned at the start of the pickle data.

    Returns:
        The deserialized Python object.

    Raises:
        pickle.UnpicklingError: If the stream references a forbidden class.

    Example:
        >>> with open("data.pkl", "rb") as f:
        ...     data = restricted_pickle_load(f)
    """
    unpickler = RestrictedUnpickler(file)
    return unpickler.load()
def restricted_pickle_loads(data: bytes) -> Any:
    """Drop-in replacement for pickle.loads() restricted to whitelisted classes.

    Args:
        data: Bytes object containing pickled data.

    Returns:
        The deserialized Python object.

    Raises:
        pickle.UnpicklingError: If the data references a forbidden class.

    Example:
        >>> obj = restricted_pickle_loads(b'\\x80\\x04\\x95...')
    """
    buffer = io.BytesIO(data)
    unpickler = RestrictedUnpickler(buffer)
    return unpickler.load()
def add_safe_class(module: str, name: str) -> None:
    """Register an additional class as safe for unpickling.

    Args:
        module: Module name of the class (e.g., 'my_package.my_module').
        name: Class name (e.g., 'MyClass').

    Warning:
        Only whitelist classes you fully control and trust — every addition
        widens the attack surface of deserialization.

    Example:
        >>> add_safe_class('my_package.models', 'CustomModel')
    """
    SAFE_PICKLE_CLASSES.add((module, name))
def get_safe_classes() -> Set[Tuple[str, str]]:
    """Return a snapshot copy of the unpickling whitelist.

    Returns:
        A new set of (module, name) tuples; mutating it does not affect
        the active whitelist.
    """
    return set(SAFE_PICKLE_CLASSES)
| {
"repo_id": "microsoft/qlib",
"file_path": "qlib/utils/pickle_utils.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/unilm:Diff-Transformer/Diff-Transformer-V2/multihead_flashdiffv2.py | import torch
from torch import nn
from typing import Optional, Tuple
from ..kernel.rotary import apply_rotary_emb
from flash_attn import flash_attn_func
@torch.compile
def diff_func(attn1: torch.Tensor, attn2: torch.Tensor, lambda_val: torch.Tensor) -> torch.Tensor:
    """Differential-attention combine: attn1 - sigmoid(lambda_val) * attn2.

    lambda_val is broadcast over the trailing head_dim axis via unsqueeze(-1).
    """
    return attn1 - torch.sigmoid(lambda_val).unsqueeze(-1) * attn2
class MultiheadFlashDiffV2(nn.Module):
    """
    Differential Attention Version 2 (DiffAttnV2) implementation using Flash Attention.

    In diff-v2 mode twice as many query heads are projected; the even- and
    odd-indexed attention outputs are combined as attn1 - sigmoid(lambda) * attn2
    with a per-token, per-head lambda. With use_diff_v2=False this is a plain
    (optionally grouped-query) attention baseline.
    """

    def __init__(
        self,
        use_diff_v2: bool,  # If False, acts as a baseline Transformer attention
        d_model: int,  # Model dimension
        num_heads: int,  # Number of output heads
        num_kv_heads: Optional[int],  # Number of KV heads for GQA (None -> num_heads)
        head_dim: int,  # Dimension per head
    ):
        super().__init__()
        self.use_diff_v2 = use_diff_v2
        self.d_model = d_model
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads if num_kv_heads is not None else num_heads
        self.head_dim = head_dim
        # Diff-v2 projects a pair of query heads per output head; the pair is
        # split into attn1/attn2 downstream and differenced.
        self.num_q_heads = 2 * self.num_heads if self.use_diff_v2 else self.num_heads
        self.q_proj = nn.Linear(self.d_model, self.num_q_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.d_model, self.num_kv_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.d_model, self.num_kv_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.d_model, bias=False)
        # Per-head gating scalar lambda(x); only used in diff-v2 mode.
        self.lambda_proj = nn.Linear(self.d_model, self.num_heads, bias=False) if self.use_diff_v2 else None

    def forward(
        self,
        x: torch.Tensor,  # Input tensor [bsz, seq_len, d_model]
        rel_pos: Tuple[torch.Tensor, torch.Tensor],  # Rotary embedding (cos, sin)
    ) -> torch.Tensor:
        """
        Forward pass for MultiheadFlashDiffV2.

        Args:
            x: Input hidden states of shape [batch, length, d_model]
            rel_pos: Tuple of (cos, sin) tensors for rotary positional embeddings

        Returns:
            Output tensor of shape [batch, length, d_model]
        """
        bsz, tgt_len, _ = x.size()
        src_len = tgt_len  # self-attention: keys span the same sequence
        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)
        q = q.view(bsz, tgt_len, self.num_q_heads, self.head_dim)
        k = k.view(bsz, src_len, self.num_kv_heads, self.head_dim)
        v = v.view(bsz, src_len, self.num_kv_heads, self.head_dim)
        q = apply_rotary_emb(q, *rel_pos, interleaved=True)
        k = apply_rotary_emb(k, *rel_pos, interleaved=True)
        # NOTE(review): presumably flash_attn_func handles the GQA head-count
        # mismatch internally when num_q_heads is a multiple of num_kv_heads.
        attn = flash_attn_func(q, k, v, causal=True)
        if self.use_diff_v2:
            lambda_val = self.lambda_proj(x)
            # Even-indexed heads form the primary map, odd-indexed heads the
            # subtracted map.
            attn1, attn2 = attn[:, :, 0::2], attn[:, :, 1::2]
            attn = diff_func(attn1, attn2, lambda_val)
        attn = attn.reshape(bsz, tgt_len, self.num_heads * self.head_dim)
        output = self.o_proj(attn)
        return output
| {
"repo_id": "microsoft/unilm",
"file_path": "Diff-Transformer/Diff-Transformer-V2/multihead_flashdiffv2.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
microsoft/unilm:ReSA/llm/arch/context_manager.py | import torch
import torch.nn.functional as F
import triton
import triton.language as tl
@triton.autotune(
    configs=[
        triton.Config({"BLOCK_N": BLOCK_N}, num_warps=num_warps, num_stages=1)
        for num_warps in [1, 2, 4, 8]
        for BLOCK_N in [32, 64]
    ],
    key=['local_block_num', 'head_dim'],
)
@triton.jit
def block_attn_decoding_kernel(
    q,  # pointer, shape [batch_size, n_kv_heads, head_dim] (GQA-averaged query)
    k_min,  # pointer, shape [batch_size, num_blocks, n_head, head_dim] per-block key minima
    k_max,  # pointer, shape [batch_size, num_blocks, n_head, head_dim] per-block key maxima
    num_blocks,  # pointer to per-batch valid block counts, shape [batch_size]
    local_block_num: tl.constexpr,  # number of trailing "local" blocks always force-selected
    head_dim: tl.constexpr,  # dimension per head
    stride_qb, stride_qh, stride_qd,
    stride_kb, stride_kt, stride_kh, stride_kd,
    stride_ab, stride_ah, stride_at,
    attn_score,  # pointer to float32 output scores, shape [batch_size, n_head, num_blocks]
    BLOCK_N: tl.constexpr,  # number of KV blocks scored per program instance
):
    # One program scores BLOCK_N candidate KV blocks for one (batch, head).
    batch_id = tl.program_id(0)
    head_idx = tl.program_id(1)
    start_block_idx = tl.program_id(2) * BLOCK_N
    base_q_ptr = tl.make_block_ptr(q + batch_id * stride_qb + head_idx * stride_qh, shape=(head_dim,), block_shape=(head_dim,), strides=(stride_qd,), offsets=(0,), order=(0,))
    mean_q = tl.load(base_q_ptr).to(tl.float32)
    # Sentinel so local (trailing) blocks always rank above every real score.
    POS_INF = 1e38
    total_n_blocks = tl.load(num_blocks + batch_id)
    block_block_idx = start_block_idx + tl.arange(0, BLOCK_N)
    k_min_ptr = tl.make_block_ptr(k_min + batch_id * stride_kb + head_idx * stride_kh, shape=(total_n_blocks, head_dim), block_shape=(BLOCK_N, head_dim), strides=(stride_kt, stride_kd), offsets=(start_block_idx, 0), order=(0, 1))
    k_max_ptr = tl.make_block_ptr(k_max + batch_id * stride_kb + head_idx * stride_kh, shape=(total_n_blocks, head_dim), block_shape=(BLOCK_N, head_dim), strides=(stride_kt, stride_kd), offsets=(start_block_idx, 0), order=(0, 1))
    k_min_val = tl.load(k_min_ptr, boundary_check=(0, 1)).to(tl.float32)
    k_max_val = tl.load(k_max_ptr, boundary_check=(0, 1)).to(tl.float32)
    # Per-dimension upper bound of q.k over the block: whichever extreme of k
    # maximizes the product with q's sign, summed over head_dim.
    score = tl.maximum(k_min_val * mean_q, k_max_val * mean_q)
    score = tl.sum(score, axis=1)
    # Force the last `local_block_num` blocks to be selected.
    score = tl.where(block_block_idx >= total_n_blocks - local_block_num, POS_INF, score)
    attn_score_ptr = tl.make_block_ptr(attn_score + batch_id * stride_ab + head_idx * stride_ah, shape=(total_n_blocks,), block_shape=(BLOCK_N,), strides=(stride_at,), offsets=(start_block_idx,), order=(0,))
    tl.store(attn_score_ptr, score, boundary_check=(0,))
def block_attn_decoding(q, k_min, k_max, num_blocks, local_block_num):
    """Launch the Triton kernel that scores per-block attention upper bounds.

    Args (shapes inferred from callers -- TODO confirm):
        q: [batch, n_kv_heads, head_dim], already GQA-averaged and scaled.
        k_min / k_max: [batch, max_blocks, n_kv_heads, head_dim] key extrema.
        num_blocks: [batch] int tensor of valid block counts.
        local_block_num: trailing blocks forced to the top of the ranking.

    Returns:
        float32 tensor [batch, n_kv_heads, max_blocks]; slots beyond a
        sequence's block count keep the float32 minimum fill value.
    """
    assert num_blocks.max().item() <= k_min.shape[1], "num_blocks should be less than or equal to k_min.shape[1]"
    min_val = torch.finfo(torch.float32).min
    batch, n_kv_heads, head_dim = q.shape[0], k_min.shape[2], k_min.shape[3]
    # Pre-fill with -inf-like values so unwritten (out-of-range) slots never win.
    attn_score = torch.full((batch, n_kv_heads, num_blocks.max().item()), fill_value=min_val, device=q.device, dtype=torch.float32)
    grid = lambda META: (batch, n_kv_heads, triton.cdiv(num_blocks.max().item(), META['BLOCK_N']))
    # Pin the CUDA device so the kernel launches on q's device.
    with torch.cuda.device(q.device.index):
        block_attn_decoding_kernel[grid](q, k_min, k_max,
                                         num_blocks, local_block_num, head_dim,
                                         *q.stride(),
                                         *k_min.stride(),
                                         *attn_score.stride(),
                                         attn_score,
                                         )
    return attn_score
@torch.compile(fullgraph=True)
def get_topk_indices(block_attn_score, block_index_mask, max_num_selected_blocks):
    """Select the top-k scoring KV blocks per head, padding the rest with -1.

    Indices are sorted ascending afterwards so the -1 padding groups at the
    front of each row. Returns int32 indices.
    """
    block_index = torch.topk(block_attn_score, k=max_num_selected_blocks, dim=-1, sorted=True).indices
    topk_indices = block_index.masked_fill_(~block_index_mask, -1)
    topk_indices = torch.sort(topk_indices, dim=-1, descending=False).values
    return topk_indices.to(torch.int32)
@torch.compile(fullgraph=True)
def get_num_blocks(cache_seqlens, block_size, sparse_ratio, min_block_num):
    """Compute per-sequence block counts and how many blocks to select.

    Selection count is ceil(num_blocks * sparse_ratio), floored at
    min(num_blocks, min_block_num) so short sequences stay fully covered.
    """
    num_blocks = (cache_seqlens + (block_size - 1)) // block_size
    num_selected_blocks = torch.ceil(num_blocks * sparse_ratio)
    num_selected_blocks = torch.maximum(num_selected_blocks, num_blocks.clamp(max=min_block_num)).long()
    return num_blocks, num_selected_blocks
class KVManager:
    """Tracks per-block key extrema for ReSA block-sparse KV selection.

    For each KV block it keeps elementwise min/max keys ("centeroids", spelling
    kept from the original API) and uses them to upper-bound attention scores
    so only promising blocks are attended at decode time.
    """

    def __init__(self, num_heads, block_size, sparse_ratio, local_block_num, min_block_num):
        self.num_heads = num_heads  # number of KV heads
        self.block_size = block_size  # tokens per KV block
        self.sparse_ratio = sparse_ratio  # fraction of blocks to select
        self.local_block_num = local_block_num  # trailing blocks always selected
        self.min_block_num = min_block_num  # lower bound on selected blocks
        # Lazily initialized state; None until init_centeroids runs.
        self.block_max_key = None
        self.block_min_key = None
        self.num_elements = None
        assert self.local_block_num <= self.min_block_num, "local_block_num should be less than or equal to min_block_num"

    def init_centeroids(self, key, cache_seqlens):
        """Build per-block key min/max from a full [bsz, seqlen, h, d] key cache."""
        bsz, seqlen, num_heads, head_dim = key.shape
        max_val = torch.finfo(key.dtype).max
        min_val = torch.finfo(key.dtype).min
        self.num_elements = seqlen
        key_block = key.reshape(bsz, -1, self.block_size, num_heads, head_dim)
        self.block_max_key = key_block.max(dim=2).values
        self.block_min_key = key_block.min(dim=2).values
        num_blocks = (cache_seqlens + self.block_size - 1) // self.block_size
        # The last (possibly partial) block must ignore positions past the
        # sequence end: mask them to +/-inf before taking min/max.
        key_last_block = key_block[torch.arange(bsz, device=key.device), num_blocks - 1]
        valid_mask = torch.arange(self.block_size, device=key.device) + (num_blocks[:, None] - 1) * self.block_size < cache_seqlens[:, None]
        last_min_key = torch.masked_fill(key_last_block, ~valid_mask[:, :, None, None], max_val).min(dim=1).values
        last_max_key = torch.masked_fill(key_last_block, ~valid_mask[:, :, None, None], min_val).max(dim=1).values
        self.block_max_key[torch.arange(bsz, device=key.device), num_blocks - 1] = last_max_key
        self.block_min_key[torch.arange(bsz, device=key.device), num_blocks - 1] = last_min_key
        # Blocks entirely beyond each sequence get neutral fill values.
        causal_mask = torch.arange(self.block_max_key.shape[1], device=key.device) < num_blocks[:, None]
        self.block_max_key.masked_fill_(~causal_mask[:, :, None, None], min_val)
        self.block_min_key.masked_fill_(~causal_mask[:, :, None, None], max_val)

    @torch.compile(fullgraph=True)
    def update_centeroids(self, key, cache_seqlens):
        """Fold one new key per sequence into its current (last) block's extrema."""
        num_blocks = (cache_seqlens + self.block_size - 1) // self.block_size
        batch_index = torch.arange(num_blocks.shape[0], device=key.device)
        self.block_max_key[batch_index, num_blocks - 1] = torch.maximum(self.block_max_key[batch_index, num_blocks - 1], key)
        self.block_min_key[batch_index, num_blocks - 1] = torch.minimum(self.block_min_key[batch_index, num_blocks - 1], key)

    def clear_centeroids(self):
        """Drop all tracked state; the next init_centeroids rebuilds it."""
        self.block_max_key = None
        self.block_min_key = None
        self.num_elements = None

    def get_kv_cache_indices(self, query, cache_seqlens):
        """Reference (loop-per-batch) block selection; see *_fast for the kernel path."""
        bsz, num_heads, head_dim = query.shape
        max_val = torch.finfo(torch.float32).max
        # Average query heads within each GQA group and pre-scale by 1/sqrt(d).
        query = query.view(bsz, 1, self.num_heads, num_heads // self.num_heads, head_dim).mean(dim=3).float() * (head_dim ** -0.5)
        # Upper bound of q.k per block via the key extrema.
        attn_score = torch.maximum(query * self.block_max_key.float(), query * self.block_min_key.float()).sum(dim=-1)
        topk_indices = torch.full((bsz, self.num_heads, self.block_max_key.shape[1]), device=query.device, dtype=torch.int32, fill_value=-1)
        num_blocks = (cache_seqlens + self.block_size - 1) // self.block_size
        num_selected_blocks = torch.ceil(num_blocks * self.sparse_ratio)[:, None].repeat(1, self.num_heads)
        num_selected_blocks = torch.maximum(num_selected_blocks, num_blocks.clamp(max=self.min_block_num)[:, None]).long()
        for b in range(bsz):
            local_attn_score = attn_score[b, :num_blocks[b]]
            # Trailing local blocks always win the ranking.
            local_attn_score[-self.local_block_num:] = max_val
            block_index = torch.sort(local_attn_score, dim=0, descending=True).indices.transpose(0, 1)
            block_index_mask = torch.arange(num_blocks[b], device=query.device) < num_selected_blocks[b][:, None]
            topk_indices[b, :, :num_blocks[b]] = block_index.masked_fill(~block_index_mask, -1).to(torch.int32)
        topk_indices = torch.sort(topk_indices, dim=-1, descending=True).values
        return topk_indices

    def get_kv_cache_indices_fast(self, query, cache_seqlens):
        """Kernel-backed block selection (Triton scoring + compiled top-k)."""
        bsz, num_heads, head_dim = query.shape
        num_blocks, num_selected_blocks = get_num_blocks(cache_seqlens, self.block_size, self.sparse_ratio, self.min_block_num)
        max_num_selected_blocks = num_selected_blocks.max().item()
        query = query.view(bsz, self.num_heads, num_heads // self.num_heads, head_dim).mean(dim=2) * (head_dim ** -0.5)
        block_attn_score = block_attn_decoding(query, self.block_max_key, self.block_min_key, num_blocks, self.local_block_num)
        block_index_mask = torch.arange(max_num_selected_blocks, device=query.device) < num_selected_blocks[:, None, None]
        topk_indices = get_topk_indices(block_attn_score, block_index_mask, max_num_selected_blocks)
        return topk_indices
| {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/arch/context_manager.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
microsoft/unilm:ReSA/llm/arch/model.py | import random
import torch
from torch import nn
from torch.nn import functional as F
from einops import rearrange, repeat
from typing import Optional, Tuple, List
from dataclasses import dataclass
from apex.normalization.fused_layer_norm import fused_rms_norm_affine
from kernel.flash_sparse_decoding import flash_block_sparse_decoding
from kernel.flash_attention_with_kv_cache import flash_attention_with_kv_cache
from kernel.rotary import apply_rotary_emb
from flash_attn import flash_attn_with_kvcache
import math
import logging
logger = logging.getLogger(__name__)
from .context_manager import KVManager
@dataclass
class ModelArgs:
    """Configuration for the ReSA decoder-only Transformer."""

    dim: int  # model (hidden) dimension
    n_layers: int  # number of Transformer blocks
    hidden_dim: int  # FFN inner dimension
    n_heads: int  # number of attention (query) heads
    n_kv_heads: int  # number of KV heads (GQA)
    vocab_size: Optional[int] = None
    max_batch_size: int = 0
    max_seq_len: int = -1
    model_parallel_size: int = 1
    rope_theta: float = 10000.0  # rotary embedding base
    norm_eps: float = 1e-5  # RMSNorm epsilon
    tie_word_embeddings: bool = False  # share input/output embedding weights
    save_feature: str = None
    # decoding config
    temperature: float = 0.6
    top_p: float = 0.9
    # SPA (ReSA sparse attention) config
    resa_rec_freq: int = 32  # recomputation frequency
    resa_block_size: int = 16  # tokens per KV block
    resa_sparse_ratio: float = 0.1  # fraction of KV blocks attended (1.0 = dense)
    resa_local_block_num: int = 1  # trailing blocks always attended
    resa_min_block_num: int = 16  # minimum number of selected blocks
class RMSNorm(nn.Module):
    """Root-mean-square layer norm backed by apex's fused affine kernel."""

    def __init__(self, dim: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))
        # apex expects the normalized shape as a torch.Size.
        self.normalized_shape = torch.Size((dim,))

    def forward(self, x):
        return fused_rms_norm_affine(x, self.weight, self.normalized_shape, self.eps)
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
    """Build the rotary-embedding frequency table.

    Returns a float32 tensor of shape (end, dim // 2) whose entry [p, i]
    equals p / theta ** (2 * i / dim).
    """
    exponents = torch.arange(0, dim, 2)[: (dim // 2)].float() / dim
    inv_freqs = 1.0 / (theta ** exponents)
    positions = torch.arange(end, device=inv_freqs.device, dtype=torch.float32)
    return torch.outer(positions, inv_freqs)
class Attention(nn.Module):
    """Decoding-time attention with optional ReSA block-sparse KV selection."""

    def __init__(self, index: int, args: ModelArgs):
        super(Attention, self).__init__()
        self.args = args
        self.layer_index = index  # selects this layer's slot in the shared kv_cache
        self.head_dim = args.dim // args.n_heads
        self.kv_head = args.n_kv_heads
        self.head = args.n_heads
        # Fused Q/K/V projection (bias on input projection only).
        self.qkv_proj = nn.Linear(self.args.dim, (self.head + self.kv_head * 2) * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.head * self.head_dim, self.args.dim, bias=False)
        # from kernel.tilelang_sparse_decoding import SparseFlashAttn
        # self.sparse_kernel = SparseFlashAttn(self.head, self.kv_head, self.head_dim, self.head_dim, self.args.resa_block_size)

    def forward(
        self,
        x: torch.Tensor,  # packed hidden states [bsz, seqlen, dim]
        rel_pos: Tuple[torch.Tensor, torch.Tensor],  # per-token rotary (cos, sin)
        cu_seqlens_q: torch.Tensor,  # int32 cumulative query lengths [seq_bsz + 1]
        cu_seqlens_k: torch.Tensor,  # int32 cumulative key lengths [seq_bsz + 1]
        kv_cache_index: Tuple[torch.Tensor, torch.Tensor] = None,  # (batch, pos) scatter targets
        kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # per-layer [k, v, KVManager]
    ) -> torch.Tensor:
        bsz, seqlen = x.shape[0], x.shape[1]
        seq_bsz = cu_seqlens_q.shape[0] - 1
        seqlens_q = cu_seqlens_q[1:] - cu_seqlens_q[:-1]
        seqlens_k = cu_seqlens_k[1:] - cu_seqlens_k[:-1]
        xqkv = self.qkv_proj(x)
        xqkv = rearrange(xqkv, 'b n (h d) -> (b n) h d', d=self.head_dim)
        xqk, xv = xqkv.split([self.head + self.kv_head, self.kv_head], dim=-2)
        # Rotate Q and K together in a single fused call, then split.
        xqk = apply_rotary_emb(xqk.unsqueeze(0), *rel_pos, inplace=True).squeeze(0)
        xq, xk = xqk.split([self.head, self.kv_head], dim=-2)
        if kv_cache is not None:
            # Scatter the new keys/values into this layer's cache slots.
            batch_index, seq_index = kv_cache_index
            kv_cache = kv_cache[self.layer_index]
            kv_cache[0][batch_index, seq_index] = xk
            kv_cache[1][batch_index, seq_index] = xv
        if self.args.resa_sparse_ratio < 1.0 and seqlen == 1:
            # Single-token decode with sparse attention: select KV blocks via
            # the KVManager, then run block-sparse flash decoding.
            assert kv_cache is not None, "kv_cache should not be None for generation"
            kv_manager = kv_cache[2]
            if kv_manager.num_elements is None:
                kv_manager.init_centeroids(kv_cache[0][:seq_bsz], seqlens_k)
            else:
                kv_manager.update_centeroids(xk, seqlens_k)
            sparse_indices = kv_manager.get_kv_cache_indices_fast(xq, seqlens_k)
            output = flash_block_sparse_decoding(xq, kv_cache[0], kv_cache[1], seqlens_k, sparse_indices, block_size=self.args.resa_block_size)
        else:
            # Dense path: right-align variable-length queries into a padded
            # batch and attend against the full cache.
            max_seqlen_q = seqlens_q.max().item()
            xq_pad = torch.zeros((seq_bsz, max_seqlen_q, self.head, self.head_dim), device=xq.device, dtype=xq.dtype)
            xq_pad_mask = torch.arange(max_seqlen_q, device=xq.device)[None, :] >= max_seqlen_q - seqlens_q[:, None]
            xq_pad[xq_pad_mask] = xq
            if max_seqlen_q <= 32:
                # Custom kernel handles short queries; flash-attn otherwise.
                output = flash_attention_with_kv_cache(xq_pad, kv_cache[0], kv_cache[1], cache_seqlens=seqlens_k)
            else:
                output = flash_attn_with_kvcache(xq_pad, kv_cache[0], kv_cache[1], cache_seqlens=seqlens_k, causal=True)
            output = output[xq_pad_mask]
        output = rearrange(output, '(b n) h d -> b n (h d)', b=bsz)
        output = self.o_proj(output)
        return output
class FeedForwardNetwork(nn.Module):
    """SwiGLU feed-forward block: down(silu(gate(x)) * up(x))."""

    def __init__(
        self,
        embed_dim,
        ffn_dim,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Keep this construction order (up, down, gate): parameter init
        # draws from the global RNG in this order.
        self.up_proj = nn.Linear(embed_dim, ffn_dim, bias=False)
        self.down_proj = nn.Linear(ffn_dim, embed_dim, bias=False)
        self.gate_proj = nn.Linear(embed_dim, ffn_dim, bias=False)

    def forward(self, x):
        gated = F.silu(self.gate_proj(x))
        return self.down_proj(gated * self.up_proj(x))
class Block(nn.Module):
    """Pre-norm Transformer block: attention then MLP, each with a residual."""

    def __init__(self, index: int, args: ModelArgs):
        super(Block, self).__init__()
        self.args = args
        self.self_attn = Attention(index, args)
        self.mlp = FeedForwardNetwork(args.dim, args.hidden_dim)
        self.input_layernorm = RMSNorm(args.dim, eps=args.norm_eps)
        self.post_attention_layernorm = RMSNorm(args.dim, eps=args.norm_eps)

    def forward(
        self,
        x: torch.Tensor,
        rel_pos: Tuple[torch.Tensor, torch.Tensor],
        cu_seqlens_q: torch.Tensor,
        cu_seqlens_k: torch.Tensor,
        kv_cache_index: Tuple[torch.Tensor, torch.Tensor],
        kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> torch.Tensor:
        # Residual around attention, then residual around the feed-forward.
        h = x + self.self_attn(self.input_layernorm(x), rel_pos=rel_pos, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, kv_cache_index=kv_cache_index, kv_cache=kv_cache)
        out = h + self.mlp(self.post_attention_layernorm(h))
        return out
class Model(nn.Module):
    """Decoder-only Transformer operating on packed batches with a KV cache."""

    def __init__(self, args: ModelArgs):
        super(Model, self).__init__()
        self.args = args
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        nn.init.normal_(self.tok_embeddings.weight, mean=0, std=args.dim ** -0.5)
        self.layers = nn.ModuleList()
        for index in range(args.n_layers):
            block = Block(index,args)
            self.layers.append(block)
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
        if args.tie_word_embeddings:
            # Share the input embedding matrix with the output head.
            self.output.weight = self.tok_embeddings.weight
        self._precompute_freqs_cis(args.max_seq_len)

    def _precompute_freqs_cis(self, max_seqlen):
        # Cache rotary cos/sin tables for all positions up to max_seqlen.
        freqs_cis = precompute_freqs_cis(self.args.dim // self.args.n_heads, max_seqlen, theta=self.args.rope_theta)
        self.cos = freqs_cis.cos()
        self.sin = freqs_cis.sin()

    def forward(
        self,
        tokens: torch.Tensor,  # packed token ids
        start_pos: Optional[torch.Tensor] = None,  # per-sequence cache offsets
        cu_seqlens: Optional[torch.Tensor] = None,  # cumulative packed lengths
        kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        last_hidden_only: bool = False,  # skip the LM head, return hidden states
    ) -> torch.Tensor:
        assert kv_cache is not None, "kv_cache should not be None for decoding only code"
        # Per-sequence query lengths and resulting total key lengths.
        seqlens_q = cu_seqlens[1:] - cu_seqlens[:-1]
        seqlens_k = start_pos + seqlens_q
        cu_seqlens_q = torch.cat((torch.tensor([0], device=tokens.device), seqlens_q.cumsum(dim=0)), dim=0).to(torch.int32)
        cu_seqlens_k = torch.cat((torch.tensor([0], device=tokens.device), seqlens_k.cumsum(dim=0)), dim=0).to(torch.int32)
        h = self.tok_embeddings(tokens)
        # Flat (batch, position) scatter coordinates for the cache writes.
        batch_index = torch.cat([torch.full((seqlens_q[i],), fill_value=i) for i in range(len(seqlens_q))])
        seq_index = torch.cat([torch.arange(start_pos[i], seqlens_k[i], device=h.device) for i in range(len(seqlens_q))])
        self.cos, self.sin = self.cos.to(h), self.sin.to(h)
        rel_pos = (self.cos[seq_index], self.sin[seq_index])
        for i, layer in enumerate(self.layers):
            h = layer(h, rel_pos=rel_pos, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, kv_cache_index=(batch_index, seq_index), kv_cache=kv_cache)
        h = self.norm(h)
        if last_hidden_only:
            return h
        else:
            logits = self.output(h)
            return logits
def create_kv_cache(args: ModelArgs, batch_size: int, dtype: torch.dtype = torch.float16, device: torch.device = torch.device('cuda')) -> List[Tuple[torch.Tensor, torch.Tensor]]:
    """Allocate per-layer KV caches plus a KVManager for sparse selection.

    Returns one [k_cache, v_cache, kv_manager] triple per layer, each cache
    shaped [batch, max_seq_len, n_kv_heads, head_dim]. (The declared return
    type says tuples but lists are actually appended.)
    """
    kv_cache = []
    for _ in range(args.n_layers):
        k_cache = torch.zeros(batch_size, args.max_seq_len,
                              args.n_kv_heads, args.dim // args.n_heads, dtype=dtype, device=device)
        v_cache = torch.zeros(batch_size, args.max_seq_len,
                              args.n_kv_heads, args.dim // args.n_heads, dtype=dtype, device=device)
        kv_manager = KVManager(args.n_kv_heads, args.resa_block_size, args.resa_sparse_ratio, args.resa_local_block_num, args.resa_min_block_num)
        kv_cache.append([k_cache, v_cache, kv_manager])
    return kv_cache
| {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/arch/model.py",
"license": "MIT License",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
microsoft/unilm:ReSA/llm/config.py | import argparse
def parse_eval_args():
    """Parse command-line arguments for evaluation.

    Returns an argparse.Namespace with harness settings (tasks, limit,
    batch_size), checkpoint paths, ReSA sparsity knobs, sampling parameters,
    and optional wandb identifiers. All defaults are None except where noted.
    """
    parser = argparse.ArgumentParser(description="evaluation arguments")
    # NOTE: the original used pointless f-string prefixes on every literal
    # option name; plain strings are the idiomatic form.
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--tasks", type=str, default=None)
    parser.add_argument("--downstream_task", type=str, default=None)
    parser.add_argument("--valid_set", type=str, default=None)
    parser.add_argument("--checkpoint_dir", type=str, default=None)
    parser.add_argument("--merging_checkpoint_dir", type=str, default=None)
    parser.add_argument("--save_feature", type=str, default=None)
    # ReSA sparse-attention configuration.
    parser.add_argument("--resa_sparse_ratio", type=float, default=0.1)
    parser.add_argument("--resa_block_size", type=int, default=16)
    parser.add_argument("--resa_local_block_num", type=int, default=1)
    parser.add_argument("--resa_min_block_num", type=int, default=16)
    parser.add_argument("--resa_rec_freq", type=int, default=32)
    parser.add_argument("--output_folder", type=str, default=None)
    # Sampling parameters for generation tasks.
    parser.add_argument("--temperature", type=float, default=0.6)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--tokenizer_path", type=str, default=None)
    parser.add_argument("--wandb_project", type=str, default=None)
    parser.add_argument("--wandb_id", type=str, default=None)
    return parser.parse_args()
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/config.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
microsoft/unilm:ReSA/llm/data/tokenizer.py | import os
from typing import List
from transformers import LlamaTokenizerFast
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class Tokenizer:
    """Thin wrapper around HF LlamaTokenizerFast with llama-style BOS/EOS
    handling and a fixed pad id of -100."""

    def __init__(self, tokenizer_path: str):
        """Load the fast tokenizer from a local directory or hub name."""
        self.tok = LlamaTokenizerFast.from_pretrained(tokenizer_path)

    @property
    def n_words(self) -> int:
        """Vocabulary size."""
        return self.tok.vocab_size

    @property
    def bos_id(self) -> int:
        """Id of the beginning-of-sequence token."""
        return self.tok.encode(self.tok.bos_token)[-1]

    @property
    def eos_id(self) -> int:
        """Id of the end-of-sequence token."""
        return self.tok.encode(self.tok.eos_token)[-1]

    @property
    def pad_id(self) -> int:
        # -100 — presumably chosen to match the loss ignore_index so padded
        # positions are skipped; confirm against the training configuration.
        return -100

    @property
    def unk_id(self) -> int:
        # NOTE(review): returns the EOS id, not a distinct <unk> id. Looks
        # like a deliberate fallback, but verify before relying on it.
        return self.tok.encode(self.tok.eos_token)[-1]

    def encode(self, s: str, bos: bool = True, eos: bool = False) -> List[int]:
        """Encode one string; optionally prepend BOS and append EOS ids."""
        tok = self.tok.encode(s, add_special_tokens=False)
        if bos:
            tok = [self.bos_id] + tok
        if eos:
            tok = tok + [self.eos_id]
        return tok

    def encode_batch(self, s: List[str], bos: bool = True, eos: bool = False) -> List[List[int]]:
        """Encode a list of strings; see encode()."""
        # Fix: the comprehension variable previously shadowed the parameter
        # name (`for s in s`), which worked but was an accident waiting to happen.
        return [self.encode(text, bos, eos) for text in s]

    def decode(self, t: List[int]) -> str:
        """Decode ids to text, dropping pad ids and special tokens."""
        t = [i for i in t if i != self.pad_id]
        return self.tok.decode(t, skip_special_tokens=True)

    def decode_batch(self, t: List[List[int]]) -> List[str]:
        """Decode a batch of id lists; pad ids are stripped per sequence."""
        t = [[i for i in x if i != self.pad_id] for x in t]
        return self.tok.batch_decode(t, skip_special_tokens=True)
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/data/tokenizer.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
microsoft/unilm:ReSA/llm/eval.py | import time
import torch
import torch.nn as nn
from torch.distributed import init_process_group, destroy_process_group
from typing import Optional
import datetime
import lm_eval
from lm_eval.models.huggingface import HFLM as eval_wrapper
from lm_eval.tasks import get_task_dict, TaskManager
from lm_eval.evaluator import evaluate
from eval_math import evaluate as evaluate_math
import os, json
from data.tokenizer import Tokenizer
from config import parse_eval_args
from arch.model import ModelArgs, Model, create_kv_cache
import re
import torch.nn.functional as F
from safetensors.torch import load_file
def sample_top_p(probs, p):
    """Nucleus (top-p) sampling.

    Draws one token index per row of ``probs`` from the smallest set of
    highest-probability tokens whose cumulative mass reaches ``p``.
    """
    sorted_probs, sorted_idx = torch.sort(probs, dim=-1, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    # A token is outside the nucleus when the mass strictly before it
    # already exceeds p.
    outside = cumulative - sorted_probs > p
    sorted_probs[outside] = 0.0
    sorted_probs.div_(sorted_probs.sum(dim=-1, keepdim=True))
    picked = torch.multinomial(sorted_probs, num_samples=1)
    picked = torch.gather(sorted_idx, -1, picked)
    return picked.squeeze(-1)
class EvalWrapper(eval_wrapper):
    """lm-eval-harness adapter for the local ReSA model.

    Subclasses the harness HFLM wrapper but routes tokenization and
    generation through this repo's Tokenizer / Model. Generation uses
    chunked prefill plus incremental decoding against a ReSA KV cache.
    """

    def __init__(
        self,
        model,
        tokenizer,
        batch_size,
        max_seq_length: Optional[int]=None,
    ):
        # Parent is initialized with a dummy "gpt2" model only to satisfy
        # HFLM's constructor; it is immediately replaced below.
        super().__init__(pretrained="gpt2")
        self._model = model
        self._tokenizer = tokenizer
        self._device = torch.device('cuda')
        self._max_seq_length = 2048 if max_seq_length is None else max_seq_length
        self._batch_size = batch_size
        # Single-process evaluation: rank 0 of world size 1.
        self._rank = 0
        self._world_size = 1

    @property
    def eot_token_id(self):
        # Harness "end of text" maps to this tokenizer's EOS id.
        return self._tokenizer.eos_id

    @property
    def eos_token_id(self):
        return self._tokenizer.eos_id

    @property
    def pad_token_id(self):
        return self._tokenizer.pad_id

    @property
    def max_length(self):
        return self._max_seq_length

    @property
    def max_gen_toks(self):
        # Maximum tokens generated per request by the harness.
        return 1024

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def device(self):
        return self._device

    @property
    def model(self):
        return self._model

    def tok_encode(self, string: str, **kwargs):
        """Encode a single prompt with BOS, without EOS."""
        encoded = self._tokenizer.encode(string, bos=True, eos=False)
        return encoded

    def tok_decode(self, tokens, **kwargs):
        """Decode a token id or list of ids back to text."""
        if type(tokens) == int:
            tokens = [tokens]
        decoded = self._tokenizer.decode(tokens)
        return decoded

    def tok_batch_encode(self, strings, left_truncate_len=None, **kwargs):
        """Encode a batch, left-truncate if requested, right-pad to max length."""
        tokens = [self._tokenizer.encode(string, bos=True, eos=False) for string in strings]
        if left_truncate_len is not None:
            tokens = [t[-left_truncate_len:] for t in tokens]
        max_len = max(len(t) for t in tokens)
        tokens = [t + [self.pad_token_id] * (max_len - len(t)) for t in tokens]
        tensor = torch.tensor(tokens).long()
        return tensor, tensor  # return a dummy tensor for the attention mask

    def _model_call(self, inps):
        """Plain forward pass used by the harness for loglikelihood scoring."""
        logits = self.model(inps)
        return logits

    def _model_generate(self, context, max_length, kv_cache=None, **generation_kwargs):
        """Batched generation with chunked prefill and ReSA re-prefill.

        ``context`` is a right-padded (bsz, seqlen) LongTensor; returns a
        (bsz, max_length) tensor of context + generated tokens, padded
        with pad_token_id.
        """
        bsz = context.size(0)
        tokens = context.tolist()
        # Keep total prefill tokens per forward bounded (~64K across the batch).
        PREFILL_CHUNK_SIZE = 65536 // bsz
        # remove padding
        tokens = [t[:t.index(self.pad_token_id)] if self.pad_token_id in t else t for t in tokens]
        seqlens = torch.tensor([len(t) for t in tokens], device=self.device)
        max_seqlen = seqlens.max().item()
        generation_length = max_length - max_seqlen
        eos_reached = torch.tensor([False] * bsz, device=self.device)
        kv_cache = create_kv_cache(self.model.args, bsz) if kv_cache is None else kv_cache
        for layer_kv_cache in kv_cache:
            # Entry [2] is the KVManager; reset its block centroids before reuse.
            layer_kv_cache[2].clear_centeroids()
        output = torch.zeros(bsz, generation_length, dtype=torch.long, device=self.device).fill_(self.pad_token_id)
        for cur_pos in range(generation_length):
            if cur_pos == 0:
                # Prefill: feed prompts in fixed-size chunks (varlen-packed) and
                # capture each sequence's final-position logits as it completes.
                last_logits = torch.zeros(bsz, self.model.args.vocab_size, device=self.device, dtype=torch.float16)
                for pre_start_pos in range(0, max_seqlen, PREFILL_CHUNK_SIZE):
                    pre_end_pos = min(pre_start_pos + PREFILL_CHUNK_SIZE, max_seqlen)
                    chunk_start_pos = torch.tensor([min(seqlens[b], pre_start_pos) for b in range(bsz)], device=self.device)
                    chunk_end_pos = torch.tensor([min(seqlens[b], pre_end_pos) for b in range(bsz)], device=self.device)
                    chunk_tokens = torch.cat([torch.tensor(tokens[b][chunk_start_pos[b]:chunk_end_pos[b]], device=self.device) for b in range(bsz)], dim=0)
                    cu_seqlens = torch.cat([torch.tensor([0], device=self.device), (chunk_end_pos - chunk_start_pos).cumsum(dim=0)], dim=0)
                    logits = self.model(chunk_tokens[None, :], start_pos=chunk_start_pos, cu_seqlens=cu_seqlens, kv_cache=kv_cache, last_hidden_only=True)
                    # A sequence finishes prefill in this chunk when the chunk
                    # reaches its length and actually contributed tokens.
                    is_last = (chunk_end_pos == seqlens) & (chunk_start_pos < chunk_end_pos)
                    last_logits[is_last] = self.model.output(logits[0, cu_seqlens[1:][is_last] - 1])
                # prefill_tokens = torch.cat([torch.tensor(tokens[b], device=self.device) for b in range(bsz)], dim=0)
                # cu_seqlens = torch.cat([torch.tensor([0], device=self.device), seqlens.cumsum(dim=0)], dim=0)
                # logits = self.model(prefill_tokens[None, :], start_pos=torch.zeros(bsz, device=self.device, dtype=torch.long), cu_seqlens=cu_seqlens, kv_cache=kv_cache, last_hidden_only=True)
            else:
                # Decode step: feed the previously sampled token (0 for
                # already-finished rows, which are ignored later).
                next_input = torch.where(eos_reached[:, None], 0, output[:, cur_pos - 1:cur_pos])
                logits = self.model(next_input, start_pos=seqlens + cur_pos - 1, cu_seqlens=torch.arange(bsz + 1, device=self.device), kv_cache=kv_cache, last_hidden_only=True)
                last_logits = self.model.output(logits[:, -1, :])
            if self.model.args.temperature > 0:
                probs = torch.softmax(last_logits / self.model.args.temperature, dim=-1)
                next_tokens = sample_top_p(probs, self.model.args.top_p).reshape(-1)
            else:
                next_tokens = torch.argmax(last_logits, dim=-1).reshape(-1)
            # Rows that already emitted EOS keep their pad value.
            output[:, cur_pos] = torch.where(eos_reached, output[:, cur_pos], next_tokens)
            eos_reached |= (next_tokens == self.eos_token_id)
            if eos_reached.all():
                break
            if cur_pos > 0 and self.model.args.resa_rec_freq > 0 and cur_pos % self.model.args.resa_rec_freq == 0:
                # Periodic re-prefill of the last resa_rec_freq generated tokens
                # so the sparse-attention block statistics stay current.
                start_pos = cur_pos - self.model.args.resa_rec_freq
                reprefill_tokens = output[:, start_pos:start_pos + self.model.args.resa_rec_freq]
                reprefill_tokens = torch.where(reprefill_tokens == self.pad_token_id, 0, reprefill_tokens)
                logits = self.model(reprefill_tokens, start_pos=seqlens + start_pos, cu_seqlens=torch.arange(bsz + 1, device=self.device) * self.model.args.resa_rec_freq, kv_cache=kv_cache, last_hidden_only=True)
                for layer_kv_cache in kv_cache:
                    # HOTFIX: clear centeroids for each layer, online modification in the future
                    layer_kv_cache[2].clear_centeroids()
        # Stitch original (unpadded) contexts and generated continuations into
        # one right-padded tensor of width max_length.
        final_output = torch.full((bsz, max_length), fill_value=self.pad_token_id, dtype=torch.long, device=self.device)
        final_output[:, :max_seqlen] = context
        for b in range(bsz):
            final_output[b, seqlens[b]:seqlens[b] + generation_length] = output[b]
        return final_output
def _adjust_config(task_dict):
adjusted_task_dict = {}
for task_name, task_obj in task_dict.items():
if isinstance(task_obj, dict):
adjusted_task_dict = {
**adjusted_task_dict,
**{task_name: _adjust_config(task_obj)},
}
else:
if 'mmlu' in task_name or 'gsm' in task_name:
task_obj.set_config(key="num_fewshot", value=5)
elif task_obj.get_config("num_fewshot") is None:
task_obj.set_config(key="num_fewshot", value=0)
task_obj.set_fewshot_seed(seed=1234)
adjusted_task_dict[task_name] = task_obj
return adjusted_task_dict
@torch.no_grad()
def eval_end_task(
    model,
    tokenizer,
    tasks,
    limit,
    batch_size,
    max_seq_length,
):
    """Run lm-eval-harness tasks against the local model.

    ``tasks`` is a comma-separated task-name string; returns the raw
    results dict produced by the harness ``evaluate`` call.
    """
    wrapper = EvalWrapper(
        model,
        tokenizer,
        batch_size,
        max_seq_length,
    )
    task_names = tasks.split(',')
    task_dict = _adjust_config(
        get_task_dict(task_names, task_manager=TaskManager(verbosity='WARNING'))
    )
    return evaluate(
        wrapper,
        task_dict,
        limit=limit,
        verbosity='WARNING',
    )
@torch.no_grad()
def eval_downstream_task(
    args,
    model,
    tokenizer,
    downstream_task,
    limit,
    batch_size,
    max_seq_length,
):
    """Evaluate a custom downstream task ("math" is the only one wired up).

    Raises ValueError for unknown task names. Results are handled by the
    task-specific evaluator; nothing is returned.
    """
    wrapper = EvalWrapper(
        model,
        tokenizer,
        batch_size,
        max_seq_length,
    )
    # Dispatch after wrapper construction to preserve the original
    # side-effect order on the error path.
    if downstream_task == "math":
        evaluate_func = evaluate_math
    else:
        raise ValueError(f"Unknown downstream task: {downstream_task}")
    evaluate_func(
        args,
        wrapper,
        limit=limit,
    )
def load_qwen2_model(state_dict):
    """Rename HF Qwen2 checkpoint keys to this repository's module layout.

    lm_head -> output, model.embed_tokens -> tok_embeddings, and the
    "model." prefix is stripped from every other key.
    """
    renamed = {}
    for key, tensor in state_dict.items():
        if "lm_head" in key:
            new_key = key.replace("lm_head", "output")
        elif "embed_tokens" in key:
            new_key = key.replace("model.embed_tokens", "tok_embeddings")
        else:
            new_key = key.replace("model.", "")
        renamed[new_key] = tensor
    return renamed
def load_model(args):
    """Build a fp16 CUDA Model from a HF-style Qwen2 checkpoint directory.

    Reads config.json for hyperparameters, loads (possibly sharded)
    safetensors weights, renames them via load_qwen2_model, fuses the
    per-layer q/k/v projections into a single qkv_proj, and returns
    (model, tokenizer) with the model in eval mode.
    """
    tokenizer = Tokenizer(args.checkpoint_dir)
    config = json.load(open(os.path.join(args.checkpoint_dir, "config.json")))
    # Map HF config keys onto ModelArgs fields; the resa_* and sampling
    # settings come from the CLI args instead of the checkpoint.
    params = {
        "dim": config["hidden_size"],
        "hidden_dim": config["intermediate_size"],
        "n_layers": config["num_hidden_layers"],
        "n_heads": config["num_attention_heads"],
        "n_kv_heads": config["num_key_value_heads"],
        "rope_theta": config["rope_theta"],
        "norm_eps": config["rms_norm_eps"],
        "vocab_size": config["vocab_size"],
        "max_batch_size": args.batch_size,
        "max_seq_len": config["max_position_embeddings"],
        "tie_word_embeddings": config["tie_word_embeddings"],
        "temperature": args.temperature,
        "top_p": args.top_p,
        "resa_rec_freq": args.resa_rec_freq,
        "resa_block_size": args.resa_block_size,
        "resa_local_block_num": args.resa_local_block_num,
        "resa_min_block_num": args.resa_min_block_num,
        "resa_sparse_ratio": args.resa_sparse_ratio,
    }
    model_args = ModelArgs(**params)
    # Sharded checkpoints ship an index file; in that case load and merge
    # every *.safetensors shard in the directory.
    if "model.safetensors.index.json" in os.listdir(args.checkpoint_dir):
        safetensor_file = filter(lambda x: x.endswith(
            "safetensors"), os.listdir(args.checkpoint_dir))
        state_dict = {}
        for file in safetensor_file:
            state_dict.update(load_qwen2_model(load_file(os.path.join(
                args.checkpoint_dir, file))))
    else:
        state_dict = load_qwen2_model(load_file(os.path.join(
            args.checkpoint_dir, "model.safetensors")))
    # Fuse the separate q/k/v projection weights and biases into the single
    # qkv_proj tensors that this repo's attention module expects.
    for i in range(model_args.n_layers):
        state_dict[f"layers.{i}.self_attn.qkv_proj.weight"] = torch.cat([state_dict[f"layers.{i}.self_attn.q_proj.weight"], state_dict[f"layers.{i}.self_attn.k_proj.weight"], state_dict[f"layers.{i}.self_attn.v_proj.weight"]], dim=0)
        state_dict[f"layers.{i}.self_attn.qkv_proj.bias"] = torch.cat([state_dict[f"layers.{i}.self_attn.q_proj.bias"], state_dict[f"layers.{i}.self_attn.k_proj.bias"], state_dict[f"layers.{i}.self_attn.v_proj.bias"]], dim=0)
        del state_dict[f"layers.{i}.self_attn.q_proj.weight"], state_dict[f"layers.{i}.self_attn.k_proj.weight"], state_dict[f"layers.{i}.self_attn.v_proj.weight"]
        del state_dict[f"layers.{i}.self_attn.q_proj.bias"], state_dict[f"layers.{i}.self_attn.k_proj.bias"], state_dict[f"layers.{i}.self_attn.v_proj.bias"]
    model = Model(model_args)
    model = model.cuda().to(dtype=torch.float16)
    model.eval()
    model.load_state_dict(state_dict, strict=True)
    return model, tokenizer
def evaluate_one_checkpoint(args):
    """Load the checkpoint described by ``args`` and run the requested evaluation.

    Runs harness end tasks when args.tasks is set (returns the results dict),
    otherwise a downstream task when args.downstream_task is set (returns
    None). Raises NotImplementedError when neither is specified.
    """
    model, tokenizer = load_model(args)
    if args.tasks is not None:
        results = eval_end_task(
            model,
            tokenizer,
            args.tasks,
            args.limit,
            args.batch_size,
            model.args.max_seq_len,
        )
        requested = set(args.tasks.split(','))
        # Print only the tasks the caller asked for; groups/subtasks in the
        # results dict that were not requested are skipped.
        for task, res in results["results"].items():
            if task in requested:
                print(f"{task}: {res}")
        return results
    if args.downstream_task is not None:
        eval_downstream_task(
            args,
            model,
            tokenizer,
            args.downstream_task,
            args.limit,
            args.batch_size,
            model.args.max_seq_len,
        )
        return None
    raise NotImplementedError("No evaluation task specified")
if __name__ == '__main__':
    # Distributed bootstrap via torchrun-style env vars; gloo backend with a
    # long timeout since evaluation steps can be slow.
    # NOTE(review): destroy_process_group is imported but never called here.
    init_process_group(backend='gloo', timeout=datetime.timedelta(hours=2))
    dp_rank = int(os.environ['RANK'])
    dp_local_rank = int(os.environ['LOCAL_RANK'])
    dp_world_size = int(os.environ['WORLD_SIZE'])
    # Pin this process to its local GPU before any CUDA allocation.
    device = f'cuda:{dp_local_rank}'
    torch.cuda.set_device(device)
    args = parse_eval_args()
    results = evaluate_one_checkpoint(args)
| {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/eval.py",
"license": "MIT License",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
microsoft/unilm:ReSA/llm/kernel/flash_attention_with_kv_cache.py | import math
import torch
import triton
import triton.language as tl
def is_hip():
    # True when the active Triton backend targets AMD ROCm/HIP GPUs.
    return triton.runtime.driver.active.get_current_target().backend == "hip"
def num_splits_heuristic(total_mblocks, num_SMs, num_n_blocks, num_m_blocks, size_one_kv_head,
                         is_causal_or_local, max_splits):
    """
    Determines the optimal number of splits for maximizing GPU occupancy while balancing memory efficiency.
    Parameters:
    - total_mblocks (int): Total number of m_blocks.
    - num_SMs (int): Number of Streaming Multiprocessors (SMs) in the GPU.
    - num_n_blocks (int): Number of n_blocks.
    - num_m_blocks (int): Number of m_blocks.
    - size_one_kv_head (int): Size of one KV head in bytes.
    - is_causal_or_local (bool): Indicates whether the operation is causal or local.
    - max_splits (int): Maximum number of allowed splits.
    Returns:
    - int: The optimal number of splits.
    """
    # If we have enough m_blocks to almost fill the SMs, prefer 1 split unless memory constraints apply.
    if total_mblocks >= 0.8 * num_SMs:
        size_l2 = 50 * 1024 * 1024  # L2 cache size assumption (50MB)
        # Only split if each KV head is too large for L2 and there are enough m_blocks
        if size_one_kv_head > size_l2 and num_m_blocks >= num_SMs * 2 and not is_causal_or_local:
            return min((size_one_kv_head + size_l2 - 1) // size_l2, max_splits)
        return 1
    # If num_n_blocks is too small, we don't split
    if num_n_blocks <= 4:
        return 1
    # Limit max_splits to a reasonable range
    max_splits = min(max_splits, num_SMs, num_n_blocks)
    # Efficiency of a split count = achieved waves / rounded-up waves.
    efficiency = []
    for num_splits in range(1, max_splits + 1):
        n_waves = (total_mblocks * num_splits) / num_SMs
        efficiency.append(n_waves / math.ceil(n_waves))
    max_efficiency = max(efficiency)
    # Find the smallest number of splits that achieves at least 95% of max
    # efficiency. (The previous comment said 85%, contradicting the 0.95
    # threshold actually used; the code's 0.95 is kept.)
    for num_splits, eff in enumerate(efficiency, start=1):
        if eff >= 0.95 * max_efficiency:
            return num_splits
    return 1
@triton.autotune(
    configs=[
        triton.Config({}, num_warps=num_warps)
        for num_warps in [1, 2, 4, 8, 16]
    ],
    key=['gqa_group_size', 'BLOCK_H', 'BLOCK_N', 'BLOCK_D', 'BLOCK_V'],
)
@triton.jit
def _fwd_kernel_with_kv_cache(
    Q, K, V, Out, L,
    sm_scale,
    cache_seqlens,
    stride_qz, stride_qt, stride_qh, stride_qd,
    stride_kz, stride_kt, stride_kh, stride_kd,
    stride_vz, stride_vt, stride_vh, stride_vd,
    stride_oz, stride_ot, stride_oh, stride_os, stride_od,
    stride_lz, stride_lt, stride_lh, stride_ls,
    num_splits: tl.constexpr,
    seqlen_q: tl.constexpr,
    num_splits_unused: tl.constexpr,
    gqa_group_size: tl.constexpr,
    BLOCK_H: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_D: tl.constexpr,
    BLOCK_V: tl.constexpr,
):
    # Split-KV causal flash-attention forward over a KV cache.
    # Each program handles one (split, m-block) pair for one KV head of one
    # batch element; partial outputs and per-row log-sum-exp go to Out/L and
    # are merged by the `combine` kernel.
    # Grid: (num_splits * num_m_blocks, n_kv_heads, batch).
    off_sm = tl.program_id(0).to(tl.int64)
    off_split, off_m = off_sm // num_m_blocks, off_sm % num_m_blocks
    off_h_for_kv = tl.program_id(1).to(tl.int64)
    off_z = tl.program_id(2).to(tl.int64)
    # GQA: each KV head serves gqa_group_size consecutive query heads.
    off_h_q = off_h_for_kv * gqa_group_size
    offs_h = tl.arange(0, BLOCK_H)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_D)
    offs_v = tl.arange(0, BLOCK_V)
    mask_h = offs_h < gqa_group_size
    seqlen_k = tl.load(cache_seqlens + off_z)
    # Advance base pointers to this batch element / head / split slice.
    Q += off_z * stride_qz + off_h_q * stride_qh
    K += off_z * stride_kz + off_h_for_kv * stride_kh
    V += off_z * stride_vz + off_h_for_kv * stride_vh
    L += off_z * stride_lz + off_h_q * stride_lh + off_split * stride_ls
    Out += off_z * stride_oz + off_h_q * stride_oh + off_split * stride_os
    # Distribute KV blocks across splits; the first `remaining_blocks`
    # splits take one extra block each.
    num_kv_blocks = tl.cdiv(seqlen_k, BLOCK_N)
    blocks_per_split = num_kv_blocks // num_splits
    remaining_blocks = num_kv_blocks % num_splits
    loop_range = blocks_per_split + (1 if off_split < remaining_blocks else 0)
    start = blocks_per_split * off_split + min(off_split, remaining_blocks)
    offs_m = tl.arange(0, BLOCK_M) + off_m * BLOCK_M
    mask_q = offs_m < seqlen_q
    mask_qh = mask_q[:, None] & mask_h[None, :]
    # Absolute key position of each query row (queries sit at the tail of
    # the cache), broadcast across the head dimension.
    q_idx = offs_m[:, None] + tl.zeros([BLOCK_H], dtype=tl.int32) + seqlen_k - seqlen_q
    q_idx = tl.reshape(q_idx, [BLOCK_M * BLOCK_H])
    # Flatten (m, head) into one axis so tl.dot sees >=16 rows.
    q = tl.load(Q + offs_m[:, None, None] * stride_qt + offs_h[None, :, None] * stride_qh + offs_d[None, None, :] * stride_qd, mask=mask_qh[:, :, None]) ## padding to min 16
    q = tl.reshape(q, (BLOCK_M * BLOCK_H, BLOCK_D))
    # Online-softmax running state: row max, row sum, output accumulator.
    m_i = tl.full([BLOCK_M * BLOCK_H], float("-inf"), dtype=tl.float32)
    l_i = tl.full([BLOCK_M * BLOCK_H], 1.0, dtype=tl.float32)
    acc = tl.zeros([BLOCK_M * BLOCK_H, BLOCK_V], dtype=tl.float32)
    k_ptrs = K + offs_n[None, :] * stride_kt + offs_d[:, None] * stride_kd
    v_ptrs = V + offs_n[:, None] * stride_vt + offs_v[None, :] * stride_vd
    for block_idx in range(start, start + loop_range):
        start_n = block_idx * BLOCK_N
        k = tl.load(k_ptrs + start_n * stride_kt, mask=offs_n[None, :] + start_n < seqlen_k, cache_modifier=".ca")
        qk = tl.dot(q, k)
        # Causal mask: a query may only attend to keys at or before its position.
        causal_mask = q_idx[:, None] >= start_n + offs_n[None, :]
        qk = tl.where(causal_mask, qk, -1.0e6)
        qk *= sm_scale
        # Online softmax update (rescale previous partial sums by alpha).
        m_ij = tl.maximum(m_i, tl.max(qk, 1))
        qk -= m_ij[:, None]
        p = tl.exp(qk)
        l_ij = tl.sum(p, 1)
        alpha = tl.exp(m_i - m_ij)
        l_i = l_i * alpha + l_ij
        acc = acc * alpha[:, None]
        v = tl.load(v_ptrs + start_n * stride_vt, mask=offs_n[:, None] + start_n < seqlen_k, cache_modifier=".ca")
        p = p.to(v.type.element_ty)
        acc += tl.dot(p, v)
        m_i = m_ij
    # Normalize and store this split's partial output plus its per-row
    # log-sum-exp (needed by `combine` to merge splits).
    l_recip = 1 / l_i[:, None]
    acc = acc * l_recip
    m_i += tl.math.log(l_i)
    l_ptrs = L + offs_m[:, None] * stride_lt + offs_h * stride_lh
    m_i = tl.reshape(m_i, (BLOCK_M, BLOCK_H))
    tl.store(l_ptrs, m_i, mask=mask_qh)
    O_ptrs = Out + offs_m[:, None, None] * stride_ot + offs_h[None, :, None] * stride_oh + offs_v[None, None, :] * stride_od
    acc = tl.reshape(acc, (BLOCK_M, BLOCK_H, BLOCK_V))
    tl.store(O_ptrs, acc, mask=mask_qh[:, :, None])
@triton.autotune(
    configs=[
        triton.Config({}, num_warps=num_warps)
        for num_warps in [1, 2, 4, 8, 16]
    ],
    key=['BLOCK_V'],
)
@triton.jit
def combine(
    out_partial, out, L,
    stride_op_z, stride_op_t, stride_op_h, stride_op_s, stride_op_d,
    stride_o_z, stride_o_t, stride_o_h, stride_o_d,
    stride_l_z, stride_l_t, stride_l_h, stride_l_s,
    num_splits: tl.constexpr,
    num_splits_pow2: tl.constexpr,
    BLOCK_V: tl.constexpr,
):
    # Merge per-split partial attention outputs into the final output using
    # the per-split log-sum-exp values stored in L. One program handles one
    # (head, query position, batch) triple; grid: (n_heads, seqlen_q, batch).
    off_h = tl.program_id(0).to(tl.int64)
    off_t = tl.program_id(1).to(tl.int64)
    off_z = tl.program_id(2).to(tl.int64)
    # Splits are padded to a power of two for tl.arange; mask the excess.
    split = tl.arange(0, num_splits_pow2)
    split_mask = split < num_splits
    L += off_z * stride_l_z + off_t * stride_l_t + off_h * stride_l_h
    out_partial += off_z * stride_op_z + off_t * stride_op_t + off_h * stride_op_h
    out += off_z * stride_o_z + off_t * stride_o_t + off_h * stride_o_h
    # Global log-sum-exp across splits, computed stably via the max trick.
    lse_local = tl.load(L + split * stride_l_s, mask=split_mask, other=float("-inf"))
    lse_max_local = tl.max(lse_local, axis=0)
    lse_logsum_local = tl.sum(tl.exp(lse_local - lse_max_local), axis=0)
    lse_logsum_local = tl.log(lse_logsum_local) + lse_max_local
    # Each split's partial output is reweighted by exp(lse_split - lse_global).
    po_local = tl.load(out_partial + split[:, None] * stride_op_s + tl.arange(0, BLOCK_V) * stride_op_d, mask=split_mask[:, None])
    scale_local = tl.exp(lse_local - lse_logsum_local)
    accum_local = tl.sum(po_local * scale_local[:, None], axis=0)
    tl.store(out + tl.arange(0, BLOCK_V) * stride_o_d, accum_local)
def flash_attention_with_kv_cache(
    q, k, v,
    cache_seqlens,
    sm_scale=None,
):
    """Causal flash attention over a padded KV cache (split-KV decode path).

    q: (batch, seqlen_q, n_heads, key_dim); k: (batch, max_cache_len,
    n_kv_heads, key_dim); v: (batch, max_cache_len, n_kv_heads, head_dim);
    cache_seqlens: (batch,) int tensor of valid cache lengths. Queries are
    treated as the final seqlen_q positions of each cached sequence.
    Returns (batch, seqlen_q, n_heads, head_dim) in q's dtype.
    """
    # split q to blocks
    batch, seqlen_q, n_heads, key_dim = q.shape
    _, _, n_kv_heads, head_dim = v.shape
    gqa_group_size = n_heads // n_kv_heads
    # Pack all query heads of one KV group into the m-dimension of a tile.
    block_h = triton.next_power_of_2(gqa_group_size)
    block_m = max(256 // block_h, 1)
    block_n = 32
    # assert seqlen_q <= 32, "it seems the performance is not good when seqlen_q > 32"
    assert k.size(0) == v.size(0)
    assert q.size(3) == k.size(3)
    assert k.size(1) == v.size(1)
    assert key_dim in {64, 128, 256}
    assert head_dim in {64, 128, 256}
    props = torch.cuda.get_device_properties(torch.device("cuda:0"))
    num_sm = props.multi_processor_count
    num_m_blocks = triton.cdiv(seqlen_q, block_m)
    # NOTE(review): cache_seqlens.max() is a 0-d tensor here, so these two
    # values are tensors fed into num_splits_heuristic — appears to work via
    # tensor arithmetic, but confirm. The size_one_kv_head formula also
    # multiplies by block_n, which looks larger than bytes-per-head; verify
    # the intended units.
    num_n_blocks = triton.cdiv(cache_seqlens.max(), block_n)
    size_one_kv_head = cache_seqlens.max() * block_n * (key_dim + head_dim) * 2
    total_mblocks = batch * n_kv_heads
    num_splits = num_splits_heuristic(
        total_mblocks, num_sm, num_n_blocks, num_m_blocks,
        size_one_kv_head, is_causal_or_local=True, max_splits=16
    )
    # Per-split partial outputs and log-sum-exp buffers, merged by `combine`.
    out_partial = torch.empty((batch, seqlen_q, n_heads, num_splits, head_dim), device=q.device, dtype=torch.float32)
    out = torch.empty((batch, seqlen_q, n_heads, head_dim), device=q.device, dtype=q.dtype)
    L = torch.empty((batch, seqlen_q, n_heads, num_splits), device=q.device, dtype=torch.float32)
    if is_hip():
        extra_kern_args = {"waves_per_eu": 1}
    else:
        extra_kern_args = {}
    with torch.cuda.device(q.device.index):
        grid = lambda META: (num_splits * num_m_blocks, n_kv_heads, batch)
        _fwd_kernel_with_kv_cache[grid](
            q, k, v, out_partial, L,
            sm_scale if sm_scale is not None else key_dim ** -0.5,
            cache_seqlens.contiguous(),
            *q.stride(),
            *k.stride(),
            *v.stride(),
            *out_partial.stride(),
            *L.stride(),
            num_splits=num_splits,
            seqlen_q=seqlen_q,
            num_m_blocks=num_m_blocks,
            gqa_group_size=gqa_group_size,
            BLOCK_H = block_h,
            BLOCK_M = block_m,
            BLOCK_N = block_n,
            BLOCK_D = key_dim,
            BLOCK_V = head_dim,
            **extra_kern_args
        )
        # Second pass: merge the split partials into the final output.
        grid = lambda META: (n_heads, seqlen_q, batch)
        combine[grid](
            out_partial, out, L,
            *out_partial.stride(),
            *out.stride(),
            *L.stride(),
            num_splits=num_splits,
            num_splits_pow2=triton.next_power_of_2(num_splits),
            BLOCK_V=head_dim,
            **extra_kern_args
        )
    return out
def ref_program_fa(query, key, value, cache_seqlens):
    """Reference implementation via the flash-attn package (correctness and
    latency baseline for the Triton kernel above)."""
    # latency reference
    # from flash_attn_interface import flash_attn_with_kvcache, flash_attn_func # fa3
    from flash_attn import flash_attn_with_kvcache, flash_attn_func #fa2
    output = flash_attn_with_kvcache(query, key, value, cache_seqlens=cache_seqlens, causal=True)
    return output
def debug(name, expect, actual, atol=1e-3, rtol=1e-3):
    """Print whether two tensors match within tolerance; on mismatch, also
    report diff statistics and the location of the largest error."""
    matches = torch.allclose(expect, actual, atol=atol, rtol=rtol)
    print(f"{name} all_close={matches}")
    if matches:
        return
    diff = (expect - actual).abs()
    worst = diff.max().item()
    print(f"all_close={matches}, max={worst}, min={diff.min().item()}, mean={diff.mean().item()}")
    # Locate the first element attaining the maximum absolute difference.
    where_worst = tuple(torch.nonzero(diff == worst)[0].tolist())
    print(f"Index: {where_worst}, expect: {expect[where_worst]}, actual: {actual[where_worst]}")
if __name__ == "__main__":
    # Standalone correctness + latency benchmark against flash-attn.
    import argparse
    import time
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch', type=int, default=8, help='batch size')
    parser.add_argument('--seqlen_q', type=int, default=128, help='sequence length')
    parser.add_argument('--heads', type=int, default=28, help='heads')
    parser.add_argument('--heads_kv', type=int, default=4, help='heads_kv')
    parser.add_argument('--max_cache_seqlen', type=int, default=65536, help='kvcache sequence length')
    parser.add_argument('--dim', type=int, default=128, help='dim')
    parser.add_argument('--dim_v', type=int, default=128, help='dim_v')
    parser.add_argument('--load_from_file', type=str, default=None, help='load from file')
    args = parser.parse_args()
    batch, seqlen_q, heads, heads_kv, max_cache_seqlen, dim, dim_v = args.batch, args.seqlen_q, args.heads, args.heads_kv, args.max_cache_seqlen, args.dim, args.dim_v
    dtype = torch.bfloat16
    # Random inputs; cache lengths vary within 32 of the maximum.
    Q = torch.randn((batch, seqlen_q, heads, dim), dtype=dtype, device='cuda')
    K = torch.randn((batch, max_cache_seqlen, heads_kv, dim), dtype=dtype, device='cuda')
    V = torch.randn((batch, max_cache_seqlen, heads_kv, dim_v), dtype=dtype, device='cuda')
    cache_seqlens = torch.randint(max_cache_seqlen - 32, max_cache_seqlen, (batch,), device='cuda', dtype=torch.int32)
    print("cache_seqlens: ", cache_seqlens)
    # parity reference
    ref = ref_program_fa(Q, K, V, cache_seqlens)
    # ref = ref_program_triton(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    # out = kernel(Q, K, V, block_indices, cache_seqlens, actual_num_blocks, glse, Output_partial)
    # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
    out = flash_attention_with_kv_cache(Q, K, V, cache_seqlens)
    debug("output", ref, out, atol=1e-3, rtol=1e-3)
    ## latency reference
    # Warm up, then time 100 iterations of the flash-attn baseline.
    for i in range(10):
        ref = ref_program_fa(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        ref = ref_program_fa(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    print("dense time: ", (time.time() - start) / 100*1000)
    # Warm up, then time 100 iterations of the Triton kernel.
    for i in range(10):
        out = flash_attention_with_kv_cache(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        out = flash_attention_with_kv_cache(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
print("sparse time: ", (time.time() - start) / 100*1000) | {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/kernel/flash_attention_with_kv_cache.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
microsoft/unilm:ReSA/llm/kernel/rotary.py | # Copyright (c) 2023, Tri Dao.
from typing import Optional, Union
import torch
import triton
import triton.language as tl
from typing import Optional, Union
from einops import rearrange, repeat
def rotate_half(x, interleaved=False):
    """Rotate feature pairs by 90 degrees: (x1, x2) -> (-x2, x1).

    Non-interleaved treats the two halves of the last dim as the pair;
    interleaved pairs adjacent elements (even/odd lanes).
    """
    if interleaved:
        evens, odds = x[..., ::2], x[..., 1::2]
        return rearrange(torch.stack((-odds, evens), dim=-1), "... d two -> ... (d two)", two=2)
    first_half, second_half = x.chunk(2, dim=-1)
    return torch.cat((-second_half, first_half), dim=-1)
def apply_rotary_emb_torch(x, cos, sin, interleaved=False, inplace=False):
    """
    x: (batch_size, seqlen, nheads, headdim)
    cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
    """
    ro_dim = cos.shape[-1] * 2
    assert ro_dim <= x.shape[-1]
    # Broadcast cos/sin over heads, duplicating each value per rotated pair.
    pattern = "... d -> ... 1 (d 2)" if interleaved else "... d -> ... 1 (2 d)"
    cos = repeat(cos, pattern)
    sin = repeat(sin, pattern)
    # Rotate only the first ro_dim features; pass the rest through untouched.
    rotated = x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin
    return torch.cat([rotated, x[..., ro_dim:]], dim=-1)
@triton.jit
def rotary_kernel(
    OUT,  # Pointers to matrices
    X,
    COS,
    SIN,
    CU_SEQLENS,
    SEQLEN_OFFSETS,  # this could be int or a pointer
    # Matrix dimensions
    seqlen,
    rotary_dim,
    seqlen_ro,
    # strides
    stride_out_batch,
    stride_out_seqlen,
    stride_out_nheads,
    stride_out_headdim,
    stride_x_batch,
    stride_x_seqlen,
    stride_x_nheads,
    stride_x_headdim,
    # Meta-parameters
    BLOCK_K: tl.constexpr,
    IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    INTERLEAVED: tl.constexpr,
    CONJUGATE: tl.constexpr,
    BLOCK_M: tl.constexpr,
):
    # Apply rotary position embedding to X, writing to OUT.
    # Grid: (m-blocks over seqlen, batch, nheads). Supports fixed-shape and
    # varlen (cu_seqlens-packed) layouts, per-batch position offsets, both
    # half-split and interleaved pairings, and the conjugate (inverse) rotation.
    pid_m = tl.program_id(axis=0)
    pid_batch = tl.program_id(axis=1)
    pid_head = tl.program_id(axis=2)
    rotary_dim_half = rotary_dim // 2
    if not IS_VARLEN:
        # Dense layout: advance to this (batch, head) slice directly.
        X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads
        OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads
    else:
        # Varlen layout: sequences are packed; locate this one via cu_seqlens.
        start_idx = tl.load(CU_SEQLENS + pid_batch)
        seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx
        X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads
        OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads
    if pid_m * BLOCK_M >= seqlen:
        return
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    # rm_cs: absolute rotary positions (row index plus per-batch offset).
    if not IS_SEQLEN_OFFSETS_TENSOR:
        rm_cs = rm + SEQLEN_OFFSETS
    else:
        rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch)
    rk = tl.arange(0, BLOCK_K)
    rk_half = tl.arange(0, BLOCK_K // 2)
    if not INTERLEAVED:
        # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT
        X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        cos = tl.load(
            COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=1.0
        ).to(tl.float32)
        sin = tl.load(
            SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=0.0
        ).to(tl.float32)
        x0 = tl.load(
            X, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0
        ).to(tl.float32)
        x1 = tl.load(
            X + rotary_dim_half * stride_x_headdim,
            mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half),
            other=0.0,
        ).to(tl.float32)
        if CONJUGATE:
            # Negating sin applies the inverse rotation.
            sin = -sin
        # Standard 2D rotation of the (x0, x1) pair.
        o0 = x0 * cos - x1 * sin
        o1 = x0 * sin + x1 * cos
        # write back result
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim)
        tl.store(OUT, o0, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half))
        tl.store(
            OUT + rotary_dim_half * stride_out_headdim,
            o1,
            mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half),
        )
    else:
        # We don't want to load X[0, 2, 4, ...] and X[1, 3, 5, ...] separately since both are slow.
        # Instead, we load x0 = X[0, 1, 2, 3, ...] and x1 = X[1, 0, 3, 2, ...].
        # Loading x0 will be fast but x1 will be slow.
        # Then we load cos = COS[0, 0, 1, 1, ...] and sin = SIN[0, 0, 1, 1, ...].
        # Then we do the calculation and use tl.where to pick put the right outputs for the even
        # and for the odd indices.
        rk_swap = rk + ((rk + 1) % 2) * 2 - 1  # 1, 0, 3, 2, 5, 4, ...
        rk_repeat = tl.arange(0, BLOCK_K) // 2
        X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim)
        X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        cos = tl.load(
            COS,
            mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half),
            other=1.0,
        ).to(tl.float32)
        sin = tl.load(
            SIN,
            mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half),
            other=0.0,
        ).to(tl.float32)
        x0 = tl.load(X0, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim), other=0.0).to(
            tl.float32
        )
        x1 = tl.load(
            X1, mask=(rm[:, None] < seqlen) & (rk_swap[None, :] < rotary_dim), other=0.0
        ).to(tl.float32)
        if CONJUGATE:
            sin = -sin
        x0_cos = x0 * cos
        x1_sin = x1 * sin
        # Even lanes get x0*cos - x1*sin, odd lanes get x0*cos + x1*sin.
        out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin)
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim)
        tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim))
def apply_rotary(
    x: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
    cu_seqlens: Optional[torch.Tensor] = None,
    max_seqlen: Optional[int] = None,
    interleaved=False,
    inplace=False,
    conjugate=False,
) -> torch.Tensor:
    """
    Launch the Triton rotary-embedding kernel over the first ``rotary_dim``
    channels of ``x``.

    Arguments:
        x: (batch, seqlen, nheads, headdim) if cu_seqlens is None
            else (total_seqlen, nheads, headdim).
        cos: (seqlen_ro, rotary_dim / 2)
        sin: (seqlen_ro, rotary_dim / 2)
        seqlen_offsets: integer or integer tensor of size (batch,)
        cu_seqlens: (batch + 1,) or None
        max_seqlen: int
        interleaved: if True, rotate adjacent even/odd channel pairs (GPT-J
            style) instead of first-half/second-half (GPT-NeoX style).
        inplace: if True, write the rotated values back into ``x``.
        conjugate: if True, negate ``sin`` inside the kernel (rotation by the
            opposite angle; used by the backward pass).
    Returns:
        y: (batch, seqlen, nheads, headdim)
    """
    is_varlen = cu_seqlens is not None
    if not is_varlen:
        batch, seqlen, nheads, headdim = x.shape
    else:
        assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed"
        total_seqlen, nheads, headdim = x.shape
        batch_p_1 = cu_seqlens.shape[0]
        batch = batch_p_1 - 1
        seqlen = max_seqlen
    seqlen_ro, rotary_dim = cos.shape
    assert sin.shape == cos.shape
    # cos/sin carry only half the rotary channels: each entry rotates a pair.
    rotary_dim *= 2
    assert rotary_dim <= headdim, f"rotary_dim must be <= headdim, but got {rotary_dim} and {headdim}"
    assert headdim <= 256, "Only support headdim <= 256"
    assert seqlen_ro >= seqlen, f"seqlen_ro must be >= seqlen, but got {seqlen_ro} and {seqlen}"
    assert (
        cos.dtype == sin.dtype
    ), f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}"
    assert (
        x.dtype == cos.dtype
    ), f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}"
    cos, sin = cos.contiguous(), sin.contiguous()
    if isinstance(seqlen_offsets, torch.Tensor):
        assert seqlen_offsets.shape == (batch,)
        assert seqlen_offsets.dtype in [torch.int32, torch.int64]
        seqlen_offsets = seqlen_offsets.contiguous()
    else:
        assert seqlen_offsets + seqlen <= seqlen_ro
    output = torch.empty_like(x) if not inplace else x
    if rotary_dim < headdim and not inplace:
        # Channels beyond rotary_dim are not rotated; copy them through.
        output[..., rotary_dim:].copy_(x[..., rotary_dim:])
    BLOCK_K = (
        32
        if rotary_dim <= 32
        else (64 if rotary_dim <= 64 else (128 if rotary_dim <= 128 else 256))
    )
    grid = lambda META: (triton.cdiv(seqlen, META["BLOCK_M"]), batch, nheads)  # noqa
    BLOCK_M = 4 if interleaved else (8 if rotary_dim <= 128 else 4)
    # Need this, otherwise Triton tries to launch from cuda:0 and we get
    # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
    with torch.cuda.device(x.device.index):
        # NOTE: argument order must match rotary_kernel's parameter list exactly.
        rotary_kernel[grid](
            output,  # data ptrs
            x,
            cos,
            sin,
            cu_seqlens,
            seqlen_offsets,
            seqlen,  # shapes
            rotary_dim,
            seqlen_ro,
            output.stride(0) if not is_varlen else 0,  # batch_strides if not varlen else 0
            output.stride(-3),  # seqlen_stride or total_seqlen_stride
            output.stride(-2),  # nheads_stride
            output.stride(-1),  # headdim_stride
            x.stride(0) if not is_varlen else 0,  # batch_strides if not varlen else 0
            x.stride(-3),  # seqlen stride or total_seqlen_stride
            x.stride(-2),  # nheads stride
            x.stride(-1),  # headdim stride
            BLOCK_K,
            isinstance(seqlen_offsets, torch.Tensor),
            is_varlen,
            interleaved,
            conjugate,
            BLOCK_M,
        )
    return output
class ApplyRotaryEmb(torch.autograd.Function):
    """Autograd wrapper for the Triton rotary kernel.

    The gradient of a rotation is the rotation by the negated angle, so the
    backward pass simply re-runs the kernel with ``conjugate=True``.
    """

    @staticmethod
    def forward(
        ctx,
        x,
        cos,
        sin,
        interleaved=False,
        inplace=False,
        seqlen_offsets: Union[int, torch.Tensor] = 0,
        cu_seqlens: Optional[torch.Tensor] = None,
        max_seqlen: Optional[int] = None,
    ):
        result = apply_rotary(
            x,
            cos,
            sin,
            seqlen_offsets=seqlen_offsets,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            interleaved=interleaved,
            inplace=inplace,
        )
        if isinstance(seqlen_offsets, int):
            # save_for_backward only accepts tensors; stash the int on ctx.
            ctx.save_for_backward(cos, sin, cu_seqlens)
            ctx.seqlen_offsets = seqlen_offsets
        else:
            # Tensor offsets travel with the other saved tensors.
            ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
            ctx.seqlen_offsets = None
        ctx.interleaved = interleaved
        ctx.inplace = inplace
        ctx.max_seqlen = max_seqlen
        return x if inplace else result

    @staticmethod
    def backward(ctx, do):
        if ctx.seqlen_offsets is None:
            cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
        else:
            cos, sin, cu_seqlens = ctx.saved_tensors
            seqlen_offsets = ctx.seqlen_offsets
        # TD [2023-09-02]: For some reason Triton (2.0.0.post1) errors with
        # "[CUDA]: invalid device context", and cloning makes it work. Idk why. Triton 2.1.0 works.
        if not ctx.interleaved and not ctx.inplace:
            do = do.clone()
        grad_x = apply_rotary(
            do,
            cos,
            sin,
            seqlen_offsets=seqlen_offsets,
            cu_seqlens=cu_seqlens,
            max_seqlen=ctx.max_seqlen,
            interleaved=ctx.interleaved,
            inplace=ctx.inplace,
            conjugate=True,
        )
        # One gradient slot per forward argument after ctx.
        return grad_x, None, None, None, None, None, None, None
def apply_rotary_emb_triton(
    x,
    cos,
    sin,
    interleaved=False,
    inplace=False,
    seqlen_offsets: Union[int, torch.Tensor] = 0,
    cu_seqlens: Optional[torch.Tensor] = None,
    max_seqlen: Optional[int] = None,
):
    """Apply rotary embedding to the first rotary_dim channels of ``x``.

    Arguments:
        x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
            else (total_seqlen, nheads, headdim)
        cos, sin: (seqlen_rotary, rotary_dim / 2)
        interleaved: if True, rotate pairs of even and odd dimensions (GPT-J
            style) instead of 1st half and 2nd half (GPT-NeoX style).
        inplace: if True, apply rotary embedding in-place.
        seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted
            by this amount. Most commonly used in inference with a KV cache.
        cu_seqlens: (batch + 1,) or None
        max_seqlen: int
    Return:
        out: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
            else (total_seqlen, nheads, headdim)

    ``rotary_dim`` must be <= headdim. Gradients flow through the custom
    autograd Function above.
    """
    # Function.apply takes positional arguments only.
    fn_args = (x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen)
    return ApplyRotaryEmb.apply(*fn_args)
# Select the rotary implementation for this build of PyTorch: the fused
# Triton kernel on NVIDIA/CUDA, the pure-PyTorch fallback elsewhere.
# (Fix: the final assignment had been corrupted by a stray trailing token
# from a file-concatenation artifact.)
if torch.cuda.is_available() and torch.version.hip:
    # do something specific for HIP
    apply_rotary_emb = apply_rotary_emb_torch
elif torch.cuda.is_available() and torch.version.cuda:
    # do something specific for CUDA
    apply_rotary_emb = apply_rotary_emb_triton
else:
    # do something for CPU
    apply_rotary_emb = apply_rotary_emb_torch
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/kernel/rotary.py",
"license": "MIT License",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/unilm:ReSA/llm/kernel/tilelang_attention_with_kv_cache.py | # Copyright (c) Tile-AI Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.functional as F
import tilelang
from tilelang.autotuner import *
import tilelang.language as T
import argparse
import time
import math
def num_splits_heuristic(total_mblocks, num_SMs, num_n_blocks, num_m_blocks, size_one_kv_head,
                         is_causal_or_local, max_splits):
    """Choose a KV-split count that balances GPU occupancy and memory traffic.

    Args:
        total_mblocks: total number of m-blocks in the launch.
        num_SMs: number of streaming multiprocessors on the GPU.
        num_n_blocks: number of n-blocks (KV-direction tiles).
        num_m_blocks: number of m-blocks.
        size_one_kv_head: size of one KV head in bytes.
        is_causal_or_local: whether the attention is causal or local
            (disables the L2-pressure split).
        max_splits: maximum number of allowed splits.

    Returns:
        The chosen number of splits (always >= 1).
    """
    size_l2 = 50 * 1024 * 1024  # assumed L2 cache capacity (50 MB)
    if total_mblocks >= 0.8 * num_SMs:
        # Occupancy is already high; split only to relieve L2 pressure,
        # and only when there is abundant m-block parallelism.
        if (size_one_kv_head > size_l2 and num_m_blocks >= num_SMs * 2
                and not is_causal_or_local):
            return min((size_one_kv_head + size_l2 - 1) // size_l2, max_splits)
        return 1
    if num_n_blocks <= 4:
        return 1  # too little KV-direction work for a split to pay off
    upper = min(max_splits, num_SMs, num_n_blocks)
    # Wave efficiency for each candidate split count: fraction of the last
    # wave of thread blocks that does useful work.
    efficiencies = [
        (total_mblocks * s / num_SMs) / math.ceil(total_mblocks * s / num_SMs)
        for s in range(1, upper + 1)
    ]
    best = max(efficiencies, default=0.0)
    # Smallest split count achieving at least 95% of the best efficiency.
    for s, eff in enumerate(efficiencies, 1):
        if eff >= 0.95 * best:
            return s
    return 1
def flashattn(heads, heads_kv, dim, dim_v):
    """Build a tilelang split-KV flash-attention kernel factory (dense path).

    Returns ``kernel_func(batch, block_N, block_H, block_M, num_split,
    num_stages, threads, seqlen_q, max_cache_seqlen)``, which returns a
    ``@T.prim_func`` combining a split attention pass and a combine pass.
    """
    # Softmax uses base-2 exponentials, so fold log2(e) into the scale.
    scale = (1.0 / dim)**0.5 * 1.44269504  # log2(e)
    dtype = "bfloat16"
    accum_dtype = "float"
    kv_group_num = heads // heads_kv  # query heads per KV head (GQA group)

    def kernel_func(batch, block_N, block_H, block_M, num_split, num_stages, threads, seqlen_q, max_cache_seqlen):
        shape_q = [batch, seqlen_q, heads, dim]
        shape_k = [batch, max_cache_seqlen, heads_kv, dim]
        shape_v = [batch, max_cache_seqlen, heads_kv, dim_v]
        shape_o = [batch, seqlen_q, heads, dim_v]
        part_shape = [batch, seqlen_q, heads, num_split, dim_v]
        num_block_M = (seqlen_q + block_M - 1) // block_M

        @T.macro
        def flash_attn_split(
                Q: T.Tensor(shape_q, dtype),
                K: T.Tensor(shape_k, dtype),
                V: T.Tensor(shape_v, dtype),
                cache_seqlens: T.Tensor([batch], "int32"),
                glse: T.Tensor([batch, seqlen_q, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
        ):
            # One CTA per (batch x query-block, KV head, split).
            with T.Kernel(
                    batch * num_block_M, heads_kv, num_split, threads=threads) as (bx, by, bz):
                Q_shared = T.alloc_shared([block_M * block_H, dim], dtype)
                K_shared = T.alloc_shared([block_N, dim], dtype)
                V_shared = T.alloc_shared([block_N, dim_v], dtype)
                acc_s = T.alloc_fragment([block_M * block_H, block_N], accum_dtype)
                acc_s_cast = T.alloc_fragment([block_M * block_H, block_N], dtype)
                acc_o = T.alloc_fragment([block_M * block_H, dim_v], accum_dtype)
                scores_max = T.alloc_fragment([block_M * block_H], accum_dtype)
                scores_max_prev = T.alloc_fragment([block_M * block_H], accum_dtype)
                scores_scale = T.alloc_fragment([block_M * block_H], accum_dtype)
                scores_sum = T.alloc_fragment([block_M * block_H], accum_dtype)
                logsum = T.alloc_fragment([block_M * block_H], accum_dtype)
                bid = T.floordiv(bx, num_block_M)
                mid = T.floormod(bx, num_block_M) * block_M
                hid = by
                sid = bz
                # Gather this KV head's group of query heads into shared memory.
                for i, d in T.Parallel(block_M * block_H, dim):
                    i_m = T.floordiv(i, block_H)
                    i_h = T.floormod(i, block_H)
                    if i_h < kv_group_num:
                        Q_shared[i, d] = Q[bid, mid + i_m, hid * kv_group_num + i_h, d]
                # T.copy(Q[bid, mid:(mid + block_M), hid * kv_group_num : (hid + 1) * kv_group_num, :], Q_shared)
                T.fill(acc_o, 0)
                T.fill(logsum, 0)
                T.fill(scores_max, -T.infinity(accum_dtype))
                # num_blocks = actual_num_blocks[bid]
                num_blocks = (cache_seqlens[bid] + block_N - 1) // block_N
                # Distribute KV blocks across splits; the first `remaining_blocks`
                # splits take one extra block each.
                blocks_per_split = T.floordiv(num_blocks, num_split)
                remaining_blocks = T.floormod(num_blocks, num_split)
                loop_range = (blocks_per_split + T.if_then_else(sid < remaining_blocks, 1, 0))
                start = blocks_per_split * sid + T.min(sid, remaining_blocks)
                for k in T.Pipelined(loop_range, num_stages=num_stages):
                    i_s = start + k
                    T.copy(
                        K[bid, i_s * block_N: (i_s + 1) * block_N, hid, :], K_shared)
                    T.clear(acc_s)
                    T.gemm(
                        Q_shared,
                        K_shared,
                        acc_s,
                        transpose_B=True,
                        policy=T.GemmWarpPolicy.FullRow)
                    # Causal mask relative to each query row's absolute position.
                    for i, j in T.Parallel(block_M * block_H, block_N):
                        i_m = T.floordiv(i, block_H)
                        i_h = T.floormod(i, block_H)
                        acc_s[i_m * block_H + i_h, j] = T.if_then_else(i_s * block_N + j > cache_seqlens[bid] - seqlen_q + (mid + i_m), -T.infinity(accum_dtype), acc_s[i_m * block_H + i_h, j])
                    # Online-softmax rescaling (running max / running sum).
                    T.copy(scores_max, scores_max_prev)
                    T.fill(scores_max, -T.infinity(accum_dtype))
                    T.reduce_max(acc_s, scores_max, dim=1, clear=False)
                    for i in T.Parallel(block_M * block_H):
                        scores_max[i] = T.if_then_else(scores_max[i] > scores_max_prev[i], scores_max[i], scores_max_prev[i])
                        scores_scale[i] = T.exp2(scores_max_prev[i] * scale - scores_max[i] * scale)
                    for i, j in T.Parallel(block_M * block_H, block_N):
                        acc_s[i, j] = T.exp2(acc_s[i, j] * scale - scores_max[i] * scale)
                    T.reduce_sum(acc_s, scores_sum, dim=1)
                    for i in T.Parallel(block_M * block_H):
                        logsum[i] = logsum[i] * scores_scale[i] + scores_sum[i]
                    T.copy(acc_s, acc_s_cast)
                    for i, j in T.Parallel(block_M * block_H, dim_v):
                        acc_o[i, j] *= scores_scale[i]
                    T.copy(
                        V[bid, i_s * block_N: (i_s + 1) * block_N, hid, :], V_shared)
                    T.gemm(acc_s_cast, V_shared, acc_o, policy=T.GemmWarpPolicy.FullRow)
                # Epilogue: normalize this split's output and store its lse.
                for i, j in T.Parallel(block_M * block_H, dim_v):
                    acc_o[i, j] /= logsum[i]
                for i in T.Parallel(block_M * block_H):
                    logsum[i] = T.log2(logsum[i]) + scores_max[i] * scale
                for i in T.Parallel(block_M * block_H):
                    i_m = T.floordiv(i, block_H)
                    i_h = T.floormod(i, block_H)
                    if i_h < kv_group_num:
                        glse[bid, mid + i_m, hid * kv_group_num + i_h, sid] = logsum[i]
                for i, v in T.Parallel(block_M * block_H, dim_v):
                    i_m = T.floordiv(i, block_H)
                    i_h = T.floormod(i, block_H)
                    if i_h < kv_group_num:
                        Output_partial[bid, mid + i_m, hid * kv_group_num + i_h, sid, v] = acc_o[i, v]

        @T.macro
        def combine(
                glse: T.Tensor([batch, seqlen_q, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
                Output: T.Tensor(shape_o, dtype),
        ):
            # Merge the per-split partial outputs with a log-sum-exp reduction.
            with T.Kernel(heads, seqlen_q, batch, threads=128) as (bx, by, bz):
                po_local = T.alloc_fragment([dim_v], accum_dtype)
                o_accum_local = T.alloc_fragment([dim_v], accum_dtype)
                lse_local_split = T.alloc_local([1], accum_dtype)
                lse_logsum_local = T.alloc_local([1], accum_dtype)
                lse_max_local = T.alloc_local([1], accum_dtype)
                scale_local = T.alloc_local([1], accum_dtype)
                max_split = T.alloc_local([1], "int32")
                T.annotate_layout({
                    lse_logsum_local: T.Fragment(lse_logsum_local.shape, forward_thread_fn=lambda i: i),
                })
                T.clear(lse_logsum_local)
                T.clear(o_accum_local)
                lse_max_local[0] = -T.infinity(accum_dtype)
                # NOTE(review): a split with lse == 0 is treated as empty and
                # skipped — verify this sentinel matches the split pass output.
                for k in T.serial(num_split):
                    lse_local_split[0] = glse[bz, by, bx, k]
                    if (lse_local_split[0] != 0):
                        max_split[0] = k
                        lse_max_local[0] = T.max(lse_max_local[0], glse[bz, by, bx, k])
                for k in T.Pipelined(num_split, num_stages=1):
                    if k <= max_split[0]:
                        lse_local_split[0] = glse[bz, by, bx, k]
                        lse_logsum_local[0] += T.exp2(lse_local_split[0] - lse_max_local[0])
                lse_logsum_local[0] = T.log2(lse_logsum_local[0]) + lse_max_local[0]
                for k in T.serial(num_split):
                    if k <= max_split[0]:
                        for i in T.Parallel(dim_v):
                            po_local[i] = Output_partial[bz, by, bx, k, i]
                        lse_local_split[0] = glse[bz, by, bx, k]
                        scale_local[0] = T.exp2(lse_local_split[0] - lse_logsum_local[0])
                        for i in T.Parallel(dim_v):
                            o_accum_local[i] += po_local[i] * scale_local[0]
                for i in T.Parallel(dim_v):
                    Output[bz, by, bx, i] = o_accum_local[i]

        @T.prim_func
        def main(
                Q: T.Tensor(shape_q, dtype),
                K: T.Tensor(shape_k, dtype),
                V: T.Tensor(shape_v, dtype),
                cache_seqlens: T.Tensor([batch], "int32"),
                glse: T.Tensor([batch, seqlen_q, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
                Output: T.Tensor(shape_o, dtype),
        ):
            flash_attn_split(Q, K, V, cache_seqlens, glse, Output_partial)
            combine(glse, Output_partial, Output)

        return main

    return kernel_func
class AttentionWithKVCache(torch.nn.Module):
    """Split-KV flash attention over a padded KV cache, compiled with tilelang.

    The kernel is compiled once in ``__init__`` with symbolic batch /
    num_split / max_cache_seqlen; ``forward`` picks a concrete split count
    per call via ``num_splits_heuristic``.
    """

    def __init__(self, heads, heads_kv, dim, dim_v, seqlen_q):
        super(AttentionWithKVCache, self).__init__()
        self.heads = heads
        self.heads_kv = heads_kv
        self.dim = dim
        self.dim_v = dim_v
        # Tile sizes: block_N along the KV sequence; block_H pads one GQA
        # group to a power of two; block_M covers all query positions.
        self.block_N = 32
        self.block_H = tilelang.next_power_of_2(heads // heads_kv)
        self.block_M = seqlen_q
        program = flashattn(heads, heads_kv, dim, dim_v)(
            batch=T.symbolic("batch"),
            block_N=self.block_N,
            block_H=self.block_H,
            block_M=self.block_M,
            num_split=T.symbolic("num_split"),
            num_stages=2,
            threads=128,
            seqlen_q=seqlen_q,
            max_cache_seqlen=T.symbolic("max_cache_seqlen"),
        )
        self.kernel = tilelang.compile(
            program,
            out_idx=-1,
            target='cuda',
            execution_backend="cython"
        )
        # SM count feeds the split heuristic in forward().
        props = torch.cuda.get_device_properties(torch.device("cuda:0"))
        self.num_sm = props.multi_processor_count

    def forward(self, query, key, value, cache_seqlens):
        """Run the compiled attention kernel.

        Args (shapes assumed to match the compiled program — see __main__):
            query: (batch, seqlen_q, heads, dim)
            key:   (batch, max_cache_seqlen, heads_kv, dim)
            value: (batch, max_cache_seqlen, heads_kv, dim_v)
            cache_seqlens: (batch,) int32 valid length of each KV cache.
        Returns:
            Attention output of shape (batch, seqlen_q, heads, dim_v).
        """
        batch = query.shape[0]
        seqlen_q = query.shape[1]
        heads = self.heads
        heads_kv = self.heads_kv
        dim = self.dim
        dim_v = self.dim_v
        # Compute static scheduling parameters
        num_m_blocks = (seqlen_q + self.block_M - 1) // self.block_M
        num_n_blocks = (cache_seqlens.max().item() + self.block_N - 1) // self.block_N
        # 2 bytes per element (bf16) for one head's K and V.
        size_one_kv_head = num_n_blocks * self.block_N * (dim + dim_v) * 2
        total_mblocks = batch * heads_kv * num_m_blocks
        # num_sm = 132
        num_sm = self.num_sm
        num_split = num_splits_heuristic(
            total_mblocks, num_sm, num_n_blocks, num_m_blocks,
            size_one_kv_head, is_causal_or_local=True, max_splits=16
        )
        # Per-split partials; merged by the kernel's combine pass.
        glse = torch.empty((batch, seqlen_q, heads, num_split), dtype=torch.float32, device='cuda')
        output_partial = torch.empty((batch, seqlen_q, heads, num_split, dim_v), dtype=torch.float32, device='cuda')
        output = self.kernel(
            query, key, value, cache_seqlens,
            glse, output_partial
        )
        return output
def ref_program_fa(query, key, value, cache_seqlens):
    """Dense reference via FlashAttention's kv-cache kernel (parity/latency)."""
    # latency reference
    # from flash_attn_interface import flash_attn_with_kvcache, flash_attn_func # fa3
    from flash_attn import flash_attn_with_kvcache, flash_attn_func  # fa2
    return flash_attn_with_kvcache(query, key, value, cache_seqlens=cache_seqlens)
def debug(name, expect, actual, atol=1e-3, rtol=1e-3):
    """Print whether two tensors match within tolerance; on mismatch, also
    report error statistics and the position of the largest absolute error."""
    matches = torch.allclose(expect, actual, atol=atol, rtol=rtol)
    print(f"{name} all_close={matches}")
    if matches:
        return
    # print(expect[3, 28])
    # print(actual[3, 28])
    abs_err = (expect - actual).abs()
    worst = abs_err.max().item()
    print(f"all_close={matches}, max={worst}, min={abs_err.min().item()}, mean={abs_err.mean().item()}")
    # First position attaining the maximum error.
    worst_pos = tuple(torch.nonzero(abs_err == worst)[0].tolist())
    print(f"Index: {worst_pos}, expect: {expect[worst_pos]}, actual: {actual[worst_pos]}")
if __name__ == "__main__":
    # Benchmark/parity driver: compare the tilelang kernel against the
    # FlashAttention reference on random data (requires CUDA + flash_attn).
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch', type=int, default=4, help='batch size')
    parser.add_argument('--seqlen_q', type=int, default=32, help='sequence length')
    parser.add_argument('--heads', type=int, default=28, help='heads')
    parser.add_argument('--heads_kv', type=int, default=4, help='heads_kv')
    parser.add_argument('--max_cache_seqlen', type=int, default=65536, help='kvcache sequence length')
    parser.add_argument('--dim', type=int, default=128, help='dim')
    parser.add_argument('--dim_v', type=int, default=128, help='dim_v')
    parser.add_argument('--block_size', type=int, default=32, help='block_size')
    parser.add_argument('--load_from_file', type=str, default=None, help='load from file')
    args = parser.parse_args()
    batch, seqlen_q, heads, heads_kv, max_cache_seqlen, dim, dim_v = args.batch, args.seqlen_q, args.heads, args.heads_kv, args.max_cache_seqlen, args.dim, args.dim_v
    block_size = args.block_size
    dtype = torch.bfloat16
    # Random inputs; cache lengths are drawn close to the cache capacity.
    Q = torch.randn((batch, seqlen_q, heads, dim), dtype=dtype, device='cuda')
    K = torch.randn((batch, max_cache_seqlen, heads_kv, dim), dtype=dtype, device='cuda')
    V = torch.randn((batch, max_cache_seqlen, heads_kv, dim_v), dtype=dtype, device='cuda')
    cache_seqlens = torch.randint(max_cache_seqlen - 32, max_cache_seqlen, (batch,), device='cuda', dtype=torch.int32)
    print("cache_seqlens: ", cache_seqlens)
    # parity reference
    ref = ref_program_fa(Q, K, V, cache_seqlens)
    # ref = ref_program_triton(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    # out = kernel(Q, K, V, block_indices, cache_seqlens, actual_num_blocks, glse, Output_partial)
    # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
    sparse_kernel = AttentionWithKVCache(heads, heads_kv, dim, dim_v, seqlen_q)
    out = sparse_kernel(Q, K, V, cache_seqlens)
    debug("output", ref, out, atol=1e-3, rtol=1e-3)
    ## latency reference
    # Warm up 10 iterations, then report mean time over 100 runs (ms).
    for i in range(10):
        ref = ref_program_fa(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        ref = ref_program_fa(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    print("dense time: ", (time.time() - start) / 100*1000)
    for i in range(10):
        # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
        out = sparse_kernel(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
        out = sparse_kernel(Q, K, V, cache_seqlens)
    torch.cuda.synchronize()
    print("sparse time: ", (time.time() - start) / 100*1000)
| {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/kernel/tilelang_attention_with_kv_cache.py",
"license": "MIT License",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/unilm:ReSA/llm/kernel/tilelang_sparse_decoding.py | # Copyright (c) Tile-AI Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.functional as F
import tilelang
from tilelang.autotuner import *
import tilelang.language as T
from einops import rearrange, einsum
import argparse
import time
import math
def num_splits_heuristic(total_mblocks, num_SMs, num_n_blocks, num_m_blocks, size_one_kv_head,
                         is_causal_or_local, max_splits):
    """Pick how many ways to split the KV dimension for best GPU occupancy.

    Args:
        total_mblocks: total m-blocks across the launch.
        num_SMs: streaming-multiprocessor count of the GPU.
        num_n_blocks: n-block (KV tile) count.
        num_m_blocks: m-block count.
        size_one_kv_head: bytes of K+V for a single KV head.
        is_causal_or_local: True for causal/local attention (no L2 split).
        max_splits: hard cap on the result.

    Returns:
        Optimal number of splits, at least 1.
    """
    L2_BYTES = 50 * 1024 * 1024  # assumed L2 cache size (50 MB)
    if total_mblocks >= 0.8 * num_SMs:
        # Enough m-blocks to nearly fill the SMs: keep one split unless a
        # single head's KV overflows L2 and m-parallelism is abundant.
        kv_exceeds_l2 = size_one_kv_head > L2_BYTES
        plenty_of_m = num_m_blocks >= num_SMs * 2
        if kv_exceeds_l2 and plenty_of_m and not is_causal_or_local:
            return min((size_one_kv_head + L2_BYTES - 1) // L2_BYTES, max_splits)
        return 1
    # Splitting a handful of n-blocks is never worthwhile.
    if num_n_blocks <= 4:
        return 1
    limit = min(max_splits, num_SMs, num_n_blocks)
    wave_eff = []
    for split in range(1, limit + 1):
        waves = total_mblocks * split / num_SMs
        wave_eff.append(waves / math.ceil(waves))
    best = max(wave_eff, default=0.0)
    # Take the fewest splits that still reach 95% of the best efficiency.
    for split, eff in enumerate(wave_eff, start=1):
        if eff >= 0.95 * best:
            return split
    return 1
def flashattn(heads, heads_kv, dim, dim_v):
    """Build a tilelang block-sparse split-KV flash-attention kernel factory.

    Returns ``kernel_func(batch, block_N, block_H, num_split, num_stages,
    threads, max_cache_seqlen, max_selected_blocks)``, which returns a
    ``@T.prim_func`` combining a sparse split attention pass and a combine
    pass. Only KV blocks listed in ``block_indices`` are attended to.
    """
    # Softmax uses base-2 exponentials, so fold log2(e) into the scale.
    scale = (1.0 / dim)**0.5 * 1.44269504  # log2(e)
    dtype = "float16"
    accum_dtype = "float"
    kv_group_num = heads // heads_kv  # query heads per KV head (GQA group)

    def kernel_func(batch, block_N, block_H, num_split, num_stages, threads, max_cache_seqlen, max_selected_blocks):
        shape_q = [batch, heads, dim]
        shape_k = [batch, max_cache_seqlen, heads_kv, dim]
        shape_v = [batch, max_cache_seqlen, heads_kv, dim_v]
        shape_indices = [batch, heads_kv, max_selected_blocks]
        shape_o = [batch, heads, dim_v]
        part_shape = [batch, heads, num_split, dim_v]
        valid_block_H = min(block_H, kv_group_num)

        @T.macro
        def flash_attn_split(
                Q: T.Tensor(shape_q, dtype),
                K: T.Tensor(shape_k, dtype),
                V: T.Tensor(shape_v, dtype),
                block_indices: T.Tensor(shape_indices, "int32"),
                cache_seqlens: T.Tensor([batch], "int32"),
                # actual_num_blocks: T.Tensor([batch], "int32"),
                glse: T.Tensor([batch, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
        ):
            # One CTA per (batch, head group, split).
            with T.Kernel(
                    batch, heads // valid_block_H, num_split, threads=threads) as (bx, by, bz):
                Q_shared = T.alloc_shared([block_H, dim], dtype)
                K_shared = T.alloc_shared([block_N, dim], dtype)
                V_shared = T.alloc_shared([block_N, dim_v], dtype)
                # O_shared = T.alloc_shared([valid_block_H, dim_v], dtype)
                acc_s = T.alloc_fragment([block_H, block_N], accum_dtype)
                acc_s_cast = T.alloc_fragment([block_H, block_N], dtype)
                acc_o = T.alloc_fragment([block_H, dim_v], accum_dtype)
                scores_max = T.alloc_fragment([block_H], accum_dtype)
                scores_max_prev = T.alloc_fragment([block_H], accum_dtype)
                scores_scale = T.alloc_fragment([block_H], accum_dtype)
                scores_sum = T.alloc_fragment([block_H], accum_dtype)
                logsum = T.alloc_fragment([block_H], accum_dtype)
                has_valid_block = T.alloc_var("bool")
                # num_blocks = T.alloc_local([1], "int32")
                bid = bx
                hid = by
                sid = bz
                cur_kv_head = hid // (kv_group_num // valid_block_H)
                T.copy(Q[bid, hid * valid_block_H:hid * valid_block_H + block_H, :], Q_shared)
                T.fill(acc_o, 0)
                T.fill(logsum, 0)
                T.fill(scores_max, -T.infinity(accum_dtype))
                # num_blocks = actual_num_blocks[bid]
                num_blocks = max_selected_blocks
                # Distribute selected blocks across splits; the first
                # `remaining_blocks` splits take one extra block each.
                blocks_per_split = T.floordiv(num_blocks, num_split)
                remaining_blocks = T.floormod(num_blocks, num_split)
                loop_range = (blocks_per_split + T.if_then_else(sid < remaining_blocks, 1, 0))
                start = blocks_per_split * sid + T.min(sid, remaining_blocks)
                has_valid_block = False
                # if (start < num_blocks):
                for k in T.Pipelined(loop_range, num_stages=num_stages):
                    # Entries equal to -1 in block_indices are padding; skip them.
                    i_s = block_indices[bid, cur_kv_head, start + k]
                    if i_s >= 0:
                        has_valid_block = True
                        T.copy(
                            K[bid, i_s * block_N: (i_s + 1) * block_N,
                              cur_kv_head, :], K_shared)
                        T.clear(acc_s)
                        T.gemm(
                            Q_shared,
                            K_shared,
                            acc_s,
                            transpose_B=True,
                            policy=T.GemmWarpPolicy.FullRow)
                        # if k == 0: # assume block_indices is sorted in reverse order, otherwise, remove this if condition
                        # Mask out positions beyond the sequence's valid length.
                        for i, j in T.Parallel(block_H, block_N):
                            acc_s[i, j] = T.if_then_else(i_s * block_N + j >= cache_seqlens[bid], -T.infinity(accum_dtype), acc_s[i, j])
                        # Online-softmax rescaling (running max / running sum).
                        T.copy(scores_max, scores_max_prev)
                        T.fill(scores_max, -T.infinity(accum_dtype))
                        T.reduce_max(acc_s, scores_max, dim=1, clear=False)
                        for i in T.Parallel(block_H):
                            scores_max[i] = T.if_then_else(scores_max[i] > scores_max_prev[i], scores_max[i], scores_max_prev[i])
                            scores_scale[i] = T.exp2(scores_max_prev[i] * scale - scores_max[i] * scale)
                        for i, j in T.Parallel(block_H, block_N):
                            acc_s[i, j] = T.exp2(acc_s[i, j] * scale - scores_max[i] * scale)
                        T.reduce_sum(acc_s, scores_sum, dim=1)
                        for i in T.Parallel(block_H):
                            logsum[i] = logsum[i] * scores_scale[i] + scores_sum[i]
                        T.copy(acc_s, acc_s_cast)
                        for i, j in T.Parallel(block_H, dim_v):
                            acc_o[i, j] *= scores_scale[i]
                        T.copy(
                            V[bid, i_s * block_N: (i_s + 1) * block_N,
                              cur_kv_head, :], V_shared)
                        T.gemm(acc_s_cast, V_shared, acc_o, policy=T.GemmWarpPolicy.FullRow)
                if has_valid_block:
                    for i, j in T.Parallel(block_H, dim_v):
                        acc_o[i, j] /= logsum[i]
                    for i in T.Parallel(block_H):
                        logsum[i] = T.log2(logsum[i]) + scores_max[i] * scale
                # Splits with no valid block leave logsum == 0 and acc_o == 0;
                # the combine pass uses lse == 0 to detect and skip them.
                for i in T.Parallel(block_H):
                    if i < valid_block_H:
                        glse[bid, hid * valid_block_H + i, sid] = logsum[i]
                for i, j in T.Parallel(block_H, dim_v):
                    if i < valid_block_H:
                        Output_partial[bid, hid * valid_block_H + i, sid, j] = acc_o[i, j]

        @T.macro
        def combine(
                glse: T.Tensor([batch, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
                Output: T.Tensor(shape_o, dtype),
        ):
            # Merge the per-split partial outputs with a log-sum-exp reduction.
            with T.Kernel(heads, batch, threads=128) as (by, bz):
                po_local = T.alloc_fragment([dim_v], accum_dtype)
                o_accum_local = T.alloc_fragment([dim_v], accum_dtype)
                lse_local_split = T.alloc_local([1], accum_dtype)
                lse_logsum_local = T.alloc_local([1], accum_dtype)
                lse_max_local = T.alloc_local([1], accum_dtype)
                scale_local = T.alloc_local([1], accum_dtype)
                max_split = T.alloc_local([1], "int32")
                T.annotate_layout({
                    lse_logsum_local: T.Fragment(lse_logsum_local.shape, forward_thread_fn=lambda i: i),
                })
                T.clear(lse_logsum_local)
                T.clear(o_accum_local)
                lse_max_local[0] = -T.infinity(accum_dtype)
                # lse == 0 marks an empty split (no valid block processed).
                for k in T.serial(num_split):
                    lse_local_split[0] = glse[bz, by, k]
                    if (lse_local_split[0] != 0):
                        max_split[0] = k
                        lse_max_local[0] = T.max(lse_max_local[0], glse[bz, by, k])
                for k in T.Pipelined(num_split, num_stages=1):
                    if k <= max_split[0]:
                        lse_local_split[0] = glse[bz, by, k]
                        lse_logsum_local[0] += T.exp2(lse_local_split[0] - lse_max_local[0])
                lse_logsum_local[0] = T.log2(lse_logsum_local[0]) + lse_max_local[0]
                for k in T.serial(num_split):
                    if k <= max_split[0]:
                        for i in T.Parallel(dim_v):
                            po_local[i] = Output_partial[bz, by, k, i]
                        lse_local_split[0] = glse[bz, by, k]
                        scale_local[0] = T.exp2(lse_local_split[0] - lse_logsum_local[0])
                        for i in T.Parallel(dim_v):
                            o_accum_local[i] += po_local[i] * scale_local[0]
                for i in T.Parallel(dim_v):
                    Output[bz, by, i] = o_accum_local[i]

        @T.prim_func
        def main(
                Q: T.Tensor(shape_q, dtype),
                K: T.Tensor(shape_k, dtype),
                V: T.Tensor(shape_v, dtype),
                block_indices: T.Tensor(shape_indices, "int32"),
                cache_seqlens: T.Tensor([batch], "int32"),
                # actual_num_blocks: T.Tensor([batch], "int32"),
                glse: T.Tensor([batch, heads, num_split], accum_dtype),
                Output_partial: T.Tensor(part_shape, accum_dtype),
                Output: T.Tensor(shape_o, dtype),
        ):
            # flash_attn_split(Q, K, V, block_indices, cache_seqlens, actual_num_blocks, glse, Output_partial)
            flash_attn_split(Q, K, V, block_indices, cache_seqlens, glse, Output_partial)
            combine(glse, Output_partial, Output)

        return main

    return kernel_func
class SparseFlashAttn(torch.nn.Module):
    """Block-sparse split-KV flash attention for GQA decoding (tilelang).

    The kernel is compiled once in ``__init__`` with symbolic batch /
    num_split / max_cache_seqlen / max_selected_blocks; ``forward`` chooses a
    concrete split count per call via ``num_splits_heuristic``.
    """

    def __init__(self, heads, heads_kv, dim, dim_v, block_size):
        super(SparseFlashAttn, self).__init__()
        self.heads = heads
        self.heads_kv = heads_kv
        self.dim = dim
        self.dim_v = dim_v
        self.block_size = block_size
        # block_H tiles the GQA query-head group dimension.
        self.block_H = 64
        program = flashattn(heads, heads_kv, dim, dim_v)(
            batch=T.symbolic("batch"),
            block_N=block_size,
            block_H=self.block_H,
            num_split=T.symbolic("num_split"),
            num_stages=2,
            threads=128,
            max_cache_seqlen=T.symbolic("max_cache_seqlen"),
            max_selected_blocks=T.symbolic("max_selected_blocks")
        )
        self.kernel = tilelang.compile(
            program,
            out_idx=-1,
            target='cuda',
            execution_backend="cython"
        )
        # SM count feeds the split heuristic in forward().
        props = torch.cuda.get_device_properties(torch.device("cuda:0"))
        self.num_sm = props.multi_processor_count

    def forward(self, query, key, value, block_indices, cache_seqlens):
        """Run the compiled sparse decode kernel.

        Args (shapes per sparse_gqa_decode_varlen_indice's documented contract):
            query: (batch, heads, dim)
            key:   (batch, max_cache_seqlen, heads_kv, dim)
            value: (batch, max_cache_seqlen, heads_kv, dim_v)
            block_indices: (batch, heads_kv, max_selected_blocks) int32,
                selected KV block indices, -1 for padding.
            cache_seqlens: (batch,) int32 valid length of each KV cache.
        Returns:
            Attention output of shape (batch, heads, dim_v).
        """
        batch = query.shape[0]
        heads = self.heads
        heads_kv = self.heads_kv
        dim = self.dim
        dim_v = self.dim_v
        block_size = self.block_size
        max_selected_blocks = block_indices.shape[-1]
        # Compute static scheduling parameters
        num_m_blocks = 1 * (heads // heads_kv + self.block_H - 1) // self.block_H
        num_n_blocks = max_selected_blocks
        # 2 bytes per element (fp16) for one head's selected K and V blocks.
        size_one_kv_head = max_selected_blocks * block_size * (dim + dim_v) * 2
        total_mblocks = batch * heads_kv * num_m_blocks
        # num_sm = 132
        num_sm = self.num_sm
        num_split = num_splits_heuristic(
            total_mblocks, num_sm, num_n_blocks, num_m_blocks,
            size_one_kv_head, is_causal_or_local=True, max_splits=16
        )
        # Function to compile
        # def compute_actual_num_blocks(block_indices):
        #     actual_num_blocks = torch.sum(block_indices != -1, dim=-1).to(torch.int32)
        #     actual_num_blocks = actual_num_blocks[:, 0]  # [batch]
        #     return actual_num_blocks
        # compiled_fn = torch.compile(compute_actual_num_blocks)
        # actual_num_blocks = compiled_fn(block_indices)
        # Per-split partials; merged by the kernel's combine pass.
        glse = torch.empty((batch, heads, num_split), dtype=torch.float32, device='cuda')
        output_partial = torch.empty((batch, heads, num_split, dim_v), dtype=torch.float32, device='cuda')
        # output = self.kernel(
        #     query, key, value, block_indices, cache_seqlens,
        #     actual_num_blocks, glse, output_partial
        # )
        output = self.kernel(
            query, key, value, block_indices, cache_seqlens,
            glse, output_partial
        )
        return output
def sparse_gqa_decode_varlen_indice(query, key, value, block_indices, cache_seqlens, max_cache_seqlen, block_size):
    """One-shot block-sparse GQA decode: compiles and runs the tilelang kernel.

    Args:
        query: [batch, heads, dim]
        key: [batch, max_cache_seqlen, heads_kv, dim]
        value: [batch, max_cache_seqlen, heads_kv, dim_v]
        block_indices: [batch, heads_kv, max_selected_blocks], indices of selected blocks, -1 for padding
        cache_seqlens: [batch], sequence lengths of the kvcache
        max_cache_seqlen: maximum sequence length of kvcache (unused; kept for
            interface compatibility — the kernel uses key.shape via symbolics)
        block_size: KV block size
    Returns:
        output: [batch, heads, dim_v]
    """
    batch, heads, dim = query.shape
    heads_kv = key.shape[2]
    dim_v = value.shape[-1]
    max_selected_blocks = block_indices.shape[-1]
    block_H = 64
    # Choose the split count with the shared occupancy heuristic.
    num_m_blocks = 1 * (heads // heads_kv + block_H - 1) // block_H
    num_n_blocks = max_selected_blocks
    size_one_kv_head = max_selected_blocks * block_size * (dim + dim_v) * 2  # fp16 K+V bytes
    total_mblocks = batch * heads_kv * num_m_blocks
    num_sm = 132  # assumed SM count; SparseFlashAttn queries the device instead
    num_split = num_splits_heuristic(total_mblocks, num_sm, num_n_blocks, num_m_blocks,
                                     size_one_kv_head, is_causal_or_local=True, max_splits=128)
    # BUG FIX: flashattn takes (heads, heads_kv, dim, dim_v); `batch` belongs
    # to the inner kernel_func. The old call passed 5 positional args to
    # flashattn and omitted `batch` from kernel_func -> TypeError at runtime.
    program = flashattn(heads, heads_kv, dim, dim_v)(
        batch=batch,
        block_N=block_size,
        block_H=block_H,
        num_split=T.symbolic("num_split"),
        num_stages=2,
        threads=128,
        max_cache_seqlen=T.symbolic("max_cache_seqlen"),
        max_selected_blocks=T.symbolic("max_selected_blocks"))
    # Per-split partials; merged by the kernel's combine pass.
    glse = torch.empty((batch, heads, num_split), dtype=torch.float32, device='cuda')
    Output_partial = torch.empty((batch, heads, num_split, dim_v), dtype=torch.float32, device='cuda')
    kernel = tilelang.compile(program, out_idx=-1, target='cuda', execution_backend="cython",
                              pass_configs={"tl.config_index_bitwidth": 64})
    # print(kernel.get_kernel_source())
    output = kernel(query, key, value, block_indices, cache_seqlens, glse, Output_partial)
    return output
def ref_program_torch(query, key, value, block_indices, cache_seqlens, max_cache_seqlen, num_blocks, block_size):
    """Pure-torch parity reference for block-sparse GQA decoding.

    Computes full dense attention scores, then masks out (a) every KV block
    not listed in ``block_indices`` and (b) positions past each sequence's
    ``cache_seqlens`` length, before the softmax.

    Args:
        query: [batch, heads, dim] single-step decode queries.
        key / value: [batch, seqlen_kv, heads_kv, dim(_v)] KV cache.
        block_indices: [batch, heads_kv, max_selected_blocks]; -1 = padding.
        cache_seqlens: [batch] valid KV lengths.
        max_cache_seqlen, num_blocks: unused here; kept for signature parity
            with the kernel wrappers.
        block_size: KV block granularity of the sparsity pattern.
    Returns:
        [batch, heads, dim_v] attention output.
    """
    batch, heads, dim = query.shape
    heads_kv = key.shape[2]
    dim_v = value.shape[-1]
    # GQA: number of query heads sharing one KV head
    num_head_groups = query.shape[1] // key.shape[2]
    scale = dim**0.5
    key = rearrange(key, 'b n h d -> b h n d') # [batch_size, heads_kv, seqlen_kv, dim]
    value = rearrange(value, 'b n h d -> b h n d') # [batch_size, heads_kv, seqlen_kv, dim]
    query = rearrange(
        query, 'b (h g) d -> b g h d',
        g=num_head_groups) # [batch_size, num_head_groups, heads_kv, dim]
    scores = einsum(
        query, key,
        'b g h d, b h s d -> b g h s') # [batch_size, num_head_groups, heads_kv, seqlen_kv]
    sparse_mask = torch.zeros_like(scores)
    # Mark the KV positions covered by each selected block; all query groups
    # of a KV head share the same sparsity pattern.
    for b in range(batch):
        for h in range(heads_kv):
            valid_indices = block_indices[b, h] # Extract indices for this batch and head
            for idx in valid_indices:
                if idx >= 0:
                    sparse_mask[b, :, h, idx * block_size: (idx + 1) * block_size] = 1
    scores = scores.masked_fill(sparse_mask == 0, float('-inf'))
    # Mask padding beyond each sequence's valid cache length.
    range_len = torch.arange(scores.shape[-1], device='cuda').unsqueeze(0)
    cache_seqlens_expanded = cache_seqlens.unsqueeze(1)
    pad_mask = range_len >= cache_seqlens_expanded
    pad_mask = pad_mask[:, None, None, :]
    scores = scores.masked_fill(pad_mask, float('-inf'))
    attention = F.softmax(
        scores / scale, dim=-1) # [batch_size, num_head_groups, heads_kv, seqlen_kv]
    out = einsum(attention, value,
                 'b g h s, b h s d -> b g h d') # [batch_size, num_head_groups, heads_kv, dim]
    out = rearrange(out, 'b g h d -> b (h g) d') # [batch_size, heads, dim]
    return out
def ref_program_fa(query, key, value, block_indices, cache_seqlens, max_cache_seqlen, num_blocks, block_size):
    """Dense FlashAttention latency baseline.

    NOTE: ignores the sparsity arguments entirely (block_indices, num_blocks,
    block_size) — it measures full dense decode for timing comparison only,
    not numerical parity with the sparse kernel.
    """
    # latency reference
    # from flash_attn_interface import flash_attn_with_kvcache, flash_attn_func # fa3
    from flash_attn import flash_attn_with_kvcache, flash_attn_func #fa2
    # flash_attn expects a seqlen_q axis: [batch, 1, heads, dim]
    query = query.unsqueeze(1)
    output = flash_attn_with_kvcache(query, key, value, cache_seqlens=cache_seqlens)
    output = output.squeeze(1)
    return output
def debug(name, expect, actual, atol=1e-3, rtol=1e-3):
    """Print whether two tensors agree within tolerance; on mismatch, dump
    diff statistics and the location/values of the largest error."""
    all_close = torch.allclose(expect, actual, atol=atol, rtol=rtol)
    print(name + " all_close={}".format(all_close))
    if all_close:
        return
    diff = (expect - actual).abs()
    print("all_close={}, max={}, min={}, mean={}".format(all_close, diff.max().item(), diff.min().item(), diff.mean().item()))
    # locate the first position attaining the maximum absolute error
    worst_positions = torch.nonzero(diff == diff.max().item())
    first_index = tuple(worst_positions[0].tolist())
    print(f"Index: {first_index}, expect: {expect[first_index]}, actual: {actual[first_index]}")
if __name__ == "__main__":
    # Benchmark / parity harness: checks the tilelang sparse decode kernel
    # against a pure-torch sparse reference, then times it against dense
    # FlashAttention. Requires a CUDA device.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch', type=int, default=4, help='batch size')
    parser.add_argument('--heads', type=int, default=28, help='heads')
    parser.add_argument('--heads_kv', type=int, default=4, help='heads_kv')
    parser.add_argument('--max_cache_seqlen', type=int, default=1048576, help='kvcache sequence length')
    parser.add_argument('--dim', type=int, default=128, help='dim')
    parser.add_argument('--dim_v', type=int, default=128, help='dim_v')
    parser.add_argument('--sparse_ratio', type=float, default=0.9, help='sparse ratio')
    parser.add_argument('--block_size', type=int, default=32, help='block_size')
    parser.add_argument('--load_from_file', type=str, default=None, help='load from file')
    args = parser.parse_args()
    block_H = 64
    if args.load_from_file is None:
        # Synthesize random Q/K/V and a random block-sparsity pattern.
        batch, heads, heads_kv, max_cache_seqlen, dim, dim_v = args.batch, args.heads, args.heads_kv, args.max_cache_seqlen, args.dim, args.dim_v
        sparse_ratio = args.sparse_ratio
        block_size = args.block_size
        # number of KV blocks kept per head after sparsification
        max_selected_blocks = int(math.ceil(max_cache_seqlen * (1-sparse_ratio)/ block_size))
        print("max_selected_blocks: ", max_selected_blocks)
        dtype = torch.float16
        Q = torch.randn((batch, heads, dim), dtype=dtype, device='cuda')
        K = torch.randn((batch, max_cache_seqlen, heads_kv, dim), dtype=dtype, device='cuda')
        V = torch.randn((batch, max_cache_seqlen, heads_kv, dim_v), dtype=dtype, device='cuda')
        cache_seqlens = torch.full((batch,), max_cache_seqlen, dtype=torch.int32, device='cuda')
        # cache_seqlens = torch.randint(1, max_cache_seqlen, (batch,), dtype=torch.int32, device='cuda')
        # cache_seqlens = torch.full((batch,), max_cache_seqlen, dtype=torch.int32, device='cuda')
        # Ensure at least one element equals cache_seqlen
        random_index = torch.randint(0, batch, (1,), device='cuda').item() # Select a random index
        cache_seqlens[random_index] = max_cache_seqlen # Assign cache_seqlen to ensure at least one occurrence
    else:
        # Load a captured debug case (shapes are inferred from the tensors).
        save_dict = torch.load(args.load_from_file)
        Q = save_dict["xq"]
        K = save_dict["key"]
        V = save_dict["value"]
        block_indices = save_dict["sparse_indices"]
        cache_seqlens = save_dict["seqlens_k"]
        batch, heads, dim = Q.shape
        heads_kv = K.shape[2]
        max_cache_seqlen = K.shape[1]
        dim_v = V.shape[-1]
        block_size = max_cache_seqlen // block_indices.shape[-1]
        max_selected_blocks = block_indices.shape[-1]
        print(f"Load debug data from file with batch={batch}, heads={heads}, heads_kv={heads_kv}, max_cache_seqlen={max_cache_seqlen}, dim={dim}, dim_v={dim_v}, block_size={block_size}")
    print("cache_seqlens: ", cache_seqlens)
    max_valid_num_blocks = torch.ceil(cache_seqlens / block_size).int()
    print("max_valid_num_blocks: ", max_valid_num_blocks)
    # Initialize block_indices with -1 (for padding blocks)
    # NOTE(review): this unconditionally rebuilds block_indices with random
    # values, clobbering the tensor loaded from --load_from_file above —
    # looks unintended for the load path; confirm.
    block_indices = torch.full((batch, heads_kv, max_selected_blocks), -1, dtype=torch.int32, device='cuda')
    # Assign valid indices while ensuring no duplicates within each batch-group
    for b in range(batch):
        max_valid_block = max_valid_num_blocks[b].item() # Max valid blocks for this batch
        if max_valid_block > 0: # Ensure there's at least one valid block
            for h in range(heads_kv):
                valid_indices = torch.randperm(max_valid_block, device='cuda', dtype=torch.int32)[:max_selected_blocks]
                block_indices[b, h, :len(valid_indices)] = valid_indices
    # Sort indices within each batch-group for consistency
    block_indices, _ = block_indices.sort(dim=-1, descending=True)
    # print("block_indices: ", block_indices)
    actual_num_blocks = torch.sum(block_indices != -1, dim=-1).to(torch.int32)[:,0]
    print("actual_num_blocks: ", actual_num_blocks)
    # print(block_indices.shape, actual_num_blocks.shape)
    max_num_blocks = torch.max(max_valid_num_blocks).item()
    print("max_num_blocks: ", max_num_blocks)
    # parity reference
    ref = ref_program_torch(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    # ref = ref_program_triton(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    # out = kernel(Q, K, V, block_indices, cache_seqlens, actual_num_blocks, glse, Output_partial)
    # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
    sparse_kernel = SparseFlashAttn(heads, heads_kv, dim, dim_v, block_size)
    out = sparse_kernel(Q, K, V, block_indices, cache_seqlens)
    debug("output", ref, out, atol=1e-3, rtol=1e-3)
    ## latency reference
    # warmup, then time 100 iterations of the dense baseline
    for i in range(10):
        ref = ref_program_fa(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        ref = ref_program_fa(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, max_num_blocks, block_size)
    torch.cuda.synchronize()
    print("dense time: ", (time.time() - start) / 100*1000)
    # warmup, then time 100 iterations of the sparse kernel
    for i in range(10):
        # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
        out = sparse_kernel(Q, K, V, block_indices, cache_seqlens)
    torch.cuda.synchronize()
    start = time.time()
    for i in range(100):
        # out = sparse_gqa_decode_varlen_indice(Q, K, V, block_indices, cache_seqlens, max_cache_seqlen, block_size)
        out = sparse_kernel(Q, K, V, block_indices, cache_seqlens)
    torch.cuda.synchronize()
    print("sparse time: ", (time.time() - start) / 100*1000)
| {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/llm/kernel/tilelang_sparse_decoding.py",
"license": "MIT License",
"lines": 459,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
microsoft/unilm:ReSA/scripts/math_eval_result_length.py | import argparse
from transformers import LlamaTokenizerFast
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
from math_utils import evaluate, load_jsonl
def parse_args():
    """Parse command-line options for the math evaluation script."""
    parser = argparse.ArgumentParser()
    # (flag, default, type) — defaults match the original argument list
    for flag, default, value_type in (
        ("--data_names", "gsm8k", str),
        ("--result_file", None, str),
        ("--prompt_type", "direct", str),
        ("--eval_num", -1, int),
    ):
        parser.add_argument(flag, default=default, type=value_type)
    return parser.parse_args()
def eval_math_acc(args, data_name):
    """Evaluate one dataset's result file, bucketing accuracy by output length.

    Splits samples at a 2048-token threshold (tokenized with a hardcoded
    local tokenizer path) and prints per-bucket accuracy plus average length.
    NOTE(review): returns only the *last* sample's result_json — presumably
    the caller only reads result_json["acc"]; confirm.
    """
    result_file = args.result_file.format(data_name=data_name)
    print(result_file)
    all_samples = list(load_jsonl(result_file))
    if args.eval_num > 0:
        all_samples = all_samples[: args.eval_num]
    # NOTE(review): hardcoded machine-local tokenizer path — not portable.
    tokenizer = LlamaTokenizerFast.from_pretrained('/mnt/msranlp/tianzhu/ckpt/DeepSeek-R1-Distill-Qwen-1.5B')
    avg_len = 0
    threshold = 2048
    below_num, above_num, below_acc, above_acc = 0, 0, 0, 0
    for sample in all_samples:
        # token length of the first generated completion
        length = len(tokenizer.encode(sample["code"][0]))
        avg_len += length
        _, result_json = evaluate(
            samples=[sample],
            data_name=data_name,
            prompt_type=args.prompt_type,
            execute=True,
        )
        # per-sample acc is accumulated; presumably 0 or 100 per sample — TODO confirm
        if length <= threshold:
            below_num += 1
            below_acc += result_json["acc"]
        else:
            above_num += 1
            above_acc += result_json["acc"]
    total_num = below_num + above_num
    total_acc = (below_acc + above_acc) / total_num
    avg_len /= len(all_samples)
    print(f"{data_name} total acc: {total_acc:.1f} ({total_num})")
    print(
        f"{data_name} below {threshold} acc: {int(below_acc/100)}/{below_num}/{below_acc/below_num if below_num > 0 else 0:.1f}"
    )
    print(
        f"{data_name} above {threshold} acc: {int(above_acc/100)}/{above_num}/{above_acc/above_num if above_num > 0 else 0:.1f}"
    )
    print(f"{data_name} avg len: {avg_len:.1f}")
    # bare repeats of the numbers above, for easy copy/paste into tables
    print(f"{total_acc:.1f}")
    print(f"{int(below_acc/100)}/{below_num}/{below_acc/below_num if below_num > 0 else 0:.1f}")
    print(f"{int(above_acc/100)}/{above_num}/{above_acc/above_num if above_num > 0 else 0:.1f}")
    print(f"{avg_len:.1f}")
    # print(result_json)
    return result_json
def main(args):
    """Evaluate every dataset listed in args.data_names (comma-separated)
    and print a padded per-dataset accuracy table plus the average."""
    names = args.data_names.split(",")
    results = [eval_math_acc(args, name) for name in names]
    # append the mean accuracy as a pseudo-dataset "avg"
    mean_acc = sum(r["acc"] for r in results) / len(results)
    names.append("avg")
    results.append({"acc": mean_acc})
    # pad columns to the widest dataset name
    pad = max(len(n) for n in names)
    print("\t".join(n.ljust(pad, " ") for n in names))
    print("\t".join([f"{r['acc']:.1f}".ljust(pad, " ") for r in results]))
if __name__ == "__main__":
args = parse_args()
main(args) | {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/scripts/math_eval_result_length.py",
"license": "MIT License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
microsoft/unilm:ReSA/scripts/math_utils.py | import re
import os
import io
import json
import copy
import regex
import pickle
import datetime
import traceback
import numpy as np
from tqdm import tqdm
from math import isclose
from pathlib import Path
from contextlib import redirect_stdout
from concurrent.futures import TimeoutError
from functools import partial
import multiprocessing
import multiprocess
from multiprocess import Pool
from typing import List, Tuple, Optional, Type, TypeVar, Any, Iterable, Union, Dict
import dateutil.relativedelta
from pebble import ProcessPool
from timeout_decorator import timeout
from word2number import w2n
from sympy import simplify, N
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.latex import parse_latex
from latex2sympy2 import latex2sympy
# from utils.math_examples import get_examples
def is_multi_choice(answer):
    """Return True iff every character of *answer* is a choice letter A-E."""
    return all(ch in "ABCDE" for ch in answer)
def load_jsonl(file: Union[str, Path]) -> Iterable[Any]:
    """Lazily yield one parsed JSON object per line of *file*.

    On an invalid line, prints the offending line and exits the process
    (fail-fast, as before). Fixes: the bare ``except`` used to swallow
    KeyboardInterrupt/SystemExit as parse errors, and ``exit()`` returned
    status 0 on failure — now a narrowed except and exit status 1.
    """
    with open(file, "r", encoding="utf-8") as f:
        for line in f:
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                print("Error in loading:", line)
                raise SystemExit(1)
def save_jsonl(samples, save_path):
    """Append each sample as one JSON line to *save_path*, creating parent dirs."""
    folder = os.path.dirname(save_path)
    os.makedirs(folder, exist_ok=True)
    # serialized up front; file is opened in append mode, so repeated calls accumulate
    lines = [json.dumps(sample, ensure_ascii=False) + "\n" for sample in samples]
    with open(save_path, "a", encoding="utf-8") as f:
        f.writelines(lines)
    print("Saved to", save_path)
# EXAMPLES = get_examples()
def load_prompt(data_name, prompt_type, num_shots):
    """Return up to *num_shots* few-shot demos for a dataset.

    Aliases several dataset names onto a shared demo pool before lookup.
    NOTE(review): ``EXAMPLES`` is commented out at module level
    (``# EXAMPLES = get_examples()``), so any call with a truthy num_shots
    will raise NameError — confirm whether few-shot mode is still used.
    """
    if not num_shots:
        return []
    # map related datasets onto the demo pool they share
    if data_name in ["gsm_hard", "svamp", "tabmwp", "asdiv", "mawps"]:
        data_name = "gsm8k"
    if data_name in ["math_oai", "hungarian_exam", "math-oai", "aime24", "amc23"]:
        data_name = "math"
    if data_name in ["sat_math"]:
        data_name = "mmlu_stem"
    if data_name in [
        "gaokao2024_I",
        "gaokao2024_II",
        "gaokao_math_qa",
        "gaokao2024_mix",
        "cn_middle_school",
    ]:
        data_name = "gaokao"
    if prompt_type in ["tool-integrated"]:
        prompt_type = "tora"
    return EXAMPLES[data_name][:num_shots]
PROMPT_TEMPLATES = {
"direct": ("Problem: {input}\nAnswer: ", "{output}", "\n\n"),
"o1": (
"### Instruction:\n Return your final response within \\boxed{{}}. {input}\n\n### Think:\n ",
"{output}",
"\n\n",
),
}
def construct_prompt(example, data_name, args):
    """Build the full few-shot prompt for *example*.

    Joins the few-shot demos (if any) with the formatted question using the
    (input, output, splitter) template registered for ``args.prompt_type``.
    Fixes: removed an unused ``prompt_type`` local and a redundant double
    assignment of the template/splitter.
    """
    # gaokao-style Chinese sets always use 5 shots when adapt_few_shot is on
    if args.adapt_few_shot and data_name in [
        "gaokao2024_I",
        "gaokao2024_II",
        "gaokao_math_qa",
        "gaokao2024_mix",
        "cn_middle_school",
    ]:
        demos = load_prompt(data_name, args.prompt_type, 5)
    else:
        demos = load_prompt(data_name, args.prompt_type, args.num_shots)
    input_template, output_template, splitter = PROMPT_TEMPLATES[args.prompt_type]
    demo_prompt = splitter.join(
        [
            input_template.format(input=q) + output_template.format(output=a)
            for q, a in demos
        ]
    )
    context = input_template.format(input=example["question"])
    # multiple-choice gaokao examples keep the zero-shot form under adapt_few_shot
    if len(demo_prompt) == 0 or (
        args.adapt_few_shot and example["gt_ans"] not in ["A", "B", "C", "D", "E"]
    ):
        full_prompt = context
    else:
        full_prompt = demo_prompt + splitter + context
    return full_prompt.strip(" ")  # important!
# parser.py
def _fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if len(substr) > 0 and substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def _fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
if "sqrt" not in a:
a = int(a)
if "sqrt" not in b:
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except:
return string
def _fix_sqrt(string):
_string = re.sub(r"\\sqrt(\w+)", r"\\sqrt{\1}", string)
return _string
def convert_word_number(text: str) -> str:
    """Convert an English number word (e.g. "three") to its digit string ("3").

    Returns *text* unchanged when it is not a number word. Fix: the bare
    ``except`` (which also swallowed KeyboardInterrupt/SystemExit) is
    narrowed to ``Exception``; w2n raises ValueError on non-number input.
    """
    try:
        return str(w2n.word_to_num(text))
    except Exception:
        return text
# units mainly from MathQA
unit_texts = [
"east",
"degree",
"mph",
"kmph",
"ft",
"m sqaure",
" m east",
"sq m",
"deg",
"mile",
"q .",
"monkey",
"prime",
"ratio",
"profit of rs",
"rd",
"o",
"gm",
"p . m",
"lb",
"tile",
"per",
"dm",
"lt",
"gain",
"ab",
"way",
"west",
"a .",
"b .",
"c .",
"d .",
"e .",
"f .",
"g .",
"h .",
"t",
"a",
"h",
"no change",
"men",
"soldier",
"pie",
"bc",
"excess",
"st",
"inches",
"noon",
"percent",
"by",
"gal",
"kmh",
"c",
"acre",
"rise",
"a . m",
"th",
"π r 2",
"sq",
"mark",
"l",
"toy",
"coin",
"sq . m",
"gallon",
"° f",
"profit",
"minw",
"yr",
"women",
"feet",
"am",
"pm",
"hr",
"cu cm",
"square",
"v â € ™",
"are",
"rupee",
"rounds",
"cubic",
"cc",
"mtr",
"s",
"ohm",
"number",
"kmph",
"day",
"hour",
"minute",
"min",
"second",
"man",
"woman",
"sec",
"cube",
"mt",
"sq inch",
"mp",
"∏ cm ³",
"hectare",
"more",
"sec",
"unit",
"cu . m",
"cm 2",
"rs .",
"rs",
"kg",
"g",
"month",
"km",
"m",
"cm",
"mm",
"apple",
"liter",
"loss",
"yard",
"pure",
"year",
"increase",
"decrease",
"d",
"less",
"Surface",
"litre",
"pi sq m",
"s .",
"metre",
"meter",
"inch",
]
unit_texts.extend([t + "s" for t in unit_texts])
def strip_string(string, skip_unit=False):
    """Normalize a model/ground-truth answer string for comparison.

    Strips LaTeX decoration, units, percent/dollar signs, whitespace, and
    canonicalizes fractions/sqrt. Fixes: the quote-stripping calls
    (``string.replace("'", "")`` / ``string.replace('"', "")``) discarded
    their results and were no-ops — their results are now assigned; the
    duplicated ``"\\%"`` replace is removed.
    """
    string = str(string).strip()
    # linebreaks
    string = string.replace("\n", "")
    # right "."
    string = string.rstrip(".")
    # remove inverse spaces
    # replace \\ with \
    string = string.replace("\\!", "")
    # string = string.replace("\\ ", "")
    # string = string.replace("\\\\", "\\")
    # matrix
    string = re.sub(r"\\begin\{array\}\{.*?\}", r"\\begin{pmatrix}", string)
    string = re.sub(r"\\end\{array\}", r"\\end{pmatrix}", string)
    string = string.replace("bmatrix", "pmatrix")
    # replace tfrac and dfrac with frac
    string = string.replace("tfrac", "frac")
    string = string.replace("dfrac", "frac")
    string = (
        string.replace("\\neq", "\\ne")
        .replace("\\leq", "\\le")
        .replace("\\geq", "\\ge")
    )
    # remove \left and \right
    string = string.replace("\\left", "")
    string = string.replace("\\right", "")
    string = string.replace("\\{", "{")
    string = string.replace("\\}", "}")
    # Remove trailing \text{...} (units like miles, dollars) if something remains
    _string = re.sub(r"\\text{.*?}$", "", string).strip()
    if _string != "" and _string != string:
        string = _string
    if not skip_unit:
        # Remove unit words; two passes so units exposed by the first pass go too.
        # NOTE(review): unit_text is interpolated into the regex unescaped —
        # entries containing "." act as wildcards; confirm intended.
        for _ in range(2):
            for unit_text in unit_texts:
                # prefix/suffix must be start/end of string or a non-word char
                _string = re.sub(r"(^|\W)" + unit_text + r"($|\W)", r"\1\2", string)
                if _string != "":
                    string = _string
    # Remove circ (degrees)
    string = string.replace("^{\\circ}", "")
    string = string.replace("^\\circ", "")
    # remove dollar signs
    string = string.replace("\\$", "")
    string = string.replace("$", "")
    string = string.replace("\\(", "").replace("\\)", "")
    # convert word number to digit
    string = convert_word_number(string)
    # replace "\\text{...}" to "..."
    string = re.sub(r"\\text\{(.*?)\}", r"\1", string)
    for key in ["x=", "y=", "z=", "x\\in", "y\\in", "z\\in", "x\\to", "y\\to", "z\\to"]:
        string = string.replace(key, "")
    string = string.replace("\\emptyset", r"{}")
    string = string.replace("(-\\infty,\\infty)", "\\mathbb{R}")
    # remove percentage (escaped and bare forms)
    string = string.replace("\\%", "")
    string = string.replace("%", "")
    # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    # cdot
    # string = string.replace("\\cdot", "")
    # NOTE(review): isalnum() is False for strings containing braces/brackets,
    # so these unwrapping branches look unreachable as written — confirm intent.
    if (
        string.startswith("{")
        and string.endswith("}")
        and string.isalnum()
        or string.startswith("(")
        and string.endswith(")")
        and string.isalnum()
        or string.startswith("[")
        and string.endswith("]")
        and string.isalnum()
    ):
        string = string[1:-1]
    # inf
    string = string.replace("infinity", "\\infty")
    if "\\infty" not in string:
        string = string.replace("inf", "\\infty")
    string = string.replace("+\\inity", "\\infty")
    # and
    string = string.replace("and", "")
    string = string.replace("\\mathbf", "")
    # use regex to remove \mbox{...}
    string = re.sub(r"\\mbox{.*?}", "", string)
    # quote (FIX: results were previously discarded, making these no-ops)
    string = string.replace("'", "")
    string = string.replace('"', "")
    # i, j
    if "j" in string and "i" not in string:
        string = string.replace("j", "i")
    # replace a.000b where b is not number or b is end, with ab, use regex
    string = re.sub(r"(\d+)\.0*([^\d])", r"\1\2", string)
    string = re.sub(r"(\d+)\.0*$", r"\1", string)
    # if empty, return empty string
    if len(string) == 0:
        return string
    if string[0] == ".":
        string = "0" + string
    # get rid of e.g. "k = " or "q = " at beginning
    if len(string.split("=")) == 2:
        if len(string.split("=")[0]) <= 2:
            string = string.split("=")[1]
    string = _fix_sqrt(string)
    string = string.replace(" ", "")
    # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}
    string = _fix_fracs(string)
    # X/Y --> \frac{X}{Y} for simple quotients
    string = _fix_a_slash_b(string)
    return string
def extract_multi_choice_answer(pred_str):
    """Pull a multiple-choice letter (A-E) out of a model prediction.

    Returns "placeholder" when no "answer is X" pattern is found.
    """
    # TODO: SFT models
    if "Problem:" in pred_str:
        # keep only the answer to the first problem
        pred_str = pred_str.split("Problem:", 1)[0]
    pred_str = pred_str.replace("choice is", "answer is")
    match = regex.search(r"answer is \(?(?P<ans>[abcde])\)?", pred_str.lower())
    if match is None:
        return "placeholder"
    return match.group("ans").upper()
direct_answer_trigger_for_fewshot = ("choice is", "answer is")
def choice_answer_clean(pred: str):
    """Extract a single choice letter (A-E) from a model prediction string."""
    pred = pred.strip("\n")
    # In-context-learning outputs repeat the trigger; keep only the first chunk.
    is_icl = any(pred.count(trigger) > 1 for trigger in direct_answer_trigger_for_fewshot)
    if is_icl:
        pred = pred.split("\n\n")[0]
    # Split on the answer triggers to isolate the answer span.
    pieces = re.split("|".join(direct_answer_trigger_for_fewshot), pred)
    answer_flag = len(pieces) > 1
    if answer_flag:
        pred = pieces[-1]
    pred = pred.strip("\n").rstrip(".").rstrip("/").strip(" ").lstrip(":")
    # Collect standalone choice letters; fall back to the cleaned string itself.
    letters = re.findall(r"\b(A|B|C|D|E)\b", pred.upper())
    candidates = letters if letters else [pred.strip().strip(".")]
    if len(candidates) == 0:
        pred = ""
    else:
        # with an explicit trigger, trust the first letter; otherwise the last
        pred = candidates[0] if answer_flag else candidates[-1]
    # Remove the period at the end, again!
    return pred.rstrip(".").rstrip("/")
def find_box(pred_str: str):
    """Return the content of the last ``boxed{...}`` in *pred_str* (brace-balanced).

    Without a brace after "boxed", returns the text up to the next "$".
    """
    tail = pred_str.split("boxed")[-1]
    if not tail:
        return ""
    if tail[0] != "{":
        return tail.split("$")[0].strip()
    depth = 1
    content = ""
    for ch in tail[1:]:
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                break
        content += ch
    return content
def clean_units(pred_str: str):
    """Clean the units in the number."""
    def _pi_to_decimal(text):
        # normalize \pi to the literal π, then substitute 3.14
        text = text.replace("\\pi", "π")
        # standalone π (not preceded by a digit or })
        text = re.sub(r"(?<![\d}])\\?π", "3.14", text)
        # implicit multiplication: "3π" -> "3*3.14"
        text = re.sub(r"(\d)(\\?π)", r"\1*3.14", text)
        # braced or explicitly multiplied forms: "{π}" and "*π"
        text = re.sub(r"\{(\\?π)\}", "3.14", text)
        text = re.sub(r"\*(\\?π)", "*3.14", text)
        return text

    pred_str = _pi_to_decimal(pred_str)
    pred_str = pred_str.replace("%", "/100")
    # strip currency and temperature markers
    for token in ("$", "¥", "°C", " C", "°"):
        pred_str = pred_str.replace(token, "")
    return pred_str
def extract_theoremqa_answer(pred: str, answer_flag: bool = True):
    """Normalize a TheoremQA-style prediction to True/False, a choice, or a number.

    NOTE(review): evaluates latex2sympy output with ``eval`` on model-derived
    text — a code-execution risk if predictions are untrusted; confirm this
    only ever runs on sandboxed eval data.
    """
    # boolean-style answers first
    if any([option in pred.lower() for option in ["yes", "true"]]):
        pred = "True"
    elif any([option in pred.lower() for option in ["no", "false"]]):
        pred = "False"
    elif any(
        [
            option in pred.lower()
            for option in ["(a)", "(b)", "(c)", "(d)", "(e)", "(f)"]
        ]
    ):
        # multiple-choice marker present: keep the prediction as-is
        pass
    else:
        # Some of the models somehow get used to boxed output from pre-training
        if "boxed" in pred:
            pred = find_box(pred)
        if answer_flag:
            # Extract the numbers out of the string
            pred = pred.split("=")[-1].strip()
        pred = clean_units(pred)
        try:
            # parse LaTeX and numerically evaluate it
            tmp = str(latex2sympy(pred))
            pred = str(eval(tmp))
        except Exception:
            # fall back to peeling a leading number off "<number> <unit>" forms
            if re.match(r"-?[\d\.]+\s\D+$", pred):
                pred = pred.split(" ")[0]
            elif re.match(r"-?[\d\.]+\s[^\s]+$", pred):
                pred = pred.split(" ")[0]
            else:
                # desparate search over the last number
                preds = re.findall(r"-?\d*\.?\d+", pred)
                if len(preds) >= 1:
                    pred = preds[-1]
                else:
                    pred = ""
    return pred
def extract_answer(pred_str, data_name, use_last_number=True):
    """Extract the final answer span from a model prediction for *data_name*.

    Tries, in order: choice cleaning for MC datasets, the minerva "final
    answer is $...$" form, ``boxed{...}``, "the answer is"/"final answer is"
    phrases, the Chinese "答案是" marker, and finally the last number in the
    string. The result is normalized with strip_string().
    Fix: the last-number regex was a non-raw string (``"-?\\d*..."``), which
    emits SyntaxWarning on Python >= 3.12 — now a raw string (same pattern).
    """
    pred_str = pred_str.replace("\u043a\u0438", "")
    if data_name in ["mmlu_stem", "sat_math", "aqua", "gaokao2023"]:
        # TODO check multiple choice
        return choice_answer_clean(pred_str)

    if "final answer is $" in pred_str and "$. I hope" in pred_str:
        # minerva_math
        tmp = pred_str.split("final answer is $", 1)[1]
        pred = tmp.split("$. I hope", 1)[0].strip()
    elif "boxed" in pred_str:
        # brace-balanced scan of the last boxed{...}
        ans = pred_str.split("boxed")[-1]
        if len(ans) == 0:
            return ""
        elif ans[0] == "{":
            stack = 1
            a = ""
            for c in ans[1:]:
                if c == "{":
                    stack += 1
                    a += c
                elif c == "}":
                    stack -= 1
                    if stack == 0:
                        break
                    a += c
                else:
                    a += c
        else:
            a = ans.split("$")[0].strip()
        pred = a
    elif "he answer is" in pred_str:
        # matches both "The answer is" and "the answer is"
        pred = pred_str.split("he answer is")[-1].strip()
    elif "final answer is" in pred_str:
        pred = pred_str.split("final answer is")[-1].strip()
    elif "答案是" in pred_str:
        # Handle Chinese few-shot multiple choice problem answer extraction
        pred = pred_str.split("答案是")[1].strip().split("\n\n")[0].strip()
    else:  # use the last number
        if use_last_number:
            pattern = r"-?\d*\.?\d+"  # FIX: raw string (was non-raw, same pattern)
            pred = re.findall(pattern, pred_str.replace(",", ""))
            if len(pred) >= 1:
                pred = pred[-1]
            else:
                pred = ""
        else:
            pred = ""

    # choice answer
    # NOTE(review): sat_math/aqua already returned above; this only fires for
    # other "mmlu*" dataset names — confirm intended.
    if (
        data_name in ["sat_math", "aqua"]
        or "mmlu" in data_name
    ):
        tmp = re.findall(r"\b(A|B|C|D|E)\b", pred.upper())
        if tmp:
            pred = tmp[-1]
        else:
            pred = pred.strip().strip(".")

    # multiple line
    # pred = pred.split("\n")[0]
    pred = re.sub(r"\n\s*", "", pred)
    if pred != "" and pred[0] == ":":
        pred = pred[1:]
    if pred != "" and pred[-1] == ".":
        pred = pred[:-1]
    if pred != "" and pred[-1] == "/":
        pred = pred[:-1]
    pred = strip_string(pred, skip_unit=data_name in ["carp_en", "minerva_math"])
    return pred
STRIP_EXCEPTIONS = ["carp_en", "minerva_math"]
def parse_ground_truth(example: Dict[str, Any], data_name):
    """Return ``(gt_cot, gt_ans)`` — the ground-truth rationale and final answer.

    If the example was pre-parsed ("gt_cot"/"gt" keys present) those are
    reused; otherwise the dataset-specific field layout is decoded below.
    The answer is normalized with strip_string() except for datasets in
    STRIP_EXCEPTIONS, which only get operator canonicalization.
    """
    # fast path: pre-parsed examples
    if "gt_cot" in example and "gt" in example:
        if data_name in ["math"]:
            gt_ans = extract_answer(example["gt_cot"], data_name)
        elif data_name in STRIP_EXCEPTIONS:
            gt_ans = example["gt"]
        else:
            gt_ans = strip_string(example["gt"])
        return example["gt_cot"], gt_ans

    # parse ground truth per dataset schema
    if data_name in ["math", "minerva_math"]:
        gt_cot = example["solution"]
        gt_ans = extract_answer(gt_cot, data_name)
    elif data_name == "gsm8k":
        # gsm8k separates rationale and answer with "####"
        gt_cot, gt_ans = example["answer"].split("####")
    elif data_name == "svamp":
        gt_cot, gt_ans = example["Equation"], example["Answer"]
    elif data_name == "asdiv":
        gt_cot = example["formula"]
        # drop the trailing unit annotation, e.g. "72 (cards)"
        gt_ans = re.sub(r"\(.*?\)", "", example["answer"])
    elif data_name == "mawps":
        gt_cot, gt_ans = None, example["target"]
    elif data_name == "tabmwp":
        gt_cot = example["solution"]
        gt_ans = example["answer"]
        # numeric answers may be fractions, comma-grouped, or percentages
        if example["ans_type"] in ["integer_number", "decimal_number"]:
            if "/" in gt_ans:
                gt_ans = int(gt_ans.split("/")[0]) / int(gt_ans.split("/")[1])
            elif "," in gt_ans:
                gt_ans = float(gt_ans.replace(",", ""))
            elif "%" in gt_ans:
                gt_ans = float(gt_ans.split("%")[0]) / 100
            else:
                gt_ans = float(gt_ans)
    elif data_name == "carp_en":
        gt_cot, gt_ans = example["steps"], example["answer"]
    elif data_name == "mmlu_stem":
        # answer is stored as an index into ABCD
        abcd = "ABCD"
        gt_cot, gt_ans = None, abcd[example["answer"]]
    elif data_name == "sat_math":
        gt_cot, gt_ans = None, example["Answer"]
    elif data_name == "aqua":
        gt_cot, gt_ans = None, example["correct"]
    elif data_name in ["gaokao2023en", "college_math", "gaokao_math_cloze"]:
        gt_cot, gt_ans = None, example["answer"].replace("$", "").strip()
    elif data_name == "gaokao_math_qa":
        gt_cot, gt_ans = None, example["label"]
    elif data_name in ["gaokao2024_mix", "cn_middle_school"]:
        # prefer the choice answer when one exists
        if len(example["choice_answer"]) > 0:
            gt_cot, gt_ans = None, example["choice_answer"]
        else:
            gt_cot, gt_ans = None, example["answer"]
    elif data_name == "olympiadbench":
        gt_cot, gt_ans = None, example["final_answer"][0].strip("$")
    elif data_name in [
        "aime24",
        "amc23",
        "cmath",
        "gaokao2024_I",
        "gaokao2024_II",
        "imo2024",
    ]:
        gt_cot, gt_ans = None, example["answer"]
    else:
        raise NotImplementedError(f"`{data_name}`")
    # post process
    gt_cot = str(gt_cot).strip()
    if data_name not in STRIP_EXCEPTIONS:
        gt_ans = strip_string(gt_ans, skip_unit=data_name == "carp_en")
    else:
        # only canonicalize comparison operators for exception datasets
        gt_ans = (
            gt_ans.replace("\\neq", "\\ne")
            .replace("\\leq", "\\le")
            .replace("\\geq", "\\ge")
        )
    return gt_cot, gt_ans
def parse_question(example, data_name):
    """Assemble the question text for *example* from its dataset-specific fields.

    Multiple-choice datasets get their options appended as "Answer Choices";
    boolean ground truths get a "(True or False)" / "(Yes or No)" hint.
    """
    question = ""
    if data_name == "asdiv":
        question = f"{example['body'].strip()} {example['question'].strip()}"
    elif data_name == "svamp":
        body = example["Body"].strip()
        if not body.endswith("."):
            body = body + "."
        question = f'{body} {example["Question"].strip()}'
    elif data_name == "tabmwp":
        # prepend the table (and its title, when present) to the question
        title_str = (
            f'regarding "{example["table_title"]}" ' if example["table_title"] else ""
        )
        question = f"Read the following table {title_str}and answer a question:\n"
        question += f'{example["table"]}\n{example["question"]}'
        if example["choices"]:
            question += (
                f' Please select from the following options: {example["choices"]}'
            )
    elif data_name == "carp_en":
        question = example["content"]
    elif data_name == "mmlu_stem":
        options = example["choices"]
        assert len(options) == 4
        # label the four options (A)-(D)
        for i, (label, option) in enumerate(zip("ABCD", options)):
            options[i] = f"({label}) {str(option).strip()}"
        options = " ".join(options)
        # question = f"{example['question'].strip()}\nWhat of the following is the right choice? Explain your answer.\n{options}"
        question = f"{example['question'].strip()}\nAnswer Choices: {options}"
    elif data_name == "sat_math":
        options = example["options"].strip()
        assert "A" == options[0]
        options = "(" + options
        # normalize " B) " style separators to " (B) "
        for ch in "BCD":
            if f" {ch}) " in options:
                options = regex.sub(f" {ch}\) ", f" ({ch}) ", options)
        # question = f"{example['question'].strip()}\nWhat of the following is the right choice? Explain your answer.\n{options.strip()}"
        question = f"{example['question'].strip()}\nAnswer Choices: {options}"
    elif "aqua" in data_name:
        options = example["options"]
        choice = "(" + "(".join(options)
        choice = choice.replace("(", " (").replace(")", ") ").strip()
        choice = "\nAnswer Choices: " + choice
        question = example["question"].strip() + choice
    elif data_name == "gaokao_math_qa":
        options_dict = example["options"]
        options = []
        for key in options_dict:
            options.append(f"({key}) {options_dict[key]}")
        options = " ".join(options)
        question = f"{example['question'].strip()}\n选项: {options}"
    else:
        # generic datasets: first matching field wins
        for key in ["question", "problem", "Question", "input"]:
            if key in example:
                question = example[key]
                break
    # assert question != ""
    # Yes or No question
    _, gt_ans = parse_ground_truth(example, data_name)
    if isinstance(gt_ans, str):
        gt_lower = gt_ans.lower()
        if gt_lower in ["true", "false"]:
            question += " (True or False)"
        if gt_lower in ["yes", "no"]:
            question += " (Yes or No)"
    return question.strip()
def run_execute(executor, result, prompt_type, data_name, execute=False):
    """Turn a raw model completion into a normalized (prediction, report) pair.

    Depending on the prompt type, the prediction comes from the program's
    printed output, from executing extracted code, or from answer extraction.
    """
    if not result or result == "error":
        return None, None
    report = None
    if "program_only" in prompt_type:
        # prediction is whatever the generated program printed
        prediction = extract_program_output(result)
    elif prompt_type in ["pot", "pal"] and execute:
        # program-of-thought / PAL: run the extracted code through the executor
        prediction, report = executor.apply(extract_program(result))
    else:
        prediction = extract_answer(result, data_name)
    prediction = strip_string(prediction, skip_unit=data_name in STRIP_EXCEPTIONS)
    return prediction, report
# trajectory.py
"""
trajectory:
[
{"role": "rationale", "content": "..."},
{"role": "program", "content": "..."},
{"role": "output", "content": "..."},
{"role": "rationale", "content": "..."},
...
]
"""
def text_to_trajectory(traj_str: str) -> list:
    """Parse interleaved rationale/```python```/```output``` text into a
    trajectory: a list of {"role": ..., "content": ...} dicts.

    (Return annotation fixed: the function returns the trajectory list,
    not None.)
    """
    # parse the interleaved string of rationale, program, output, rationale, ...
    # into a list of role/content dicts
    trajectory = []
    cur_role = "rationale"
    cur_content = ""
    # print(traj_str)
    for i, line in enumerate(traj_str.split("\n")):
        if line == "```python": # program begin
            assert cur_role == "rationale"
            # flush any rationale accumulated so far
            if cur_content:
                trajectory.append({"role": cur_role, "content": cur_content})
                cur_content = ""
            cur_role = "program"
        elif cur_role == "program" and line == "```": # program end
            assert cur_content
            trajectory.append({"role": cur_role, "content": cur_content})
            cur_content = ""
            cur_role = "output"
        elif cur_role == "output" and line.startswith("```output"): # output begin
            assert cur_content == ""
        elif cur_role == "output" and line == "```": # output end
            trajectory.append({"role": cur_role, "content": cur_content})
            cur_content = ""
            cur_role = "rationale"
        else: # content
            cur_content += line
            # re-add the newline stripped by split(), except after the last line
            if i < len(traj_str.split("\n")) - 1:
                cur_content += "\n"
    # the last content
    if cur_content:
        trajectory.append({"role": cur_role, "content": cur_content})
    return trajectory
def trajectory_to_text(trajectory: list) -> str:
    """Render a trajectory back into its interleaved string form."""
    pieces = []
    for step in trajectory:
        body = step["content"]
        role = step["role"]
        if role == "program":
            pieces.append(f"```python\n{body}```\n")
        elif role == "output":
            pieces.append(f"```output\n{body}```\n")
        else:
            # Rationale (or any other role) is emitted verbatim.
            pieces.append(body)
    return "".join(pieces)
def is_execution_success(output):
    """Return True if an execution output contains no error markers.

    The check is a heuristic lowercase substring scan for common failure
    keywords, not an exact error-state check.
    """
    # Fixed: the original listed "no algorithms" twice.
    error_keywords = ("error", "exception", "no algorithms", "cannot", "nan", "...")
    lowered = output.lower()
    return all(keyword not in lowered for keyword in error_keywords)
def extract_program(text: str = None, trajectory: list = None, last_only=False) -> str:
    """Collect executable program code from a completion or trajectory.

    Programs whose following output looks like an error contribute only their
    import lines; successful programs are kept whole.  ``print(`` lines are
    stripped from all but the last retained program.

    Args:
        text: raw interleaved completion (parsed via text_to_trajectory).
        trajectory: pre-parsed trajectory; takes precedence over ``text``.
        last_only: if True, return only the last retained program.

    Returns:
        The concatenated (or last) program source as a string.
    """
    assert text is not None or trajectory is not None, "Either text or trajectory should be provided."
    if trajectory is None:
        try:
            trajectory = text_to_trajectory(text)
        # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        except Exception:
            # Malformed fencing: return a program that fails loudly when run.
            return "raise ValueError('Invalid trajectory')"

    program_list = []
    import_lines = []
    for i, item in enumerate(trajectory):
        if item["role"] != "program":
            continue
        cur_program = item["content"]
        if i < len(trajectory) - 1:
            assert trajectory[i + 1]["role"] == "output"
            output = trajectory[i + 1]["content"].strip()
            if is_execution_success(output):
                program_list.append(cur_program)
            else:
                # Keep only the imports from failed programs.
                for line in cur_program.split("\n"):
                    if line.startswith("import") or line.startswith("from"):
                        import_lines.append(line)
        else:
            # Trailing program with no recorded output: keep it.
            program_list.append(cur_program)

    if len(program_list) == 0:
        program_list.append("")
    if len(import_lines) > 0:
        # Prepend salvaged imports to the first program.
        program_list[0] = "\n".join(import_lines) + "\n" + program_list[0]
    # Drop print() calls from every program except the last one.
    for i, program in enumerate(program_list[:-1]):
        program_list[i] = "\n".join(
            [line for line in program.split("\n") if not line.strip().startswith("print(")]
        )
    if last_only:
        return program_list[-1]
    return "\n".join(program_list)
def extract_program_output(pred_str, last_only=True):
    """Pull execution outputs out of ```output ... ``` fenced blocks.

    Returns the last output (or "" if there is none) when ``last_only`` is
    True, otherwise the full list of outputs.
    """
    matches = re.findall(r"```output\n(.*?)\n```", pred_str, re.DOTALL)
    if not last_only:
        return matches
    return matches[-1] if matches else ""
# grader.py
def choice_answer_clean(pred: str):
    """Normalize a multiple-choice prediction down to a single letter.

    Falls back to the stripped raw text when no standalone A-E is found.
    """
    pred = pred.strip("\n").rstrip(".").rstrip("/").strip(" ").lstrip(":")
    # Prefer the last standalone A-E letter anywhere in the answer.
    choices = re.findall(r"\b(A|B|C|D|E)\b", pred.upper())
    if choices:
        cleaned = choices[-1]
    else:
        cleaned = pred.strip().strip(".")
    # Remove a trailing period/slash once more.
    return cleaned.rstrip(".").rstrip("/")
def parse_digits(num):
    """Parse a numeric string into a float, handling ',' separators and '%'.

    "1,234" -> 1234.0, "50%" -> 0.5.  Returns None when unparseable.
    """
    # A literal comma needs no regex engine: str.replace is equivalent to the
    # original `regex.sub(",", "", ...)` and drops the third-party dependency.
    num = str(num).replace(",", "")
    try:
        return float(num)
    # `num` is always a str here, so float() can only raise ValueError;
    # the original bare excepts were needlessly broad.
    except ValueError:
        if num.endswith("%"):
            num = num[:-1]
        if num.endswith("\\"):
            num = num[:-1]
        try:
            # Percent values (and values with a stray trailing backslash)
            # are scaled by 1/100, matching the original behavior.
            return float(num) / 100
        except ValueError:
            pass
    return None
def is_digit(num):
    """Return True when ``num`` parses as a number (see parse_digits)."""
    parsed = parse_digits(num)
    return parsed is not None
def str_to_pmatrix(input_str):
    """Convert "{a,b}"-style matrix strings into LaTeX pmatrix form."""
    stripped = input_str.strip()
    matrices = []
    # Each {...,...} group becomes one \begin{pmatrix}...\end{pmatrix}.
    for group in re.findall(r"\{.*,.*\}", stripped):
        body = group.strip("{}").replace(",", "\\")
        matrices.append(r"\begin{pmatrix}" + body + r"\end{pmatrix}")
    return ", ".join(matrices)
def math_equal(
    prediction: Union[bool, float, str],
    reference: Union[float, str],
    include_percentage: bool = True,
    is_close: bool = True,
    timeout: bool = False,
) -> bool:
    """
    Exact match of math if and only if:
    1. numerical equal: both can convert to float and are equal
    2. symbolic equal: both can convert to sympy expression and are equal

    The checks below are a cascade of increasingly expensive heuristics;
    the first one that succeeds returns True.  When ``timeout`` is set, the
    final sympy comparison runs in a subprocess guarded by call_with_timeout.
    """
    if prediction is None or reference is None:
        return False
    # Cheap exact (case-insensitive) string match first.
    if str(prediction.strip().lower()) == str(reference.strip().lower()):
        return True
    # Multiple-choice references compare against the cleaned choice letter.
    if (
        reference in ["A", "B", "C", "D", "E"]
        and choice_answer_clean(prediction) == reference
    ):
        return True

    try:  # 1. numerical equal
        if is_digit(prediction) and is_digit(reference):
            prediction = parse_digits(prediction)
            reference = parse_digits(reference)
            # For number questions, optionally accept ref/100 and ref*100
            # to tolerate percent-vs-fraction answers.
            if include_percentage:
                gt_result = [reference / 100, reference, reference * 100]
            else:
                gt_result = [reference]
            for item in gt_result:
                try:
                    if is_close:
                        if numeric_equal(prediction, item):
                            return True
                    else:
                        if item == prediction:
                            return True
                except Exception:
                    continue
            return False
    except:
        pass

    # Empty predictions (other than 0/False) can never match symbolically.
    if not prediction and prediction not in [0, False]:
        return False

    # 2. symbolic equal — everything below works on stripped strings.
    reference = str(reference).strip()
    prediction = str(prediction).strip()

    ## pmatrix (amps): lift a "{a,b}" reference to pmatrix form if needed.
    if "pmatrix" in prediction and not "pmatrix" in reference:
        reference = str_to_pmatrix(reference)

    ## deal with [], (), {} — compare with brackets normalized away.
    pred_str, ref_str = prediction, reference
    if (
        prediction.startswith("[")
        and prediction.endswith("]")
        and not reference.startswith("(")
    ) or (
        prediction.startswith("(")
        and prediction.endswith(")")
        and not reference.startswith("[")
    ):
        pred_str = pred_str.strip("[]()")
        ref_str = ref_str.strip("[]()")
    for s in ["{", "}", "(", ")"]:
        ref_str = ref_str.replace(s, "")
        pred_str = pred_str.replace(s, "")
    if pred_str.lower() == ref_str.lower():
        return True

    ## [a, b] vs. [c, d]: element-wise recursive comparison.
    if (
        regex.match(r"(\(|\[).+(\)|\])", prediction) is not None
        and regex.match(r"(\(|\[).+(\)|\])", reference) is not None
    ):
        pred_parts = prediction[1:-1].split(",")
        ref_parts = reference[1:-1].split(",")
        if len(pred_parts) == len(ref_parts):
            if all(
                [
                    math_equal(
                        pred_parts[i], ref_parts[i], include_percentage, is_close
                    )
                    for i in range(len(pred_parts))
                ]
            ):
                return True
    # LaTeX pmatrix/bmatrix on both sides: compare cell by cell.
    if (
        (
            prediction.startswith("\\begin{pmatrix}")
            or prediction.startswith("\\begin{bmatrix}")
        )
        and (
            prediction.endswith("\\end{pmatrix}")
            or prediction.endswith("\\end{bmatrix}")
        )
        and (
            reference.startswith("\\begin{pmatrix}")
            or reference.startswith("\\begin{bmatrix}")
        )
        and (
            reference.endswith("\\end{pmatrix}") or reference.endswith("\\end{bmatrix}")
        )
    ):
        # NOTE: the slice lengths assume pmatrix/bmatrix markers have equal
        # length, which they do.
        pred_lines = [
            line.strip()
            for line in prediction[
                len("\\begin{pmatrix}") : -len("\\end{pmatrix}")
            ].split("\\\\")
            if line.strip()
        ]
        ref_lines = [
            line.strip()
            for line in reference[
                len("\\begin{pmatrix}") : -len("\\end{pmatrix}")
            ].split("\\\\")
            if line.strip()
        ]
        matched = True
        if len(pred_lines) == len(ref_lines):
            for pred_line, ref_line in zip(pred_lines, ref_lines):
                pred_parts = pred_line.split("&")
                ref_parts = ref_line.split("&")
                if len(pred_parts) == len(ref_parts):
                    if not all(
                        [
                            math_equal(
                                pred_parts[i],
                                ref_parts[i],
                                include_percentage,
                                is_close,
                            )
                            for i in range(len(pred_parts))
                        ]
                    ):
                        matched = False
                        break
                else:
                    matched = False
                if not matched:
                    break
        else:
            matched = False
        if matched:
            return True

    # Equations: "lhs = rhs" on both sides → compare lhs - rhs symbolically
    # (also accepting the negated difference).
    if prediction.count("=") == 1 and reference.count("=") == 1:
        pred = prediction.split("=")
        pred = f"{pred[0].strip()} - ({pred[1].strip()})"
        ref = reference.split("=")
        ref = f"{ref[0].strip()} - ({ref[1].strip()})"
        if symbolic_equal(pred, ref) or symbolic_equal(f"-({pred})", ref):
            return True
    # "x = value" vs bare value (short variable names only).
    elif (
        prediction.count("=") == 1
        and len(prediction.split("=")[0].strip()) <= 2
        and "=" not in reference
    ):
        if math_equal(
            prediction.split("=")[1], reference, include_percentage, is_close
        ):
            return True
    elif (
        reference.count("=") == 1
        and len(reference.split("=")[0].strip()) <= 2
        and "=" not in prediction
    ):
        if math_equal(
            prediction, reference.split("=")[1], include_percentage, is_close
        ):
            return True

    # symbolic equal with sympy — last, most expensive resort.
    if timeout:
        if call_with_timeout(symbolic_equal_process, prediction, reference):
            return True
    else:
        if symbolic_equal(prediction, reference):
            return True

    return False
def math_equal_process(param):
    """Pool worker wrapper: ``param`` ends with (..., prediction, ground_truth)."""
    prediction, ground_truth = param[-2], param[-1]
    return math_equal(prediction, ground_truth)
def numeric_equal(prediction: float, reference: float):
    """Compare two floats with a relative tolerance of 1e-4.

    The relative (rather than absolute) tolerance matters for synthesized
    datasets such as GSM-Hard, where reference magnitudes vary widely.
    """
    tolerance = 1e-4
    return isclose(reference, prediction, rel_tol=tolerance)
def symbolic_equal(a, b):
    """Best-effort sympy-based equality between two expression strings.

    Each comparison strategy is tried in turn inside its own try/except:
    sympy routinely raises on malformed input, so failures simply fall
    through to the next strategy.
    """
    def _parse(s):
        # Try LaTeX, plain sympy, then latex2sympy; fall back to the raw
        # string if nothing parses.
        for f in [parse_latex, parse_expr, latex2sympy]:
            try:
                return f(s.replace("\\\\", "\\"))
            except:
                try:
                    return f(s)
                except:
                    pass
        return s
    a = _parse(a)
    b = _parse(b)

    # direct equal
    try:
        if str(a) == str(b) or a == b:
            return True
    except:
        pass

    # simplify equal
    try:
        if a.equals(b) or simplify(a - b) == 0:
            return True
    except:
        pass

    # equation equal: compare |lhs - rhs| of both equations
    try:
        if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)):
            return True
    except:
        pass

    # numerical evaluation fallback
    try:
        if numeric_equal(float(N(a)), float(N(b))):
            return True
    except:
        pass

    # matrix
    try:
        # if a and b are matrix: compare element-wise after rounding
        if a.shape == b.shape:
            _a = a.applyfunc(lambda x: round(x, 3))
            _b = b.applyfunc(lambda x: round(x, 3))
            if _a.equals(_b):
                return True
    except:
        pass

    return False
def symbolic_equal_process(a, b, output_queue):
    """Queue-reporting wrapper around symbolic_equal for subprocess use."""
    output_queue.put(symbolic_equal(a, b))
def call_with_timeout(func, *args, timeout=1, **kwargs):
    """Run ``func`` in a subprocess; return False if it exceeds ``timeout``.

    ``func`` must accept an extra trailing Queue argument and put its result
    there (see symbolic_equal_process).
    """
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(
        target=func, args=args + (result_queue,), kwargs=kwargs
    )
    worker.start()
    worker.join(timeout)
    if worker.is_alive():
        # Still running past the deadline: kill it and report failure.
        worker.terminate()
        worker.join()
        return False
    return result_queue.get()
# evaluate.py
def evaluate(data_name, prompt_type, samples: list=None, file_path: str=None, max_num_samples=None, execute=False):
    """Score predictions against ground truth with a process pool.

    Either ``samples`` or ``file_path`` must be given.  Each sample carries a
    list under 'pred'; every prediction is compared via math_equal_process
    with a 10s per-item timeout.  Returns (annotated samples, result_json).
    """
    assert samples or file_path, "samples or file_path must be provided"
    if not samples:
        samples = list(load_jsonl(file_path))
    # Deduplicate by 'idx' (keeping the last occurrence) and sort; otherwise
    # assign sequential indices.
    if 'idx' in samples[0]:
        samples = {sample['idx']: sample for sample in samples}.values()
        samples = sorted(samples, key=lambda x: x['idx'])
    else:
        samples = [dict(idx=idx, **sample) for idx, sample in enumerate(samples)]
    if max_num_samples:
        print(f"max_num_samples: {max_num_samples} / {len(samples)}")
        samples = samples[:max_num_samples]

    # parse gt
    for sample in samples:
        sample['gt_cot'], sample['gt'] = parse_ground_truth(sample, data_name)
    # One scoring task per (sample, prediction) pair, flattened.
    params = [(idx, pred, sample['gt']) for idx, sample in enumerate(samples) for pred in sample['pred']]

    scores = []
    timeout_cnt = 0

    with ProcessPool(max_workers=16) as pool:
        future = pool.map(math_equal_process, params, timeout=10)
        iterator = future.result()
        with tqdm(total=len(samples), desc="Evaluate") as progress_bar:
            while True:
                try:
                    result = next(iterator)
                    scores.append(result)
                except StopIteration:
                    break
                except TimeoutError as error:
                    # A stuck comparison counts as a wrong answer.
                    print(error)
                    scores.append(False)
                    timeout_cnt += 1
                except Exception as error:
                    print(error.traceback)
                    exit()
                progress_bar.update(1)

    # Re-slice the flat score list back onto the samples.
    idx = 0
    score_mat = []
    for sample in samples:
        sample['score'] = scores[idx: idx+len(sample['pred'])]
        assert len(sample['score']) == len(sample['pred'])
        score_mat.append(sample['score'])
        idx += len(sample['pred'])

    max_len = max([len(s) for s in score_mat])
    for i, s in enumerate(score_mat):
        if len(s) < max_len:
            score_mat[i] = s + [s[-1]] * (max_len - len(s))  # pad

    # output mean of each column of scores
    col_means = np.array(score_mat).mean(axis=0)
    mean_score = list(np.round(col_means * 100, decimals=1))

    result_json = {
        "num_samples": len(samples),
        "num_scores": len(scores),
        "timeout_samples": timeout_cnt,
        "empty_samples": len([s for s in samples if not s['pred'][-1]]),
        "acc": mean_score[0]
    }

    # each type score: per-category accuracy based on the last prediction
    if "type" in samples[0]:
        type_scores = {}
        for sample in samples:
            if sample['type'] not in type_scores:
                type_scores[sample['type']] = []
            type_scores[sample['type']].append(sample['score'][-1])
        type_scores = {k: np.round(np.array(v).mean() * 100, decimals=1) for k, v in type_scores.items()}
        type_scores = {k: v for k, v in sorted(type_scores.items(), key=lambda item: item[0])}
        result_json['type_acc'] = type_scores

    return samples, result_json
# python_executor.py
class GenericRuntime:
    """Minimal exec()-based runtime for running generated Python snippets.

    Subclasses customize GLOBAL_DICT/LOCAL_DICT/HEADERS to pre-populate the
    sandbox namespace.  WARNING: exec() here runs arbitrary generated code;
    the only guard is the input() screen below.
    """
    GLOBAL_DICT = {}
    LOCAL_DICT = None
    HEADERS = []

    def __init__(self):
        # Copy the class-level namespaces so instances don't share state.
        self._global_vars = copy.copy(self.GLOBAL_DICT)
        self._local_vars = copy.copy(self.LOCAL_DICT) if self.LOCAL_DICT else None

        for c in self.HEADERS:
            self.exec_code(c)

    def exec_code(self, code_piece: str) -> None:
        """Execute a code snippet inside the runtime's global namespace."""
        # Reject code that tries to read stdin (would hang the worker).
        if regex.search(r'(\s|^)?input\(', code_piece):
            # regex.search(r'(\s|^)?os.', code_piece):
            raise RuntimeError()
        exec(code_piece, self._global_vars)
        # TODO: use: https://github.com/shroominic/codebox-api
        # @high safe exec in sandbox
        # byte_code = compile_restricted(
        #     code_piece,
        #     filename='<inline code>',
        #     mode='exec'
        # )
        # print("global vars:", self._global_vars)
        # _print_ = PrintCollector
        # exec(byte_code, {'__builtins__': utility_builtins}, None)

    def eval_code(self, expr: str) -> Any:
        """Evaluate an expression in the runtime's global namespace."""
        return eval(expr, self._global_vars)

    def inject(self, var_dict: Dict[str, Any]) -> None:
        """Insert external variables into the sandbox globals."""
        for k, v in var_dict.items():
            self._global_vars[k] = v

    @property
    def answer(self):
        # Convention: generated code stores its result in `answer`.
        return self._global_vars['answer']
class DateRuntime(GenericRuntime):
    # Date-math sandbox: exposes datetime plus relativedelta.
    # NOTE(review): 'timedelta' is bound to relativedelta, NOT
    # datetime.timedelta — presumably intentional so generated code gets
    # month/year arithmetic for free; confirm before changing.
    GLOBAL_DICT = {
        'datetime': datetime.datetime,
        'timedelta': dateutil.relativedelta.relativedelta,
        'relativedelta': dateutil.relativedelta.relativedelta
    }
class CustomDict(dict):
    """Dict whose iterator snapshots the keys, so the dict may be mutated
    while being iterated without raising RuntimeError."""

    def __iter__(self):
        snapshot = list(super().__iter__())
        return iter(snapshot)
class ColorObjectRuntime(GenericRuntime):
    # Sandbox whose generated code sees CustomDict as `dict`, so programs
    # can mutate dicts while iterating them without RuntimeError.
    GLOBAL_DICT = {'dict': CustomDict}
class PythonExecutor:
    def __init__(
        self,
        runtime: Optional[Any] = None,
        get_answer_symbol: Optional[str] = None,
        get_answer_expr: Optional[str] = None,
        get_answer_from_stdout: bool = False,
        timeout_length: int = 5,
    ) -> None:
        """Configure how results are extracted after executing code.

        Exactly one extraction strategy is used (see `execute`): captured
        stdout, a named global symbol, an expression to eval, or the value
        of the snippet's last line.
        """
        self.runtime = runtime if runtime else GenericRuntime()
        self.answer_symbol = get_answer_symbol
        self.answer_expr = get_answer_expr
        self.get_answer_from_stdout = get_answer_from_stdout
        # Worker pool sized to the machine; used by batch execution paths.
        self.pool = Pool(multiprocess.cpu_count())
        self.timeout_length = timeout_length
def process_generation_to_code(self, gens: str):
return [g.strip().split('\n') for g in gens]
    @staticmethod
    def execute(
        code,
        get_answer_from_stdout = None,
        runtime = None,
        answer_symbol = None,
        answer_expr = None,
        timeout_length = 10,
        auto_mode=False
    ):
        """Run ``code`` (a list of source lines) and return (result, report).

        In auto_mode the strategy is picked from the last line: snippets
        ending in print() have stdout captured, otherwise the last line is
        eval'ed for the result.  Any failure yields ('', <last traceback
        line>) rather than raising.
        """
        try:
            if auto_mode:
                if "print(" in code[-1]:
                    # Capture everything the program prints.
                    program_io = io.StringIO()
                    with redirect_stdout(program_io):
                        timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
                    program_io.seek(0)
                    result = program_io.read()
                else:
                    print(code)
                    # Execute all but the last line, then eval it for a value.
                    timeout(timeout_length)(runtime.exec_code)('\n'.join(code[:-1]))
                    result = timeout(timeout_length)(runtime.eval_code)(code[-1])
            else:
                if get_answer_from_stdout:
                    program_io = io.StringIO()
                    with redirect_stdout(program_io):
                        timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
                    program_io.seek(0)
                    result = program_io.read()
                elif answer_symbol:
                    # Result is a named global set by the program.
                    timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
                    result = runtime._global_vars[answer_symbol]
                elif answer_expr:
                    # Result is a caller-provided expression evaluated after.
                    timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
                    result = timeout(timeout_length)(runtime.eval_code)(answer_expr)
                else:
                    timeout(timeout_length)(runtime.exec_code)('\n'.join(code[:-1]))
                    result = timeout(timeout_length)(runtime.eval_code)(code[-1])
            report = "Done"
            str(result)
            pickle.dumps(result)  # serialization check
        except:
            result = ''
            report = traceback.format_exc().split('\n')[-2]
        return result, report
def apply(self, code):
return self.batch_apply([code])[0]
@staticmethod
def truncate(s, max_length=400):
half = max_length // 2
if len(s) > max_length:
s = s[:half] + "..." + s[-half:]
return s
    def batch_apply(self, batch_code):
        """Execute many code snippets through a process pool.

        Each snippet gets (result, report); timeouts become ("", "Timeout
        Error").  Results are stringified and truncated before returning.
        """
        all_code_snippets = self.process_generation_to_code(batch_code)

        timeout_cnt = 0
        all_exec_results = []
        # with ProcessPool(max_workers=min(len(all_code_snippets), os.cpu_count())) as pool:
        # NOTE(review): worker count is pinned to 1 — presumably to keep the
        # shared runtime consistent; confirm before parallelizing.
        with ProcessPool(max_workers=min(len(all_code_snippets), 1)) as pool:
            executor = partial(
                self.execute,
                get_answer_from_stdout=self.get_answer_from_stdout,
                runtime=self.runtime,
                answer_symbol=self.answer_symbol,
                answer_expr=self.answer_expr,
                timeout_length=self.timeout_length,  # this timeout not work
                auto_mode=True
            )
            future = pool.map(executor, all_code_snippets, timeout=self.timeout_length)
            iterator = future.result()

            # Only show a progress bar for genuinely large batches.
            if len(all_code_snippets) > 100:
                progress_bar = tqdm(total=len(all_code_snippets), desc="Execute")
            else:
                progress_bar = None

            while True:
                try:
                    result = next(iterator)
                    all_exec_results.append(result)
                except StopIteration:
                    break
                except TimeoutError as error:
                    print(error)
                    all_exec_results.append(("", "Timeout Error"))
                    timeout_cnt += 1
                except Exception as error:
                    print(error)
                    exit()
                if progress_bar is not None:
                    progress_bar.update(1)

            if progress_bar is not None:
                progress_bar.close()

        batch_results = []
        for code, (res, report) in zip(all_code_snippets, all_exec_results):
            # post processing: normalize and clip both result and report
            res, report = str(res).strip(), str(report).strip()
            res, report = self.truncate(res), self.truncate(report)
            batch_results.append((res, report))
return batch_results | {
"repo_id": "microsoft/unilm",
"file_path": "ReSA/scripts/math_utils.py",
"license": "MIT License",
"lines": 1385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mingrammer/diagrams:diagrams/cli.py | import argparse
import sys
def run() -> int:
    """Run diagrams code files in a diagrams environment.

    The file paths are taken from the command line (one or more positional
    arguments), not from function parameters — the original docstring
    documented a nonexistent ``paths`` argument.

    Returns:
        The exit code (0 on success).
    """
    parser = argparse.ArgumentParser(
        description="Run diagrams code files in a diagrams environment.",
    )
    parser.add_argument(
        "paths",
        metavar="path",
        type=str,
        nargs="+",
        help="a Python file containing diagrams code",
    )
    args = parser.parse_args()
    for path in args.paths:
        with open(path, encoding='utf-8') as f:
            # NOTE: exec runs arbitrary code from the given file; only pass
            # trusted diagram scripts.
            exec(f.read())
    return 0
def main():
    """CLI entry point: exit the process with run()'s return code."""
    raise SystemExit(run())
if __name__ == "__main__":
main()
| {
"repo_id": "mingrammer/diagrams",
"file_path": "diagrams/cli.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mitmproxy/mitmproxy:mitmproxy/contentviews/_view_zip.py | import io
import zipfile
from mitmproxy.contentviews._api import Contentview
from mitmproxy.contentviews._api import Metadata
from mitmproxy.contentviews._utils import yaml_dumps
class ZipContentview(Contentview):
    """Contentview that lists the file names inside a ZIP archive."""

    name = "ZIP Archive"
    syntax_highlight = "yaml"

    def prettify(self, data: bytes, metadata: Metadata) -> str:
        archive = zipfile.ZipFile(io.BytesIO(data), "r")
        with archive:
            names = [entry.filename for entry in archive.infolist()]
        if not names:
            return "(empty zip file)"
        return yaml_dumps(names)

    def render_priority(self, data: bytes, metadata: Metadata) -> float:
        # Only claim non-empty bodies explicitly typed as ZIP.
        if not data:
            return 0
        return 1.0 if metadata.content_type == "application/zip" else 0
# Module-level singleton registered as the ZIP contentview (intentionally
# shadows the builtin `zip` within this module's public API).
zip = ZipContentview()
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "mitmproxy/contentviews/_view_zip.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mitmproxy/mitmproxy:test/mitmproxy/contentviews/test__view_zip.py | import io
import zipfile
from mitmproxy import http
from mitmproxy.contentviews import Metadata
from mitmproxy.contentviews._view_zip import zip
def meta(content_type: str) -> Metadata:
    """Build request Metadata carrying the given content-type header."""
    bare_type = content_type.split(";")[0]
    request = http.Request.make(
        "POST", "https://example.com/", headers={"content-type": content_type}
    )
    return Metadata(content_type=bare_type, http_message=request)
def test_view_zip():
    """Archive entry names (including tricky ones) appear in the output."""
    names_in = [
        "normal.txt",
        "with spaces.txt",
        "dir/nested.txt",
        "file\nwith\nnewlines.txt",
        "unicode_文件.txt",
        "café.txt",
    ]
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w") as zf:
        for name in names_in:
            zf.writestr(name, b"content")

    result = zip.prettify(buffer.getvalue(), meta("application/zip"))

    expected_fragments = [
        "normal.txt",
        "with spaces.txt",
        "dir/nested.txt",
        "newlines",
        "文件",
        "café",
    ]
    for fragment in expected_fragments:
        assert fragment in result
    assert zip.syntax_highlight == "yaml"
def test_view_zip_empty():
    """An archive with no entries renders the placeholder text."""
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w"):
        pass
    rendered = zip.prettify(buffer.getvalue(), meta("application/zip"))
    assert rendered == "(empty zip file)"
def test_render_priority():
    """Priority is claimed only for non-empty application/zip bodies."""
    cases = [
        (b"data", "application/zip", 1.0),
        (b"data", "text/plain", 0),
        (b"", "application/zip", 0),
    ]
    for payload, content_type, expected in cases:
        priority = zip.render_priority(payload, Metadata(content_type=content_type))
        assert priority == expected
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "test/mitmproxy/contentviews/test__view_zip.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mitmproxy/mitmproxy:examples/addons/dns-simple.py | """
Spoof DNS responses.
In this example, we fiddle with IPv6 (AAAA) records:
- For example.com, `::1` is returned.
(domain is hosted on localhost)
- For example.org, an NXDOMAIN error is returned.
(domain does not exist)
- For all other domains, return a non-error response without any records.
(domain exists, but has no IPv6 configured)
"""
import ipaddress
import logging
from mitmproxy import dns
def dns_request(flow: dns.DNSFlow) -> None:
    """Spoof AAAA answers: ::1 for example.com, NXDOMAIN for example.org,
    and a successful-but-empty answer for every other domain."""
    question = flow.request.question
    # Only intercept IPv6 (AAAA) lookups.
    if not question or question.type != dns.types.AAAA:
        return
    logging.info(f"Spoofing IPv6 records for {question.name}...")
    if question.name == "example.com":
        record = dns.ResourceRecord(
            name="example.com",
            type=dns.types.AAAA,
            class_=dns.classes.IN,
            ttl=dns.ResourceRecord.DEFAULT_TTL,
            data=ipaddress.ip_address("::1").packed,
        )
        flow.response = flow.request.succeed([record])
    elif question.name == "example.org":
        flow.response = flow.request.fail(dns.response_codes.NXDOMAIN)
    else:
        flow.response = flow.request.succeed([])
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "examples/addons/dns-simple.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mitmproxy/mitmproxy:mitmproxy/utils/htpasswd.py | """
A standalone, minimal htpasswd parser.
This implementation currently supports bcrypt and SHA1 passwords. SHA1 is insecure.
"""
from __future__ import annotations

import base64
import hashlib
import hmac
from pathlib import Path

import bcrypt
class HtpasswdFile:
    def __init__(self, content: str):
        """
        Create a HtpasswdFile from a string.

        Args:
            content: The htpasswd file contents.

        Raises:
            ValueError: If a line is malformed or uses an unsupported format.
        """
        self.users: dict[str, str] = {}
        for line in content.splitlines():
            line = line.strip()
            # Skip blank lines and comments.
            if not line or line.startswith("#"):
                continue
            if ":" not in line:
                raise ValueError(f"Malformed htpasswd line: {line!r}")
            user, pwhash = line.split(":", 1)
            if not user:
                raise ValueError(f"Malformed htpasswd line: {line!r}")
            is_sha = pwhash.startswith("{SHA}")
            is_bcrypt = pwhash.startswith(("$2y$", "$2b$", "$2a$"))
            if not is_sha and not is_bcrypt:
                raise ValueError(f"Unsupported htpasswd format for user {user!r}")
            self.users[user] = pwhash

    @classmethod
    def from_file(cls, path: Path) -> "HtpasswdFile":
        """
        Initializes and loads an htpasswd file.

        Args:
            path: The path to the htpasswd file.

        Raises:
            OSError: If the file cannot be read.
            ValueError: If the file is malformed.
        """
        try:
            content = path.read_text("utf-8")
        except FileNotFoundError:
            raise OSError(f"Htpasswd file not found: {path}") from None
        return cls(content)

    def check_password(self, username: str, password: str) -> bool:
        """
        Checks if a username and password combination is valid.

        Args:
            username: The username to check.
            password: The password to check.

        Returns:
            True if the password is valid, False otherwise.
        """
        pwhash = self.users.get(username)
        if pwhash is None:
            return False
        # Ignore any trailing ":extra" data after the hash itself.
        pwhash = pwhash.split(":", 1)[0]
        if pwhash.startswith("{SHA}"):
            # Apache's {SHA} is base64-encoded SHA-1.
            # https://httpd.apache.org/docs/2.4/misc/password_encryptions.html
            digest = hashlib.sha1(password.encode("utf-8")).digest()
            expected = base64.b64encode(digest).decode("ascii")
            # Fixed: use a constant-time comparison so the check does not
            # leak hash prefixes through timing (the original used `==`).
            return hmac.compare_digest(pwhash[5:], expected)
        else:  # pwhash.startswith(("$2y$", "$2b$", "$2a$")):
            # bcrypt.checkpw is already timing-safe.
            return bcrypt.checkpw(password.encode("utf-8"), pwhash.encode("utf-8"))
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "mitmproxy/utils/htpasswd.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mitmproxy/mitmproxy:test/mitmproxy/utils/test_htpasswd.py | from pathlib import Path
import pytest
from mitmproxy.utils import htpasswd
def test_sha1():
    """{SHA} hashes validate the right password, reject everything else."""
    ht = htpasswd.HtpasswdFile(
        "user1:{SHA}8FePHnF0saQcTqjG4X96ijuIySo=\n"
        "user2:{SHA}i+UhJqb95FCnFio2UdWJu1HpV50=\n"
        "user3:{SHA}3ipNV1GrBtxPmHFC21fCbVCSXIo=:extra\n"
    )
    for user, password in [("user1", "pass1"), ("user2", "pass2"), ("user3", "pass3")]:
        assert ht.check_password(user, password)
    assert not ht.check_password("user1", "pass2")
    assert not ht.check_password("wronguser", "testpassword")
def test_bcrypt():
    """bcrypt hashes validate the right password and reject others."""
    content = "user_bcrypt:$2b$05$opH8g9/PUhK6HVSnhdX7P.oB6MTMOlIlXgb4THm1Adh12t4IuqMsK\n"
    ht = htpasswd.HtpasswdFile(content)
    assert ht.check_password("user_bcrypt", "pass")
    assert not ht.check_password("user_bcrypt", "wrong")
@pytest.mark.parametrize(
    "file_content, err_msg",
    [
        ("malformed", "Malformed htpasswd line"),
        (":malformed", "Malformed htpasswd line"),
        ("user_md5:$apr1$....", "Unsupported htpasswd format"),
        ("user_ssha:{SSHA}...", "Unsupported htpasswd format"),
        ("user_plain:pass", "Unsupported htpasswd format"),
        ("user_crypt:..j8N8I28nVM", "Unsupported htpasswd format"),
        ("user_empty_pw:", "Unsupported htpasswd format"),
    ],
)
def test_errors(file_content, err_msg):
    """Malformed or unsupported lines raise ValueError with a clear message."""
    expected_error = pytest.raises(ValueError, match=err_msg)
    with expected_error:
        htpasswd.HtpasswdFile(file_content)
def test_from_file(tdata):
    """Loading the bundled htpasswd fixture yields at least one user."""
    fixture = Path(tdata.path("mitmproxy/data/htpasswd"))
    loaded = htpasswd.HtpasswdFile.from_file(fixture)
    assert loaded.users
def test_file_not_found():
    """A missing file surfaces as OSError with the expected message."""
    missing = Path("/nonexistent")
    with pytest.raises(OSError, match="Htpasswd file not found"):
        htpasswd.HtpasswdFile.from_file(missing)
def test_empty_and_comments():
    """Blank lines and comments parse to an empty user table."""
    content = "\n# comment\n \n\t# another comment\n"
    assert not htpasswd.HtpasswdFile(content).users
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "test/mitmproxy/utils/test_htpasswd.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mitmproxy/mitmproxy:examples/contrib/portfile.py | import json
import pathlib
from typing import Optional
from mitmproxy import ctx
class PortFile:
    """Addon that writes a `portfile` mapping proxy mode specs to ports."""

    def load(self, loader):
        loader.add_option(
            name="datadir",
            typespec=Optional[str],
            default=None,
            help="Creates `portfile` mapping proxies (by mode spec) to the port "
            "they use in the provided directory.",
        )

    def running(self):
        if not ctx.options.datadir:
            return
        datadir = pathlib.Path(ctx.options.datadir)
        if not datadir.is_dir():
            ctx.log.warning("%s is not a directory", datadir)
            return
        proxies = ctx.master.addons.get("proxyserver")
        modemap = {}
        for instance in proxies.servers:
            # assumes all listen_addrs of a given instance are bound to the
            # same port, but as far as I can tell mitmproxy works very hard
            # to try and make it so
            addr = next(iter(instance.listen_addrs), None)
            if addr:
                modemap[instance.mode.full_spec] = addr[1]
        with datadir.joinpath("portfile").open("w", encoding="utf-8") as fp:
            json.dump(modemap, fp)
# mitmproxy addon registration hook.
addons = [PortFile()]
| {
"repo_id": "mitmproxy/mitmproxy",
"file_path": "examples/contrib/portfile.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlc-ai/mlc-llm:python/mlc_llm/loader/standard_loader.py | """Standard HuggingFace loader mapping helpers."""
from __future__ import annotations
import functools
from typing import Callable, Iterable, Optional, Sequence, Type
import numpy as np
from tvm.relax.frontend import nn # type: ignore[import]
from mlc_llm.loader import ExternMapping
from mlc_llm.quantization import Quantization
NameTransform = Callable[[str], str]
ExportSpecGetter = Callable[[nn.Module], object]
def _default_export_spec(model: "nn.Module") -> object:
    """Fall back to the model's own default TVM export spec."""
    spec = model.get_default_spec()
    return spec
def make_standard_hf_loader(  # pylint: disable=too-many-arguments,too-many-locals
    *,
    model_cls: Type[nn.Module],
    layer_prefix: str = "model.layers",
    qkv_names: Sequence[str] = ("q_proj", "k_proj", "v_proj"),
    qkv_concat_axis: int = 0,
    qkv_target_name: str = "qkv_proj",
    add_qkv_bias: bool = False,
    qkv_bias_optional: bool = False,
    gate_up_names: Sequence[str] = ("gate_proj", "up_proj"),
    gate_up_concat_axis: int = 0,
    gate_up_target_name: str = "gate_up_proj",
    include_qkv: bool = True,
    include_gate_up: bool = True,
    add_unused: Optional[Iterable[str]] = None,
    hf_prefix: str = "model.",
    name_transform: Optional[NameTransform] = None,
    export_spec_getter: Optional[ExportSpecGetter] = None,
    num_layers_getter: Optional[Callable[[object], int]] = None,
) -> Callable[[object, Quantization], ExternMapping]:
    """Create a standard loader for HuggingFace weights.

    This handles the common QKV concatenation, gate+up concatenation, optional
    QKV bias mapping, and passes through remaining parameters 1:1.

    The returned callable (``huggingface``) builds an ExternMapping for a
    given (model_config, quantization) pair; all the keyword arguments here
    are captured by that closure.
    """
    # Normalize the include flags and name tuples so that the closure can
    # rely on "empty tuple" meaning "skip this concatenation".
    if not qkv_names:
        include_qkv = False
    if not gate_up_names:
        include_gate_up = False
    if not include_qkv:
        qkv_names = ()
    if not include_gate_up:
        gate_up_names = ()

    def _default_name_transform(name: str) -> str:
        # When hf_prefix is empty, strip the "model." prefix so models that
        # expose bare top-level weights (no "model." namespace) still load.
        if hf_prefix == "":
            return name[6:] if name.startswith("model.") else name
        return name

    name_transform_fn = name_transform or _default_name_transform
    spec_getter = export_spec_getter or _default_export_spec
    unused_names = tuple(add_unused or ())

    def huggingface(  # pylint: disable=too-many-locals,too-many-branches
        model_config: object,
        quantization: Quantization,
    ) -> ExternMapping:
        """Map HF parameter names to MLC parameter names for one model."""
        model = model_cls(model_config)
        if quantization is not None:
            model.to(quantization.model_dtype)
        # Export once to discover the MLC-side parameter names and dtypes.
        _, _named_params, _ = model.export_tvm(  # type: ignore[misc]
            spec=spec_getter(model),
            allow_extern=True,
        )
        named_parameters = dict(_named_params)

        mapping = ExternMapping()

        if include_qkv or include_gate_up or unused_names:
            if num_layers_getter is None:
                num_layers = model_config.num_hidden_layers  # type: ignore[attr-defined]
            else:
                num_layers = num_layers_getter(model_config)
            for i in range(num_layers):
                attn = f"{layer_prefix}.{i}.self_attn"
                if include_qkv:
                    # Fuse the separate q/k/v HF weights into one qkv tensor.
                    mlc_qkv_name = f"{attn}.{qkv_target_name}.weight"
                    mlc_param = named_parameters[mlc_qkv_name]
                    mapping.add_mapping(
                        mlc_qkv_name,
                        [name_transform_fn(f"{attn}.{name}.weight") for name in qkv_names],
                        functools.partial(
                            lambda q, k, v, dtype: np.concatenate(
                                [q, k, v], axis=qkv_concat_axis
                            ).astype(dtype),
                            dtype=mlc_param.dtype,
                        ),
                    )
                    if add_qkv_bias:
                        # Bias fusion mirrors the weight fusion; optionally
                        # skipped when the exported model has no bias param.
                        mlc_bias_name = f"{attn}.{qkv_target_name}.bias"
                        if (not qkv_bias_optional) or mlc_bias_name in named_parameters:
                            mlc_param = named_parameters[mlc_bias_name]
                            mapping.add_mapping(
                                mlc_bias_name,
                                [name_transform_fn(f"{attn}.{name}.bias") for name in qkv_names],
                                functools.partial(
                                    lambda q, k, v, dtype: np.concatenate(
                                        [q, k, v], axis=qkv_concat_axis
                                    ).astype(dtype),
                                    dtype=mlc_param.dtype,
                                ),
                            )
                if include_gate_up:
                    # Fuse gate_proj + up_proj into a single gate_up tensor.
                    mlp = f"{layer_prefix}.{i}.mlp"
                    mlc_gate_up_name = f"{mlp}.{gate_up_target_name}.weight"
                    if gate_up_names:
                        mlc_param = named_parameters[mlc_gate_up_name]
                        mapping.add_mapping(
                            mlc_gate_up_name,
                            [name_transform_fn(f"{mlp}.{name}.weight") for name in gate_up_names],
                            functools.partial(
                                lambda gate, up, dtype: np.concatenate(
                                    [gate, up], axis=gate_up_concat_axis
                                ).astype(dtype),
                                dtype=mlc_param.dtype,
                            ),
                        )
                for unused_name in unused_names:
                    mapping.add_unused(name_transform_fn(f"{attn}.{unused_name}"))

        # Everything not explicitly fused above maps 1:1 (dtype cast only).
        for mlc_name, mlc_param in named_parameters.items():
            if mlc_name not in mapping.param_map:
                mapping.add_mapping(
                    mlc_name,
                    [name_transform_fn(mlc_name)],
                    functools.partial(
                        lambda x, dtype: x.astype(dtype),
                        dtype=mlc_param.dtype,
                    ),
                )
        return mapping

    return huggingface
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/loader/standard_loader.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:python/mlc_llm/quantization/model_quantization.py | """Quantization factory utilities for model quantization."""
from typing import Any, Callable, Dict, Optional, Tuple, Type
from tvm.relax.frontend import nn
from mlc_llm.loader import QuantizeMapping
from .awq_quantization import AWQQuantize
from .block_scale_quantization import BlockScaleQuantize
from .ft_quantization import FTQuantize
from .group_quantization import GroupQuantize
from .no_quantization import NoQuantize
from .per_tensor_quantization import PerTensorQuantize
from .quantization import Quantization
FuncQuantization = Callable[[Any, Quantization], Tuple[nn.Module, QuantizeMapping]]
def make_quantization_functions(  # pylint: disable=too-many-arguments, too-many-locals
    model_cls: Type[nn.Module],
    *,
    model_ctor: Optional[Callable[[Any], nn.Module]] = None,
    supports_group_quant: bool = True,
    supports_ft_quant: bool = True,
    supports_awq: bool = False,
    awq_unsupported_message: Optional[str] = None,
    supports_per_tensor: bool = False,
    supports_block_scale: bool = False,
    set_tensor_parallel_shards: bool = True,
    per_tensor_use_shards: bool = True,
) -> Dict[str, FuncQuantization]:
    """Create standard quantization function implementations for a model class.

    Parameters
    ----------
    model_cls : Type[nn.Module]
        Model class instantiated once per quantization call.
    model_ctor : Optional[Callable[[Any], nn.Module]]
        Optional factory that overrides ``model_cls(model_config)``.
    supports_group_quant / supports_ft_quant / supports_awq /
    supports_per_tensor / supports_block_scale : bool
        Toggles controlling which schemes are registered in the result.
    awq_unsupported_message : Optional[str]
        When set, the AWQ entry raises NotImplementedError with this message.
    set_tensor_parallel_shards : bool
        Whether group quantization copies ``tensor_parallel_shards`` from the
        model config onto the quantization object.
    per_tensor_use_shards : bool
        Whether per-tensor quantization forwards ``tensor_parallel_shards``
        as a keyword argument to ``quantize_model``.

    Returns
    -------
    Dict[str, FuncQuantization]
        Mapping from scheme name (e.g. "group-quant") to its implementation.
    """

    def _create_model(model_config: Any) -> nn.Module:
        # Prefer the custom constructor when one is supplied.
        if model_ctor is not None:
            return model_ctor(model_config)
        return model_cls(model_config)

    def _require_shards(model_config: Any, scheme: str) -> Any:
        # Shared validation for schemes that need tensor-parallel shard info.
        if not hasattr(model_config, "tensor_parallel_shards"):
            raise AttributeError(
                "model_config is missing required attribute "
                f"'tensor_parallel_shards' for {scheme}"
            )
        return model_config.tensor_parallel_shards

    def _simple_quant(model_config: Any, quantization: Any) -> Tuple[nn.Module, QuantizeMapping]:
        # Common path shared by FT, AWQ and block-scale: build, cast, quantize.
        model = _create_model(model_config)
        model.to(quantization.model_dtype)
        quant_map = QuantizeMapping({}, {})
        model = quantization.quantize_model(model, quant_map, "")
        return model, quant_map

    def _no_quant(model_config: Any, quantization: NoQuantize) -> Tuple[nn.Module, QuantizeMapping]:
        # No quantization: only cast the model to the target dtype.
        model = _create_model(model_config)
        model.to(quantization.model_dtype)
        return model, QuantizeMapping({}, {})

    def _group_quant(
        model_config: Any,
        quantization: GroupQuantize,
    ) -> Tuple[nn.Module, QuantizeMapping]:
        model = _create_model(model_config)
        model.to(quantization.model_dtype)
        quant_map = QuantizeMapping({}, {})
        if set_tensor_parallel_shards:
            # Group quantization needs the shard count to partition weights.
            quantization.tensor_parallel_shards = _require_shards(
                model_config, "group quantization"
            )
        model = quantization.quantize_model(model, quant_map, "")
        return model, quant_map

    def _ft_quant(model_config: Any, quantization: FTQuantize) -> Tuple[nn.Module, QuantizeMapping]:
        return _simple_quant(model_config, quantization)

    def _awq_quant(
        model_config: Any, quantization: AWQQuantize
    ) -> Tuple[nn.Module, QuantizeMapping]:
        if awq_unsupported_message is not None:
            raise NotImplementedError(awq_unsupported_message)
        return _simple_quant(model_config, quantization)

    def _per_tensor_quant(
        model_config: Any,
        quantization: PerTensorQuantize,
    ) -> Tuple[nn.Module, QuantizeMapping]:
        model = _create_model(model_config)
        model.to(quantization.model_dtype)
        quant_map = QuantizeMapping({}, {})
        kwargs = {}
        if per_tensor_use_shards:
            kwargs["tensor_parallel_shards"] = _require_shards(
                model_config, "per-tensor quantization"
            )
        model = quantization.quantize_model(
            model,
            quant_map,
            "",
            **kwargs,
        )
        return model, quant_map

    def _block_scale_quant(
        model_config: Any,
        quantization: BlockScaleQuantize,
    ) -> Tuple[nn.Module, QuantizeMapping]:
        return _simple_quant(model_config, quantization)

    # Register only the schemes this model supports; "no-quant" is always available.
    quantize_fns: Dict[str, FuncQuantization] = {"no-quant": _no_quant}
    if supports_group_quant:
        quantize_fns["group-quant"] = _group_quant
    if supports_ft_quant:
        quantize_fns["ft-quant"] = _ft_quant
    if supports_awq:
        quantize_fns["awq"] = _awq_quant
    if supports_per_tensor:
        quantize_fns["per-tensor-quant"] = _per_tensor_quant
    if supports_block_scale:
        quantize_fns["block-scale-quant"] = _block_scale_quant
    return quantize_fns
def make_awq_quant(
    model_cls: Type[nn.Module],
) -> Callable[[Any, AWQQuantize], Tuple[nn.Module, QuantizeMapping]]:
    """Build the standard AWQ quantization callable for loaders of *model_cls*."""

    def awq_quant(
        model_config: Any, quantization: AWQQuantize
    ) -> Tuple[nn.Module, QuantizeMapping]:
        # Instantiate the model, cast to the quantization dtype, then quantize.
        quant_map = QuantizeMapping({}, {})
        model = model_cls(model_config)
        model.to(quantization.model_dtype)
        quantized = quantization.quantize_model(model, quant_map, "")
        return quantized, quant_map

    return awq_quant
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/quantization/model_quantization.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:python/mlc_llm/bench/evaluation/mmlu.py | """Eval MMLU with MLCEngine."""
import argparse
import asyncio
import csv
import json
import string
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
import numpy as np
import tqdm
from mlc_llm import AsyncMLCEngine
# MMLU subject names; each maps to a "<subject>_test.csv" file under the dataset's test/ dir.
SUBJECTS = [
    "abstract_algebra",
    "anatomy",
    "astronomy",
    "business_ethics",
    "clinical_knowledge",
    "college_biology",
    "college_chemistry",
    "college_computer_science",
    "college_mathematics",
    "college_medicine",
    "college_physics",
    "computer_security",
    "conceptual_physics",
    "econometrics",
    "electrical_engineering",
    "elementary_mathematics",
    "formal_logic",
    "global_facts",
    "high_school_biology",
    "high_school_chemistry",
    "high_school_computer_science",
    "high_school_european_history",
    "high_school_geography",
    "high_school_government_and_politics",
    "high_school_macroeconomics",
    "high_school_mathematics",
    "high_school_microeconomics",
    "high_school_physics",
    "high_school_psychology",
    "high_school_statistics",
    "high_school_us_history",
    "high_school_world_history",
    "human_aging",
    "human_sexuality",
    "international_law",
    "jurisprudence",
    "logical_fallacies",
    "machine_learning",
    "management",
    "marketing",
    "medical_genetics",
    "miscellaneous",
    "moral_disputes",
    "moral_scenarios",
    "nutrition",
    "philosophy",
    "prehistory",
    "professional_accounting",
    "professional_law",
    "professional_medicine",
    "professional_psychology",
    "public_relations",
    "security_studies",
    "sociology",
    "us_foreign_policy",
    "virology",
    "world_religions",
]
# Width used to left-justify subject names in the progress-bar description.
PADDING_LEN = max(len(subject) for subject in SUBJECTS)
# Devices selectable on the command line (besides "auto").
DEVICES = ["cuda", "rocm", "metal", "vulkan"]
# Multiple-choice prompt template: $Q is the question, $A-$D the four options.
PROMPT_TEMPLATE = string.Template("$Q\nA. $A\nB. $B\nC. $C\nD. $D\nAnswer:")
def parse_args():
    """Parse command line arguments for the MMLU evaluation script."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--model", type=str, required=True)
    add("--dataset", type=Path, required=True, help="Path to MMLU test dataset home.")
    add("--device", type=str, choices=["auto"] + DEVICES, default="auto")
    add("--model-lib", type=str, default=None)
    add("-s", "--subject", nargs="+", type=str, choices=SUBJECTS, default=SUBJECTS)
    add("-bs", "--batch-size", type=int, default=16)
    add("--log-dir", type=Path, default=None)
    return parser.parse_args()
async def send_request(
    async_engine: AsyncMLCEngine,
    prompts: List[str],
    semaphore: asyncio.Semaphore,
    subject: str,
):
    """Send the calibration requests to the engine."""

    async def generate_task(prompt):
        # The semaphore bounds how many requests are in flight at once.
        async with semaphore:
            return await async_engine.completions.create(
                prompt=prompt,
                stream=False,
                max_tokens=1,
                temperature=1.0,
                logprobs=True,
                top_logprobs=5,
            )

    pending = [asyncio.create_task(generate_task(prompt)) for prompt in prompts]
    return await tqdm.asyncio.tqdm.gather(
        *pending,
        desc=f"Running {subject.ljust(PADDING_LEN)}",
        bar_format="{desc} {percentage:3.0f}%|{bar}{r_bar}",
    )
async def evaluate(  # pylint: disable=too-many-arguments, too-many-locals
    model: str,
    device: str,
    dataset: Path,
    model_lib: Optional[str],
    subjects: List[str],
    semaphore: asyncio.Semaphore,
    log_dir: Optional[Path],  # pylint: disable=redefined-outer-name
):
    """Evaluate MMLU for the model.

    Parameters
    ----------
    model : str
        Model identifier/path passed to AsyncMLCEngine.
    device : str
        Device string, e.g. "auto" or "cuda".
    dataset : Path
        MMLU dataset home; per-subject CSVs live under ``dataset / "test"``.
    model_lib : Optional[str]
        Optional compiled model library path.
    subjects : List[str]
        Subject names to evaluate; each maps to "<subject>_test.csv".
    semaphore : asyncio.Semaphore
        Bounds the number of concurrent requests.
    log_dir : Optional[Path]
        When set, per-subject logs and a summary JSON are written beneath it.
    """
    async_engine = AsyncMLCEngine(model, device=device, model_lib=model_lib, mode="server")
    results: Dict[str, Any] = {}
    for subject in subjects:
        with open(dataset / "test" / f"{subject}_test.csv", encoding="utf-8") as csvfile:
            tests = list(csv.reader(csvfile, delimiter=",", quotechar='"'))
            # Each row: question, the four options (A-D), and the answer letter.
            assert all(len(test) == 6 for test in tests)
        logs = []
        num_correct = 0
        prompts = [
            PROMPT_TEMPLATE.substitute(Q=test[0], A=test[1], B=test[2], C=test[3], D=test[4])
            for test in tests
        ]
        responses = await send_request(async_engine, prompts, semaphore, subject)
        assert len(responses) == len(tests)
        for response, test in zip(responses, tests):
            # Keep only the first (highest-ranked) logprob seen per distinct token.
            token_logprobs = {}
            logprobs = response.choices[0].logprobs.content[0].top_logprobs
            for logprob in logprobs:
                if logprob.token not in token_logprobs:
                    token_logprobs[logprob.token] = logprob.logprob
            # Score each choice letter; -100 when absent from the top logprobs.
            abcd_logprobs = {}
            for choice in ["A", "B", "C", "D"]:
                abcd_logprobs[choice] = token_logprobs[choice] if choice in token_logprobs else -100
            # Predict the letter with the highest logprob; compare to ground truth.
            pred = {0: "A", 1: "B", 2: "C", 3: "D"}[int(np.argmax(list(abcd_logprobs.values())))]
            num_correct += pred == test[5]
            logs.append(
                {
                    "Question": {
                        "Q": test[0],
                        "A": test[1],
                        "B": test[2],
                        "C": test[3],
                        "D": test[4],
                    },
                    "Answer": test[5],
                    "Response": {
                        "pred": pred,
                        "logprobs": list(abcd_logprobs.values()),
                    },
                }
            )
        results[subject] = {
            "correct": num_correct,
            "total": len(tests),
            "accuracy": num_correct / len(tests),
        }
        if log_dir:
            with open(log_dir / "subjects" / f"{subject}.json", "w", encoding="utf-8") as f:
                json.dump(logs, f, indent=2)
    # Aggregate per-subject counts into overall accuracy.
    total_correct, total_tests = 0, 0
    for subject, v in results.items():
        num_correct, num_tests, accuracy = v["correct"], v["total"], v["accuracy"]
        print(f"{subject}: {num_correct} / {num_tests} = {accuracy * 100:.2f}%")
        total_correct += num_correct
        total_tests += num_tests
    total_accuracy = total_correct / total_tests
    results["total"] = {
        "correct": total_correct,
        "total": total_tests,
        "accuracy": total_accuracy,
    }
    print(f"Total accuracy: {total_correct} / {total_tests} = {total_accuracy * 100:.2f}%")
    if log_dir:
        # Wrap the results with the run configuration before writing the summary.
        results = {
            "config": {
                "model": model,
                "device": device,
                "model_lib": model_lib,
                "subjects": subjects,
            },
            "results": results,
        }
        with open(log_dir / "summary.json", "w", encoding="utf-8") as f:
            json.dump(results, f, indent=2)
if __name__ == "__main__":
    args = parse_args()
    start_time = datetime.now()
    # Create a timestamped log directory (with a per-subject subfolder) if requested.
    log_dir: Optional[Path] = None
    if args.log_dir is not None:
        time_dir = start_time.strftime("%Y-%m-%d_%H-%M-%S")
        log_dir = args.log_dir / time_dir
        (log_dir / "subjects").mkdir(parents=True, exist_ok=True)
    asyncio.run(
        evaluate(
            model=args.model,
            device=args.device,
            dataset=args.dataset,
            model_lib=args.model_lib,
            subjects=args.subject,
            semaphore=asyncio.Semaphore(args.batch_size),
            log_dir=log_dir,
        )
    )
    end_time = datetime.now()
    print(f"Time used: {end_time - start_time}")
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/bench/evaluation/mmlu.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:python/mlc_llm/bench/evaluation/gsm8k.py | """Eval GSM8K with MLCEngine."""
import argparse
import asyncio
import json
import random
import re
from datetime import datetime
from pathlib import Path
from typing import List, Literal, Optional
import tqdm
from mlc_llm import AsyncMLCEngine
# Devices selectable on the command line (besides "auto").
DEVICES = ["cuda", "rocm", "metal", "vulkan"]
# Phrase that announces the final answer in the few-shot examples.
ANSWER_TRIGGER = "The answer is"
# Sentinel returned when no answer can be extracted from a response.
INVALID_ANS = "[invalid]"
def extract_answer(text: str, regex: re.Pattern, select_index: int) -> str:
    """Extract one numeric answer matched by *regex* from *text*.

    Returns INVALID_ANS when no match is found. The selected match is
    stripped of whitespace, a leading "$", a trailing ".", and commas.
    """
    candidates = regex.findall(text)
    if not candidates:
        return INVALID_ANS
    selected = candidates[select_index]
    if isinstance(selected, tuple):
        # With multiple capture groups, findall yields tuples; keep the
        # first group that actually matched.
        selected = [group for group in selected if group][0]
    cleaned = selected.strip()
    return cleaned.lstrip("$").rstrip(".").replace(",", "")
def extract_ground_truth(text: str) -> str:
    """Extract the ground-truth answer following the GSM8K "#### " marker."""
    gt_regex = re.compile(r"#### (\-?[0-9\.\,]+)")
    return extract_answer(text, gt_regex, 0)
def strict_extract_answer(text: str) -> str:
    """Extract the answer announced by the "The answer is" sentence, strictly."""
    strict_regex = re.compile(r"The answer is \$?(\-?[0-9\.\,]+).")
    return extract_answer(text, strict_regex, 0)
def flexible_extract_answer(text: str) -> str:
    """Extract the last number appearing anywhere in the text."""
    number_regex = re.compile(r"(-?[$0-9.,]{2,})|(-?[0-9]+)")
    return extract_answer(text, number_regex, -1)
def create_few_shot_prompt(n_shot: int, use_cot: bool, random_order=False) -> str:
    """
    Create a prompt for the few-shot learning task.

    Parameters
    ----------
    n_shot : int
        Number of worked examples (out of 8) to include.
    use_cot : bool
        Include the chain-of-thought reasoning ("Q:/A:" style) when True;
        otherwise only question and final answer ("Question:/Answer:" style).
    random_order : bool
        Shuffle the example order before taking the first n_shot.

    Note
    ----
    The examples are taken from the paper https://arxiv.org/pdf/2201.11903.pdf page 35.
    """
    # Parallel lists: question[i], chain-of-thought[i], final answer[i].
    question, chain, answer = [], [], []
    question.append(
        "There are 15 trees in the grove. "
        "Grove workers will plant trees in the grove today. "
        "After they are done, there will be 21 trees. "
        "How many trees did the grove workers plant today?"
    )
    chain.append(
        "There are 15 trees originally. "
        "Then there were 21 trees after some more were planted. "
        "So there must have been 21 - 15 = 6."
    )
    answer.append("6")
    question.append(
        "If there are 3 cars in the parking lot and 2 more cars arrive, "
        "how many cars are in the parking lot?"
    )
    chain.append("There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5.")
    answer.append("5")
    question.append(
        "Leah had 32 chocolates and her sister had 42. If they ate 35, "
        "how many pieces do they have left in total?"
    )
    chain.append(
        "Originally, Leah had 32 chocolates. "
        "Her sister had 42. So in total they had 32 + 42 = 74. "
        "After eating 35, they had 74 - 35 = 39."
    )
    answer.append("39")
    question.append(
        "Jason had 20 lollipops. He gave Denny some lollipops. Now Jason "
        "has 12 lollipops. How many lollipops did Jason give to Denny?"
    )
    chain.append(
        "Jason started with 20 lollipops. Then he had 12 after giving some "
        "to Denny. So he gave Denny 20 - 12 = 8."
    )
    answer.append("8")
    question.append(
        "Shawn has five toys. For Christmas, he got two toys each from his "
        "mom and dad. How many toys does he have now?"
    )
    chain.append(
        "Shawn started with 5 toys. If he got 2 toys each from his mom and "
        "dad, then that is 4 more toys. 5 + 4 = 9."
    )
    answer.append("9")
    question.append(
        "There were nine computers in the server room. Five more computers "
        "were installed each day, from monday to thursday. "
        "How many computers are now in the server room?"
    )
    chain.append(
        "There were originally 9 computers. For each of 4 days, 5 more "
        "computers were added. So 5 * 4 = 20 computers were added. "
        "9 + 20 is 29."
    )
    answer.append("29")
    question.append(
        "Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On "
        "wednesday, he lost 2 more. "
        "How many golf balls did he have at the end of wednesday?"
    )
    chain.append(
        "Michael started with 58 golf balls. After losing 23 on tuesday, "
        "he had 58 - 23 = 35. After losing 2 more, "
        "he had 35 - 2 = 33 golf balls."
    )
    answer.append("33")
    question.append(
        "Olivia has $23. She bought five bagels for $3 each. How much money does she have left?"
    )
    chain.append(
        "Olivia had 23 dollars. "
        "5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. "
        "So she has 23 - 15 dollars left. 23 - 15 is 8."
    )
    answer.append("8")
    # Optionally shuffle, then keep the first n_shot examples.
    index_list = list(range(len(question)))
    if random_order:
        random.shuffle(index_list)
    prompt = ""
    for i in index_list[:n_shot]:
        if use_cot:
            prompt += f"Q: {question[i]}\nA: {chain[i]} {ANSWER_TRIGGER} {answer[i]}.\n\n"
        else:
            prompt += f"Question: {question[i]}\nAnswer: {ANSWER_TRIGGER} {answer[i]}.\n\n"
    return prompt
def create_prompt(question: str, n_shot: int, use_cot: bool, random_order: bool = False) -> str:
    """Build the full prompt: few-shot examples followed by *question*."""
    few_shot = create_few_shot_prompt(n_shot, use_cot, random_order)
    if use_cot:
        return f"{few_shot}Q: {question}\nA:"
    return f"{few_shot}Question: {question}\nAnswer:"
def parse_args():
    """Parse command line arguments for the GSM8K evaluation script."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--model", type=str, required=True)
    add("--dataset", type=Path, required=True, help="Path to GSM8K test dataset home.")
    add("--device", type=str, choices=["auto"] + DEVICES, default="auto")
    add("--model-lib", type=str, default=None)
    add("--n-shot", type=int, default=8)
    add("--disable_cot", action="store_true", default=False)
    add("-bs", "--batch-size", type=int, default=16)
    add("--log-dir", type=Path, default=None)
    return parser.parse_args()
async def send_request(
    async_engine: AsyncMLCEngine,
    prompts: List[str],
    semaphore: asyncio.Semaphore,
):
    """Send the calibration requests to the engine."""

    async def generate_task(prompt):
        # The semaphore bounds how many requests are in flight at once.
        async with semaphore:
            return await async_engine.completions.create(
                prompt=prompt,
                stream=False,
                max_tokens=512,
                stop=["Q:", "Question:"],
                temperature=0.0,
            )

    pending = [asyncio.create_task(generate_task(prompt)) for prompt in prompts]
    return await tqdm.asyncio.tqdm.gather(*pending)
async def evaluate(  # pylint: disable=too-many-arguments, too-many-locals
    model: str,
    device: str,
    dataset: Path,
    model_lib: Optional[str],
    n_shot: int,
    use_cot: bool,
    batch_size: int,
    log_dir: Optional[Path],  # pylint: disable=redefined-outer-name
):
    """Evaluate GSM8K for the model.

    Parameters
    ----------
    model : str
        Model identifier/path passed to AsyncMLCEngine.
    device : str
        Device string, e.g. "auto" or "cuda".
    dataset : Path
        GSM8K dataset home containing ``test.jsonl``.
    model_lib : Optional[str]
        Optional compiled model library path.
    n_shot : int
        Number of few-shot examples prepended to each question.
    use_cot : bool
        Whether to use chain-of-thought prompting.
    batch_size : int
        Maximum number of concurrent requests; also selects the engine mode.
    log_dir : Optional[Path]
        When set, summary.json and logs.json are written into this directory.
    """
    # Pick an engine mode suited to the requested concurrency level.
    mode: Literal["local", "interactive", "server"] = (
        "server" if batch_size > 4 else "interactive" if batch_size == 1 else "local"
    )
    async_engine = AsyncMLCEngine(model, device=device, model_lib=model_lib, mode=mode)
    with open(dataset / "test.jsonl", "r", encoding="utf-8") as file:
        tests = [json.loads(line) for line in file]
    prompts = [create_prompt(test["question"], n_shot, use_cot) for test in tests]
    responses = await send_request(async_engine, prompts, asyncio.Semaphore(batch_size))
    assert len(responses) == len(tests)
    num_strict_correct, num_flexible_correct = 0, 0
    num_tests = len(tests)
    logs = []
    for response, test in zip(responses, tests):
        response_text = response.choices[0].text.strip()
        gt_answer = extract_ground_truth(test["answer"])
        assert gt_answer != INVALID_ANS
        # Extract once and reuse; previously the extraction functions were
        # re-invoked inside the comparison branches, doing the regex work twice.
        strict_answer = strict_extract_answer(response_text)
        flexible_answer = flexible_extract_answer(response_text)
        strict_match = gt_answer == strict_answer
        flexible_match = gt_answer == flexible_answer
        if strict_match:
            # A strict match always counts as a flexible match too.
            num_strict_correct += 1
            num_flexible_correct += 1
        elif flexible_match:
            # Fall back to the last number in the response.
            num_flexible_correct += 1
        logs.append(
            {
                "question": test["question"],
                "response": response_text,
                "ground_truth": gt_answer,
                "strict_answer": strict_answer,
                "flexible_answer": flexible_answer,
                "strict_match": strict_match,
                "flexible_match": flexible_match,
            }
        )
    results = {
        "config": {
            "model": model,
            "device": device,
            "model_lib": model_lib,
            "n_shot": n_shot,
            "use_cot": use_cot,
        },
        "results": {
            "strict_match": num_strict_correct,
            "flexible_match": num_flexible_correct,
            "total": num_tests,
        },
    }
    print(
        f"Strict Matching Accuracy: {num_strict_correct} / {num_tests} = "
        f"{num_strict_correct /num_tests * 100:.2f}%"
    )
    print(
        f"Flexible Matching Accuracy: {num_flexible_correct} / {num_tests} = "
        f"{num_flexible_correct /num_tests * 100:.2f}%"
    )
    if log_dir:
        with open(log_dir / "summary.json", "w", encoding="utf-8") as f:
            json.dump(results, f, indent=2)
        with open(log_dir / "logs.json", "w", encoding="utf-8") as f:
            json.dump(logs, f, indent=2)
if __name__ == "__main__":
    args = parse_args()
    start_time = datetime.now()
    # Create a timestamped log directory if requested.
    log_dir: Optional[Path] = None
    if args.log_dir is not None:
        time_dir = start_time.strftime("%Y-%m-%d_%H-%M-%S")
        log_dir = args.log_dir / time_dir
        log_dir.mkdir(parents=True, exist_ok=True)
    asyncio.run(
        evaluate(
            model=args.model,
            device=args.device,
            dataset=args.dataset,
            model_lib=args.model_lib,
            n_shot=args.n_shot,
            use_cot=not args.disable_cot,
            batch_size=args.batch_size,
            log_dir=log_dir,
        )
    )
    end_time = datetime.now()
    print(f"Time used: {end_time - start_time}")
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/bench/evaluation/gsm8k.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:tests/python/support/test_cli_convert_weight.py | # pylint: disable=missing-docstring
import json
import tempfile
from pathlib import Path
import pytest
from mlc_llm.cli import convert_weight as convert_weight_cli
# Mark every test in this module as a unit test for pytest selection.
pytestmark = [pytest.mark.unittest]
def test_convert_weight_cli_passes_lora_adapter(monkeypatch):
    """The CLI forwards --lora-adapter and the detected source to convert_weight."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_path = Path(tmp_dir)
        config_path = temp_path / "config.json"
        source_dir = temp_path / "source"
        source_dir.mkdir(parents=True, exist_ok=True)
        source_index = source_dir / "pytorch_model.bin.index.json"
        adapter_dir = temp_path / "adapter"
        adapter_dir.mkdir(parents=True, exist_ok=True)
        output_dir = temp_path / "output"
        # Minimal on-disk fixtures: an empty model config and an empty weight index.
        config_path.write_text(json.dumps({}), encoding="utf-8")
        source_index.write_text(json.dumps({"weight_map": {}}), encoding="utf-8")
        # Stubs for the environment-dependent detection helpers.
        def _fake_detect_device(device):
            return device
        def _fake_detect_weight(_weight_path, _config_json_path, _weight_format):
            return source_index, "huggingface-torch"
        def _fake_detect_model_type(_model_type, _config):
            return "dummy"
        monkeypatch.setattr(convert_weight_cli, "detect_config", Path)
        monkeypatch.setattr(convert_weight_cli, "detect_device", _fake_detect_device)
        monkeypatch.setattr(convert_weight_cli, "detect_weight", _fake_detect_weight)
        monkeypatch.setattr(convert_weight_cli, "detect_model_type", _fake_detect_model_type)
        monkeypatch.setattr(convert_weight_cli, "MODELS", {"dummy": object()})
        monkeypatch.setattr(convert_weight_cli, "QUANTIZATION", {"q0f16": object()})
        # Capture the kwargs the CLI passes to the conversion entry point.
        call_args = {}
        def _fake_convert_weight(**kwargs):
            call_args.update(kwargs)
        monkeypatch.setattr(convert_weight_cli, "convert_weight", _fake_convert_weight)
        convert_weight_cli.main(
            [
                str(config_path),
                "--quantization",
                "q0f16",
                "--model-type",
                "dummy",
                "--source",
                str(source_dir),
                "--source-format",
                "auto",
                "--output",
                str(output_dir),
                "--lora-adapter",
                str(adapter_dir),
            ]
        )
        assert call_args["lora_adapter"] == adapter_dir
        assert call_args["source"] == source_index
        assert call_args["source_format"] == "huggingface-torch"
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "tests/python/support/test_cli_convert_weight.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlc-ai/mlc-llm:tests/python/support/test_convert_weight_lora_merge.py | # pylint: disable=missing-docstring,protected-access
import contextlib
import json
import tempfile
from pathlib import Path
import pytest
from mlc_llm.interface import convert_weight as convert_weight_interface
# Mark every test in this module as a unit test for pytest selection.
pytestmark = [pytest.mark.unittest]
def test_resolve_base_model_dir():
    """Both a model directory and a file inside it resolve to the directory."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        model_dir = Path(tmp_dir) / "model"
        model_dir.mkdir(parents=True, exist_ok=True)
        index_file = model_dir / "pytorch_model.bin.index.json"
        index_file.write_text(json.dumps({"weight_map": {}}), encoding="utf-8")
        resolve = convert_weight_interface._resolve_base_model_dir
        assert resolve(model_dir) == model_dir
        assert resolve(index_file) == model_dir
def test_convert_weight_with_lora_uses_merged_source(monkeypatch):
    """convert_weight merges the LoRA adapter, then re-detects weights in the merged dir."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_path = Path(tmp_dir)
        config_path = temp_path / "config.json"
        config_path.write_text(json.dumps({}), encoding="utf-8")
        source_dir = temp_path / "source"
        source_dir.mkdir(parents=True, exist_ok=True)
        source_file = source_dir / "pytorch_model.bin.index.json"
        source_file.write_text(json.dumps({"weight_map": {}}), encoding="utf-8")
        adapter_dir = temp_path / "adapter"
        adapter_dir.mkdir(parents=True, exist_ok=True)
        # Pre-made "merged" output that the fake merge context manager yields.
        merged_dir = temp_path / "merged"
        merged_dir.mkdir(parents=True, exist_ok=True)
        merged_file = merged_dir / "pytorch_model.bin"
        merged_file.write_bytes(b"")
        # Records the arguments each stubbed collaborator receives.
        captured = {}
        @contextlib.contextmanager
        def _fake_merge(base_source: Path, lora_adapter: Path):
            captured["merge_base_source"] = base_source
            captured["merge_lora_adapter"] = lora_adapter
            yield merged_dir
        def _fake_detect_weight(weight_path: Path, config_json_path: Path, weight_format: str):
            captured["detect_weight_path"] = weight_path
            captured["detect_weight_config"] = config_json_path
            captured["detect_weight_format"] = weight_format
            return merged_file, "huggingface-torch"
        def _fake_convert_args(args):
            captured["converted_args"] = args
        monkeypatch.setattr(
            convert_weight_interface, "_merge_lora_adapter_with_base_model", _fake_merge
        )
        monkeypatch.setattr(convert_weight_interface, "detect_weight", _fake_detect_weight)
        monkeypatch.setattr(convert_weight_interface, "_convert_args", _fake_convert_args)
        monkeypatch.setattr(convert_weight_interface.ConversionArgs, "display", lambda self: None)
        convert_weight_interface.convert_weight(
            config=config_path,
            quantization=object(),
            model=type("DummyModel", (), {"name": "dummy"})(),
            device=object(),
            source=source_file,
            source_format="huggingface-safetensor",
            output=temp_path / "output",
            lora_adapter=adapter_dir,
        )
        converted_args = captured["converted_args"]
        # The merge must receive the original source, and weight detection must
        # re-run ("auto") against the merged directory, not the original source.
        assert captured["merge_base_source"] == source_file
        assert captured["merge_lora_adapter"] == adapter_dir
        assert captured["detect_weight_path"] == merged_dir
        assert captured["detect_weight_config"] == config_path
        assert captured["detect_weight_format"] == "auto"
        assert converted_args.source == merged_file
        assert converted_args.source_format == "huggingface-torch"
        assert converted_args.lora_adapter == adapter_dir
def test_convert_weight_with_lora_rejects_awq():
    """Requesting a LoRA merge on an AWQ source must raise ValueError."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_path = Path(tmp_dir)
        config_path = temp_path / "config.json"
        config_path.write_text(json.dumps({}), encoding="utf-8")
        adapter_dir = temp_path / "adapter"
        adapter_dir.mkdir(parents=True, exist_ok=True)
        kwargs = dict(
            config=config_path,
            quantization=object(),
            model=type("DummyModel", (), {"name": "dummy"})(),
            device=object(),
            source=temp_path / "source",
            source_format="awq",
            output=temp_path / "output",
            lora_adapter=adapter_dir,
        )
        with pytest.raises(ValueError, match="only supports source formats"):
            convert_weight_interface.convert_weight(**kwargs)
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "tests/python/support/test_convert_weight_lora_merge.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlc-ai/mlc-llm:python/mlc_llm/serve/embedding_engine.py | """Asynchronous embedding inference engine for encoder and decoder models."""
import asyncio
import concurrent.futures
import json
import os
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
import tvm
from tvm import relax
from tvm.runtime import Device, ShapeTuple
from mlc_llm.serve import engine_utils
from mlc_llm.support.auto_device import detect_device
from mlc_llm.tokenizers import Tokenizer
class AsyncEmbeddingEngine: # pylint: disable=too-many-instance-attributes
"""Asynchronous embedding inference engine.
Supports both encoder models (BERT-style) and decoder-only embedding models
(e.g. Qwen3-Embeddings). Uses a ThreadPoolExecutor for background inference
so that the asyncio event loop is not blocked.
Parameters
----------
model : str
Path to the model weight directory.
model_lib : str
Path to the compiled model library (.so/.dylib file).
device : Union[str, Device]
Device string, e.g. "auto", "cuda:0", "metal".
pooling_strategy : Optional[str]
Pooling strategy: "cls" (first token), "mean" (masked average),
or "last" (last token). If None, auto-detected based on model type:
encoder -> "cls", decoder -> "last".
"""
    def __init__(  # pylint: disable=too-many-branches
        self,
        model: str,
        model_lib: str,
        device: Union[str, Device] = "auto",
        *,
        pooling_strategy: Optional[str] = None,
    ) -> None:
        """Load the model library and weights and prepare inference state.

        See the class docstring for parameter descriptions.
        """
        # Reuse existing utility: device detection
        self.device = detect_device(device) if isinstance(device, str) else device
        # Reuse existing utility: tokenizer
        self.tokenizer = Tokenizer(model)
        # Load TVM module, metadata, and params via engine_utils helpers
        ex = tvm.runtime.load_module(model_lib)
        vm = relax.VirtualMachine(ex, device=self.device)
        self._mod = vm.module
        self._metadata = json.loads(self._mod["_metadata"]())
        self._params = engine_utils.load_embedding_params(model, self.device, self._metadata)
        # Detect model type and set pooling strategy.
        # Defaults: encoder -> "cls" (first token), decoder -> "last" (last token).
        self.model_type = engine_utils.detect_embedding_model_type(self._mod)
        if pooling_strategy is not None:
            self.pooling_strategy = pooling_strategy
        else:
            self.pooling_strategy = "cls" if self.model_type == "encoder" else "last"
        # Initialize model-type-specific functions
        if self.model_type == "encoder":
            self._init_encoder(model)
        else:
            self._init_decoder(model)
        # Background thread pool (1 worker = serialized GPU inference)
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=1, thread_name_prefix="embedding"
        )
        # Set to True once the engine is shut down (managed elsewhere in the class).
        self._terminated = False
def _init_encoder(self, model: str) -> None:
"""Initialize encoder (BERT-style) model functions and special tokens."""
self._prefill_func = self._mod["prefill"]
self._cls_token_id: Optional[int] = None
self._sep_token_id: Optional[int] = None
tok_config_path = os.path.join(model, "tokenizer_config.json")
if os.path.exists(tok_config_path):
with open(tok_config_path, encoding="utf-8") as f:
tok_config = json.load(f)
# Try added_tokens_decoder first (newer HF format)
added = tok_config.get("added_tokens_decoder", {})
for tid, info in added.items():
if info.get("content") == tok_config.get("cls_token"):
self._cls_token_id = int(tid)
if info.get("content") == tok_config.get("sep_token"):
self._sep_token_id = int(tid)
# Fallback: encode the special token strings via tokenizer
if self._cls_token_id is None and tok_config.get("cls_token"):
ids = list(self.tokenizer.encode(tok_config["cls_token"]))
if len(ids) == 1:
self._cls_token_id = ids[0]
if self._sep_token_id is None and tok_config.get("sep_token"):
ids = list(self.tokenizer.encode(tok_config["sep_token"]))
if len(ids) == 1:
self._sep_token_id = ids[0]
    def _init_decoder(self, model: str) -> None:
        """Initialize decoder (Qwen3-Embeddings style) model functions.

        Parameters
        ----------
        model : str
            Path to the model directory; tokenizer.json and mlc-chat-config.json
            are read from here when present.
        """
        # Prefer tokenizer post-processing (HF-style) for terminal/pooling token handling.
        # Only fall back to manual EOS append when tokenizer does not define a post-processor
        # that actually appends a token at the end of the sequence.
        self._decoder_tokenizer_appends_eos = False
        tokenizer_json_path = os.path.join(model, "tokenizer.json")
        if os.path.exists(tokenizer_json_path):
            with open(tokenizer_json_path, encoding="utf-8") as f:
                tokenizer_json = json.load(f)
            post_proc = tokenizer_json.get("post_processor")
            if post_proc is not None:
                # Check if the post-processor actually appends a special token at the end
                # (e.g. TemplateProcessing with "$A <|endoftext|>"). We verify by encoding
                # a test string and checking if the last token is a known special token.
                test_tokens = list(self.tokenizer.encode("test"))
                if len(test_tokens) > 0:
                    vocab = tokenizer_json.get("added_tokens", [])
                    special_ids = {t["id"] for t in vocab if t.get("special", False)}
                    if test_tokens[-1] in special_ids:
                        self._decoder_tokenizer_appends_eos = True
        # Read EOS token from config — fallback only when tokenizer does not auto-append.
        self._decoder_eos_token_id: Optional[int] = None
        config_path = os.path.join(model, "mlc-chat-config.json")
        if os.path.exists(config_path):
            with open(config_path, encoding="utf-8") as f:
                chat_config = json.load(f)
            eos = chat_config.get("eos_token_id")
            # eos_token_id may be a single id or a list; use the first entry if a list.
            if isinstance(eos, list):
                self._decoder_eos_token_id = eos[0]
            elif isinstance(eos, int):
                self._decoder_eos_token_id = eos
        # Bind the compiled functions used by the decoder embedding path.
        self._embed_func = self._mod["embed"]
        self._prefill_to_hidden_func = self._mod["prefill_to_last_hidden_states"]
        self._batch_prefill_to_hidden_func = self._mod["batch_prefill_to_last_hidden_states"]
        # Prefer the TIR KV cache constructor; fall back to FlashInfer if present.
        if self._mod.implements_function("create_tir_paged_kv_cache"):
            self._create_kv_cache_func = self._mod["create_tir_paged_kv_cache"]
        elif self._mod.implements_function("create_flashinfer_paged_kv_cache"):
            self._create_kv_cache_func = self._mod["create_flashinfer_paged_kv_cache"]
        else:
            raise RuntimeError("Cannot find KV cache creation function in model library.")
        # TVM global helpers for driving the paged KV cache during prefill.
        self._kv_state_add_sequence = tvm.get_global_func("vm.builtin.kv_state_add_sequence")
        self._kv_state_remove_sequence = tvm.get_global_func("vm.builtin.kv_state_remove_sequence")
        self._kv_state_begin_forward = tvm.get_global_func("vm.builtin.kv_state_begin_forward")
        self._kv_state_end_forward = tvm.get_global_func("vm.builtin.kv_state_end_forward")
        self._nd_reshape = tvm.get_global_func("vm.builtin.reshape")
def embed(self, inputs: List[str]) -> Tuple[List[List[float]], int]:
    """Compute embeddings for a list of input strings (synchronous).

    Dispatches to the encoder (BERT-style) or decoder implementation
    according to the detected model type.

    Parameters
    ----------
    inputs : List[str]
        The input strings to embed.

    Returns
    -------
    embeddings : List[List[float]]
        The L2-normalized embedding vectors.
    total_tokens : int
        Total number of tokens processed.
    """
    run = self._embed_encoder if self.model_type == "encoder" else self._embed_decoder
    return run(inputs)
async def async_embed(self, inputs: List[str]) -> Tuple[List[List[float]], int]:
    """Compute embeddings asynchronously in a background thread.

    The blocking embed() call runs on the engine's private executor, so
    the asyncio event loop is never blocked.

    Parameters
    ----------
    inputs : List[str]
        The input strings to embed.

    Returns
    -------
    embeddings : List[List[float]]
        The L2-normalized embedding vectors.
    total_tokens : int
        Total number of tokens processed.
    """
    event_loop = asyncio.get_running_loop()
    result = await event_loop.run_in_executor(self._executor, self.embed, inputs)
    return result
def _embed_encoder(  # pylint: disable=too-many-locals
    self, inputs: List[str]
) -> Tuple[List[List[float]], int]:
    """Encoder model embedding (BERT-style).

    Processes each input individually to avoid batch padding artifacts.
    Encoder uses bidirectional attention, so chunked prefill is NOT possible
    (each token must attend to all other tokens in the full sequence).
    Inputs exceeding prefill_chunk_size are truncated.

    (Additional Strategy)
    TODO: For better long-text support, implement sliding window + mean pooling:
      1. Split text into overlapping windows of prefill_chunk_size (stride=chunk/2)
      2. Encode each window independently
      3. Mean-pool all window embeddings → final embedding → L2 normalize
    This preserves information from the full text at the cost of N× compute.

    Parameters
    ----------
    inputs : List[str]
        Raw input strings; each is tokenized and encoded independently.

    Returns
    -------
    embeddings : List[List[float]]
        One L2-normalized vector per input.
    total_tokens : int
        Total number of tokens processed (after special tokens / truncation).
    """
    embeddings: List[List[float]] = []
    total_tokens = 0
    prefill_chunk = self._metadata.get("prefill_chunk_size", 512)
    for text in inputs:
        tokens = list(self.tokenizer.encode(text))
        # Add [CLS] and [SEP] if needed
        if self._cls_token_id is not None and (
            len(tokens) == 0 or tokens[0] != self._cls_token_id
        ):
            tokens = [self._cls_token_id] + tokens
        if self._sep_token_id is not None and (
            len(tokens) == 0 or tokens[-1] != self._sep_token_id
        ):
            tokens = tokens + [self._sep_token_id]
        # Truncate to compiled buffer limit (keep [CLS] at start, [SEP] at end)
        if len(tokens) > prefill_chunk:
            tokens = tokens[:prefill_chunk]
            if self._sep_token_id is not None:
                # Overwrite the new last token so the sequence still ends with [SEP].
                tokens[-1] = self._sep_token_id
        seq_len = len(tokens)
        total_tokens += seq_len
        token_ids = np.array([tokens], dtype=np.int32)  # [1, seq_len]
        # Mask is all ones: each input is encoded alone, so there is no padding.
        attention_mask: np.ndarray = np.ones((1, seq_len), dtype=np.int32)  # [1, seq_len]
        tokens_tvm = tvm.runtime.tensor(token_ids, device=self.device)
        mask_tvm = tvm.runtime.tensor(attention_mask, device=self.device)
        output = self._prefill_func(tokens_tvm, mask_tvm, self._params)
        # .numpy() copies to CPU, escaping TVM workspace buffer reuse across calls.
        output_np = output.numpy()  # [1, seq_len, hidden_size]
        # Pooling
        if self.pooling_strategy == "cls":
            pooled = output_np[0, 0, :]
        elif self.pooling_strategy == "mean":
            pooled = output_np[0].mean(axis=0)
        else:  # "last"
            pooled = output_np[0, -1, :]
        # L2 normalize
        pooled = pooled.astype(np.float32)
        norm = np.linalg.norm(pooled)
        # Guard the zero vector: leave it unnormalized rather than divide by ~0.
        if norm > 1e-12:
            pooled = pooled / norm
        embeddings.append(pooled.tolist())
    return embeddings, total_tokens
def _embed_decoder(self, inputs: List[str]) -> Tuple[List[List[float]], int]:
"""Decoder model embedding with batch prefill optimization.
When total tokens fit within prefill_chunk_size, all inputs are processed
in a single batch forward pass using shared KV cache. Otherwise, falls back
to sequential chunked prefill per input.
"""
# Read KV cache config from metadata
prefill_chunk = self._metadata.get("prefill_chunk_size", 2048)
max_seq_len = self._metadata.get("context_window_size", 32768)
if max_seq_len == -1:
max_seq_len = self._metadata.get("sliding_window_size", -1)
assert max_seq_len > 0, f"max_seq_len must be positive, got {max_seq_len}"
support_sliding = int(self._metadata.get("sliding_window_size", -1) != -1)
# Tokenize all inputs. Prefer tokenizer post-processor output. If absent (older models),
# fall back to appending eos_token_id when missing.
token_lists: List[List[int]] = []
for text in inputs:
tokens = list(self.tokenizer.encode(text))
if (
not self._decoder_tokenizer_appends_eos
and self._decoder_eos_token_id is not None
and (len(tokens) == 0 or tokens[-1] != self._decoder_eos_token_id)
):
tokens.append(self._decoder_eos_token_id)
if len(tokens) > max_seq_len:
tokens = tokens[:max_seq_len]
token_lists.append(tokens)
total_tokens = sum(len(t) for t in token_lists)
# Fast path: all tokens fit in one prefill chunk → batch forward
if total_tokens <= prefill_chunk and all(len(t) > 0 for t in token_lists):
return self._batch_embed_decoder(
token_lists, total_tokens, max_seq_len, prefill_chunk, support_sliding
)
# Greedy sub-batching: pack texts into sub-batches that fit within
# prefill_chunk, preserving input order. Oversize texts (single text
# exceeding prefill_chunk) fall back to sequential chunked prefill.
sub_batches = self._build_sub_batches(token_lists, prefill_chunk)
all_embeddings: List[List[float]] = []
for batch_type, batch, batch_total in sub_batches:
if batch_type == "batch":
embs, _ = self._batch_embed_decoder(
batch, batch_total, max_seq_len, prefill_chunk, support_sliding
)
else:
embs, _ = self._sequential_embed_decoder(
batch, batch_total, max_seq_len, prefill_chunk, support_sliding
)
all_embeddings.extend(embs)
return all_embeddings, total_tokens
@staticmethod
def _build_sub_batches(
token_lists: List[List[int]], prefill_chunk: int
) -> List[Tuple[Literal["batch", "sequential"], List[List[int]], int]]:
"""Partition token lists into sub-batches that fit within prefill_chunk.
Each sub-batch is a tuple of (mode, token_lists, total_token_count).
Empty token lists are skipped to avoid invalid batch processing.
"""
sub_batches: List[Tuple[Literal["batch", "sequential"], List[List[int]], int]] = []
current_batch: List[List[int]] = []
current_tokens = 0
for tokens in token_lists:
if not tokens:
continue
token_len = len(tokens)
is_oversized = token_len > prefill_chunk
if current_batch and (is_oversized or current_tokens + token_len > prefill_chunk):
sub_batches.append(("batch", current_batch, current_tokens))
current_batch, current_tokens = [], 0
if is_oversized:
sub_batches.append(("sequential", [tokens], token_len))
else:
current_batch.append(tokens)
current_tokens += token_len
if current_batch:
sub_batches.append(("batch", current_batch, current_tokens))
return sub_batches
def _batch_embed_decoder(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    token_lists: List[List[int]],
    total_tokens: int,
    max_seq_len: int,
    prefill_chunk: int,
    support_sliding: int,
) -> Tuple[List[List[float]], int]:
    """Batch prefill: process all inputs in a single forward pass.

    Parameters
    ----------
    token_lists : List[List[int]]
        Tokenized inputs; the caller guarantees each list is non-empty and
        the combined length fits within one prefill chunk.
    total_tokens : int
        Sum of all token list lengths (precomputed by the caller).
    max_seq_len : int
        Maximum sequence length the KV cache must support.
    prefill_chunk : int
        Compiled prefill chunk size.
    support_sliding : int
        1 if the model uses sliding-window attention, else 0.

    Returns
    -------
    embeddings : List[List[float]]
        One L2-normalized last-token hidden state per input.
    total_tokens : int
        Echoes the *total_tokens* argument.
    """
    batch_size = len(token_lists)
    # Create KV cache for the entire batch
    kv_cache = self._create_kv_cache_func(
        ShapeTuple([batch_size]),
        ShapeTuple([max_seq_len]),
        ShapeTuple([prefill_chunk]),
        ShapeTuple([16]),  # NOTE(review): presumably the KV page size — confirm
        ShapeTuple([support_sliding]),
    )
    # Register all sequences
    seq_ids = list(range(batch_size))
    seq_lens = [len(t) for t in token_lists]
    for sid in seq_ids:
        self._kv_state_add_sequence(kv_cache, sid)
    # Begin forward with all sequences at once
    self._kv_state_begin_forward(kv_cache, ShapeTuple(seq_ids), ShapeTuple(seq_lens))
    # Concatenate all tokens → embed → batch prefill
    all_tokens = []
    for tokens in token_lists:
        all_tokens.extend(tokens)
    token_ids = tvm.runtime.tensor(np.array(all_tokens, dtype=np.int32), device=self.device)
    all_embed = self._embed_func(token_ids, self._params)
    # Reshape to the [1, total_tokens, hidden] layout the prefill function takes.
    all_embed = self._nd_reshape(all_embed, ShapeTuple([1, total_tokens, all_embed.shape[-1]]))
    hidden_states, _ = self._batch_prefill_to_hidden_func(all_embed, kv_cache, self._params)
    # .numpy() copies to CPU, escaping TVM workspace buffer reuse across calls.
    # (torch.from_dlpack is zero-copy and hits aliasing bugs on 2nd+ invocation.)
    hidden_np = hidden_states.numpy()
    self._kv_state_end_forward(kv_cache)
    for sid in seq_ids:
        self._kv_state_remove_sequence(kv_cache, sid)
    # Extract last token hidden state per sequence
    embeddings: List[List[float]] = []
    offset = 0
    for tokens in token_lists:
        last_pos = offset + len(tokens) - 1
        pooled = hidden_np[0, last_pos, :].astype(np.float32)
        norm = np.linalg.norm(pooled)
        # Guard the zero vector: leave it unnormalized rather than divide by ~0.
        if norm > 1e-12:
            pooled = pooled / norm
        embeddings.append(pooled.tolist())
        offset += len(tokens)
    return embeddings, total_tokens
def _sequential_embed_decoder(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    token_lists: List[List[int]],
    total_tokens: int,
    max_seq_len: int,
    prefill_chunk: int,
    support_sliding: int,
) -> Tuple[List[List[float]], int]:
    """Sequential chunked prefill: process each input independently.

    Used when a single input exceeds one prefill chunk: each sequence gets
    its own KV cache and is prefilled chunk by chunk; the final chunk's
    last hidden state becomes the embedding.

    Parameters
    ----------
    token_lists : List[List[int]]
        Tokenized inputs; empty lists are skipped.
    total_tokens : int
        Sum of all token list lengths (echoed back to the caller).
    max_seq_len : int
        Maximum sequence length the KV cache must support.
    prefill_chunk : int
        Compiled prefill chunk size; inputs are split into chunks this long.
    support_sliding : int
        1 if the model uses sliding-window attention, else 0.

    Returns
    -------
    embeddings : List[List[float]]
        One L2-normalized last-token hidden state per non-empty input.
    total_tokens : int
        Echoes the *total_tokens* argument.
    """
    embeddings: List[List[float]] = []
    for tokens in token_lists:
        if len(tokens) == 0:
            continue
        # Create KV cache for this single sequence
        kv_cache = self._create_kv_cache_func(
            ShapeTuple([1]),
            ShapeTuple([max_seq_len]),
            ShapeTuple([prefill_chunk]),
            ShapeTuple([16]),  # NOTE(review): presumably the KV page size — confirm
            ShapeTuple([support_sliding]),
        )
        self._kv_state_add_sequence(kv_cache, 0)
        # Process tokens in chunks
        hidden = None
        for chunk_start in range(0, len(tokens), prefill_chunk):
            chunk_end = min(chunk_start + prefill_chunk, len(tokens))
            chunk_tokens = tokens[chunk_start:chunk_end]
            chunk_len = len(chunk_tokens)
            token_ids = tvm.runtime.tensor(
                np.array(chunk_tokens, dtype=np.int32), device=self.device
            )
            chunk_embed = self._embed_func(token_ids, self._params)
            chunk_embed = self._nd_reshape(
                chunk_embed, ShapeTuple([1, chunk_len, chunk_embed.shape[-1]])
            )
            # begin/end forward bracket each chunk's prefill over the same sequence.
            self._kv_state_begin_forward(kv_cache, ShapeTuple([0]), ShapeTuple([chunk_len]))
            hidden, kv_cache = self._prefill_to_hidden_func(chunk_embed, kv_cache, self._params)
            # .numpy() copies to CPU, escaping TVM buffer aliasing.
            hidden_np = hidden.numpy()
            self._kv_state_end_forward(kv_cache)
        self._kv_state_remove_sequence(kv_cache, 0)
        # Only the final chunk's last position matters for the embedding.
        pooled = hidden_np[0, -1, :] if hidden_np.ndim == 3 else hidden_np[-1, :]
        pooled = pooled.astype(np.float32)
        norm = np.linalg.norm(pooled)
        # Guard the zero vector: leave it unnormalized rather than divide by ~0.
        if norm > 1e-12:
            pooled = pooled / norm
        embeddings.append(pooled.tolist())
    return embeddings, total_tokens
def terminate(self) -> None:
    """Shut the engine down exactly once; later calls are no-ops."""
    # Default True guards __del__ on partially constructed instances:
    # if __init__ raised before setting _terminated, there is nothing to clean.
    already_done = getattr(self, "_terminated", True)
    if already_done:
        return
    self._terminated = True
    self._executor.shutdown(wait=False)
def __del__(self):
self.terminate()
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/serve/embedding_engine.py",
"license": "Apache License 2.0",
"lines": 408,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:tests/python/serve/server/test_embedding_server.py | """Embedding server endpoint tests in MLC LLM.
Tests the /v1/embeddings endpoint via HTTP using the OpenAI client,
following the same patterns as test_server.py.
Reuses MLC LLM test infrastructure:
- Pytest markers (endpoint)
- expect_error() response validation pattern from test_server.py
- OpenAI client usage pattern from test_server.py
- Session-scoped server fixture pattern from conftest.py
Run (launches its own embedding-only server):
MLC_SERVE_EMBEDDING_MODEL_LIB="path/to/model.dylib" \
pytest -m endpoint tests/python/serve/server/test_embedding_server.py -v
Environment variables:
MLC_SERVE_EMBEDDING_MODEL_LIB Path to compiled embedding model library (required)
MLC_SERVE_EMBEDDING_MODEL Path to embedding model weight directory
(optional, defaults to dirname of model lib)
"""
import json
import os
import signal
import subprocess
import sys
import time
from pathlib import Path
from typing import Dict, Optional
import numpy as np
import pytest
import requests
from openai import OpenAI
# Reuse MLC LLM marker system
pytestmark = [pytest.mark.endpoint]
# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
# Path to the compiled embedding model library (tests skip when unset).
EMBEDDING_MODEL_LIB = os.environ.get("MLC_SERVE_EMBEDDING_MODEL_LIB")
# Weight directory; defaults to the directory containing the model library.
EMBEDDING_MODEL_DIR = os.environ.get(
    "MLC_SERVE_EMBEDDING_MODEL",
    os.path.dirname(EMBEDDING_MODEL_LIB) if EMBEDDING_MODEL_LIB else None,
)
# Local test server address. NOTE(review): 8321 presumably avoids clashing
# with the default serve port used by other test servers — confirm.
EMBEDDING_SERVER_HOST = "127.0.0.1"
EMBEDDING_SERVER_PORT = 8321
EMBEDDING_BASE_URL = f"http://{EMBEDDING_SERVER_HOST}:{EMBEDDING_SERVER_PORT}/v1"
EMBEDDING_MODEL_NAME = "embedding"
def _skip_if_no_model():
    """Skip the calling test unless a compiled embedding model is configured."""
    lib, model_dir = EMBEDDING_MODEL_LIB, EMBEDDING_MODEL_DIR
    if lib is None:
        pytest.skip(
            'Environment variable "MLC_SERVE_EMBEDDING_MODEL_LIB" not found. '
            "Set it to a compiled embedding model library."
        )
    if not os.path.isfile(lib):
        pytest.skip(f"Embedding model library not found at: {lib}")
    if model_dir is None or not os.path.isdir(model_dir):
        pytest.skip(f"Embedding model directory not found at: {model_dir}")
# ---------------------------------------------------------------------------
# Response validation helpers — adapted from test_server.py patterns
# ---------------------------------------------------------------------------
def check_embedding_response(
    response: Dict,
    *,
    model: str,
    num_embeddings: int,
    expected_dim: Optional[int] = None,
    check_unit_norm: bool = True,
):
    """Assert *response* is a well-formed OpenAI-compatible embedding payload.

    Adapted from check_openai_nonstream_response() in test_server.py but
    specialized for the embedding envelope, vectors, and usage accounting.
    """
    assert response["object"] == "list"
    assert response["model"] == model

    entries = response["data"]
    assert isinstance(entries, list)
    assert len(entries) == num_embeddings
    for entry in entries:
        assert entry["object"] == "embedding"
        assert isinstance(entry["index"], int)
        emb = entry["embedding"]
        assert isinstance(emb, list)
        assert len(emb) > 0
        if expected_dim is not None:
            assert len(emb) == expected_dim, f"Expected dim={expected_dim}, got {len(emb)}"
        if check_unit_norm:
            norm = float(np.linalg.norm(emb))
            assert abs(norm - 1.0) < 1e-3, f"Expected unit norm, got {norm}"

    # Usage validation — same pattern as test_server.py.
    usage = response["usage"]
    assert isinstance(usage, dict)
    assert usage["prompt_tokens"] > 0
    assert usage["total_tokens"] == usage["prompt_tokens"]
def expect_error(response_str: str, msg_prefix: Optional[str] = None):
    """Assert *response_str* is a JSON error payload — copied from test_server.py."""
    payload = json.loads(response_str)
    assert payload["object"] == "error"
    message = payload["message"]
    assert isinstance(message, str)
    if msg_prefix is not None:
        assert message.startswith(msg_prefix)
# ---------------------------------------------------------------------------
# Server fixture — follows PopenServer/launch_server pattern from conftest.py
# ---------------------------------------------------------------------------
@pytest.fixture(scope="module")
def launch_embedding_server():
    """Launch an embedding-only server as a subprocess.

    Follows the same lifecycle pattern as the launch_server fixture
    in serve/server/conftest.py, but uses a lightweight embedding-only
    server since PopenServer doesn't support --embedding-model yet.

    Fix over the original: the child's stdout/stderr used to go to
    subprocess.PIPE without ever being drained. uvicorn at log_level="info"
    logs every request, so once the OS pipe buffer (~64 KiB) filled, the
    server blocked on write and the whole test session hung. stderr now
    goes to a temp file (still readable for the startup-failure message)
    and stdout is discarded.
    """
    import tempfile  # local import: only this fixture needs it

    _skip_if_no_model()
    mlc_llm_path = str(Path(__file__).resolve().parents[4] / "python")
    server_code = f"""
import sys
sys.path.insert(0, "{mlc_llm_path}")
import fastapi
import uvicorn
from mlc_llm.serve.engine import AsyncEmbeddingEngine
from mlc_llm.serve.server import ServerContext
from mlc_llm.serve.entrypoints import openai_entrypoints
app = fastapi.FastAPI()
app.include_router(openai_entrypoints.app)
engine = AsyncEmbeddingEngine(
    model="{EMBEDDING_MODEL_DIR}",
    model_lib="{EMBEDDING_MODEL_LIB}",
    device="auto",
)
ctx = ServerContext()
ServerContext.server_context = ctx
ctx.add_embedding_engine("{EMBEDDING_MODEL_NAME}", engine)
uvicorn.run(app, host="{EMBEDDING_SERVER_HOST}", port={EMBEDDING_SERVER_PORT}, log_level="info")
"""
    # stderr goes to a file so the child can never block on a full pipe;
    # Popen dups the fd, so closing our handle right away is safe.
    stderr_path = Path(tempfile.mkdtemp()) / "embedding_server_stderr.log"
    with open(stderr_path, "w", encoding="utf-8") as stderr_sink:
        proc = subprocess.Popen(
            [sys.executable, "-c", server_code],
            stdout=subprocess.DEVNULL,
            stderr=stderr_sink,
        )
    # Wait for server readiness — same polling pattern as PopenServer.start()
    timeout = 120
    attempts = 0.0
    ready = False
    while attempts < timeout:
        try:
            r = requests.get(f"{EMBEDDING_BASE_URL}/models", timeout=2)
            if r.status_code == 200:
                ready = True
                break
        except Exception:  # server not up yet; keep polling
            pass
        attempts += 0.5
        time.sleep(0.5)
    if not ready:
        proc.kill()
        stderr = stderr_path.read_text(encoding="utf-8", errors="replace")
        raise RuntimeError(f"Embedding server failed to start in {timeout}s.\nStderr: {stderr}")
    yield proc
    # Cleanup — same pattern as PopenServer.terminate()
    proc.send_signal(signal.SIGINT)
    try:
        proc.wait(timeout=10)
    except subprocess.TimeoutExpired:
        proc.kill()
@pytest.fixture(scope="module")
def client(launch_embedding_server):
    """Module-scoped OpenAI client pointed at the local embedding server."""
    api = OpenAI(base_url=EMBEDDING_BASE_URL, api_key="none")
    return api
# ===================================================================
# /v1/models
# ===================================================================


def test_models_endpoint(client, launch_embedding_server):
    """The /v1/models endpoint lists the embedding model."""
    resp = requests.get(f"{EMBEDDING_BASE_URL}/models")
    assert resp.status_code == 200
    data = resp.json()
    # Only the envelope is checked; the entries depend on the served config.
    assert isinstance(data["data"], list)


# ===================================================================
# Single input
# ===================================================================


def test_single_string_input(client, launch_embedding_server):
    """Single string input returns one embedding."""
    resp = client.embeddings.create(input="What is machine learning?", model=EMBEDDING_MODEL_NAME)
    # model_dump() yields a plain dict so the shared validator can index it.
    raw = resp.model_dump()
    check_embedding_response(raw, model=EMBEDDING_MODEL_NAME, num_embeddings=1)


# ===================================================================
# Batch input
# ===================================================================

# Shared inputs for the batch tests below.
BATCH_INPUTS = [
    "What is machine learning?",
    "How to brew coffee?",
    "ML is a subset of AI.",
]


def test_batch_string_input(client, launch_embedding_server):
    """List of strings returns one embedding per input."""
    resp = client.embeddings.create(input=BATCH_INPUTS, model=EMBEDDING_MODEL_NAME)
    raw = resp.model_dump()
    check_embedding_response(raw, model=EMBEDDING_MODEL_NAME, num_embeddings=len(BATCH_INPUTS))


def test_batch_index_ordering(client, launch_embedding_server):
    """Embedding indices are sequential."""
    resp = client.embeddings.create(input=BATCH_INPUTS, model=EMBEDDING_MODEL_NAME)
    indices = [d.index for d in resp.data]
    assert indices == list(range(len(BATCH_INPUTS)))
# ===================================================================
# Cosine similarity — semantic quality via endpoint
# ===================================================================


def test_cosine_similarity_via_endpoint(client, launch_embedding_server):
    """Related texts have higher similarity than unrelated (end-to-end)."""
    resp = client.embeddings.create(
        input=[
            "What is machine learning?",
            "Explain deep learning",
            "Order a pizza",
        ],
        model=EMBEDDING_MODEL_NAME,
    )
    e0, e1, e2 = [np.array(d.embedding) for d in resp.data]
    # Vectors are unit-norm, so a plain dot product is cosine similarity.
    sim_related = float(np.dot(e0, e1))
    sim_unrelated = float(np.dot(e0, e2))
    assert (
        sim_related > sim_unrelated
    ), f"Related ({sim_related:.4f}) should > unrelated ({sim_unrelated:.4f})"


# ===================================================================
# Dimension truncation (Matryoshka)
# ===================================================================


def test_dimension_truncation(client, launch_embedding_server):
    """dimensions parameter truncates and re-normalizes output."""
    target_dim = 256
    resp = client.embeddings.create(
        input="Hello world", model=EMBEDDING_MODEL_NAME, dimensions=target_dim
    )
    raw = resp.model_dump()
    # check_unit_norm defaults to True, so re-normalization is verified too.
    check_embedding_response(
        raw,
        model=EMBEDDING_MODEL_NAME,
        num_embeddings=1,
        expected_dim=target_dim,
    )
# ===================================================================
# Encoding format
# ===================================================================


def test_base64_encoding(launch_embedding_server):
    """base64 encoding format returns base64-encoded embeddings."""
    # Raw requests call — presumably because the OpenAI client decodes
    # base64 transparently, hiding the wire format this test observes.
    resp = requests.post(
        f"{EMBEDDING_BASE_URL}/embeddings",
        json={
            "input": "Hello world",
            "model": EMBEDDING_MODEL_NAME,
            "encoding_format": "base64",
        },
    )
    assert resp.status_code == 200
    data = resp.json()
    assert data["data"][0]["object"] == "embedding"
    # base64 string should be a non-empty string (not a list)
    emb = data["data"][0]["embedding"]
    assert isinstance(emb, str) and len(emb) > 0


# ===================================================================
# Error handling — reuses expect_error() pattern from test_server.py
# ===================================================================


def test_any_model_name_works_with_single_engine(launch_embedding_server):
    """When only one embedding engine is served, any model name works.

    This mirrors ServerContext.get_engine() behavior: a single served
    model is returned regardless of the requested model name.
    """
    resp = requests.post(
        f"{EMBEDDING_BASE_URL}/embeddings",
        json={"input": "test", "model": "any-name-works"},
    )
    assert resp.status_code == 200
    data = resp.json()
    assert len(data["data"]) == 1
# ===================================================================
# Standalone runner (same pattern as test_server.py __main__)
# ===================================================================

if __name__ == "__main__":
    _skip_if_no_model()
    print(f"Using model: {EMBEDDING_MODEL_DIR}")
    print(f"Using model lib: {EMBEDDING_MODEL_LIB}")
    print(f"Server URL: {EMBEDDING_BASE_URL}")
    print(
        "\nMake sure the embedding server is running, or set env vars "
        "and use pytest to auto-launch."
    )
    # Allow running against an already-running server
    c = OpenAI(base_url=EMBEDDING_BASE_URL, api_key="none")
    # None stands in for the launch_embedding_server fixture, which is
    # unused inside the test bodies (the server must already be running).
    test_models_endpoint(c, None)
    test_single_string_input(c, None)
    test_batch_string_input(c, None)
    test_batch_index_ordering(c, None)
    test_cosine_similarity_via_endpoint(c, None)
    test_dimension_truncation(c, None)
    print("\nAll embedding server tests passed!")
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "tests/python/serve/server/test_embedding_server.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlc-ai/mlc-llm:tests/python/serve/test_embedding_engine.py | """Embedding engine tests in MLC LLM.
Tests AsyncEmbeddingEngine for both direct (sync) and async embedding inference.
Reuses MLC LLM test infrastructure: markers, require_test_model pattern,
and conventions from test_serve_engine.py.
Run with real model (requires GPU + compiled embedding model):
MLC_SERVE_EMBEDDING_MODEL_LIB="path/to/model.dylib" \
pytest -m engine tests/python/serve/test_embedding_engine.py -v
Environment variables:
MLC_SERVE_EMBEDDING_MODEL_LIB Path to compiled embedding model library (required)
MLC_SERVE_EMBEDDING_MODEL Path to embedding model weight directory
(optional, defaults to dirname of model lib)
"""
import asyncio
import os
import numpy as np
import pytest
# Reuse MLC LLM marker system (registered in tests/python/conftest.py)
pytestmark = [pytest.mark.engine]

# ---------------------------------------------------------------------------
# Fixtures — follows pattern from serve/server/conftest.py (served_model)
# ---------------------------------------------------------------------------

# Path to the compiled embedding model library (tests skip when unset).
EMBEDDING_MODEL_LIB = os.environ.get("MLC_SERVE_EMBEDDING_MODEL_LIB")
# Weight directory; defaults to the directory containing the model library.
EMBEDDING_MODEL_DIR = os.environ.get(
    "MLC_SERVE_EMBEDDING_MODEL",
    os.path.dirname(EMBEDDING_MODEL_LIB) if EMBEDDING_MODEL_LIB else None,
)
def _skip_if_no_model():
    """Skip the calling test unless a compiled embedding model is configured."""
    lib, model_dir = EMBEDDING_MODEL_LIB, EMBEDDING_MODEL_DIR
    if lib is None:
        pytest.skip(
            'Environment variable "MLC_SERVE_EMBEDDING_MODEL_LIB" not found. '
            "Set it to a compiled embedding model library "
            "(e.g., Qwen3-Embedding-0.6B-q0f32-MLC.dylib)."
        )
    if not os.path.isfile(lib):
        pytest.skip(f"Embedding model library not found at: {lib}")
    if model_dir is None or not os.path.isdir(model_dir):
        pytest.skip(f"Embedding model directory not found at: {model_dir}")
@pytest.fixture(scope="module")
def embedding_engine():
    """Module-scoped AsyncEmbeddingEngine — loaded once, shared across tests."""
    _skip_if_no_model()
    # Lazy import — presumably so collection works on machines without the
    # serve dependencies installed; confirm against project conventions.
    from mlc_llm.serve.embedding_engine import AsyncEmbeddingEngine

    engine = AsyncEmbeddingEngine(
        model=EMBEDDING_MODEL_DIR,
        model_lib=EMBEDDING_MODEL_LIB,
        device="auto",
    )
    yield engine
    # Fixture teardown: release the engine's worker pool.
    engine.terminate()
# ---------------------------------------------------------------------------
# Helpers — reuse cosine_similarity pattern from test_serve_engine.py
# ---------------------------------------------------------------------------
def cosine_similarity(a, b):
    """Cosine of the angle between vectors *a* and *b* (1.0 = same direction)."""
    va = np.asarray(a, dtype=float)
    vb = np.asarray(b, dtype=float)
    return float((va @ vb) / (np.linalg.norm(va) * np.linalg.norm(vb)))
# ===================================================================
# Engine initialization tests
# ===================================================================


def test_engine_model_type(embedding_engine):
    """Engine reports a valid model type."""
    assert embedding_engine.model_type in ("encoder", "decoder")


def test_engine_pooling_strategy(embedding_engine):
    """Engine selects appropriate default pooling strategy."""
    # Encoders pool the [CLS] token; decoders pool the last token.
    if embedding_engine.model_type == "encoder":
        assert embedding_engine.pooling_strategy == "cls"
    else:
        assert embedding_engine.pooling_strategy == "last"
# ===================================================================
# Single-text embedding
# ===================================================================


def test_single_text_shape(embedding_engine):
    """Single text returns exactly one embedding vector."""
    embeddings, tokens = embedding_engine.embed(["Hello world"])
    assert len(embeddings) == 1
    assert len(embeddings[0]) > 0
    assert tokens > 0


def test_single_text_unit_norm(embedding_engine):
    """Embedding output is L2-normalized."""
    embeddings, _ = embedding_engine.embed(["Hello world"])
    norm = float(np.linalg.norm(embeddings[0]))
    assert abs(norm - 1.0) < 1e-4, f"Expected unit norm, got {norm}"


# ===================================================================
# Batch embedding
# ===================================================================

# Shared inputs for the batch tests below.
BATCH_TEXTS = [
    "Machine learning is fascinating",
    "I love pizza",
    "Deep learning uses neural networks",
]


def test_batch_count(embedding_engine):
    """Batch embedding returns one vector per input."""
    embeddings, tokens = embedding_engine.embed(BATCH_TEXTS)
    assert len(embeddings) == len(BATCH_TEXTS)
    assert tokens > 0


def test_batch_all_normalized(embedding_engine):
    """Every vector in a batch is L2-normalized."""
    embeddings, _ = embedding_engine.embed(BATCH_TEXTS)
    for i, emb in enumerate(embeddings):
        norm = float(np.linalg.norm(emb))
        assert abs(norm - 1.0) < 1e-4, f"Embedding [{i}] norm={norm}"


def test_batch_consistent_dimension(embedding_engine):
    """All embeddings in a batch have the same dimension."""
    embeddings, _ = embedding_engine.embed(BATCH_TEXTS)
    # The set of lengths collapses to one element iff all dims agree.
    dims = {len(emb) for emb in embeddings}
    assert len(dims) == 1, f"Inconsistent dimensions: {dims}"
# ===================================================================
# Semantic quality — cosine similarity ranking
# ===================================================================

# Two related ML questions plus one unrelated request.
SIMILARITY_TEXTS = [
    "What is machine learning?",
    "Explain deep learning algorithms",
    "I want to order pizza",
]


def test_cosine_similarity_ranking(embedding_engine):
    """Related texts have higher cosine similarity than unrelated texts."""
    embeddings, _ = embedding_engine.embed(SIMILARITY_TEXTS)
    e_ml, e_dl, e_pizza = [np.array(e) for e in embeddings]
    # Vectors are unit-norm, so dot product equals cosine similarity.
    sim_related = float(np.dot(e_ml, e_dl))
    sim_unrelated = float(np.dot(e_ml, e_pizza))
    assert (
        sim_related > sim_unrelated
    ), f"Related sim ({sim_related:.4f}) should > unrelated sim ({sim_unrelated:.4f})"


# ===================================================================
# Determinism
# ===================================================================


def test_deterministic_output(embedding_engine):
    """Same input produces identical output across calls."""
    text = ["Deterministic test"]
    emb1, _ = embedding_engine.embed(text)
    emb2, _ = embedding_engine.embed(text)
    cos = cosine_similarity(emb1[0], emb2[0])
    assert cos > 0.9999, f"Expected deterministic output, cosine={cos}"
# ===================================================================
# Async embedding
# ===================================================================
def test_async_embed(embedding_engine):
    """async_embed produces same result as sync embed."""
    text = ["Async test"]
    sync_emb, sync_tokens = embedding_engine.embed(text)
    # asyncio.run() replaces the manual new_event_loop()/run_until_complete()/
    # close() dance: it always closes the loop (and shuts down async
    # generators) even when the coroutine raises.
    async_emb, async_tokens = asyncio.run(embedding_engine.async_embed(text))
    assert sync_tokens == async_tokens
    cos = cosine_similarity(sync_emb[0], async_emb[0])
    assert cos > 0.9999, f"Async vs sync mismatch, cosine={cos}"
# ===================================================================
# Edge cases
# ===================================================================


def test_empty_string(embedding_engine):
    """Empty string behavior depends on model type.

    Encoder: [CLS]+[SEP] → valid embedding. Decoder: zero tokens → error."""
    if embedding_engine.model_type == "encoder":
        # Encoder adds [CLS]/[SEP], so empty string still produces valid embedding
        embeddings, _ = embedding_engine.embed([""])
        assert len(embeddings) == 1
        assert len(embeddings[0]) > 0
    else:
        # Decoder has no special tokens, zero tokens → skipped, empty result
        embeddings, tokens = embedding_engine.embed([""])
        assert len(embeddings) == 0
        assert tokens == 0
# ===================================================================
# Long text handling (model-type dependent)
# ===================================================================
def test_long_text_decoder_chunked_prefill(embedding_engine):
"""[Decoder only] Text >prefill_chunk_size triggers chunked prefill.
~5000 tokens processed in 3 chunks. Result is unit-norm embedding."""
if embedding_engine.model_type != "decoder":
pytest.skip("Chunked prefill is decoder-only")
long_text = "word " * 5000
embeddings, tokens = embedding_engine.embed([long_text])
assert tokens > 2048, f"Expected >2048 tokens to trigger chunking, got {tokens}"
norm = float(np.linalg.norm(embeddings[0]))
assert abs(norm - 1.0) < 1e-3
def test_long_text_encoder_truncation(embedding_engine):
"""[Encoder only] Text exceeding prefill_chunk_size is truncated.
Two texts with the same prefix but different suffixes beyond the limit
should produce identical embeddings, since the suffix gets truncated."""
if embedding_engine.model_type != "encoder":
pytest.skip("Truncation test is encoder-only")
prefill_chunk = embedding_engine._metadata.get("prefill_chunk_size", 512)
# Same prefix, different suffixes — both exceed the limit
shared_prefix = "machine learning is great " * 500 # ~2500 tokens
text_a = shared_prefix + " alpha beta gamma " * 500
text_b = shared_prefix + " totally different ending " * 500
emb_a, tokens_a = embedding_engine.embed([text_a])
emb_b, tokens_b = embedding_engine.embed([text_b])
# Verify truncation happened
assert (
tokens_a <= prefill_chunk
), f"Encoder should truncate to {prefill_chunk}, got {tokens_a} tokens"
assert tokens_b <= prefill_chunk
# Both should be valid unit-norm embeddings
assert abs(float(np.linalg.norm(emb_a[0])) - 1.0) < 1e-3
assert abs(float(np.linalg.norm(emb_b[0])) - 1.0) < 1e-3
# Both truncated to same first N tokens → identical embeddings
cos = cosine_similarity(emb_a[0], emb_b[0])
assert cos > 0.99, f"Same-prefix texts after truncation should match, cosine={cos:.6f}"
def test_long_vs_short_semantic_quality(embedding_engine):
"""Long text should still capture semantic meaning correctly.
Decoder: chunked prefill preserves full context.
Encoder: truncation keeps most relevant prefix."""
short_ml = "Machine learning enables systems to learn from data"
long_ml = (
"Machine learning is a fascinating field of study. " * 200
+ "It enables systems to learn from data."
)
pizza = "I want to order a pepperoni pizza for dinner"
embs, _ = embedding_engine.embed([short_ml, long_ml, pizza])
e_short, e_long, e_pizza = [np.array(e) for e in embs]
sim_same_topic = float(np.dot(e_short, e_long))
sim_different = float(np.dot(e_short, e_pizza))
assert (
sim_same_topic > sim_different
), f"Same topic ({sim_same_topic:.4f}) should > different ({sim_different:.4f})"
def test_unicode_text(embedding_engine):
    """Unicode input is handled correctly."""
    samples = ["Привет мир", "你好世界", "こんにちは世界"]
    vectors, _ = embedding_engine.embed(samples)
    assert len(vectors) == 3
    # Every embedding must still be unit-normalized.
    for vec in vectors:
        norm = float(np.linalg.norm(vec))
        assert abs(norm - 1.0) < 1e-4
# ===================================================================
# Standalone runner (like test_serve_engine.py)
# ===================================================================
if __name__ == "__main__":
    _skip_if_no_model()
    from mlc_llm.serve.embedding_engine import AsyncEmbeddingEngine

    # One shared engine instance is reused across all test cases and torn
    # down unconditionally at the end.
    engine = AsyncEmbeddingEngine(
        model=EMBEDDING_MODEL_DIR,
        model_lib=EMBEDDING_MODEL_LIB,
        device="auto",
    )
    try:
        # Run each case in the same order as the original standalone runner.
        for case in (
            test_engine_model_type,
            test_engine_pooling_strategy,
            test_single_text_shape,
            test_single_text_unit_norm,
            test_batch_count,
            test_batch_all_normalized,
            test_batch_consistent_dimension,
            test_cosine_similarity_ranking,
            test_deterministic_output,
            test_async_embed,
            test_empty_string,
            test_long_text_decoder_chunked_prefill,
            test_long_text_encoder_truncation,
            test_long_vs_short_semantic_quality,
            test_unicode_text,
        ):
            case(engine)
        print("\nAll embedding engine tests passed!")
    finally:
        engine.terminate()
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "tests/python/serve/test_embedding_engine.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlc-ai/mlc-llm:tests/python/model/test_gemma3.py | # pylint: disable=invalid-name,missing-docstring
"""Unit tests for Gemma3 model architecture."""
import pytest
from mlc_llm.model import MODEL_PRESETS, MODELS
def test_gemma3_model_registered():
    """Verify Gemma3 model is in the registry."""
    registered = "gemma3" in MODELS
    assert registered, "gemma3 should be registered in MODELS"
@pytest.mark.parametrize(
    "model_name",
    [
        "gemma3_2b",
        "gemma3_9b",
    ],
)
def test_gemma3_creation(model_name: str):
    """Test Gemma3 model creation and export to TVM IR.

    Verifies:
    - Config can be loaded from preset
    - Model instance can be created
    - Model exports to TVM IR successfully
    - Named parameters are extracted
    """
    entry = MODELS["gemma3"]
    cfg = entry.config.from_dict(MODEL_PRESETS[model_name])
    instance = entry.model(cfg)
    mod, named_params = instance.export_tvm(
        spec=instance.get_default_spec(),  # type: ignore
    )
    # Export must yield a module and a non-empty parameter list.
    assert mod is not None
    assert len(named_params) > 0
    # Optional: show module structure
    mod.show(black_format=False)
    # Print parameters for debugging
    for name, param in named_params:
        print(name, param.shape, param.dtype)
def test_gemma3_config_validation():
    """Test Gemma3 configuration has required fields."""
    entry = MODELS["gemma3"]
    cfg = entry.config.from_dict(MODEL_PRESETS["gemma3_2b"])
    # Each required dimension must exist and be strictly positive.
    for attr in ("hidden_size", "num_hidden_layers", "num_attention_heads", "vocab_size"):
        assert hasattr(cfg, attr) and getattr(cfg, attr) > 0
    print(
        f"Gemma3 Config: hidden_size={cfg.hidden_size}, "
        f"layers={cfg.num_hidden_layers}, "
        f"heads={cfg.num_attention_heads}, "
        f"vocab={cfg.vocab_size}"
    )
if __name__ == "__main__":
    # Allow running tests directly
    # NOTE(review): only the creation tests run here; the registry and
    # config-validation tests are exercised only under pytest.
    test_gemma3_creation("gemma3_2b")
    test_gemma3_creation("gemma3_9b")
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "tests/python/model/test_gemma3.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlc-ai/mlc-llm:python/mlc_llm/conversation_template/ministral3_reasoning.py | """Ministral3 reasoning templates"""
from mlc_llm.protocol.conversation_protocol import Conversation, MessagePlaceholders
from .registry import ConvTemplateRegistry
# Ministral-3-XB-Reasoning-2512
# Registers the chat template for the Ministral 3 reasoning models: the
# system prompt instructs the model to emit a [THINK]...[/THINK] scratchpad
# before the final answer.
ConvTemplateRegistry.register_conv_template(
    Conversation(
        name="ministral3_reasoning",
        # System prompt wrapped in [SYSTEM_PROMPT] tags; tool/function
        # definitions (if any) are appended immediately after.
        system_template=(
            f"[SYSTEM_PROMPT]{MessagePlaceholders.SYSTEM.value}[/SYSTEM_PROMPT]"
            f"{MessagePlaceholders.FUNCTION.value}"
        ),
        system_message=(
            "# HOW YOU SHOULD THINK AND ANSWER\n\n"
            "First draft your thinking process (inner monologue) until you arrive at a response. "
            "Format your response using Markdown, and use LaTeX for any mathematical equations. "
            "Write both your thoughts and the response in the same language as the input.\n\n"
            "Your thinking process must follow the template below:"
            "[THINK]Your thoughts or/and draft, like working through an exercise on scratch paper. "
            "Be as casual and as long as you want until you are confident to generate the response "
            "to the user.[/THINK]Here, provide a self-contained response."
        ),
        # Role markers are carried entirely by the per-role templates, so the
        # role names and separators below are empty strings.
        role_templates={
            "user": f"[INST]{MessagePlaceholders.USER.value}[/INST]",
            "assistant": f"{MessagePlaceholders.ASSISTANT.value}</s>",
            "tool": f"[TOOL_RESULTS]{MessagePlaceholders.TOOL.value}[/TOOL_RESULTS]",
        },
        roles={"user": "", "assistant": "", "tool": ""},
        seps=[""],
        role_content_sep="",
        role_empty_sep="",
        stop_str=["</s>"],
        stop_token_ids=[2],  # presumably the </s> token id — confirm against tokenizer
        system_prefix_token_ids=[1],  # presumably the <s>/BOS token id — confirm
    )
)
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/conversation_template/ministral3_reasoning.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlc-ai/mlc-llm:python/mlc_llm/conversation_template/ministral3.py | """Ministral3 templates"""
from mlc_llm.protocol.conversation_protocol import Conversation, MessagePlaceholders
from .registry import ConvTemplateRegistry
# Ministral3
# Registers the non-reasoning chat template for Ministral 3 instruct models.
# The system message is the upstream "Le Chat" prompt; {today}/{yesterday}
# are left as literal placeholders here (substitution, if any, happens at a
# higher layer — NOTE(review): not visible in this file).
ConvTemplateRegistry.register_conv_template(
    Conversation(
        name="ministral3",
        # System prompt wrapped in [SYSTEM_PROMPT] tags; tool/function
        # definitions (if any) are appended immediately after.
        system_template=(
            f"[SYSTEM_PROMPT]{MessagePlaceholders.SYSTEM.value}[/SYSTEM_PROMPT]"
            f"{MessagePlaceholders.FUNCTION.value}"
        ),
        system_message=(
            "You are Ministral-3-3B-Instruct-2512, a Large Language Model (LLM) created by "
            "Mistral AI, a French startup headquartered in Paris.\n"
            "You power an AI assistant called Le Chat.\n"
            "Your knowledge base was last updated on 2023-10-01.\n"
            "The current date is {today}.\n\n"
            "When you're not sure about some information or when the user's request requires "
            "up-to-date or specific data, you must use the available tools to fetch the "
            "information. Do not hesitate to use tools whenever they can provide a more "
            "accurate or complete response. If no relevant tools are available, then clearly "
            "state that you don't have the information and avoid making up anything.\n"
            "If the user's question is not clear, ambiguous, or does not provide enough "
            "context for you to accurately answer the question, you do not try to answer it "
            'right away and you rather ask the user to clarify their request (e.g. "What are '
            'some good restaurants around me?" => "Where are you?" or "When is the next '
            'flight to Tokyo" => "Where do you travel from?").\n'
            "You are always very attentive to dates, in particular you try to resolve dates "
            '(e.g. "yesterday" is {yesterday}) and when asked about information at specific '
            "dates, you discard information that is at another date.\n"
            "You follow these instructions in all languages, and always respond to the user in "
            "the language they use or request.\n"
            "Next sections describe the capabilities that you have.\n\n"
            "# WEB BROWSING INSTRUCTIONS\n\n"
            "You cannot perform any web search or access internet to open URLs, links etc. If "
            "it seems like the user is expecting you to do so, you clarify the situation and "
            "ask the user to copy paste the text directly in the chat.\n\n"
            "# MULTI-MODAL INSTRUCTIONS\n\n"
            "You have the ability to read images, but you cannot generate images. You also "
            "cannot transcribe audio files or videos.\n"
            "You cannot read nor transcribe audio files or videos.\n\n"
            "# TOOL CALLING INSTRUCTIONS\n\n"
            "You may have access to tools that you can use to fetch information or perform "
            "actions. You must use these tools in the following situations:\n\n"
            "1. When the request requires up-to-date information.\n"
            "2. When the request requires specific data that you do not have in your knowledge "
            "base.\n"
            "3. When the request involves actions that you cannot perform without tools.\n\n"
            "Always prioritize using tools to provide the most accurate and helpful response. "
            "If tools are not available, inform the user that you cannot perform the requested "
            "action at the moment."
        ),
        # Role markers are carried entirely by the per-role templates, so the
        # role names and separators below are empty strings.
        role_templates={
            "user": f"[INST]{MessagePlaceholders.USER.value}[/INST]",
            "assistant": f"{MessagePlaceholders.ASSISTANT.value}</s>",
            "tool": f"[TOOL_RESULTS]{MessagePlaceholders.TOOL.value}[/TOOL_RESULTS]",
        },
        roles={"user": "", "assistant": "", "tool": ""},
        seps=[""],
        role_content_sep="",
        role_empty_sep="",
        stop_str=["</s>"],
        stop_token_ids=[2],  # presumably the </s> token id — confirm against tokenizer
        system_prefix_token_ids=[1],  # presumably the <s>/BOS token id — confirm
    )
)
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/conversation_template/ministral3.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlc-ai/mlc-llm:python/mlc_llm/model/ministral3/ministral3_loader.py | """
This file specifies how MLC's Ministral3 parameter maps from other formats, for example HuggingFace
PyTorch, HuggingFace safetensors.
"""
import functools
from typing import Callable, List, Optional, Tuple
import numpy as np
from mlc_llm.loader import ExternMapping, QuantizeMapping
from mlc_llm.quantization import BlockScaleQuantize, Quantization
from .ministral3_model import Ministral3Config, Mistral3ForConditionalGeneration
def _dequantize_block_scale_weight( # pylint: disable=too-many-locals
weight: np.ndarray, weight_scale: np.ndarray, block_size: Tuple[int, int]
) -> np.ndarray:
"""Reconstruct float weights from FP8 block-scale storage."""
rows, cols = weight.shape
block_rows, block_cols = block_size
out = np.empty((rows, cols), dtype="float32")
weight = weight.astype("float32")
num_row_blocks, num_col_blocks = weight_scale.shape
for i in range(num_row_blocks):
row_start = i * block_rows
if row_start >= rows:
break
row_end = min(row_start + block_rows, rows)
scale_row = weight_scale[i]
for j in range(num_col_blocks):
col_start = j * block_cols
if col_start >= cols:
break
col_end = min(col_start + block_cols, cols)
out[row_start:row_end, col_start:col_end] = (
weight[row_start:row_end, col_start:col_end] * scale_row[j]
)
return out
def huggingface(  # pylint: disable=too-many-locals,too-many-statements
    model_config: Ministral3Config, quantization: Quantization
) -> ExternMapping:
    """Returns a parameter mapping that maps from the names of MLC LLM parameters to
    the names of HuggingFace PyTorch parameters.

    Parameters
    ----------
    model_config : Ministral3Config
        The configuration of the Ministral3 model.

    quantization : Quantization
        The quantization configuration.

    Returns
    -------
    param_map : ExternMapping
        The parameter mapping from MLC to HuggingFace PyTorch.
    """
    # Instantiate the MLC model purely to enumerate its parameter names,
    # shapes and dtypes; no weights are loaded here.
    model = Mistral3ForConditionalGeneration(model_config)
    if quantization is not None:
        model.to(quantization.model_dtype)
    if isinstance(quantization, BlockScaleQuantize):
        # Convert the model to block-scale quantized model before loading parameters
        model = quantization.quantize_model(model, QuantizeMapping({}, {}), "")
        if model_config.weight_block_size is None:
            raise ValueError(
                "The input Ministral 3 model is not fp8 block quantized. "
                "Thus BlockScaleQuantize is not supported."
            )
    _, _named_params, _ = model.export_tvm(  # type: ignore[misc]
        spec=model.get_default_spec(),
        allow_extern=True,
    )
    raw_params = dict(_named_params)
    # Multimodal checkpoints prefix text-tower params with "language_model.";
    # strip it so MLC names are uniform.
    if any(name.startswith("language_model.") for name in raw_params):
        named_parameters = {
            name.replace("language_model.", "", 1): value for name, value in raw_params.items()
        }
    else:
        named_parameters = raw_params
    mapping = ExternMapping()
    # On the HF side the prefix is re-added when a vision tower is present.
    hf_prefix = ""
    if "vision_config" in model_config.kwargs:
        hf_prefix = "language_model."

    def hf(name: str) -> str:
        # Map an MLC parameter name to its HuggingFace counterpart.
        return f"{hf_prefix}{name}"

    if (
        not isinstance(quantization, BlockScaleQuantize)
        and model_config.weight_block_size is not None
    ):
        raise ValueError(
            "The input Ministral 3 model is fp8 block quantized. "
            "Please use BlockScaleQuantize for the model."
        )

    # Helper function to add both weight and scale mappings
    def add_weight_and_scale_mapping(  # pylint: disable=too-many-locals
        weight_mlc_name: str,
        weight_hf_names: List[str],
        weight_transform_func: Callable,
        activation_transform_func: Optional[Callable] = None,
    ):
        # Map the weight itself.
        mlc_param = named_parameters[weight_mlc_name]
        mapping.add_mapping(
            weight_mlc_name,
            weight_hf_names,
            functools.partial(weight_transform_func, dtype=mlc_param.dtype),
        )
        if isinstance(quantization, BlockScaleQuantize):
            # Map the per-block weight scales (".weight_scale_inv") if present.
            weight_scale_mlc_name = f"{weight_mlc_name}_scale_inv"
            if weight_scale_mlc_name in named_parameters:
                weight_scale_hf_names = [f"{name}_scale_inv" for name in weight_hf_names]
                weight_scale_param = named_parameters[weight_scale_mlc_name]
                expected_weight_scale_shape = tuple(int(dim) for dim in weight_scale_param.shape)

                def _weight_scale_transform(*arrays, dtype: str, _transform=weight_transform_func):
                    # Promote 0-d scale arrays so the weight transform (e.g.
                    # concatenate) can operate on them.
                    processed = []
                    for arr in arrays:
                        arr_np = np.asarray(arr)
                        if arr_np.ndim == 0:
                            arr_np = arr_np.reshape((1,))
                        processed.append(arr_np)
                    result = _transform(*processed, dtype=dtype)
                    result = np.asarray(result, dtype=dtype)
                    # Coerce the transformed scales to the MLC-expected shape:
                    # exact match, scalar broadcast, or per-segment tiling.
                    if result.shape == expected_weight_scale_shape:
                        return result
                    if result.shape == ():
                        return np.full(expected_weight_scale_shape, result.item(), dtype=dtype)
                    if result.shape == (1,) and expected_weight_scale_shape != (1,):
                        return np.broadcast_to(result, expected_weight_scale_shape).astype(dtype)
                    if (
                        result.ndim == 1
                        and result.size > 1
                        and len(expected_weight_scale_shape) >= 2
                        and expected_weight_scale_shape[0] % result.size == 0
                    ):
                        # One scale per concatenated segment: repeat each over
                        # its rows, then broadcast across columns.
                        rows_per_segment = expected_weight_scale_shape[0] // result.size
                        tiled = np.repeat(result, rows_per_segment)
                        tiled = tiled.reshape(expected_weight_scale_shape[0], 1)
                        return np.broadcast_to(tiled, expected_weight_scale_shape).astype(dtype)
                    raise ValueError(
                        f"Unexpected weight scale shape {result.shape} for "
                        f"{weight_scale_mlc_name}, expected {expected_weight_scale_shape}"
                    )

                mapping.add_mapping(
                    weight_scale_mlc_name,
                    weight_scale_hf_names,
                    functools.partial(_weight_scale_transform, dtype=weight_scale_param.dtype),
                )
            # Map the static activation scales if the quantized model has them.
            activation_scale_mlc_name = f"{weight_mlc_name[: -len('.weight')]}.activation_scale"
            if activation_scale_mlc_name in named_parameters:
                activation_scale_hf_names = [
                    f"{name[: -len('.weight')]}.activation_scale" for name in weight_hf_names
                ]
                activation_scale_param = named_parameters[activation_scale_mlc_name]
                transform = activation_transform_func or weight_transform_func
                expected_shape = tuple(int(dim) for dim in activation_scale_param.shape)

                def _activation_scale_transform(*arrays, dtype: str, _transform=transform):
                    result = _transform(*arrays, dtype=dtype)
                    result = np.asarray(result, dtype=dtype)
                    if result.shape == expected_shape:
                        return result
                    if result.shape == ():
                        # HF checkpoint stores a single scale; broadcast across the expected
                        # dimension.
                        return np.full(expected_shape, result.item(), dtype=dtype)
                    if result.shape == (1,) and expected_shape != (1,):
                        return np.broadcast_to(result, expected_shape).astype(dtype)
                    if (
                        result.ndim == 1
                        and result.size > 1
                        and len(expected_shape) >= 1
                        and expected_shape[0] % result.size == 0
                    ):
                        rows_per_segment = expected_shape[0] // result.size
                        tiled = np.repeat(result, rows_per_segment)
                        return tiled.reshape(expected_shape).astype(dtype)
                    raise ValueError(
                        f"Unexpected activation scale shape {result.shape} for "
                        f"{activation_scale_mlc_name}, expected {expected_shape}"
                    )

                mapping.add_mapping(
                    activation_scale_mlc_name,
                    activation_scale_hf_names,
                    functools.partial(
                        _activation_scale_transform, dtype=activation_scale_param.dtype
                    ),
                )

    def identity_transform(param: np.ndarray, dtype: str):
        # Pass-through mapping: only a dtype cast.
        return param.astype(dtype)

    def make_shared_activation_transform(target_name: str):
        # For fused MLC params (qkv, gate_up): the concatenated HF sources
        # must share one activation scale; verify and return the first.
        def func(first: np.ndarray, *rest: np.ndarray, dtype: str):
            for _, arr in enumerate(rest, start=1):
                if not np.allclose(arr, first):
                    raise ValueError(
                        f"Activation scales for {target_name} must be identical between "
                        "concatenated sources."
                    )
            return first.astype(dtype)

        return func

    for i in range(model_config.num_hidden_layers):
        # Add QKV in self attention
        attn = f"model.layers.{i}.self_attn"
        mlc_name = f"{attn}.qkv_proj.weight"
        proj_sources = [hf(f"{attn}.{proj}.weight") for proj in ["q_proj", "k_proj", "v_proj"]]
        add_weight_and_scale_mapping(
            mlc_name,
            proj_sources,
            lambda q, k, v, dtype: np.concatenate([q, k, v], axis=0).astype(dtype),
            activation_transform_func=make_shared_activation_transform(
                f"{mlc_name}_activation_scale"
            ),
        )
        # Add gates in MLP
        mlp = f"model.layers.{i}.mlp"
        mlc_name = f"{mlp}.gate_up_proj.weight"
        gate_sources = [hf(f"{mlp}.{proj}.weight") for proj in ["gate_proj", "up_proj"]]
        add_weight_and_scale_mapping(
            mlc_name,
            gate_sources,
            lambda gate, up, dtype: np.concatenate([gate, up], axis=0).astype(dtype),
            activation_transform_func=make_shared_activation_transform(
                f"{mlc_name}_activation_scale"
            ),
        )
        # Unfused projections map one-to-one.
        for linear_name in [f"{attn}.o_proj.weight", f"{mlp}.down_proj.weight"]:
            add_weight_and_scale_mapping(
                linear_name,
                [hf(linear_name)],
                identity_transform,
            )
        # inv_freq is not used in the model
        mapping.add_unused(f"{attn}.rotary_emb.inv_freq")

    # Everything not explicitly mapped above (embeddings, norms, lm_head, ...)
    # maps by identical name with a dtype cast.
    for mlc_name, mlc_param in named_parameters.items():
        if mlc_name not in mapping.param_map:
            mapping.add_mapping(
                mlc_name,
                [hf(mlc_name)],
                functools.partial(
                    lambda x, dtype: x.astype(dtype),
                    dtype=mlc_param.dtype,
                ),
            )
    return mapping
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/model/ministral3/ministral3_loader.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:python/mlc_llm/model/ministral3/ministral3_model.py | """
Implementation for Ministral 3 architecture.
"""
import dataclasses
import math
from functools import partial
from typing import Any, Dict, Optional, Tuple
from tvm import te, tir
from tvm.relax.frontend import nn
from tvm.relax.frontend.nn import Tensor, op
from mlc_llm import op as op_ext
from mlc_llm.nn import PagedKVCache, RopeMode
from mlc_llm.support import logging
from mlc_llm.support import tensor_parallel as tp
from mlc_llm.support.config import ConfigBase
from mlc_llm.support.style import bold
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class Ministral3Config(ConfigBase):  # pylint: disable=too-many-instance-attributes
    """Configuration of the Ministral 3 model.

    Mirrors the HuggingFace ``config.json`` layout; ``from_dict`` flattens a
    nested ``text_config`` (used by multimodal checkpoints) and
    ``__post_init__`` normalizes quantization, RoPE, window and head settings.
    """

    hidden_size: int
    intermediate_size: int
    num_attention_heads: int
    num_hidden_layers: int
    rms_norm_eps: float
    vocab_size: int
    attention_sink_size: int = 0
    context_window_size: int = 0
    dtype: str = "float32"
    head_dim: int = 0
    hidden_act: str = "silu"
    max_batch_size: int = 1
    num_key_value_heads: int = 0
    position_embedding_base: int = 0
    prefill_chunk_size: int = 0
    rope_parameters: Optional[Dict[str, Any]] = None
    sliding_window_size: int = 0
    tensor_parallel_shards: int = 1
    tie_word_embeddings: bool = False
    weight_block_size: Optional[Tuple[int, int]] = None
    kwargs: Dict[str, Any] = dataclasses.field(default_factory=dict)
    modules_to_not_convert: Tuple[str, ...] = dataclasses.field(default_factory=tuple)

    @classmethod
    def from_dict(  # type: ignore[override]
        cls,
        source: Dict[str, Any],
    ) -> "Ministral3Config":
        """Build a config, flattening a nested ``text_config`` when present.

        Top-level ``tie_word_embeddings``/``dtype`` win over values inside
        ``text_config``.
        """
        if "text_config" in source and isinstance(source["text_config"], dict):
            top_level = dict(source)
            text_cfg = top_level.pop("text_config")
            merged: Dict[str, Any] = dict(top_level)
            merged.update(text_cfg)
            if "tie_word_embeddings" in source:
                merged["tie_word_embeddings"] = source["tie_word_embeddings"]
            if "dtype" in source:
                merged["dtype"] = source["dtype"]
            return super().from_dict(merged)
        return super().from_dict(source)

    def __post_init__(self):  # pylint: disable=too-many-branches,too-many-statements
        # --- Quantization: only FP8 static (block-scale) is supported. ---
        if "quantization_config" in self.kwargs:
            quantization_config = self.kwargs.pop("quantization_config")
            if isinstance(quantization_config, dict):
                activation_scheme = quantization_config.get("activation_scheme", "")
                quant_method = quantization_config.get("quant_method", "")
                weight_block_size = quantization_config.get("weight_block_size")
                modules_to_not_convert = quantization_config.get("modules_to_not_convert", [])
                if isinstance(modules_to_not_convert, list):
                    self.modules_to_not_convert = tuple(modules_to_not_convert)
                if quant_method == "fp8" and activation_scheme == "static":
                    if weight_block_size is not None:
                        self.weight_block_size = weight_block_size
                        if (
                            not isinstance(self.weight_block_size, (tuple, list))
                            or len(self.weight_block_size) != 2
                        ):
                            # BUGFIX: was ValueError("...", "...", ...), which produced
                            # a tuple of fragments instead of one readable message.
                            raise ValueError(
                                "Invalid Ministral3 quantization config: "
                                "weight_block_size must be a list or tuple of two integers, "
                                f"got {self.weight_block_size} of type "
                                f"{type(self.weight_block_size)}"
                            )
                    else:
                        # Set default block size if not provided.
                        self.weight_block_size = (128, 128)
                        # BUGFIX: the previous call passed several message fragments
                        # positionally, so %-formatting saw 6 args for 1 placeholder
                        # and logging raised a formatting error at runtime.
                        logger.info(
                            "Setting default weight_block_size=%s, since "
                            "quantization_config does not provide FP8 block-scale "
                            "details required by MLC (activation_scheme=%s, "
                            "quant_method=%s)",
                            self.weight_block_size,
                            activation_scheme,
                            quant_method,
                        )
                else:
                    # BUGFIX: single message string instead of a tuple of fragments.
                    raise ValueError(
                        "Invalid Ministral 3 model quantization config: "
                        "only FP8 static quantization is supported, "
                        f"got activation_scheme={activation_scheme}, "
                        f"quant_method={quant_method}"
                    )
            else:
                # BUGFIX: single message string instead of a tuple of fragments.
                raise ValueError(
                    "Invalid Ministral 3 model quantization config: "
                    f"unrecognized quantization config: {quantization_config}"
                )
        # --- RoPE base frequency: rope_parameters > kwargs > default 10000. ---
        if self.position_embedding_base == 0:
            if self.rope_parameters is not None and "rope_theta" in self.rope_parameters:
                self.position_embedding_base = self.rope_parameters.pop("rope_theta")
            elif "rope_theta" in self.kwargs:
                self.position_embedding_base = self.kwargs.pop("rope_theta")
            else:
                self.position_embedding_base = 10000
        # --- Sliding-window / context-window resolution (-1 means disabled). ---
        if self.sliding_window_size == 0:
            self.sliding_window_size = self.kwargs.pop("sliding_window", -1)
        if self.sliding_window_size is None:
            # Sliding window is disabled.
            self.sliding_window_size = -1
        if self.context_window_size == 0:
            if self.sliding_window_size == -1:
                for name in ["max_position_embeddings", "max_sequence_length"]:
                    if name in self.kwargs:
                        self.context_window_size = self.kwargs.pop(name)
                        logger.info(
                            "%s not found in config.json. Falling back to %s (%d)",
                            bold("context_window_size"),
                            bold(name),
                            self.context_window_size,
                        )
                        break
                else:
                    raise ValueError(
                        "Unable to determine the maximum sequence length, because none of "
                        "`context_window_size`, `max_position_embeddings` or "
                        "`max_sequence_length` is provided in `config.json`."
                    )
            else:
                # With a sliding window active the context window is unbounded.
                self.context_window_size = -1
        # --- Attention head bookkeeping. ---
        if self.num_key_value_heads == 0:
            self.num_key_value_heads = self.num_attention_heads
        if self.head_dim == 0:
            self.head_dim = self.hidden_size // self.num_attention_heads
        assert self.num_attention_heads % self.num_key_value_heads == 0
        assert self.attention_sink_size >= 0
        # --- Prefill chunk size: min of enabled window sizes, capped at 8192. ---
        if self.prefill_chunk_size == 0:
            prefill_chunk_size_candidates = []
            if self.sliding_window_size != -1:
                prefill_chunk_size_candidates.append(self.sliding_window_size)
            if self.context_window_size != -1:
                prefill_chunk_size_candidates.append(self.context_window_size)
            # Hoisted: previously the min() was computed twice (log + assign).
            self.prefill_chunk_size = min(*prefill_chunk_size_candidates, 8192)
            logger.info(
                "%s defaults to %d",
                bold("prefill_chunk_size"),
                self.prefill_chunk_size,
            )
# Maps HuggingFace-style activation names (``config.hidden_act``) to the
# corresponding relax.frontend.nn activation callables.
ACT2FN = {
    "gelu": partial(nn.gelu, approximate=False),
    "relu": nn.relu,
    "silu": nn.silu,
    "swish": nn.silu,  # alias: swish == silu
    "gelu_new": partial(nn.gelu, approximate=True),
}
class Ministral3Embedding(nn.Embedding):
    """Embedding table for Ministral3, reusable as the tied lm_head."""

    def lm_head_forward(self, x: nn.Tensor):
        """Project hidden states onto the vocabulary via the transposed
        embedding weight (weight tying with the output head)."""
        transposed = nn.op.permute_dims(self.weight)
        return nn.op.matmul(x, transposed, out_dtype="float32")
# pylint: disable=invalid-name,missing-docstring
class Ministral3MLP(nn.Module):
    """Gated feed-forward network, identical to the Llama FFN."""

    def __init__(self, config: Ministral3Config):
        super().__init__()
        shards = config.tensor_parallel_shards
        if config.intermediate_size % shards != 0:
            raise ValueError(
                f"Cannot split MLP intermediate size {config.intermediate_size} "
                f"evenly to {shards} GPUs."
            )
        # Per-shard width; gate and up projections are fused in one matmul.
        self.intermediate_size = config.intermediate_size // shards
        self.gate_up_proj = nn.Linear(
            in_features=config.hidden_size,
            out_features=2 * self.intermediate_size,
            bias=False,
        )
        self.down_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x: Tensor):
        gate_up = self.gate_up_proj(x)
        gate, up = op.split(gate_up, 2, axis=-1)
        return self.down_proj(self.act_fn(gate) * up)
def yarn_get_sm_scale(scale=1, mscale=1):
    """YaRN softmax-scale correction: 1.0 when there is no context
    extension (scale <= 1), otherwise 0.1 * mscale * ln(scale) + 1.0."""
    if scale > 1:
        return 0.1 * mscale * math.log(scale) + 1.0
    return 1.0
class Ministral3Attention(nn.Module):  # pylint: disable=too-many-instance-attributes
    """Same as LlamaAttention, but with sliding window attention using a rolling buffer cache."""

    def __init__(self, config: Ministral3Config):
        self.head_dim = config.head_dim
        if config.num_key_value_heads % config.tensor_parallel_shards != 0:
            raise ValueError(
                f"Cannot split {config.num_key_value_heads} key-value attention heads "
                f"evenly to {config.tensor_parallel_shards} GPUs."
            )
        # Per-shard head counts (grouped-query attention when q > kv).
        self.num_q_heads = config.num_attention_heads // config.tensor_parallel_shards
        self.num_kv_heads = config.num_key_value_heads // config.tensor_parallel_shards
        # Fused projection producing Q, K and V in a single matmul.
        self.qkv_proj = nn.Linear(
            in_features=config.hidden_size,
            out_features=(self.num_q_heads + 2 * self.num_kv_heads) * self.head_dim,
            bias=False,
        )
        self.o_proj = nn.Linear(self.num_q_heads * self.head_dim, config.hidden_size, bias=False)
        # Default attention scaling 1/sqrt(head_dim); adjusted below for YaRN.
        self.softmax_scale = self.head_dim ** (-0.5)
        if config.rope_parameters is not None:
            mscale_all_dim = config.rope_parameters.get("mscale_all_dim", 0)
            scaling_factor = config.rope_parameters["factor"]
            if mscale_all_dim:
                sm_scale = yarn_get_sm_scale(scaling_factor, mscale_all_dim)
                # Correction applied squared — presumably once for Q and once
                # for K, per the YaRN formulation; NOTE(review): confirm.
                self.softmax_scale = self.softmax_scale * sm_scale * sm_scale

    def forward(self, hidden_states: Tensor, paged_kv_cache: PagedKVCache, layer_id: int):
        d, h_q, h_kv = self.head_dim, self.num_q_heads, self.num_kv_heads
        b, s, _ = hidden_states.shape
        # QKV Projection
        qkv = self.qkv_proj(hidden_states)
        qkv = op.reshape(qkv, (b, s, h_q + h_kv + h_kv, d))
        # Attention: the KV cache splits the fused QKV, applies RoPE and the
        # (sliding-window) attention internally.
        output = op.reshape(
            paged_kv_cache.attention_with_fused_qkv(
                layer_id, qkv, self.num_q_heads, sm_scale=self.softmax_scale
            ),
            (b, s, h_q * d),
        )
        return self.o_proj(output)
class Ministral3DecoderLayer(nn.Module):
    """Exact same as LlamaDecoderLayer."""

    def __init__(self, config: Ministral3Config):
        rms_norm_eps = config.rms_norm_eps
        self.self_attn = Ministral3Attention(config)
        self.mlp = Ministral3MLP(config)
        # Pre-norm architecture: RMSNorm before attention and before the MLP.
        self.input_layernorm = nn.RMSNorm(config.hidden_size, -1, rms_norm_eps, bias=False)
        self.post_attention_layernorm = nn.RMSNorm(config.hidden_size, -1, rms_norm_eps, bias=False)

        def _set_tp():
            # Attach tensor-parallel sharding hints to every linear weight.
            def _set(layer, hint):
                layer.weight.attrs["shard_strategy"] = hint

            hd = config.head_dim
            q = self.self_attn.num_q_heads * hd
            k = self.self_attn.num_kv_heads * hd
            v = self.self_attn.num_kv_heads * hd
            i = self.mlp.intermediate_size
            # Fused projections shard along the output dim with per-segment
            # boundaries; o_proj/down_proj shard along the input dim.
            _set(
                self.self_attn.qkv_proj,
                tp.ShardSingleDim("_shard_qkv", segs=[q, k, v], dim=0),
            )
            _set(self.self_attn.o_proj, tp.ShardSingleDim("_shard_o", dim=1))
            _set(
                self.mlp.gate_up_proj,
                tp.ShardSingleDim("_shard_mlp_up", segs=[i, i], dim=0),
            )
            _set(self.mlp.down_proj, tp.ShardSingleDim("_shard_mlp_down", dim=1))

        self.tensor_parallel_shards = config.tensor_parallel_shards
        _set_tp()

    def forward(self, hidden_states: Tensor, paged_kv_cache: PagedKVCache, layer_id: int):
        # Attention sub-layer with residual, then MLP sub-layer with residual.
        out = self.self_attn(self.input_layernorm(hidden_states), paged_kv_cache, layer_id)
        hidden_states = self._apply_residual(out, residual=hidden_states)
        out = self.mlp(self.post_attention_layernorm(hidden_states))
        hidden_states = self._apply_residual(out, residual=hidden_states)
        return hidden_states

    def _apply_residual(self, out, residual):
        # Under tensor parallelism the partial sub-layer outputs must be
        # summed across shards before adding the residual.
        if self.tensor_parallel_shards > 1:
            return op.ccl_allreduce(out, "sum") + residual
        return out + residual
class Ministral3Model(nn.Module):
    """Decoder-only transformer stack: embeddings, decoder layers, final norm."""

    def __init__(self, config: Ministral3Config):
        assert config.hidden_size % config.num_attention_heads == 0
        self.embed_tokens = Ministral3Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList(
            [Ministral3DecoderLayer(config) for _ in range(config.num_hidden_layers)]
        )
        self.norm = nn.RMSNorm(config.hidden_size, -1, config.rms_norm_eps, bias=False)
        self.tensor_parallel_shards = config.tensor_parallel_shards

    def forward(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        out = input_embed
        for idx, decoder_layer in enumerate(self.layers):
            out = decoder_layer(out, paged_kv_cache, idx)
        return self.norm(out)
class Mistral3ForConditionalGeneration(nn.Module): # pylint: disable=too-many-instance-attributes
    def __init__(self, config: Ministral3Config):
        self.model = Ministral3Model(config)
        self.tie_word_embeddings = config.tie_word_embeddings
        if not config.tie_word_embeddings:
            # Separate output head; with tied embeddings the embedding table
            # doubles as the lm_head (see Ministral3Embedding.lm_head_forward).
            self.lm_head = nn.Linear(
                config.hidden_size, config.vocab_size, bias=False
            )  # "vocab_size"
        self._mark_modules_no_quant(config.modules_to_not_convert)
        # Cache the config values the spec/KV-cache builders need.
        self.num_hidden_layers = config.num_hidden_layers
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.head_dim = config.head_dim
        self.hidden_size = config.hidden_size
        self.vocab_size = config.vocab_size
        self.rope_theta = config.position_embedding_base
        self.rope_parameters = config.rope_parameters
        self.tensor_parallel_shards = config.tensor_parallel_shards
        self.sliding_window_size = config.sliding_window_size
        self.dtype = config.dtype
        self.weight_block_size = config.weight_block_size
def _mark_modules_no_quant(self, modules: Tuple[str, ...]):
for path in modules:
if not path:
continue
parts = path.split(".")
target = self
for part in parts:
if not hasattr(target, part):
target = None
break
target = getattr(target, part)
if target is not None:
setattr(target, "no_quantization", True)
    def to(self, dtype: Optional[str] = None):
        """Cast parameters to *dtype* and remember it for KV-cache creation."""
        super().to(dtype=dtype)
        if dtype is not None:
            self.dtype = dtype
    def batch_forward(
        self,
        input_embeds: Tensor,
        paged_kv_cache: PagedKVCache,
        logit_positions: Optional[Tensor] = None,
    ):
        """Shared forward for the batched prefill/decode/verify entry points.

        When *logit_positions* is given, only those sequence positions are
        projected to logits. Logits are always returned as float32.
        """
        op_ext.configure()
        hidden_states = self.model(input_embeds, paged_kv_cache)
        if logit_positions is not None:
            hidden_states = op.take(hidden_states, logit_positions, axis=1)
        # Tied embeddings reuse the embedding table as the output head.
        if self.tie_word_embeddings:
            logits = self.model.embed_tokens.lm_head_forward(hidden_states)
        else:
            logits = self.lm_head(hidden_states)
        if logits.dtype != "float32":
            logits = logits.astype("float32")
        return logits
    def embed(self, input_ids: Tensor):
        """Look up token embeddings; under TP the ids are first broadcast
        from worker 0 so every shard embeds the same tokens."""
        if self.tensor_parallel_shards > 1:
            input_ids = op.ccl_broadcast_from_worker0(input_ids)
        return self.model.embed_tokens(input_ids)
    def prefill(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Single-sequence prefill: run the stack over the whole prompt and
        return float32 logits for the last position only."""
        op_ext.configure()

        def _index(x: te.Tensor):  # x[:, -1, :] — keep only the last position
            b, s, d = x.shape
            return te.compute((b, 1, d), lambda i, _, k: x[i, s - 1, k], name="index")

        hidden_states = self.model(input_embed, paged_kv_cache)
        hidden_states = op.tensor_expr_op(_index, name_hint="index", args=[hidden_states])
        if self.tie_word_embeddings:
            logits = self.model.embed_tokens.lm_head_forward(hidden_states)
        else:
            logits = self.lm_head(hidden_states)
        if logits.dtype != "float32":
            logits = logits.astype("float32")
        return logits, paged_kv_cache
    def decode(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Single-sequence decode step: one token in, float32 logits out."""
        op_ext.configure()
        hidden_states = self.model(input_embed, paged_kv_cache)
        if self.tie_word_embeddings:
            logits = self.model.embed_tokens.lm_head_forward(hidden_states)
        else:
            logits = self.lm_head(hidden_states)
        if logits.dtype != "float32":
            logits = logits.astype("float32")
        return logits, paged_kv_cache
def batch_prefill(
    self,
    input_embeds: Tensor,
    logit_positions: Tensor,
    paged_kv_cache: PagedKVCache,
):
    """Batched prefill: logits are produced only at `logit_positions` of each sequence."""
    positions = logit_positions
    if self.tensor_parallel_shards > 1:
        # All workers must agree on which positions to gather.
        positions = op.ccl_broadcast_from_worker0(positions)
    return self.batch_forward(input_embeds, paged_kv_cache, positions), paged_kv_cache
def batch_decode(self, input_embeds: Tensor, paged_kv_cache: PagedKVCache):
    """Batched single-token decode: one position per sequence, full logits returned."""
    return self.batch_forward(input_embeds, paged_kv_cache), paged_kv_cache
def batch_verify(self, input_embeds: Tensor, paged_kv_cache: PagedKVCache):
    """Batched verification pass: logits for every position of the given embeddings."""
    return self.batch_forward(input_embeds, paged_kv_cache), paged_kv_cache
def create_paged_kv_cache(  # pylint: disable=too-many-arguments
    self,
    max_batch_size: tir.Var,
    max_total_seq_len: tir.Var,
    prefill_chunk_size: tir.Var,
    page_size: tir.Var,
    support_sliding_window: tir.Var,
) -> PagedKVCache:
    """Build the paged KV cache sized for this model's (per-shard) head layout.

    Capacity arguments are symbolic tir.Vars bound by the runtime; head counts
    are divided by the tensor-parallel shard count.
    """
    return PagedKVCache.create_generic(
        attn_kind="mha",
        max_batch_size=max_batch_size,
        max_total_seq_len=max_total_seq_len,
        prefill_chunk_size=prefill_chunk_size,
        page_size=page_size,
        support_sliding_window=support_sliding_window,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads // self.tensor_parallel_shards,
        num_key_value_heads=self.num_key_value_heads // self.tensor_parallel_shards,
        qk_head_dim=self.head_dim,
        v_head_dim=self.head_dim,
        # Rotary embedding is applied by the cache itself (NORMAL mode).
        rope_mode=RopeMode.NORMAL,
        rope_scale=1,
        rope_theta=self.rope_theta,
        rope_scaling=self.rope_parameters,
        dtype=self.dtype,
    )
def get_default_spec(self):
    """Declare the exported entry points with their tensor shapes/dtypes.

    Symbolic dimensions ("seq_len", "batch_size") are bound at runtime; the
    "$" entry configures parameter packing and effect mode per function.
    """
    mod_spec = {
        "embed": {
            "input_ids": nn.spec.Tensor(["seq_len"], "int32"),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "prefill": {
            "input_embed": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
            "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "decode": {
            "input_embed": nn.spec.Tensor([1, 1, self.hidden_size], self.dtype),
            "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "batch_prefill": {
            "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
            "logit_positions": nn.spec.Tensor(["batch_size"], "int32"),
            "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "batch_decode": {
            "input_embeds": nn.spec.Tensor(["batch_size", 1, self.hidden_size], self.dtype),
            "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "batch_verify": {
            "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
            "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
            "$": {
                "param_mode": "packed",
                "effect_mode": "none",
            },
        },
        "create_paged_kv_cache": {
            "max_batch_size": int,
            "max_total_seq_len": int,
            "prefill_chunk_size": int,
            "page_size": int,
            "support_sliding_window": int,
            "$": {
                "param_mode": "none",
                "effect_mode": "none",
            },
        },
    }
    return nn.spec.ModuleSpec.from_raw(mod_spec, self)
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/model/ministral3/ministral3_model.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlc-ai/mlc-llm:python/mlc_llm/model/llama4/llama4_loader.py | """
This file specifies how MLC's Llama parameter maps from other formats, for example HuggingFace
PyTorch, HuggingFace safetensors.
"""
import functools
import numpy as np
from mlc_llm.loader import ExternMapping
from mlc_llm.quantization import Quantization
from .llama4_model import Llama4Config, Llama4ForCausalLM
def huggingface(model_config: Llama4Config, quantization: Quantization) -> ExternMapping:
    """Returns a parameter mapping that maps from the names of MLC LLM parameters to
    the names of HuggingFace PyTorch parameters.

    Parameters
    ----------
    model_config : Llama4Config
        The configuration of the Llama model.

    quantization : Quantization
        The quantization configuration.

    Returns
    -------
    param_map : ExternMapping
        The parameter mapping from MLC to HuggingFace PyTorch.
    """
    model = Llama4ForCausalLM(model_config)
    if quantization is not None:
        model.to(quantization.model_dtype)
    _, _named_params, _ = model.export_tvm(  # type: ignore[misc]
        spec=model.get_default_spec(),
        allow_extern=True,
    )
    named_parameters = dict(_named_params)

    mapping = ExternMapping()

    def _astype(x, dtype):
        # Plain dtype cast; bound per-parameter via functools.partial below.
        return x.astype(dtype)

    def _add_direct(mlc_name: str, hf_name: str) -> None:
        # One MLC parameter sourced from a single HuggingFace tensor
        # (dtype cast only). Factored out of four identical stanzas.
        mapping.add_mapping(
            mlc_name,
            [hf_name],
            functools.partial(_astype, dtype=named_parameters[mlc_name].dtype),
        )

    for i in range(model_config.text_config.num_hidden_layers):
        # Shared expert: HF stores gate_proj/up_proj separately, MLC fuses them
        # into one gate_up_proj by concatenation along dim 0.
        shared = f"model.layers.{i}.feed_forward.shared_expert"
        fused_name = f"{shared}.gate_up_proj.weight"
        mapping.add_mapping(
            fused_name,
            [
                f"language_model.{shared}.gate_proj.weight",
                f"language_model.{shared}.up_proj.weight",
            ],
            functools.partial(
                lambda gate, up, dtype: np.concatenate([gate, up], axis=0).astype(dtype),
                dtype=named_parameters[fused_name].dtype,
            ),
        )
        # Router and (already fused) routed-expert weights map one-to-one.
        moe = f"model.layers.{i}.feed_forward"
        _add_direct(f"{moe}.router.router.weight", f"language_model.{moe}.router.weight")
        _add_direct(f"{moe}.experts.gate_up_proj", f"language_model.{moe}.experts.gate_up_proj")
        _add_direct(f"{moe}.experts.down_proj", f"language_model.{moe}.experts.down_proj")

    # Everything not mapped above maps one-to-one under the "language_model." prefix.
    for mlc_name in named_parameters:
        if mlc_name not in mapping.param_map:
            _add_direct(mlc_name, f"language_model.{mlc_name}")
    return mapping
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/model/llama4/llama4_loader.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlc-ai/mlc-llm:python/mlc_llm/model/llama4/llama4_model.py | """
Implementation for Llama4 architecture.
"""
import dataclasses
from typing import Any, Dict, Optional
import tvm
from tvm import te, tir
from tvm.relax.frontend import nn
from tvm.relax.frontend.nn import Tensor, op
from tvm.relax.frontend.nn.llm import position_embedding
from mlc_llm import op as op_ext
from mlc_llm.model.qwen3.qwen3_model import ACT2FN
from mlc_llm.nn import PagedKVCache, RopeMode
from mlc_llm.support import logging
from mlc_llm.support import tensor_parallel as tp
from mlc_llm.support.config import ConfigBase
from mlc_llm.support.style import bold
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class Llama4TextConfig(ConfigBase):  # pylint: disable=too-many-instance-attributes
    """Configuration of the Text portion of the Llama model."""

    hidden_size: int
    intermediate_size: int
    num_attention_heads: int
    num_hidden_layers: int
    rms_norm_eps: float
    rope_theta: float
    use_qk_norm: bool
    interleave_moe_layer_step: int
    num_experts_per_tok: int
    num_local_experts: int
    hidden_act: str
    tie_word_embeddings: bool = False
    position_embedding_base: int = 0  # 0 => resolved from kwargs/rope_theta in __post_init__
    rope_scaling: Optional[Dict[str, Any]] = None
    num_key_value_heads: int = 0  # 0 => defaults to num_attention_heads (set by Llama4Config)
    head_dim: int = 0  # 0 => derived as hidden_size // num_attention_heads
    attn_scale: float = 0.1
    floor_scale: int = 8192
    vocab_size: int = 202048
    attention_bias: bool = False
    attn_temperature_tuning: bool = True
    # Per-layer flags: 1 => layer uses RoPE, 0 => NoPE layer. None/[] => derive below.
    no_rope_layers: list[int] = None
    no_rope_layer_interval: int = 4
    moe_layers: list[int] = None  # indices of MoE layers; None => derive below
    kwargs: Dict[str, Any] = dataclasses.field(default_factory=dict)

    def __post_init__(self):  # pylint: disable=too-many-branches
        # Resolve the RoPE base frequency from leftover HF kwargs if unset.
        if self.position_embedding_base == 0:
            if "rope_theta" in self.kwargs:
                self.position_embedding_base = self.kwargs.pop("rope_theta")
            else:
                self.position_embedding_base = 10000
        if self.rope_scaling is not None:
            if "rope_type" not in self.rope_scaling:
                self.rope_scaling = None
            else:
                assert (
                    self.rope_scaling["rope_type"] == "llama3"
                ), f"Unsupported RoPE scaling type {self.rope_scaling['rope_type']} for Llama"

        # Define which layers to avoid RoPE: by default every
        # no_rope_layer_interval-th layer is a NoPE layer (flag 0).
        if self.no_rope_layers == []:
            self.no_rope_layers = None

        default_no_rope_layers = [
            int((layer_idx + 1) % self.no_rope_layer_interval != 0)
            for layer_idx in range(self.num_hidden_layers)
        ]

        self.no_rope_layers = self.no_rope_layers if self.no_rope_layers else default_no_rope_layers

        # Define which layers to apply MoE: every interleave_moe_layer_step-th
        # layer, starting at interleave_moe_layer_step - 1.
        self.moe_layers = (
            self.moe_layers
            if self.moe_layers is not None
            else list(
                range(
                    self.interleave_moe_layer_step - 1,
                    self.num_hidden_layers,
                    self.interleave_moe_layer_step,
                )
            )
        )
@dataclasses.dataclass
class Llama4Config(ConfigBase):  # pylint: disable=too-many-instance-attributes
    """Configuration of the Llama model."""

    text_config: Llama4TextConfig
    tensor_parallel_shards: int = 1
    context_window_size: int = 0  # 0 => fall back to max_position_embeddings
    pipeline_parallel_stages: int = 1
    prefill_chunk_size: int = 0  # 0 => min(context_window_size, 8192)
    max_batch_size: int = 1
    disaggregation: bool = False
    # NOTE: no annotation on purpose — this is a class constant, not a
    # dataclass field.
    max_position_embeddings = 4096 * 32
    vocab_size: int = 202048
    kwargs: Dict[str, Any] = dataclasses.field(default_factory=dict)

    def __post_init__(self) -> None:
        # Accept either a parsed config object or a raw dict for text_config,
        # flattening any nested "kwargs" before re-parsing.
        text_config_dict: Dict[str, Any]
        if isinstance(self.text_config, ConfigBase):
            text_config_dict = dataclasses.asdict(self.text_config)
        else:
            text_config_dict = dict(self.text_config)
        for k, v in text_config_dict.pop("kwargs", {}).items():
            text_config_dict[k] = v
        self.text_config = Llama4TextConfig.from_dict(text_config_dict)  # type: ignore

        if self.context_window_size == 0:
            # Fall back to max_position_embeddings
            self.context_window_size = self.max_position_embeddings
            logger.info(
                "%s not found in config.json. Falling back to %s (%d)",
                bold("context_window_size"),
                bold("max_position_embeddings"),
                self.context_window_size,
            )

        # Fill in derived attention geometry defaults.
        if self.text_config.num_key_value_heads == 0:
            self.text_config.num_key_value_heads = self.text_config.num_attention_heads
        if self.text_config.head_dim == 0:
            self.text_config.head_dim = (
                self.text_config.hidden_size // self.text_config.num_attention_heads
            )
        assert self.text_config.num_attention_heads % self.text_config.num_key_value_heads == 0

        # Clamp the prefill chunk to at most 8192 (and the context window).
        if self.prefill_chunk_size == 0:
            logger.info(
                "%s defaults to %d",
                bold("prefill_chunk_size"),
                min(self.context_window_size, 8192),
            )
            self.prefill_chunk_size = min(self.context_window_size, 8192)
        elif self.prefill_chunk_size > self.context_window_size:
            logger.info(
                "Overriding %s from %d to %d",
                bold("prefill_chunk_size"),
                self.prefill_chunk_size,
                min(self.context_window_size, 8192),
            )
            self.prefill_chunk_size = min(self.context_window_size, 8192)
# pylint: disable=invalid-name,missing-docstring
class Llama4TextMLP(nn.Module):
    """Dense SwiGLU MLP with the gate and up projections fused into one matmul."""

    def __init__(self, config: Llama4Config):
        super().__init__()
        shards = config.tensor_parallel_shards
        full_intermediate = config.text_config.intermediate_size
        if full_intermediate % shards != 0:
            raise ValueError(
                f"Cannot split MLP intermediate size {full_intermediate} "
                f"evenly to {shards} GPUs."
            )
        # Per-shard slice of the intermediate dimension.
        self.intermediate_size = full_intermediate // shards
        self.gate_up_proj = nn.Linear(
            in_features=config.text_config.hidden_size,
            out_features=2 * self.intermediate_size,
            bias=False,
        )
        self.down_proj = nn.Linear(
            self.intermediate_size, config.text_config.hidden_size, bias=False
        )

    def forward(self, x: Tensor):
        fused = self.gate_up_proj(x)
        gate, up = op.split(fused, 2, axis=-1)
        return self.down_proj(op.silu(gate) * up)
class LlamaEmbedding(nn.Embedding):
    """Token embedding that can double as the (tied) output projection. From Qwen2Embedding."""

    def lm_head_forward(self, x: nn.Tensor):
        """Project hidden states to float32 vocab logits using the transposed embedding table."""
        return nn.op.matmul(x, nn.op.permute_dims(self.weight), out_dtype="float32")
class Llama4TextL2Norm(nn.Module):
    """RMS norm with an implicit all-ones weight, i.e. pure L2-style normalization."""

    def __init__(self, eps, hidden_size):
        self.eps = eps
        self.hidden_size = hidden_size

    def forward(self, x):
        # No learned scale: synthesize a unit weight matching the input dtype.
        unit_weight = op.ones((self.hidden_size,), dtype=x.dtype)
        return op.rms_norm(x, weight=unit_weight, axes=[-1], epsilon=self.eps)
class Llama4TextAttention(nn.Module):  # pylint: disable=too-many-instance-attributes
    """Llama4 attention: per-layer RoPE/NoPE selection, optional QK norm and
    temperature tuning on NoPE layers."""

    def __init__(self, config: Llama4Config, layer_idx):
        self.head_dim = config.text_config.head_dim
        self.attn_scale = config.text_config.attn_scale
        self.floor_scale = config.text_config.floor_scale
        self.num_attention_heads = config.text_config.num_attention_heads
        self.num_kv_heads = config.text_config.num_key_value_heads
        self.num_q_heads = config.text_config.num_attention_heads // config.tensor_parallel_shards
        assert config.text_config.num_key_value_heads % config.tensor_parallel_shards == 0, (
            f"num_kv_heads({config.text_config.num_key_value_heads}) must be divisible by "
            f"tensor_parallel_shards"
        )
        assert config.text_config.num_key_value_heads >= config.tensor_parallel_shards, (
            f"Too large tensor_parallel_shards, must be smaller than "
            f"{config.text_config.num_key_value_heads}"
        )
        # Overwrites the global count above with the per-shard count.
        self.num_kv_heads = config.text_config.num_key_value_heads // config.tensor_parallel_shards
        self.q_proj = nn.Linear(
            config.text_config.hidden_size,
            self.num_q_heads * self.head_dim,
            bias=config.text_config.attention_bias,
        )
        self.k_proj = nn.Linear(
            config.text_config.hidden_size,
            self.num_kv_heads * self.head_dim,
            bias=config.text_config.attention_bias,
        )
        self.v_proj = nn.Linear(
            config.text_config.hidden_size,
            self.num_kv_heads * self.head_dim,
            bias=config.text_config.attention_bias,
        )
        self.o_proj = nn.Linear(
            self.num_q_heads * self.head_dim,
            config.text_config.hidden_size,
            bias=config.text_config.attention_bias,
        )
        self.attn_temperature_tuning = config.text_config.attn_temperature_tuning
        # 1 => this layer applies RoPE, 0 => NoPE layer (see Llama4TextConfig).
        self.use_rope = config.text_config.no_rope_layers[layer_idx]
        self.layer_idx = layer_idx
        self.rope_theta = config.text_config.rope_theta
        self.rope_scaling = config.text_config.rope_scaling
        # NOTE(review): mutates the shared config dict in place (every layer
        # rewrites the same dict) and raises TypeError if rope_scaling is
        # None — confirm rope_scaling is always present for Llama4 configs.
        self.rope_scaling["rope_type"] = "llama4"
        self.use_qk_norm = config.text_config.use_qk_norm
        self.rms_norm_eps = config.text_config.rms_norm_eps
        self.q_norm = Llama4TextL2Norm(self.rms_norm_eps, self.head_dim)
        self.k_norm = Llama4TextL2Norm(self.rms_norm_eps, self.head_dim)

    def forward(  # pylint: disable=too-many-locals
        self,
        hidden_states: Tensor,
        paged_kv_cache: PagedKVCache,
        layer_id: int,
        cache_position,
    ):
        """Attend over the paged KV cache; RoPE (if enabled for this layer) is
        applied here, not by the cache (which runs in RopeMode.NONE)."""
        d, h_q = self.head_dim, self.num_q_heads
        b, s, _ = hidden_states.shape
        # QKV Projection
        query_states = op.reshape(self.q_proj(hidden_states), (b, s, -1, d))
        key_states = op.reshape(self.k_proj(hidden_states), (b, s, -1, d))
        value_states = op.reshape(self.v_proj(hidden_states), (b, s, -1, d))

        if self.use_rope:
            # Apply the llama4 rotary kernel to fused QKV at the given
            # per-token cache positions (batch dim squeezed to rows).
            qkv = op.concat([query_states, key_states, value_states], dim=2)
            apply_rope = tvm.tir.IntImm("int64", 1)
            rotary_emb = position_embedding.llama4_rope_with_position_map(
                theta=self.rope_theta,
                scale=1.0,
                head_dim=self.head_dim,
                num_q_heads=self.num_q_heads,
                num_kv_heads=self.num_kv_heads,
                dtype=query_states.dtype,
                rope_scaling=self.rope_scaling,
            )
            query_states, key_states, value_states = op.tensor_ir_op(
                rotary_emb,
                "llama4_rope_with_position_map",
                args=[op.squeeze(qkv, axis=0), cache_position, apply_rope],
                out=(
                    Tensor.placeholder((s, h_q, d), query_states.dtype),
                    Tensor.placeholder((s, self.num_kv_heads, d), query_states.dtype),
                    Tensor.placeholder((s, self.num_kv_heads, d), query_states.dtype),
                ),
            )
            # Restore the leading batch dimension dropped by the kernel.
            query_states = query_states.reshape(b, s, h_q, d)
            key_states = key_states.reshape(b, s, self.num_kv_heads, d)
            value_states = value_states.reshape(b, s, self.num_kv_heads, d)

        # QK norm is only applied on RoPE layers.
        if self.use_qk_norm and self.use_rope:
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        if self.attn_temperature_tuning and not self.use_rope:
            # Position-dependent query scaling on NoPE layers:
            # scale = attn_scale * log(floor((pos + 1) / floor_scale) + 1) + 1
            attn_scales = (
                op.log(
                    op.floor(
                        (op.astype(cache_position, query_states.dtype) + 1.0) / self.floor_scale
                    )
                    + 1.0
                )
                * self.attn_scale
                + 1.0
            )
            attn_scales = op.broadcast_to(attn_scales.reshape(1, s, 1, 1), (b, s, 1, 1))
            query_states = query_states * attn_scales

        qkv = op.concat([query_states, key_states, value_states], dim=2)
        # Attention
        output = op.reshape(
            paged_kv_cache.attention_with_fused_qkv(
                layer_id, qkv, self.num_q_heads, sm_scale=self.head_dim**-0.5
            ),
            (b, s, h_q * d),
        )
        return self.o_proj(output)
class Llama4TextExperts(nn.Module):
    """Routed experts stored as stacked 3-D weights and evaluated with batched matmul."""

    def __init__(self, config: Llama4Config):
        self.num_experts = config.text_config.num_local_experts
        # Per-shard slice of the expert intermediate dimension.
        self.intermediate_size = (
            config.text_config.intermediate_size // config.tensor_parallel_shards
        )
        self.hidden_size = config.text_config.hidden_size
        self.expert_dim = self.intermediate_size
        # (num_experts, hidden, 2 * expert_dim): gate and up fused on the last axis.
        self.gate_up_proj = nn.Parameter(
            shape=(self.num_experts, self.hidden_size, 2 * self.expert_dim)
        )
        self.down_proj = nn.Parameter(shape=(self.num_experts, self.expert_dim, self.hidden_size))
        self.act_fn = ACT2FN[config.text_config.hidden_act]

    def forward(self, hidden_states):
        """SwiGLU over all experts at once; input rows are grouped per expert on axis 0."""
        hidden_states = hidden_states.reshape(self.gate_up_proj.shape[0], -1, self.hidden_size)
        gate_up = op.matmul(hidden_states, self.gate_up_proj)
        gate, up = op.chunk(gate_up, chunks=2, dim=-1)
        next_states = op.matmul((up * self.act_fn(gate)), self.down_proj)
        next_states = next_states.reshape(-1, self.hidden_size)
        return next_states
class Llama4Router(nn.Module):
    """Top-k router producing per-expert sigmoid scores (non-top-k experts masked out)."""

    def __init__(self, config: Llama4Config):
        self.num_experts = config.text_config.num_local_experts
        self.top_k = config.text_config.num_experts_per_tok
        # NOTE(review): out_features is num_experts // shards while forward()
        # builds masks over the full num_experts — looks inconsistent for
        # tensor_parallel_shards > 1; confirm intended TP behavior.
        self.intermediate_size = self.num_experts // config.tensor_parallel_shards
        self.router = nn.Linear(
            in_features=config.text_config.hidden_size,
            out_features=self.intermediate_size,
            bias=False,
        )

    def forward(self, hidden_states):
        """Return (scores, logits): scores has sigmoid(top-k logit) at the selected
        experts and sigmoid(-1e9) ~ 0 elsewhere."""
        router_logits = self.router(hidden_states)
        router_top_value, router_indices = op_ext.moe_misc.gating_topk(router_logits, self.top_k)
        # Scatter the top-k values back into a dense (tokens, num_experts)
        # tensor: build a (tokens, top_k, num_experts) one-hot mask, place the
        # top-k values there, -1e9 everywhere else, then reduce over top_k.
        j_axis = op.arange(0, self.num_experts)
        j_axis = op.unsqueeze(j_axis, 0)
        idx_exp = op.unsqueeze(router_indices, -1)
        mask = op.equal(idx_exp, j_axis)
        val_exp = op.unsqueeze(router_top_value, -1)
        neg_inf = op.full(mask.shape, -1e9, dtype=hidden_states.dtype)
        masked_vals = op.where(mask, val_exp, neg_inf)
        router_scores = op.max(masked_vals, axis=1)
        router_scores = op.sigmoid(router_scores)
        return router_scores, router_logits
class Llama4TextMoe(nn.Module):
    """Llama4 MoE block: routed experts plus an always-on shared expert.

    Note: every expert is evaluated on every token (dense evaluation); routing
    only scales each expert's input by its sigmoid score.
    """

    def __init__(self, config: Llama4Config):
        self.top_k = config.text_config.num_experts_per_tok
        self.hidden_dim = config.text_config.hidden_size
        self.num_experts = config.text_config.num_local_experts
        self.experts = Llama4TextExperts(config)
        self.router = Llama4Router(config)
        self.shared_expert = Llama4TextMLP(config)

    def forward(self, hidden_states):
        hidden_states = hidden_states.reshape(-1, self.hidden_dim)
        router_scores, _ = self.router(hidden_states)
        # Replicate tokens once per expert: (num_experts * tokens, hidden).
        routed_in = op.broadcast_to(
            hidden_states.reshape(1, *hidden_states.shape),
            [router_scores.shape[1], *hidden_states.shape],
        )
        routed_in = routed_in.reshape(-1, self.hidden_dim)
        # Scale each replica by its (expert, token) routing score.
        routed_in = routed_in * op.permute_dims(router_scores, axes=[1, 0]).reshape(-1, 1)
        routed_out = self.experts(routed_in)
        out = self.shared_expert(hidden_states)
        # Sum expert contributions back per token and add the shared path.
        out += op.sum(routed_out.reshape(router_scores.shape[1], -1, routed_out.shape[-1]), axis=0)
        return out
class Llama4TextDecoderLayer(nn.Module):
    """One pre-norm decoder layer: attention + (MoE or dense) FFN, with
    tensor-parallel shard hints attached at construction time."""

    def __init__(self, config: Llama4Config, layer_idx):
        rms_norm_eps = config.text_config.rms_norm_eps
        self.self_attn = Llama4TextAttention(config, layer_idx)
        self.is_moe_layer = layer_idx in config.text_config.moe_layers
        if self.is_moe_layer:  # the 128E model interleaves dense / sparse
            self.feed_forward = Llama4TextMoe(config)
        else:
            self.feed_forward = Llama4TextMLP(config)
        self.input_layernorm = nn.RMSNorm(
            config.text_config.hidden_size, -1, rms_norm_eps, bias=False
        )
        self.post_attention_layernorm = nn.RMSNorm(
            config.text_config.hidden_size, -1, rms_norm_eps, bias=False
        )

        def _set_tp():
            # Attach shard strategies: split heads/intermediate (dim 0 or the
            # expert dims) on the way in, the output projections (dim 1) on
            # the way out.
            def _set(layer, hint):
                if hasattr(layer, "weight"):
                    layer.weight.attrs["shard_strategy"] = hint
                else:
                    layer.attrs["shard_strategy"] = hint

            _set(self.self_attn.q_proj, tp.ShardSingleDim("_shard_q", dim=0))
            _set(self.self_attn.k_proj, tp.ShardSingleDim("_shard_k", dim=0))
            _set(self.self_attn.v_proj, tp.ShardSingleDim("_shard_v", dim=0))
            _set(self.self_attn.o_proj, tp.ShardSingleDim("_shard_o", dim=1))

            if isinstance(self.feed_forward, Llama4TextMLP):
                i = self.feed_forward.intermediate_size
                # segs=[i, i] keeps the fused gate/up halves aligned per shard.
                _set(
                    self.feed_forward.gate_up_proj,
                    tp.ShardSingleDim("_shard_mlp_up", segs=[i, i], dim=0),
                )
                _set(
                    self.feed_forward.down_proj,
                    tp.ShardSingleDim("_shard_mlp_down", dim=1),
                )
            else:
                assert isinstance(self.feed_forward, Llama4TextMoe)
                i = self.feed_forward.shared_expert.intermediate_size
                _set(
                    self.feed_forward.shared_expert.gate_up_proj,
                    tp.ShardSingleDim("_shard_mlp_up", segs=[i, i], dim=0),
                )
                _set(
                    self.feed_forward.shared_expert.down_proj,
                    tp.ShardSingleDim("_shard_mlp_down", dim=1),
                )
                j = self.feed_forward.experts.intermediate_size
                _set(
                    self.feed_forward.experts.gate_up_proj,
                    tp.ShardSingleDim("_shard_expert_mlp_up", segs=[j, j], dim=2),
                )
                _set(
                    self.feed_forward.experts.down_proj,
                    tp.ShardSingleDim("_shard_expert_mlp_down", dim=1),
                )
                _set(
                    self.feed_forward.router.router,
                    tp.ShardSingleDim("_shard_router", dim=0),
                )

        self.tensor_parallel_shards = config.tensor_parallel_shards
        _set_tp()

    def forward(
        self,
        hidden_states: Tensor,
        paged_kv_cache: PagedKVCache,
        layer_id: int,
        cache_position,
    ):
        """Pre-norm residual layer: x + attn(norm(x)), then x + ffn(norm(x))."""
        out = self.self_attn(
            self.input_layernorm(hidden_states),
            paged_kv_cache,
            layer_id,
            cache_position,
        )
        hidden_states = self._apply_residual(out, residual=hidden_states)
        out = self.feed_forward(self.post_attention_layernorm(hidden_states))
        hidden_states = self._apply_residual(
            op.reshape(out, hidden_states.shape), residual=hidden_states
        )
        return hidden_states

    def _apply_residual(self, out, residual):
        # Under tensor parallelism, all-reduce the partial output before the
        # residual add.
        if self.tensor_parallel_shards > 1:
            return op.ccl_allreduce(out, "sum") + residual
        return out + residual
class Llama4TextModel(nn.Module):
    """The Llama4 decoder stack: embeddings, decoder layers, final RMS norm."""

    def __init__(self, config: Llama4Config):
        assert config.text_config.hidden_size % config.text_config.num_attention_heads == 0
        # "vocab_size" stays a symbolic dimension until export.
        self.embed_tokens = LlamaEmbedding("vocab_size", config.text_config.hidden_size)
        self.layers = nn.ModuleList(
            [
                Llama4TextDecoderLayer(config, layer_idx)
                for layer_idx in range(config.text_config.num_hidden_layers)
            ]
        )
        self.norm = nn.RMSNorm(
            config.text_config.hidden_size,
            -1,
            config.text_config.rms_norm_eps,
            bias=False,
        )

    def forward(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Run all decoder layers over pre-computed embeddings and normalize the result."""
        hidden_states = input_embed
        # Per-token absolute positions from the KV cache, shared by all layers.
        cache_position = paged_kv_cache.get_query_positions(
            input_embed.shape[0] * input_embed.shape[1]
        )
        for layer_id, layer in enumerate(self.layers):
            hidden_states = layer(hidden_states, paged_kv_cache, layer_id, cache_position)
        hidden_states = self.norm(hidden_states)
        return hidden_states
class Llama4ForCausalLM(nn.Module):  # pylint: disable=too-many-instance-attributes
    """Llama4 causal LM: decoder stack plus LM head, exposing the entry points
    (prefill/decode/batch variants and their hidden-state counterparts) that
    MLC compiles and serves."""

    def __init__(self, config: Llama4Config):
        self.text_config = config.text_config
        self.model = Llama4TextModel(config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        if not self.text_config.tie_word_embeddings:
            # Separate output projection; "vocab_size" stays symbolic until export.
            self.lm_head = nn.Linear(self.text_config.hidden_size, "vocab_size", bias=False)
        self.num_hidden_layers = self.text_config.num_hidden_layers
        self.num_attention_heads = self.text_config.num_attention_heads
        self.num_key_value_heads = self.text_config.num_key_value_heads
        self.head_dim = self.text_config.head_dim
        self.hidden_size = self.text_config.hidden_size
        self.vocab_size = self.text_config.vocab_size
        self.rope_scaling = self.text_config.rope_scaling
        self.rope_theta = self.text_config.position_embedding_base
        self.tensor_parallel_shards = config.tensor_parallel_shards
        self.disaggregation = config.disaggregation
        self.dtype = "float32"

    def to(self, dtype: Optional[str] = None):
        """Cast parameters and remember the dtype for specs and KV-cache creation."""
        super().to(dtype=dtype)
        if dtype is not None:
            self.dtype = dtype

    def batch_forward(
        self,
        input_embeds: Tensor,
        paged_kv_cache: PagedKVCache,
        logit_positions: Optional[Tensor] = None,
    ):
        """Run the decoder on a batch; optionally gather selected positions before the LM head."""
        op_ext.configure()
        hidden_states = self.model(input_embeds, paged_kv_cache)
        if logit_positions is not None:
            if self.tensor_parallel_shards > 1:
                # All workers must agree on which positions to gather.
                logit_positions = op.ccl_broadcast_from_worker0(logit_positions)
            hidden_states = op.take(hidden_states, logit_positions, axis=1)
        return self.get_logits(hidden_states)

    def batch_forward_to_last_hidden_states(
        self,
        input_embeds: Tensor,
        paged_kv_cache: PagedKVCache,
    ):
        """Batched forward that stops at the final hidden states (no LM head)."""
        op_ext.configure()
        hidden_states = self.model(input_embeds, paged_kv_cache)
        return hidden_states

    def embed(self, input_ids: Tensor):
        """Token-id -> embedding lookup; ids broadcast from worker 0 under TP."""
        if self.tensor_parallel_shards > 1:
            input_ids = op.ccl_broadcast_from_worker0(input_ids)
        return self.model.embed_tokens(input_ids)

    def get_logits(self, hidden_states: Tensor):
        """Project hidden states to float32 vocab logits (tied or separate head)."""
        op_ext.configure()
        if self.tie_word_embeddings:
            logits = self.model.embed_tokens.lm_head_forward(hidden_states)
        else:
            logits = self.lm_head(hidden_states)
        if logits.dtype != "float32":
            logits = logits.astype("float32")
        return logits

    def batch_select_last_hidden_states(self, hidden_states: Tensor, logit_positions: Tensor):
        """Pick one hidden-state row per sequence from a flattened batch."""
        op_ext.configure()
        if self.tensor_parallel_shards > 1:
            logit_positions = op.ccl_broadcast_from_worker0(logit_positions)
        hidden_states = op.take(hidden_states, logit_positions, axis=0)
        return hidden_states

    def prefill(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Single-sequence prefill; returns logits for the LAST position only."""
        op_ext.configure()

        def _index(x: te.Tensor):
            # Select the last sequence position, i.e. x[:, -1:, :].
            b, s, d = x.shape
            return te.compute((b, 1, d), lambda i, _, k: x[i, s - 1, k], name="index")

        hidden_states = self.model(input_embed, paged_kv_cache)
        hidden_states = op.tensor_expr_op(_index, name_hint="index", args=[hidden_states])
        logits = self.get_logits(hidden_states)
        return logits, paged_kv_cache

    def decode(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Single-token decode step."""
        op_ext.configure()
        hidden_states = self.model(input_embed, paged_kv_cache)
        logits = self.get_logits(hidden_states)
        return logits, paged_kv_cache

    def prefill_to_last_hidden_states(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Prefill variant returning hidden states for ALL positions (no LM head)."""
        op_ext.configure()
        hidden_states = self.model(input_embed, paged_kv_cache)
        return hidden_states, paged_kv_cache

    def decode_to_last_hidden_states(self, input_embed: Tensor, paged_kv_cache: PagedKVCache):
        """Decode variant returning hidden states instead of logits."""
        op_ext.configure()
        hidden_states = self.model(input_embed, paged_kv_cache)
        return hidden_states, paged_kv_cache

    def batch_prefill(
        self,
        input_embeds: Tensor,
        logit_positions: Tensor,
        paged_kv_cache: PagedKVCache,
    ):
        """Batched prefill: logits only at `logit_positions`."""
        logits = self.batch_forward(input_embeds, paged_kv_cache, logit_positions)
        return logits, paged_kv_cache

    def batch_decode(self, input_embeds: Tensor, paged_kv_cache: PagedKVCache):
        """Batched single-token decode."""
        logits = self.batch_forward(input_embeds, paged_kv_cache)
        return logits, paged_kv_cache

    def batch_verify(self, input_embeds: Tensor, paged_kv_cache: PagedKVCache):
        """Batched verification pass: full-sequence logits."""
        logits = self.batch_forward(input_embeds, paged_kv_cache)
        return logits, paged_kv_cache

    def batch_prefill_to_last_hidden_states(
        self, input_embeds: Tensor, paged_kv_cache: PagedKVCache
    ):
        """Batched prefill returning hidden states (no LM head)."""
        hidden_states = self.batch_forward_to_last_hidden_states(input_embeds, paged_kv_cache)
        return hidden_states, paged_kv_cache

    def batch_decode_to_last_hidden_states(
        self, input_embeds: Tensor, paged_kv_cache: PagedKVCache
    ):
        """Batched decode returning hidden states (no LM head)."""
        hidden_states = self.batch_forward_to_last_hidden_states(input_embeds, paged_kv_cache)
        return hidden_states, paged_kv_cache

    def batch_verify_to_last_hidden_states(
        self, input_embeds: Tensor, paged_kv_cache: PagedKVCache
    ):
        """Batched verification returning hidden states (no LM head)."""
        hidden_states = self.batch_forward_to_last_hidden_states(input_embeds, paged_kv_cache)
        return hidden_states, paged_kv_cache

    def create_paged_kv_cache(  # pylint: disable=too-many-arguments
        self,
        max_batch_size: tir.Var,
        max_total_seq_len: tir.Var,
        prefill_chunk_size: tir.Var,
        page_size: tir.Var,
        support_sliding_window: tir.Var,
    ) -> PagedKVCache:
        """Build the paged KV cache for this model's (per-shard) head layout.

        rope_mode is NONE because Llama4TextAttention applies its own rotary
        kernel in-layer."""
        return PagedKVCache.create_generic(
            attn_kind="mha",
            max_batch_size=max_batch_size,
            max_total_seq_len=max_total_seq_len,
            prefill_chunk_size=prefill_chunk_size,
            page_size=page_size,
            support_sliding_window=support_sliding_window,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads // self.tensor_parallel_shards,
            num_key_value_heads=self.num_key_value_heads // self.tensor_parallel_shards,
            qk_head_dim=self.head_dim,
            v_head_dim=self.head_dim,
            rope_mode=RopeMode.NONE,
            rope_scale=1,
            rope_theta=self.rope_theta,
            rope_scaling=self.rope_scaling,
            enable_disaggregation=self.disaggregation,
            dtype=self.dtype,
        )

    def get_default_spec(self):
        """Declare the exported entry points with their tensor shapes/dtypes.

        Symbolic dimensions ("seq_len", "batch_size") are bound at runtime;
        the "$" entry configures parameter packing and effect mode."""
        mod_spec = {
            "embed": {
                "input_ids": nn.spec.Tensor(["seq_len"], "int32"),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "get_logits": {
                "hidden_states": nn.spec.Tensor(["seq_len", self.hidden_size], self.dtype),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_select_last_hidden_states": {
                "hidden_states": nn.spec.Tensor(["seq_len", self.hidden_size], self.dtype),
                "logit_positions": nn.spec.Tensor(["batch_size"], "int32"),
                "$": {
                    "param_mode": "none",
                    "effect_mode": "none",
                },
            },
            "prefill": {
                "input_embed": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "decode": {
                "input_embed": nn.spec.Tensor([1, 1, self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "prefill_to_last_hidden_states": {
                "input_embed": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "decode_to_last_hidden_states": {
                "input_embed": nn.spec.Tensor([1, 1, self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_prefill": {
                "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "logit_positions": nn.spec.Tensor(["batch_size"], "int32"),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_decode": {
                "input_embeds": nn.spec.Tensor(["batch_size", 1, self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_verify": {
                "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_prefill_to_last_hidden_states": {
                "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_decode_to_last_hidden_states": {
                "input_embeds": nn.spec.Tensor(["batch_size", 1, self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "batch_verify_to_last_hidden_states": {
                "input_embeds": nn.spec.Tensor([1, "seq_len", self.hidden_size], self.dtype),
                "paged_kv_cache": nn.spec.Object(object_type=PagedKVCache),
                "$": {
                    "param_mode": "packed",
                    "effect_mode": "none",
                },
            },
            "create_paged_kv_cache": {
                "max_batch_size": int,
                "max_total_seq_len": int,
                "prefill_chunk_size": int,
                "page_size": int,
                "support_sliding_window": int,
                "$": {
                    "param_mode": "none",
                    "effect_mode": "none",
                },
            },
        }
        return nn.spec.ModuleSpec.from_raw(mod_spec, self)
| {
"repo_id": "mlc-ai/mlc-llm",
"file_path": "python/mlc_llm/model/llama4/llama4_model.py",
"license": "Apache License 2.0",
"lines": 713,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/utils/uv_utils.py | """
Utilities for uv package manager integration.
This module provides functions for detecting uv projects and exporting dependencies
via ``uv export`` for automatic dependency inference during model logging.
"""
import logging
import re
import shutil
import subprocess
from pathlib import Path
from typing import NamedTuple
from packaging.version import Version
from mlflow.environment_variables import MLFLOW_LOG_UV_FILES
_logger = logging.getLogger(__name__)
# Minimum uv version required for ``uv export`` functionality
_MIN_UV_VERSION = Version("0.5.0")

# File names used for uv project detection and artifacts
_UV_LOCK_FILE = "uv.lock"  # lockfile pinning exact dependency versions
_PYPROJECT_FILE = "pyproject.toml"  # project metadata / dependency declarations
_PYTHON_VERSION_FILE = ".python-version"  # optional pinned interpreter version
def get_uv_version() -> Version | None:
    """
    Get the installed uv version.

    Returns:
        The uv version as a packaging.version.Version object, or None if uv is not installed
        or version cannot be determined.
    """
    uv_bin = shutil.which("uv")
    if uv_bin is None:
        return None
    try:
        result = subprocess.run(
            [uv_bin, "--version"],
            capture_output=True,
            text=True,
            check=True,
        )
        # Output format: "uv 0.5.0 (abc123 2024-01-01)"
        version_str = result.stdout.strip().split()[1]
        return Version(version_str)
    except (subprocess.CalledProcessError, OSError, IndexError, ValueError) as e:
        # OSError (incl. FileNotFoundError/PermissionError) covers the race
        # where the binary found by shutil.which() disappears or becomes
        # non-executable before subprocess.run() starts; without it that
        # race would escape this function instead of returning None.
        # ValueError also covers packaging's InvalidVersion.
        _logger.debug(f"Failed to determine uv version: {e}")
        return None
def _get_uv_binary() -> str | None:
    """Locate the uv executable.

    Returns the binary path only when uv is installed and its version
    satisfies the minimum requirement; otherwise returns None.
    """
    uv_path = shutil.which("uv")
    if uv_path is None:
        return None
    installed = get_uv_version()
    if installed is None:
        return None
    if installed >= _MIN_UV_VERSION:
        return uv_path
    _logger.debug(f"uv version {installed} is below minimum required version {_MIN_UV_VERSION}")
    return None
def is_uv_available() -> bool:
    """
    Check if uv is installed and meets the minimum version requirement.

    Returns:
        True if uv is installed and version >= 0.5.0, False otherwise.
    """
    binary = _get_uv_binary()
    return binary is not None
class UVProjectInfo(NamedTuple):
    """Paths for a detected uv project."""

    # Path to the project's uv.lock lockfile.
    uv_lock: Path
    # Path to the project's pyproject.toml.
    pyproject: Path
def detect_uv_project(directory: str | Path | None = None) -> UVProjectInfo | None:
    """
    Detect whether ``directory`` is a uv project.

    A directory qualifies as a uv project only when it contains BOTH
    ``uv.lock`` and ``pyproject.toml``.

    Args:
        directory: The directory to check. Defaults to the current working directory.

    Returns:
        A UVProjectInfo with paths to uv.lock and pyproject.toml, or None
        if the directory is not a uv project.
    """
    root = Path(directory) if directory is not None else Path.cwd()
    lock_path = root / _UV_LOCK_FILE
    project_path = root / _PYPROJECT_FILE
    if not (lock_path.exists() and project_path.exists()):
        return None
    _logger.info(
        f"Detected uv project: found {_UV_LOCK_FILE} and {_PYPROJECT_FILE} in {root}"
    )
    return UVProjectInfo(uv_lock=lock_path, pyproject=project_path)
def export_uv_requirements(
    directory: str | Path | None = None,
    no_dev: bool = True,
    no_hashes: bool = True,
    frozen: bool = True,
) -> list[str] | None:
    """
    Export dependencies from a uv project to pip-compatible requirements.

    Runs ``uv export`` against the project's lockfile and returns the pinned
    dependency list. Environment markers (e.g. ``; python_version < '3.12'``)
    are kept intact so pip can evaluate them at install time.

    Args:
        directory: The uv project directory. Defaults to the current working directory.
        no_dev: Exclude development dependencies. Defaults to True.
        no_hashes: Omit hashes from output. Defaults to True.
        frozen: Use frozen lockfile without updating. Defaults to True.

    Returns:
        A list of requirement strings (e.g., ``["requests==2.28.0", "numpy==1.24.0"]``),
        or None if export fails.
    """
    uv_bin = _get_uv_binary()
    if uv_bin is None:
        _logger.warning(
            "uv is not available or version is below minimum required. "
            "Falling back to pip-based inference."
        )
        return None

    directory = Path.cwd() if directory is None else Path(directory)

    optional_flags = [
        flag
        for enabled, flag in (
            (no_dev, "--no-dev"),
            (no_hashes, "--no-hashes"),
            (frozen, "--frozen"),
        )
        if enabled
    ]
    cmd = [
        uv_bin,
        "export",
        *optional_flags,
        "--no-header",  # Omit the "autogenerated by uv" comment
        "--no-emit-project",  # Don't emit the project itself
        "--no-annotate",  # Omit "# via ..." comment annotations
    ]

    try:
        _logger.debug(f"Running uv export: {' '.join(str(c) for c in cmd)}")
        completed = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True,
            cwd=directory,
        )
        # Keep non-empty, non-comment lines; markers after ';' stay attached.
        requirements = [
            stripped
            for line in completed.stdout.strip().splitlines()
            if (stripped := line.strip()) and not stripped.startswith("#")
        ]
        _logger.info(f"Exported {len(requirements)} dependencies via uv")
        return requirements
    except subprocess.CalledProcessError as e:
        _logger.warning(
            f"uv export failed with exit code {e.returncode}. "
            f"stderr: {e.stderr}. Falling back to pip-based inference."
        )
        return None
    except Exception as e:
        _logger.warning(f"uv export failed: {e}. Falling back to pip-based inference.")
        return None
def copy_uv_project_files(
    dest_dir: str | Path,
    source_dir: str | Path,
) -> bool:
    """
    Copy uv project files into the model artifact directory.

    Copies uv.lock, pyproject.toml, and .python-version (when present) so the
    uv project configuration is preserved as model artifacts for
    reproducibility. Setting MLFLOW_LOG_UV_FILES=false disables the copy,
    e.g. for large projects where uv.lock size is a concern.

    Args:
        dest_dir: The destination directory (model artifact directory).
        source_dir: The source directory containing uv project files.

    Returns:
        True if uv files were copied, False otherwise.
    """
    if not MLFLOW_LOG_UV_FILES.get():
        _logger.info("uv file logging disabled via MLFLOW_LOG_UV_FILES environment variable")
        return False

    dest_dir, source_dir = Path(dest_dir), Path(source_dir)
    project = detect_uv_project(source_dir)
    if project is None:
        return False

    try:
        # Required artifacts: the lockfile and the project metadata.
        for src, name in ((project.uv_lock, _UV_LOCK_FILE), (project.pyproject, _PYPROJECT_FILE)):
            shutil.copy2(src, dest_dir / name)
            _logger.info(f"Copied {name} to model artifacts")

        # Optional artifact: pinned interpreter version.
        pinned_python = source_dir / _PYTHON_VERSION_FILE
        if pinned_python.exists():
            shutil.copy2(pinned_python, dest_dir / _PYTHON_VERSION_FILE)
            _logger.info(f"Copied {_PYTHON_VERSION_FILE} to model artifacts")
        return True
    except Exception as e:
        _logger.warning(f"Failed to copy uv project files: {e}")
        return False
def extract_index_urls_from_uv_lock(uv_lock_path: str | Path) -> list[str]:
"""
Extract private index URLs from a uv.lock file.
Parses the uv.lock TOML file to find package sources that use non-PyPI registries.
These URLs are returned as `--extra-index-url` compatible strings.
Note: Credentials are NEVER stored in uv.lock. Users must provide credentials
at restore time via UV_INDEX_* environment variables or .netrc file.
Args:
uv_lock_path: Path to the uv.lock file.
Returns:
A list of unique index URLs (excluding PyPI). Empty list if none found
or if parsing fails.
"""
uv_lock_path = Path(uv_lock_path)
if not uv_lock_path.exists():
return []
try:
content = uv_lock_path.read_text()
# Extract registry URLs from uv.lock using regex
# Format in uv.lock: source = { registry = "https://..." }
registry_pattern = r'source\s*=\s*\{\s*registry\s*=\s*["\']([^"\']+)["\']'
urls = set(re.findall(registry_pattern, content))
# Filter out default PyPI URLs
pypi_urls = {
"https://pypi.org/simple",
"https://pypi.org/simple/",
"https://pypi.python.org/simple",
"https://pypi.python.org/simple/",
}
private_urls = [url for url in urls if url.lower() not in pypi_urls]
if private_urls:
_logger.info(f"Extracted {len(private_urls)} private index URL(s) from uv.lock")
_logger.warning(
"Private package indexes detected in uv lockfile. "
"Ensure credentials are available at model load time via "
"UV_INDEX_* environment variables or .netrc file."
)
return sorted(private_urls)
except Exception as e:
_logger.debug(f"Failed to extract index URLs from uv.lock: {e}")
return []
def create_uv_sync_pyproject(
    dest_dir: str | Path,
    python_version: str,
    project_name: str = "mlflow-model-env",
) -> Path:
    """
    Create a minimal pyproject.toml for uv sync.

    This is required for `uv sync --frozen` to work when restoring a model
    environment from a uv.lock artifact.

    Args:
        dest_dir: Directory where pyproject.toml will be created.
        python_version: Python version requirement (e.g., "3.11" or "3.11.5").
        project_name: Name for the temporary project. Defaults to "mlflow-model-env".

    Returns:
        Path to the created pyproject.toml file.
    """
    dest_dir = Path(dest_dir)
    # Pin the exact Python version including micro (e.g. "==3.11.5") so uv
    # restores the environment with the same interpreter that was used at
    # log time. "dependencies" is left empty because all deps come from
    # uv.lock.
    pyproject_content = f"""[project]
name = "{project_name}"
version = "0.0.0"
requires-python = "=={python_version}"
dependencies = []

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
"""
    # NOTE(review): hatchling is named as the build backend only so the file is
    # a valid project for uv; nothing is actually built from this stub.
    pyproject_path = dest_dir / _PYPROJECT_FILE
    pyproject_path.write_text(pyproject_content)
    _logger.debug(f"Created minimal pyproject.toml for uv sync at {pyproject_path}")
    return pyproject_path
def setup_uv_sync_environment(
    env_dir: str | Path,
    model_path: str | Path,
    python_version: str,
) -> bool:
    """
    Set up a uv project structure for environment restoration via ``uv sync --frozen``.

    Copies uv.lock from the model artifacts and ensures a pyproject.toml is
    present (copied from the artifacts, or synthesized) so that uv can
    restore the exact logged environment.

    Args:
        env_dir: The environment directory where the uv project will be set up.
        model_path: Path to the model artifacts containing uv.lock.
        python_version: Python version for the environment.

    Returns:
        True if setup succeeded (uv.lock exists in model artifacts), False otherwise.
    """
    env_dir = Path(env_dir)
    model_path = Path(model_path)

    lock_src = model_path / _UV_LOCK_FILE
    if not lock_src.exists():
        _logger.debug(f"No uv.lock found in model artifacts at {model_path}")
        return False

    env_dir.mkdir(parents=True, exist_ok=True)

    # The lockfile is the source of truth for the environment contents.
    shutil.copy2(lock_src, env_dir / _UV_LOCK_FILE)
    _logger.debug(f"Copied uv.lock to {env_dir}")

    # Prefer the project's own pyproject.toml; otherwise synthesize a minimal
    # one so `uv sync` has a project to operate on.
    pyproject_src = model_path / _PYPROJECT_FILE
    if pyproject_src.exists():
        shutil.copy2(pyproject_src, env_dir / _PYPROJECT_FILE)
        _logger.debug(f"Copied pyproject.toml from model artifacts to {env_dir}")
    else:
        create_uv_sync_pyproject(env_dir, python_version)

    version_src = model_path / _PYTHON_VERSION_FILE
    if version_src.exists():
        shutil.copy2(version_src, env_dir / _PYTHON_VERSION_FILE)
        _logger.debug(f"Copied .python-version to {env_dir}")

    _logger.info(f"Set up uv sync environment at {env_dir}")
    return True
def run_uv_sync(
    project_dir: str | Path,
    frozen: bool = True,
    no_dev: bool = True,
    capture_output: bool = False,
) -> bool:
    """
    Run `uv sync` to install dependencies from a uv.lock file.

    Restores the locked environment exactly and cross-platform, typically
    much faster than pip-based installation.

    Args:
        project_dir: Directory containing pyproject.toml and uv.lock.
        frozen: Use frozen lockfile without updating. Defaults to True.
        no_dev: Exclude development dependencies. Defaults to True.
        capture_output: Whether to capture stdout/stderr. Defaults to False.

    Returns:
        True if sync succeeded, False otherwise.
    """
    binary = _get_uv_binary()
    if binary is None:
        _logger.warning("uv is not available for environment sync")
        return False

    workdir = Path(project_dir)
    command = [binary, "sync"]
    if frozen:
        command.append("--frozen")
    if no_dev:
        command.append("--no-dev")

    try:
        _logger.info(f"Running uv sync in {workdir}")
        _logger.debug(f"uv sync command: {' '.join(str(c) for c in command)}")
        subprocess.run(
            command,
            cwd=workdir,
            capture_output=capture_output,
            check=True,
            text=True,
        )
        _logger.info("uv sync completed successfully")
        return True
    except subprocess.CalledProcessError as e:
        _logger.warning(f"uv sync failed with exit code {e.returncode}: {e.stderr}")
        return False
    except Exception as e:
        _logger.warning(f"uv sync failed: {e}")
        return False
def has_uv_lock_artifact(model_path: str | Path) -> bool:
    """Check if a model has a uv.lock artifact."""
    lock_file = Path(model_path) / _UV_LOCK_FILE
    return lock_file.exists()
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/utils/uv_utils.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/pyfunc/test_uv_model_logging.py | """
Integration tests for uv package manager support in model logging and loading.
Tests the end-to-end workflow:
1. uv project detection during log_model()
2. Artifact generation (uv.lock, pyproject.toml, .python-version, requirements.txt)
3. Model loading with uv artifacts
These tests use REAL uv calls (not mocked) where possible, following MLflow best practices.
Tests requiring uv are skipped if uv is not installed or below minimum version.
"""
import platform
import shutil
import subprocess
from pathlib import Path
from unittest import mock
import pytest
import mlflow
import mlflow.pyfunc
from mlflow.utils.uv_utils import (
_PYPROJECT_FILE,
_PYTHON_VERSION_FILE,
_UV_LOCK_FILE,
is_uv_available,
)
# Constants for artifact file names
_REQUIREMENTS_FILE_NAME = "requirements.txt"
_PYTHON_ENV_FILE_NAME = "python_env.yaml"

# Skip marker for tests requiring uv. These tests shell out to a real uv
# binary, so they are skipped when uv is absent or too old for `uv export`.
requires_uv = pytest.mark.skipif(
    not is_uv_available(),
    reason="uv is not installed or below minimum required version (0.5.0)",
)
class SimplePythonModel(mlflow.pyfunc.PythonModel):
    """Minimal pyfunc model used as a logging target in these tests."""

    def predict(self, context, model_input, params=None):
        # Identity prediction: the tests care about logged artifacts, not outputs.
        return model_input
@pytest.fixture
def python_model():
    """Return a fresh SimplePythonModel instance per test."""
    return SimplePythonModel()
@pytest.fixture
def tmp_uv_project(tmp_path):
    """Create a real uv project with uv lock."""
    pyproject_content = """[project]
name = "test_uv_project"
version = "0.1.0"
requires-python = ">=3.10"
dependencies = [
    "numpy>=1.24.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
"""
    (tmp_path / _PYPROJECT_FILE).write_text(pyproject_content)

    # Create .python-version
    (tmp_path / _PYTHON_VERSION_FILE).write_text("3.11.5\n")

    # Create minimal package structure for hatchling
    pkg_dir = tmp_path / "test_uv_project"
    pkg_dir.mkdir()
    (pkg_dir / "__init__.py").write_text('"""Test uv project."""\n__version__ = "0.1.0"\n')

    # Run uv lock to generate real uv.lock; skip (not fail) when the
    # environment cannot resolve the project, since that is an environment
    # problem rather than an mlflow defect.
    result = subprocess.run(
        ["uv", "lock"],
        cwd=tmp_path,
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        pytest.skip(f"uv lock failed: {result.stderr}")

    return tmp_path
# --- Model Logging Tests with Real uv ---
@requires_uv
def test_pyfunc_log_model_copies_uv_artifacts(tmp_uv_project, python_model, monkeypatch):
    """Logging from inside a uv project copies uv.lock, pyproject.toml and
    .python-version into the model artifacts with matching content."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    # Verify uv artifacts are copied
    assert (artifact_dir / _UV_LOCK_FILE).exists()
    assert (artifact_dir / _PYPROJECT_FILE).exists()
    assert (artifact_dir / _PYTHON_VERSION_FILE).exists()
    # Verify content matches source
    assert "version = 1" in (artifact_dir / _UV_LOCK_FILE).read_text()
    assert "test_uv_project" in (artifact_dir / _PYPROJECT_FILE).read_text()
    assert "3.11.5" in (artifact_dir / _PYTHON_VERSION_FILE).read_text()
@requires_uv
def test_pyfunc_log_model_python_env_uses_current_python_version(
    tmp_uv_project, python_model, monkeypatch
):
    """python_env.yaml records the running interpreter's version, not the
    uv project's pinned .python-version."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    python_env_file = artifact_dir / _PYTHON_ENV_FILE_NAME
    assert python_env_file.exists()
    python_env_content = python_env_file.read_text()
    # python_env.yaml always uses the current interpreter version
    assert platform.python_version() in python_env_content
@requires_uv
def test_pyfunc_log_model_respects_mlflow_log_uv_files_env_var(
    tmp_uv_project, python_model, monkeypatch
):
    """MLFLOW_LOG_UV_FILES=false suppresses uv file artifacts while still
    generating requirements.txt via uv export."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", "false")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    # uv artifacts should NOT be copied when env var is false
    assert not (artifact_dir / _UV_LOCK_FILE).exists()
    assert not (artifact_dir / _PYPROJECT_FILE).exists()
    # But requirements.txt should still exist (from uv export)
    assert (artifact_dir / _REQUIREMENTS_FILE_NAME).exists()
    requirements_content = (artifact_dir / _REQUIREMENTS_FILE_NAME).read_text()
    assert "numpy" in requirements_content.lower()
@requires_uv
def test_pyfunc_log_model_with_explicit_uv_project_path_parameter(
    tmp_path, tmp_uv_project, python_model, monkeypatch
):
    """An explicit uv_project_path is honored even when cwd is not a uv project."""
    # Work from a different directory than the uv project
    work_dir = tmp_path / "work"
    work_dir.mkdir()
    monkeypatch.chdir(work_dir)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(
            name="model",
            python_model=python_model,
            uv_project_path=tmp_uv_project,
        )
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert (artifact_dir / _UV_LOCK_FILE).exists()
    assert (artifact_dir / _PYPROJECT_FILE).exists()
    assert "test_uv_project" in (artifact_dir / _PYPROJECT_FILE).read_text()
@requires_uv
def test_pyfunc_log_model_generates_requirements_from_uv_export(
    tmp_uv_project, python_model, monkeypatch
):
    """requirements.txt is generated from uv export and includes locked deps."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    requirements_file = artifact_dir / _REQUIREMENTS_FILE_NAME
    assert requirements_file.exists()
    requirements_content = requirements_file.read_text()
    assert "numpy" in requirements_content.lower()
# --- Fallback Tests (mocking required to simulate uv unavailable) ---
def test_pyfunc_log_model_falls_back_when_uv_not_available(tmp_path, python_model, monkeypatch):
    """With a uv project present but no usable uv binary, logging falls back
    to pip-based inference and still produces requirements.txt."""
    (tmp_path / _UV_LOCK_FILE).write_text('version = 1\nrequires-python = ">=3.10"\n')
    (tmp_path / _PYPROJECT_FILE).write_text('[project]\nname = "test"\nversion = "0.1.0"\n')
    monkeypatch.chdir(tmp_path)
    with mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value=None):
        with mlflow.start_run() as run:
            mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert (artifact_dir / _REQUIREMENTS_FILE_NAME).exists()
def test_pyfunc_log_model_falls_back_when_uv_export_fails(tmp_path, python_model, monkeypatch):
    """A failing `uv export` subprocess triggers the pip-based fallback."""
    (tmp_path / _UV_LOCK_FILE).write_text('version = 1\nrequires-python = ">=3.10"\n')
    (tmp_path / _PYPROJECT_FILE).write_text('[project]\nname = "test"\nversion = "0.1.0"\n')
    monkeypatch.chdir(tmp_path)
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch(
            "mlflow.utils.uv_utils.subprocess.run",
            side_effect=subprocess.CalledProcessError(1, "uv"),
        ),
    ):
        with mlflow.start_run() as run:
            mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert (artifact_dir / _REQUIREMENTS_FILE_NAME).exists()
def test_pyfunc_log_model_non_uv_project_uses_standard_inference(
    python_model, tmp_path, monkeypatch
):
    """Outside a uv project, standard inference runs and no uv files are logged."""
    monkeypatch.chdir(tmp_path)
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert (artifact_dir / _REQUIREMENTS_FILE_NAME).exists()
    assert (artifact_dir / _PYTHON_ENV_FILE_NAME).exists()
    assert not (artifact_dir / _UV_LOCK_FILE).exists()
    assert not (artifact_dir / _PYPROJECT_FILE).exists()
# --- Model Loading Tests ---
@requires_uv
def test_load_pyfunc_model_with_uv_artifacts_and_predict(tmp_uv_project, python_model, monkeypatch):
    """A model logged with uv artifacts loads and serves predictions."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    model_uri = f"runs:/{run.info.run_id}/model"
    loaded_model = mlflow.pyfunc.load_model(model_uri)
    assert loaded_model is not None
    assert loaded_model.metadata is not None
    import pandas as pd

    test_input = pd.DataFrame({"a": [1, 2, 3]})
    predictions = loaded_model.predict(test_input)
    assert predictions is not None
# --- Save Model Tests ---
@requires_uv
def test_pyfunc_save_model_with_uv_project(tmp_uv_project, python_model, tmp_path, monkeypatch):
    """save_model from a uv project cwd writes requirements plus all uv files."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    model_path = tmp_path / "saved_model"
    mlflow.pyfunc.save_model(model_path, python_model=python_model)
    assert (model_path / _REQUIREMENTS_FILE_NAME).exists()
    assert (model_path / _UV_LOCK_FILE).exists()
    assert (model_path / _PYPROJECT_FILE).exists()
    assert (model_path / _PYTHON_VERSION_FILE).exists()
@requires_uv
def test_pyfunc_save_model_with_explicit_uv_project_path(
    tmp_uv_project, python_model, tmp_path, monkeypatch
):
    """save_model honors uv_project_path when cwd is not the uv project."""
    work_dir = tmp_path / "work"
    work_dir.mkdir()
    model_path = tmp_path / "saved_model"
    monkeypatch.chdir(work_dir)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    mlflow.pyfunc.save_model(
        model_path,
        python_model=python_model,
        uv_project_path=tmp_uv_project,
    )
    assert (model_path / _UV_LOCK_FILE).exists()
    assert (model_path / _PYPROJECT_FILE).exists()
# --- Environment Variable Variations ---
@requires_uv
@pytest.mark.parametrize("env_value", ["false", "0", "FALSE", "False"])
def test_mlflow_log_uv_files_env_var_false_variants(
    tmp_uv_project, python_model, monkeypatch, env_value
):
    """All falsy spellings of MLFLOW_LOG_UV_FILES suppress uv file artifacts."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", env_value)
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert not (artifact_dir / _UV_LOCK_FILE).exists()
    assert not (artifact_dir / _PYPROJECT_FILE).exists()
    assert (artifact_dir / _REQUIREMENTS_FILE_NAME).exists()
@requires_uv
@pytest.mark.parametrize("env_value", ["true", "1", "TRUE", "True"])
def test_mlflow_log_uv_files_env_var_true_variants(
    tmp_uv_project, python_model, monkeypatch, env_value
):
    """All truthy spellings of MLFLOW_LOG_UV_FILES keep uv file artifacts."""
    monkeypatch.chdir(tmp_uv_project)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", env_value)
    with mlflow.start_run() as run:
        mlflow.pyfunc.log_model(name="model", python_model=python_model)
    artifact_path = mlflow.artifacts.download_artifacts(
        run_id=run.info.run_id, artifact_path="model"
    )
    artifact_dir = Path(artifact_path)
    assert (artifact_dir / _UV_LOCK_FILE).exists()
    assert (artifact_dir / _PYPROJECT_FILE).exists()
# --- uv Sync Environment Setup Integration Tests ---
@requires_uv
def test_setup_uv_sync_environment_real(tmp_uv_project, tmp_path):
    """setup_uv_sync_environment copies uv.lock/.python-version and, lacking a
    pyproject.toml artifact, synthesizes a pinned minimal one."""
    from mlflow.utils.uv_utils import has_uv_lock_artifact, setup_uv_sync_environment

    model_artifacts = tmp_path / "model_artifacts"
    model_artifacts.mkdir()
    shutil.copy2(tmp_uv_project / _UV_LOCK_FILE, model_artifacts / _UV_LOCK_FILE)
    shutil.copy2(tmp_uv_project / _PYTHON_VERSION_FILE, model_artifacts / _PYTHON_VERSION_FILE)
    assert has_uv_lock_artifact(model_artifacts)
    env_dir = tmp_path / "env"
    result = setup_uv_sync_environment(env_dir, model_artifacts, "3.11.5")
    assert result is True
    assert (env_dir / _UV_LOCK_FILE).exists()
    assert (env_dir / _PYPROJECT_FILE).exists()
    assert (env_dir / _PYTHON_VERSION_FILE).exists()
    # No pyproject.toml in model_artifacts, so create_uv_sync_pyproject
    # generates one with pinned version
    pyproject_content = (env_dir / _PYPROJECT_FILE).read_text()
    assert 'name = "mlflow-model-env"' in pyproject_content
    assert 'requires-python = "==3.11.5"' in pyproject_content
@requires_uv
def test_extract_index_urls_from_real_uv_lock(tmp_uv_project):
    """A PyPI-only lockfile yields no truly-private index URLs."""
    from mlflow.utils.uv_utils import extract_index_urls_from_uv_lock

    result = extract_index_urls_from_uv_lock(tmp_uv_project / _UV_LOCK_FILE)
    # Tolerate well-known public non-PyPI mirrors that resolvers may record.
    well_known_public = {"https://download.pytorch.org/whl/cpu"}
    truly_private = [url for url in result if url not in well_known_public]
    assert truly_private == []
@requires_uv
def test_run_uv_sync_real(tmp_uv_project, tmp_path):
    """run_uv_sync against a real locked project creates a .venv and succeeds."""
    from mlflow.utils.uv_utils import run_uv_sync

    sync_dir = tmp_path / "sync_project"
    shutil.copytree(tmp_uv_project, sync_dir)
    result = run_uv_sync(sync_dir, frozen=True, no_dev=True)
    assert result is True
    assert (sync_dir / ".venv").exists()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/pyfunc/test_uv_model_logging.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/utils/test_uv_utils.py | import subprocess
from unittest import mock
import pytest
from packaging.version import Version
from mlflow.environment_variables import MLFLOW_UV_AUTO_DETECT
from mlflow.utils.environment import infer_pip_requirements
from mlflow.utils.uv_utils import (
_PYPROJECT_FILE,
_UV_LOCK_FILE,
copy_uv_project_files,
create_uv_sync_pyproject,
detect_uv_project,
export_uv_requirements,
extract_index_urls_from_uv_lock,
get_uv_version,
has_uv_lock_artifact,
is_uv_available,
run_uv_sync,
setup_uv_sync_environment,
)
# --- get_uv_version tests ---
def test_get_uv_version_returns_none_when_uv_not_installed():
    """With uv absent from PATH, version detection yields None."""
    no_uv = mock.patch("mlflow.utils.uv_utils.shutil.which", return_value=None)
    with no_uv:
        assert get_uv_version() is None
def test_get_uv_version_returns_version_when_uv_installed():
    """The `uv --version` banner is parsed into a packaging Version."""
    mock_result = mock.Mock()
    mock_result.stdout = "uv 0.5.0 (abc123 2024-01-01)"
    with (
        mock.patch("mlflow.utils.uv_utils.shutil.which", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result) as mock_run,
    ):
        version = get_uv_version()
    assert version == Version("0.5.0")
    mock_run.assert_called_once()
def test_get_uv_version_returns_none_on_subprocess_error():
    """A failing `uv --version` subprocess is swallowed and maps to None."""
    with (
        mock.patch("mlflow.utils.uv_utils.shutil.which", return_value="/usr/bin/uv"),
        mock.patch(
            "mlflow.utils.uv_utils.subprocess.run",
            side_effect=subprocess.CalledProcessError(1, "uv"),
        ),
    ):
        assert get_uv_version() is None
def test_get_uv_version_returns_none_on_parse_error():
    """Unparseable `uv --version` output maps to None rather than raising."""
    mock_result = mock.Mock()
    mock_result.stdout = "invalid output"
    with (
        mock.patch("mlflow.utils.uv_utils.shutil.which", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result),
    ):
        assert get_uv_version() is None
# --- is_uv_available tests ---
def test_is_uv_available_returns_false_when_uv_not_installed():
    """uv missing from PATH means the availability check is False."""
    no_uv = mock.patch("mlflow.utils.uv_utils.shutil.which", return_value=None)
    with no_uv:
        assert is_uv_available() is False
def test_is_uv_available_returns_false_when_version_below_minimum():
    """A uv older than the minimum supported version counts as unavailable."""
    mock_result = mock.Mock()
    mock_result.stdout = "uv 0.4.0 (abc123 2024-01-01)"
    with (
        mock.patch("mlflow.utils.uv_utils.shutil.which", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result),
    ):
        assert is_uv_available() is False
@pytest.mark.parametrize("version_str", ["0.5.0", "1.0.0"])
def test_is_uv_available_returns_true_when_version_meets_or_exceeds_minimum(version_str):
    """Versions at or above the minimum make uv count as available."""
    mock_result = mock.Mock()
    mock_result.stdout = f"uv {version_str} (abc123 2024-01-01)"
    with (
        mock.patch("mlflow.utils.uv_utils.shutil.which", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result),
    ):
        assert is_uv_available() is True
# --- detect_uv_project tests ---
@pytest.mark.parametrize(
    ("create_uv_lock", "create_pyproject"),
    [(False, True), (True, False)],
    ids=["missing_uv_lock", "missing_pyproject"],
)
def test_detect_uv_project_returns_none_when_file_missing(
    tmp_path, create_uv_lock, create_pyproject
):
    """Detection requires BOTH marker files; either alone yields None."""
    if create_uv_lock:
        (tmp_path / _UV_LOCK_FILE).touch()
    if create_pyproject:
        (tmp_path / _PYPROJECT_FILE).touch()
    assert detect_uv_project(tmp_path) is None
def test_detect_uv_project_returns_paths_when_both_files_exist(tmp_path):
    """Both marker files present yields a populated UVProjectInfo."""
    for marker in (_UV_LOCK_FILE, _PYPROJECT_FILE):
        (tmp_path / marker).touch()
    info = detect_uv_project(tmp_path)
    assert info is not None
    assert info.uv_lock == tmp_path / _UV_LOCK_FILE
    assert info.pyproject == tmp_path / _PYPROJECT_FILE
def test_detect_uv_project_uses_cwd_when_directory_not_specified(tmp_path, monkeypatch):
    """With no directory argument, detection inspects the current working directory."""
    monkeypatch.chdir(tmp_path)
    (tmp_path / _UV_LOCK_FILE).touch()
    (tmp_path / _PYPROJECT_FILE).touch()
    result = detect_uv_project()
    assert result is not None
    assert result.uv_lock == tmp_path / _UV_LOCK_FILE
# --- export_uv_requirements tests ---
def test_export_uv_requirements_returns_none_when_uv_not_available():
    """Export is skipped entirely (None) when no usable uv binary exists."""
    no_binary = mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value=None)
    with no_binary:
        assert export_uv_requirements() is None
def test_export_uv_requirements_returns_requirements_list(tmp_path):
    """uv export stdout is parsed into a list of requirement strings."""
    uv_output = """requests==2.28.0
numpy==1.24.0
pandas==2.0.0
"""
    mock_result = mock.Mock()
    mock_result.stdout = uv_output
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result) as mock_run,
    ):
        result = export_uv_requirements(tmp_path)
    assert result == ["requests==2.28.0", "numpy==1.24.0", "pandas==2.0.0"]
    mock_run.assert_called_once()
def test_export_uv_requirements_preserves_environment_markers(tmp_path):
    """Environment markers after ';' survive parsing for pip to evaluate later."""
    uv_output = """requests==2.28.0
pywin32==306 ; sys_platform == 'win32'
numpy==1.24.0
"""
    mock_result = mock.Mock()
    mock_result.stdout = uv_output
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result),
    ):
        result = export_uv_requirements(tmp_path)
    assert result is not None
    assert len(result) == 3
    assert "pywin32==306 ; sys_platform == 'win32'" in result
def test_export_uv_requirements_keeps_all_marker_variants(tmp_path):
    """Multiple marker-gated pins of the same package are all retained."""
    uv_output = """numpy==2.2.6 ; python_version < '3.11'
numpy==2.4.1 ; python_version >= '3.11'
"""
    mock_result = mock.Mock()
    mock_result.stdout = uv_output
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result),
    ):
        result = export_uv_requirements(tmp_path)
    assert result is not None
    numpy_entries = [r for r in result if r.startswith("numpy")]
    assert len(numpy_entries) == 2
def test_export_uv_requirements_returns_none_on_subprocess_error(tmp_path):
    """A failing `uv export` subprocess maps to None (fallback signal)."""
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch(
            "mlflow.utils.uv_utils.subprocess.run",
            side_effect=subprocess.CalledProcessError(1, "uv"),
        ),
    ):
        assert export_uv_requirements(tmp_path) is None
def test_export_uv_requirements_with_explicit_directory(tmp_path):
    """An explicit directory is forwarded to the subprocess as its cwd."""
    (tmp_path / _UV_LOCK_FILE).touch()
    uv_output = """requests==2.28.0
numpy==1.24.0
"""
    mock_result = mock.Mock()
    mock_result.stdout = uv_output
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=mock_result) as mock_run,
    ):
        result = export_uv_requirements(directory=tmp_path)
    assert result is not None
    assert "requests==2.28.0" in result
    assert "numpy==1.24.0" in result
    mock_run.assert_called_once()
    assert mock_run.call_args.kwargs["cwd"] == tmp_path
# --- copy_uv_project_files tests ---
def test_copy_uv_project_files_returns_false_when_not_uv_project(tmp_path):
    """Nothing is copied when the source lacks uv project files."""
    dest_dir = tmp_path / "dest"
    source_dir = tmp_path / "source"
    dest_dir.mkdir()
    source_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is False


def test_copy_uv_project_files_copies_files_when_uv_project(tmp_path):
    """Lock file and pyproject are copied verbatim from a uv project."""
    source_dir = tmp_path / "source"
    source_dir.mkdir()
    (source_dir / _UV_LOCK_FILE).write_text("lock content")
    (source_dir / _PYPROJECT_FILE).write_text("pyproject content")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is True
    assert (dest_dir / _UV_LOCK_FILE).read_text() == "lock content"
    assert (dest_dir / _PYPROJECT_FILE).read_text() == "pyproject content"
@pytest.mark.parametrize(
    ("has_python_version", "expected_exists"),
    [(True, True), (False, False)],
    ids=["with_python_version", "without_python_version"],
)
def test_copy_uv_project_files_python_version_handling(
    tmp_path, has_python_version, expected_exists
):
    """.python-version is copied only when the source project has one."""
    source_dir = tmp_path / "source"
    source_dir.mkdir()
    (source_dir / _UV_LOCK_FILE).write_text("lock content")
    (source_dir / _PYPROJECT_FILE).write_text("pyproject content")
    if has_python_version:
        (source_dir / ".python-version").write_text("3.11.5")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is True
    assert (dest_dir / ".python-version").exists() == expected_exists
    if expected_exists:
        assert (dest_dir / ".python-version").read_text() == "3.11.5"
def test_copy_uv_project_files_respects_mlflow_log_uv_files_env_false(tmp_path, monkeypatch):
    """Copying is suppressed entirely when MLFLOW_LOG_UV_FILES is false."""
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", "false")
    source_dir = tmp_path / "source"
    source_dir.mkdir()
    (source_dir / _UV_LOCK_FILE).write_text("lock content")
    (source_dir / _PYPROJECT_FILE).write_text("pyproject content")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is False
    assert not (dest_dir / _UV_LOCK_FILE).exists()
    assert not (dest_dir / _PYPROJECT_FILE).exists()


@pytest.mark.parametrize("env_value", ["0", "false", "FALSE", "False"])
def test_copy_uv_project_files_env_var_false_variants(tmp_path, monkeypatch, env_value):
    """Every falsy spelling of the env var disables copying."""
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", env_value)
    source_dir = tmp_path / "source"
    source_dir.mkdir()
    (source_dir / _UV_LOCK_FILE).write_text("lock content")
    (source_dir / _PYPROJECT_FILE).write_text("pyproject content")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is False


@pytest.mark.parametrize("env_value", ["true", "1", "TRUE", "True"])
def test_copy_uv_project_files_env_var_true_variants(tmp_path, monkeypatch, env_value):
    """Every truthy spelling of the env var enables copying."""
    monkeypatch.setenv("MLFLOW_LOG_UV_FILES", env_value)
    source_dir = tmp_path / "source"
    source_dir.mkdir()
    (source_dir / _UV_LOCK_FILE).write_text("lock content")
    (source_dir / _PYPROJECT_FILE).write_text("pyproject content")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir) is True
def test_copy_uv_project_files_with_monorepo_layout(tmp_path):
    """A nested sub-project in a monorepo is a valid copy source."""
    project_dir = tmp_path / "monorepo" / "subproject"
    project_dir.mkdir(parents=True)
    (project_dir / _UV_LOCK_FILE).write_text("lock content from monorepo")
    (project_dir / _PYPROJECT_FILE).write_text("pyproject from monorepo")
    (project_dir / ".python-version").write_text("3.12.0")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir=project_dir) is True
    assert (dest_dir / _UV_LOCK_FILE).read_text() == "lock content from monorepo"
    assert (dest_dir / ".python-version").read_text() == "3.12.0"


def test_copy_uv_project_files_with_nonexistent_source(tmp_path):
    """A missing source directory yields False, not an exception."""
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir=tmp_path / "nonexistent") is False


def test_copy_uv_project_files_with_missing_pyproject(tmp_path):
    """A lock file alone (no pyproject) is not a complete uv project."""
    project_dir = tmp_path / "incomplete_project"
    project_dir.mkdir()
    (project_dir / _UV_LOCK_FILE).write_text("lock content")
    dest_dir = tmp_path / "dest"
    dest_dir.mkdir()
    assert copy_uv_project_files(dest_dir, source_dir=project_dir) is False
# --- Integration tests for infer_pip_requirements uv path ---
def test_infer_pip_requirements_uses_uv_when_project_detected(tmp_path, monkeypatch):
    """With auto-detect on and uv files present, requirements come from uv export."""
    monkeypatch.chdir(tmp_path)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    (tmp_path / _UV_LOCK_FILE).touch()
    (tmp_path / _PYPROJECT_FILE).touch()
    fake_proc = mock.Mock(stdout="requests==2.28.0\nnumpy==1.24.0\n")
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=fake_proc),
    ):
        requirements = infer_pip_requirements("runs:/fake/model", "sklearn")
    assert "requests==2.28.0" in requirements
    assert "numpy==1.24.0" in requirements


def test_export_uv_requirements_returns_none_when_uv_binary_missing(tmp_path):
    """No uv binary on PATH means export is a no-op returning None."""
    with mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value=None):
        assert export_uv_requirements(tmp_path) is None


def test_detect_uv_project_not_detected_when_files_missing(tmp_path, monkeypatch):
    """An empty cwd is not recognized as a uv project."""
    monkeypatch.chdir(tmp_path)
    assert detect_uv_project() is None
# --- MLFLOW_UV_AUTO_DETECT Environment Variable Tests ---
def test_mlflow_uv_auto_detect_returns_true_by_default(monkeypatch):
    """Auto-detection defaults to enabled when the env var is unset."""
    monkeypatch.delenv("MLFLOW_UV_AUTO_DETECT", raising=False)
    assert MLFLOW_UV_AUTO_DETECT.get() is True


@pytest.mark.parametrize("env_value", ["false", "0", "FALSE", "False"])
def test_mlflow_uv_auto_detect_returns_false_when_disabled(monkeypatch, env_value):
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", env_value)
    assert MLFLOW_UV_AUTO_DETECT.get() is False


@pytest.mark.parametrize("env_value", ["true", "1", "TRUE", "True"])
def test_mlflow_uv_auto_detect_returns_true_when_enabled(monkeypatch, env_value):
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", env_value)
    assert MLFLOW_UV_AUTO_DETECT.get() is True
def test_infer_pip_requirements_skips_uv_when_auto_detect_disabled(tmp_path, monkeypatch):
    """Disabling MLFLOW_UV_AUTO_DETECT must bypass uv entirely and fall back to inference."""
    monkeypatch.chdir(tmp_path)
    # Use the shared file-name constants for consistency with the rest of this module
    # (other tests here use _UV_LOCK_FILE/_PYPROJECT_FILE rather than string literals).
    (tmp_path / _UV_LOCK_FILE).touch()
    (tmp_path / _PYPROJECT_FILE).touch()
    # Sanity check: the project *would* be detected if auto-detect were enabled.
    assert detect_uv_project() is not None
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "false")
    with (
        mock.patch("mlflow.utils.environment.detect_uv_project") as mock_detect,
        mock.patch("mlflow.utils.environment.export_uv_requirements") as mock_export,
        mock.patch(
            "mlflow.utils.environment._infer_requirements",
            return_value=["scikit-learn==1.0"],
        ),
    ):
        result = infer_pip_requirements(str(tmp_path), "sklearn")
    # The uv code path must never run; the inferred fallback is used instead.
    mock_detect.assert_not_called()
    mock_export.assert_not_called()
    assert "scikit-learn==1.0" in result
def test_infer_pip_requirements_uses_explicit_uv_project_dir(tmp_path, monkeypatch):
    """An explicit uv_project_dir outside the cwd is honored."""
    work_dir = tmp_path / "work"
    work_dir.mkdir()
    monkeypatch.chdir(work_dir)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "true")
    uv_project = tmp_path / "my_project"
    uv_project.mkdir()
    (uv_project / _UV_LOCK_FILE).touch()
    (uv_project / _PYPROJECT_FILE).touch()
    fake_proc = mock.Mock(stdout="requests==2.28.0\n")
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=fake_proc),
    ):
        requirements = infer_pip_requirements(str(tmp_path), "sklearn", uv_project_dir=uv_project)
    assert "requests==2.28.0" in requirements


def test_infer_pip_requirements_explicit_uv_project_dir_overrides_disabled_auto_detect(
    tmp_path, monkeypatch
):
    """An explicit uv_project_dir wins even when auto-detect is globally disabled."""
    monkeypatch.chdir(tmp_path)
    monkeypatch.setenv("MLFLOW_UV_AUTO_DETECT", "false")
    uv_project = tmp_path / "my_project"
    uv_project.mkdir()
    (uv_project / _UV_LOCK_FILE).touch()
    (uv_project / _PYPROJECT_FILE).touch()
    fake_proc = mock.Mock(stdout="numpy==1.24.0\n")
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=fake_proc),
    ):
        requirements = infer_pip_requirements(str(tmp_path), "sklearn", uv_project_dir=uv_project)
    assert "numpy==1.24.0" in requirements
def test_export_uv_requirements_strips_comment_lines(tmp_path):
    """'# via ...' annotation lines from `uv export` are filtered out."""
    exported = """requests==2.28.0
# via
# some-package
urllib3==1.26.0
# via requests
certifi==2023.7.22
"""
    fake_proc = mock.Mock(stdout=exported)
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=fake_proc),
    ):
        requirements = export_uv_requirements(tmp_path)
    assert requirements == ["requests==2.28.0", "urllib3==1.26.0", "certifi==2023.7.22"]


def test_export_uv_requirements_returns_empty_list_on_empty_output(tmp_path):
    """Empty export output parses to an empty list, not None."""
    fake_proc = mock.Mock(stdout="")
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run", return_value=fake_proc),
    ):
        assert export_uv_requirements(tmp_path) == []
# --- Private Index URL Extraction Tests ---
@pytest.mark.parametrize(
    ("uv_lock_content", "expected_urls"),
    [
        (
            """
version = 1
requires-python = ">=3.11"
[[package]]
name = "my-private-pkg"
version = "1.0.0"
source = { registry = "https://internal.company.com/simple" }
[[package]]
name = "numpy"
version = "1.24.0"
source = { registry = "https://pypi.org/simple" }
""",
            ["https://internal.company.com/simple"],
        ),
        (
            """
version = 1
[[package]]
name = "pkg1"
source = { registry = "https://private1.com/simple" }
[[package]]
name = "pkg2"
source = { registry = "https://private2.com/simple" }
[[package]]
name = "pkg3"
source = { registry = "https://private1.com/simple" }
""",
            ["https://private1.com/simple", "https://private2.com/simple"],
        ),
        (
            """
version = 1
[[package]]
name = "numpy"
source = { registry = "https://pypi.org/simple" }
""",
            [],
        ),
    ],
    ids=["single_private", "multiple_private_deduped", "no_private"],
)
def test_extract_index_urls_from_uv_lock(tmp_path, uv_lock_content, expected_urls):
    """Private registry URLs are extracted (deduplicated); pypi.org is excluded."""
    # Use the shared lock-file constant for consistency with the rest of this module.
    uv_lock_path = tmp_path / _UV_LOCK_FILE
    uv_lock_path.write_text(uv_lock_content)
    result = extract_index_urls_from_uv_lock(uv_lock_path)
    assert result == expected_urls


def test_extract_index_urls_from_uv_lock_file_not_exists(tmp_path):
    """A missing lock file yields an empty list, not an error."""
    result = extract_index_urls_from_uv_lock(tmp_path / "nonexistent.lock")
    assert result == []
# --- uv Sync Environment Setup Tests ---
@pytest.mark.parametrize(
    ("python_version", "project_name", "expected_name", "expected_python"),
    [
        ("3.11.5", "mlflow-model-env", "mlflow-model-env", "==3.11.5"),
        ("3.10.14", "my-custom-env", "my-custom-env", "==3.10.14"),
    ],
    ids=["default_name", "custom_name"],
)
def test_create_uv_sync_pyproject(
    tmp_path, python_version, project_name, expected_name, expected_python
):
    """The generated pyproject pins the exact python version and project name."""
    generated = create_uv_sync_pyproject(tmp_path, python_version, project_name=project_name)
    assert generated.exists()
    text = generated.read_text()
    assert f'name = "{expected_name}"' in text
    assert f'requires-python = "{expected_python}"' in text
def test_setup_uv_sync_environment(tmp_path):
    """A model with a lock file gets a full uv-sync environment staged."""
    model_path = tmp_path / "model"
    model_path.mkdir()
    # Use the module's file-name constants for consistency with the other tests here.
    (model_path / _UV_LOCK_FILE).write_text('version = 1\nrequires-python = ">=3.11"')
    (model_path / ".python-version").write_text("3.11.5")
    env_dir = tmp_path / "env"
    result = setup_uv_sync_environment(env_dir, model_path, "3.11.5")
    assert result is True
    assert (env_dir / _UV_LOCK_FILE).exists()
    assert (env_dir / _PYPROJECT_FILE).exists()
    assert (env_dir / ".python-version").exists()


def test_setup_uv_sync_environment_copies_existing_pyproject(tmp_path):
    """A model-provided pyproject is copied as-is rather than regenerated."""
    model_path = tmp_path / "model"
    model_path.mkdir()
    original_pyproject = '[project]\nname = "my-model"\nversion = "1.0.0"\n'
    (model_path / _UV_LOCK_FILE).write_text('version = 1\nrequires-python = ">=3.11"')
    (model_path / _PYPROJECT_FILE).write_text(original_pyproject)
    env_dir = tmp_path / "env"
    result = setup_uv_sync_environment(env_dir, model_path, "3.11.5")
    assert result is True
    # Copied from model, not generated (should have "my-model" not "mlflow-model-env")
    pyproject_content = (env_dir / _PYPROJECT_FILE).read_text()
    assert 'name = "my-model"' in pyproject_content
    assert "mlflow-model-env" not in pyproject_content


def test_setup_uv_sync_environment_no_uv_lock(tmp_path):
    """Without a lock file, setup fails and no env directory is created."""
    model_path = tmp_path / "model"
    model_path.mkdir()
    env_dir = tmp_path / "env"
    result = setup_uv_sync_environment(env_dir, model_path, "3.11")
    assert result is False
    assert not env_dir.exists()
def test_has_uv_lock_artifact(tmp_path):
    """Detection flips to True as soon as the lock file appears."""
    model_path = tmp_path / "model"
    model_path.mkdir()
    assert has_uv_lock_artifact(model_path) is False
    # Use the shared constant rather than the "uv.lock" literal, matching the other tests.
    (model_path / _UV_LOCK_FILE).write_text("version = 1")
    assert has_uv_lock_artifact(model_path) is True
def test_run_uv_sync_returns_false_when_uv_not_available(tmp_path):
    """No uv binary means sync reports failure immediately."""
    with mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value=None):
        assert run_uv_sync(tmp_path) is False


def test_run_uv_sync_builds_correct_command(tmp_path):
    """Flags map onto the expected `uv sync` argv."""
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch("mlflow.utils.uv_utils.subprocess.run") as mock_run,
    ):
        run_uv_sync(tmp_path, frozen=True, no_dev=True)
    mock_run.assert_called_once()
    argv = mock_run.call_args[0][0]
    assert argv[:2] == ["/usr/bin/uv", "sync"]
    assert "--frozen" in argv
    assert "--no-dev" in argv


def test_run_uv_sync_returns_false_on_failure(tmp_path):
    """A failing subprocess degrades to False rather than raising."""
    with (
        mock.patch("mlflow.utils.uv_utils._get_uv_binary", return_value="/usr/bin/uv"),
        mock.patch(
            "mlflow.utils.uv_utils.subprocess.run",
            side_effect=subprocess.CalledProcessError(1, "uv sync"),
        ),
    ):
        assert run_uv_sync(tmp_path) is False
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/utils/test_uv_utils.py",
"license": "Apache License 2.0",
"lines": 521,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/store/tracking/test_sqlalchemy_store_issues.py | import pytest
from mlflow.exceptions import MlflowException
def test_create_issue_required_fields_only(store):
    """Creating an issue with only the required fields leaves optionals None."""
    exp_id = store.create_experiment("test")
    issue = store.create_issue(
        experiment_id=exp_id,
        name="High latency",
        description="API calls are taking too long",
        status="draft",
    )
    assert issue.issue_id.startswith("iss-")
    assert issue.experiment_id == exp_id
    assert issue.name == "High latency"
    assert issue.description == "API calls are taking too long"
    assert issue.status == "draft"
    # Optional fields default to None when not supplied.
    for optional in (issue.confidence, issue.root_causes, issue.source_run_id, issue.created_by):
        assert optional is None
    assert issue.created_timestamp > 0
    assert issue.last_updated_timestamp == issue.created_timestamp
def test_create_issue_with_all_fields(store):
    """All optional fields round-trip through create_issue."""
    exp_id = store.create_experiment("test")
    run = store.create_run(
        experiment_id=exp_id,
        user_id="user",
        start_time=0,
        run_name="test_run",
        tags=[],
    )
    causes = ["Input prompts are too long", "Context window exceeded"]
    issue = store.create_issue(
        experiment_id=exp_id,
        name="Token limit exceeded",
        description="Model is hitting token limits frequently",
        status="accepted",
        confidence="high",
        root_causes=causes,
        source_run_id=run.info.run_id,
        created_by="user@example.com",
    )
    assert issue.issue_id.startswith("iss-")
    assert issue.experiment_id == exp_id
    assert issue.name == "Token limit exceeded"
    assert issue.description == "Model is hitting token limits frequently"
    assert issue.status == "accepted"
    assert issue.confidence == "high"
    assert issue.root_causes == causes
    assert issue.source_run_id == run.info.run_id
    assert issue.created_by == "user@example.com"
def test_create_issue_invalid_experiment(store):
    """An unknown experiment id is rejected up front."""
    with pytest.raises(MlflowException, match=r"No Experiment with id=999999 exists"):
        store.create_issue(
            experiment_id="999999",
            name="Test issue",
            description="This should fail",
            status="draft",
        )


def test_create_issue_invalid_run(store):
    """A dangling source_run_id trips the FK constraint."""
    exp_id = store.create_experiment("test")
    with pytest.raises(MlflowException, match=r"FOREIGN KEY constraint failed"):
        store.create_issue(
            experiment_id=exp_id,
            source_run_id="nonexistent-run-id",
            name="Test issue",
            description="This should fail",
            status="draft",
        )
def test_get_issue(store):
    """get_issue returns every field exactly as created."""
    exp_id = store.create_experiment("test")
    run = store.create_run(
        experiment_id=exp_id,
        user_id="user",
        start_time=0,
        run_name="test_run",
        tags=[],
    )
    causes = ["Insufficient training data", "Model drift"]
    created = store.create_issue(
        experiment_id=exp_id,
        name="Low accuracy",
        description="Model accuracy below threshold",
        status="draft",
        confidence="medium",
        root_causes=causes,
        source_run_id=run.info.run_id,
        created_by="alice@example.com",
    )
    fetched = store.get_issue(created.issue_id)
    # Verify all fields
    assert fetched.issue_id == created.issue_id
    assert fetched.experiment_id == exp_id
    assert fetched.name == "Low accuracy"
    assert fetched.description == "Model accuracy below threshold"
    assert fetched.status == "draft"
    assert fetched.confidence == "medium"
    assert fetched.root_causes == causes
    assert fetched.source_run_id == run.info.run_id
    assert fetched.created_by == "alice@example.com"
    assert fetched.created_timestamp is not None
    assert fetched.created_timestamp > 0


def test_get_issue_nonexistent(store):
    """Fetching an unknown issue id raises a not-found error."""
    with pytest.raises(MlflowException, match=r"Issue with ID 'nonexistent-id' not found"):
        store.get_issue("nonexistent-id")
def test_update_issue(store):
    """Updating every mutable field changes them and bumps the update timestamp."""
    exp_id = store.create_experiment("test")
    created = store.create_issue(
        experiment_id=exp_id,
        name="Original name",
        description="Original description",
        status="draft",
        root_causes=["Initial root cause"],
        confidence="low",
    )
    # Update all supported fields (status, name, description, confidence)
    updated = store.update_issue(
        issue_id=created.issue_id,
        status="accepted",
        name="Updated name",
        description="Updated description",
        confidence="high",
    )
    # Updated fields reflect the new values.
    assert updated.issue_id == created.issue_id
    assert updated.experiment_id == exp_id
    assert updated.status == "accepted"
    assert updated.name == "Updated name"
    assert updated.description == "Updated description"
    assert updated.confidence == "high"
    # Untouched fields survive the update.
    assert updated.root_causes == ["Initial root cause"]
    assert updated.source_run_id is None
    assert updated.created_by == created.created_by
    assert updated.created_timestamp == created.created_timestamp
    assert updated.last_updated_timestamp > created.last_updated_timestamp
    # The changes are persisted: re-fetch and check again.
    refetched = store.get_issue(created.issue_id)
    assert refetched.status == "accepted"
    assert refetched.name == "Updated name"
    assert refetched.description == "Updated description"
    assert refetched.confidence == "high"
    assert refetched.root_causes == ["Initial root cause"]
    assert refetched.last_updated_timestamp == updated.last_updated_timestamp
def test_update_issue_partial(store):
    """Updating only status leaves every other field untouched."""
    exp_id = store.create_experiment("test")
    created = store.create_issue(
        experiment_id=exp_id,
        name="Test issue",
        description="Test description",
        status="draft",
        root_causes=["Initial root cause"],
    )
    updated = store.update_issue(
        issue_id=created.issue_id,
        status="accepted",
    )
    assert updated.status == "accepted"
    assert updated.name == "Test issue"
    assert updated.description == "Test description"
    assert updated.root_causes == ["Initial root cause"]


def test_update_issue_nonexistent(store):
    """Updating an unknown issue id raises a not-found error."""
    with pytest.raises(MlflowException, match=r"Issue with ID 'nonexistent-id' not found"):
        store.update_issue(issue_id="nonexistent-id", status="accepted")
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/store/tracking/test_sqlalchemy_store_issues.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/gateway_budget.py | """Budget tracking and enforcement for the MLflow Gateway.
This module provides budget-related functions for recording costs, refreshing policies,
firing exceeded-budget webhooks, and creating on_complete callbacks for budget recording.
"""
import logging
from fastapi import HTTPException
import mlflow
from mlflow.entities.gateway_budget_policy import BudgetAction, BudgetTargetScope
from mlflow.entities.webhook import WebhookAction, WebhookEntity, WebhookEvent
from mlflow.gateway.budget_tracker import BudgetWindow, get_budget_tracker
from mlflow.gateway.tracing_utils import _get_model_span_info
from mlflow.server.handlers import _get_model_registry_store
from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore
from mlflow.tracing.constant import CostKey, SpanAttributeKey
from mlflow.tracing.utils import calculate_cost_by_model_and_token_usage
from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME
from mlflow.webhooks.delivery import deliver_webhook
from mlflow.webhooks.types import BudgetPolicyExceededPayload
_logger = logging.getLogger(__name__)
def calculate_existing_cost_for_new_windows(
    store: SqlAlchemyStore, new_windows: list[BudgetWindow]
) -> dict[str, float]:
    """Calculate existing spend for newly created budget windows from trace history.

    When a new BudgetWindow is created (server restart or new policy), its
    cumulative_spend starts at 0. This queries historical trace cost data so
    that budget tracking survives server restarts.

    Returns:
        Dict mapping budget_policy_id to historical spend amount.
    """
    backfill: dict[str, float] = {}
    for window in new_windows:
        policy = window.policy
        try:
            # Workspace-scoped policies only count spend within their own workspace;
            # global policies sum across all workspaces (workspace=None).
            scope_workspace = (
                policy.workspace
                if policy.target_scope == BudgetTargetScope.WORKSPACE
                else None
            )
            historical = store.sum_gateway_trace_cost(
                start_time_ms=int(window.window_start.timestamp() * 1000),
                end_time_ms=int(window.window_end.timestamp() * 1000),
                workspace=scope_workspace,
            )
            # Only record non-zero spend; zero entries add nothing to the backfill.
            if historical > 0:
                backfill[policy.budget_policy_id] = historical
        except Exception:
            # Best-effort: a failed lookup for one policy must not break the others.
            _logger.debug(
                "Failed to calculate existing cost for policy %s",
                policy.budget_policy_id,
                exc_info=True,
            )
    return backfill
def maybe_refresh_budget_policies(store: SqlAlchemyStore) -> None:
    """Refresh budget policies from the database if stale."""
    tracker = get_budget_tracker()
    if not tracker.needs_refresh():
        return
    try:
        created_windows = tracker.refresh_policies(store.list_budget_policies())
        # Seed brand-new windows with historical spend so restarts don't reset budgets.
        tracker.backfill_spend(calculate_existing_cost_for_new_windows(store, created_windows))
    except Exception:
        # Refresh is best-effort; stale policies are better than a failed request.
        _logger.debug("Failed to refresh budget policies", exc_info=True)
def _compute_cost_from_child_spans(trace_id: str) -> float:
    """Sum total cost across child spans.

    Prefers ``LLM_COST`` if already set on the span (computed at span.end()),
    otherwise falls back to calculating from MODEL + CHAT_USAGE via LiteLLM.
    """
    total_cost = 0.0
    for span_info in _get_model_span_info(trace_id):
        attrs = span_info.attributes
        precomputed = attrs.get(SpanAttributeKey.LLM_COST)
        if precomputed:
            total_cost += precomputed.get(CostKey.TOTAL_COST, 0.0)
            continue
        usage = attrs.get(SpanAttributeKey.CHAT_USAGE)
        if not usage:
            # Without token usage there is nothing to price.
            continue
        computed = calculate_cost_by_model_and_token_usage(
            attrs.get(SpanAttributeKey.MODEL),
            usage,
            attrs.get(SpanAttributeKey.MODEL_PROVIDER),
        )
        if computed:
            total_cost += computed.get(CostKey.TOTAL_COST, 0.0)
    return total_cost
def fire_budget_exceeded_webhooks(
    newly_exceeded: list[BudgetWindow],
    workspace: str | None,
    registry_store,
) -> None:
    """Fire budget_policy.exceeded webhooks for newly-exceeded budget windows."""
    event = WebhookEvent(WebhookEntity.BUDGET_POLICY, WebhookAction.EXCEEDED)
    for crossed in newly_exceeded:
        policy = crossed.policy
        # Only ALERT policies notify; REJECT policies block requests instead of alerting.
        if policy.budget_action != BudgetAction.ALERT:
            continue
        deliver_webhook(
            event=event,
            payload=BudgetPolicyExceededPayload(
                budget_policy_id=policy.budget_policy_id,
                budget_unit=policy.budget_unit.value,
                budget_amount=policy.budget_amount,
                current_spend=crossed.cumulative_spend,
                duration_unit=policy.duration_unit.value,
                duration_value=policy.duration_value,
                target_scope=policy.target_scope.value,
                workspace=workspace or (policy.workspace or DEFAULT_WORKSPACE_NAME),
                window_start=int(crossed.window_start.timestamp() * 1000),
            ),
            store=registry_store,
        )
def check_budget_limit(store: SqlAlchemyStore, workspace: str | None = None) -> None:
    """Check if any REJECT-capable budget policy is exceeded.

    Raises HTTPException(429) if the budget limit is exceeded.
    """
    maybe_refresh_budget_policies(store)
    should_reject, policy = get_budget_tracker().should_reject_request(workspace=workspace)
    if not should_reject:
        return
    raise HTTPException(
        status_code=429,
        detail=(
            f"Budget limit exceeded for policy '{policy.budget_policy_id}'. "
            f"Limit: ${policy.budget_amount:.2f} USD per "
            f"{policy.duration_value} {policy.duration_unit.value.lower()}. "
            "Request rejected."
        ),
    )
def make_budget_on_complete(
    store: SqlAlchemyStore,
    workspace: str | None,
):
    """Create an on_complete callback that records budget cost from child span attributes."""
    try:
        registry_store = _get_model_registry_store()
    except Exception:
        # Webhook delivery needs the registry store; budget accounting works without it.
        registry_store = None

    def on_complete():
        try:
            active_span = mlflow.get_current_active_span()
            if not active_span:
                return
            cost = _compute_cost_from_child_spans(active_span.trace_id)
            if cost <= 0:
                return
            maybe_refresh_budget_policies(store)
            crossed = get_budget_tracker().record_cost(cost, workspace=workspace)
            if crossed and registry_store:
                fire_budget_exceeded_webhooks(crossed, workspace, registry_store)
        except Exception:
            # Budget recording must never fail the gateway request itself.
            _logger.debug("Failed to record budget cost", exc_info=True)

    return on_complete
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/gateway_budget.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/gateway/test_gateway_budget.py | from unittest.mock import MagicMock, patch
import fastapi
import pytest
import mlflow
import mlflow.gateway.budget_tracker as _bt_module
from mlflow.entities import SpanType
from mlflow.entities.gateway_budget_policy import (
BudgetAction,
BudgetDurationUnit,
BudgetTargetScope,
BudgetUnit,
GatewayBudgetPolicy,
)
from mlflow.gateway.budget_tracker import get_budget_tracker
from mlflow.gateway.tracing_utils import maybe_traced_gateway_call
from mlflow.server.gateway_budget import (
calculate_existing_cost_for_new_windows,
check_budget_limit,
fire_budget_exceeded_webhooks,
make_budget_on_complete,
maybe_refresh_budget_policies,
)
from mlflow.store.tracking.gateway.entities import GatewayEndpointConfig
from mlflow.tracing.constant import CostKey, SpanAttributeKey
from mlflow.tracking.fluent import _get_experiment_id
_DELIVER_FUNC = "mlflow.server.gateway_budget.deliver_webhook"
@pytest.fixture(autouse=True)
def _reset_budget_tracker():
    """Give every test a fresh module-level tracker, before and after."""
    _bt_module._budget_tracker = None
    yield
    _bt_module._budget_tracker = None
def _make_policy(
    budget_policy_id="bp-test",
    budget_amount=100.0,
    budget_action=BudgetAction.ALERT,
):
    """Build a daily, global, USD policy; id/amount/action are overridable."""
    return GatewayBudgetPolicy(
        budget_policy_id=budget_policy_id,
        budget_unit=BudgetUnit.USD,
        budget_amount=budget_amount,
        duration_unit=BudgetDurationUnit.DAYS,
        duration_value=1,
        target_scope=BudgetTargetScope.GLOBAL,
        budget_action=budget_action,
        created_at=0,
        last_updated_at=0,
    )


def _make_endpoint_config():
    """Minimal endpoint config bound to the current default experiment."""
    return GatewayEndpointConfig(
        endpoint_id="ep-test",
        endpoint_name="test-endpoint",
        experiment_id=_get_experiment_id(),
        models=[],
    )


def _make_store(policies=None):
    """Mock tracking store: given policies, zero historical trace cost."""
    fake_store = MagicMock()
    fake_store.list_budget_policies.return_value = policies or []
    fake_store.sum_gateway_trace_cost.return_value = 0.0
    return fake_store
async def maybe_traced_call(provider_func, endpoint_config, on_complete):
    """Wrap the provider with gateway tracing and invoke it with a canned payload."""
    wrapped = maybe_traced_gateway_call(provider_func, endpoint_config, on_complete=on_complete)
    return await wrapped({"messages": [{"role": "user", "content": "test"}]})


async def _provider_with_cost(payload):
    """Simulates a provider that sets LLM_COST on its child span."""
    with mlflow.start_span("provider/openai/gpt-4o", span_type=SpanType.LLM) as llm_span:
        llm_span.set_attributes(
            {
                SpanAttributeKey.MODEL: "gpt-4o",
                SpanAttributeKey.MODEL_PROVIDER: "openai",
                SpanAttributeKey.LLM_COST: {
                    CostKey.INPUT_COST: 0.025,
                    CostKey.OUTPUT_COST: 0.050,
                    CostKey.TOTAL_COST: 0.075,
                },
            }
        )
        return {"choices": [{"message": {"content": "Hello"}}]}


async def _provider_no_cost(payload):
    """Simulates a provider that creates a span without cost attributes."""
    with mlflow.start_span("provider/custom/no-cost", span_type=SpanType.LLM) as llm_span:
        llm_span.set_attribute(SpanAttributeKey.MODEL, "custom-model")
        return {"choices": [{"message": {"content": "Hello"}}]}
# --- make_budget_on_complete integration tests ---
@pytest.mark.asyncio
async def test_budget_on_complete_records_cost():
    """Span-level LLM cost is accumulated into the policy window."""
    store = _make_store(policies=[_make_policy(budget_amount=100.0)])
    on_complete = make_budget_on_complete(store, workspace=None)
    await maybe_traced_call(_provider_with_cost, _make_endpoint_config(), on_complete)
    window = get_budget_tracker()._get_window_info("bp-test")
    assert window.cumulative_spend == pytest.approx(0.075)


@pytest.mark.asyncio
async def test_budget_on_complete_no_span():
    """Calling the callback outside any trace context is a silent no-op."""
    on_complete = make_budget_on_complete(_make_store(), workspace=None)
    on_complete()  # called outside trace context — should not raise


@pytest.mark.asyncio
async def test_budget_on_complete_no_cost():
    """A costless provider span records nothing against the budget."""
    store = _make_store(policies=[_make_policy(budget_amount=100.0)])
    on_complete = make_budget_on_complete(store, workspace=None)
    await maybe_traced_call(_provider_no_cost, _make_endpoint_config(), on_complete)
    # No cost was computed, so record_cost was never called.
    # The tracker may or may not have refreshed policies (early return before refresh).
    window = get_budget_tracker()._get_window_info("bp-test")
    if window is not None:
        assert window.cumulative_spend == 0.0
@pytest.mark.asyncio
async def test_budget_on_complete_triggers_webhook():
    """Exceeding an ALERT policy delivers one exceeded-webhook with spend details."""
    with patch(_DELIVER_FUNC) as mock_deliver:
        store = _make_store(
            policies=[_make_policy(budget_amount=0.05, budget_action=BudgetAction.ALERT)]
        )
        on_complete = make_budget_on_complete(store, workspace=None)
        await maybe_traced_call(_provider_with_cost, _make_endpoint_config(), on_complete)
        mock_deliver.assert_called_once()
        payload = mock_deliver.call_args.kwargs["payload"]
        assert payload["budget_policy_id"] == "bp-test"
        assert payload["budget_amount"] == 0.05
        assert payload["current_spend"] == pytest.approx(0.075)


@pytest.mark.asyncio
async def test_budget_on_complete_no_webhook_for_reject():
    """REJECT policies mark the window exceeded but never fire alert webhooks."""
    with patch(_DELIVER_FUNC) as mock_deliver:
        store = _make_store(
            policies=[_make_policy(budget_amount=0.05, budget_action=BudgetAction.REJECT)]
        )
        on_complete = make_budget_on_complete(store, workspace=None)
        await maybe_traced_call(_provider_with_cost, _make_endpoint_config(), on_complete)
        mock_deliver.assert_not_called()
        assert get_budget_tracker()._get_window_info("bp-test").exceeded is True
# --- multi-invocation integration test ---
@pytest.mark.asyncio
async def test_budget_accumulates_over_multiple_invocations():
    """Spend accumulates across calls; the webhook fires exactly once at crossover."""
    with patch(_DELIVER_FUNC) as mock_deliver:
        store = _make_store(
            policies=[_make_policy(budget_amount=0.20, budget_action=BudgetAction.ALERT)]
        )
        endpoint_config = _make_endpoint_config()

        # Calls 1 and 2: 0.075 then 0.15 cumulative — still under the $0.20 budget.
        for _ in range(2):
            on_complete = make_budget_on_complete(store, workspace=None)
            await maybe_traced_call(_provider_with_cost, endpoint_config, on_complete)
            mock_deliver.assert_not_called()

        # Call 3: 0.225 cumulative exceeds $0.20 → webhook fires.
        on_complete = make_budget_on_complete(store, workspace=None)
        await maybe_traced_call(_provider_with_cost, endpoint_config, on_complete)
        mock_deliver.assert_called_once()
        tracker = get_budget_tracker()
        window = tracker._get_window_info("bp-test")
        assert window.cumulative_spend == pytest.approx(0.225)
        assert window.exceeded is True

        # Call 4: already exceeded — no second webhook, spend keeps growing.
        on_complete = make_budget_on_complete(store, workspace=None)
        await maybe_traced_call(_provider_with_cost, endpoint_config, on_complete)
        mock_deliver.assert_called_once()  # still just the one call
        assert tracker._get_window_info("bp-test").cumulative_spend == pytest.approx(0.30)
# --- fire_budget_exceeded_webhooks tests ---
def test_fire_budget_exceeded_webhooks_alert():
with patch(_DELIVER_FUNC) as mock_deliver:
tracker = get_budget_tracker()
policy = _make_policy(budget_amount=50.0, budget_action=BudgetAction.ALERT)
tracker.refresh_policies([policy])
crossed = tracker.record_cost(60.0)
assert len(crossed) == 1
fire_budget_exceeded_webhooks(crossed, workspace=None, registry_store=MagicMock())
mock_deliver.assert_called_once()
payload = mock_deliver.call_args.kwargs["payload"]
assert payload["budget_policy_id"] == "bp-test"
assert payload["budget_amount"] == 50.0
assert payload["current_spend"] == 60.0
def test_fire_budget_exceeded_webhooks_reject_skipped():
with patch(_DELIVER_FUNC) as mock_deliver:
tracker = get_budget_tracker()
policy = _make_policy(budget_amount=50.0, budget_action=BudgetAction.REJECT)
tracker.refresh_policies([policy])
crossed = tracker.record_cost(60.0)
assert len(crossed) == 1
fire_budget_exceeded_webhooks(crossed, workspace=None, registry_store=MagicMock())
mock_deliver.assert_not_called()
def test_fire_budget_exceeded_webhooks_with_workspace():
with patch(_DELIVER_FUNC) as mock_deliver:
tracker = get_budget_tracker()
policy = _make_policy(budget_amount=50.0, budget_action=BudgetAction.ALERT)
tracker.refresh_policies([policy])
crossed = tracker.record_cost(60.0)
fire_budget_exceeded_webhooks(crossed, workspace="my-ws", registry_store=MagicMock())
payload = mock_deliver.call_args.kwargs["payload"]
assert payload["workspace"] == "my-ws"
# --- maybe_refresh_budget_policies tests ---
def test_maybe_refresh_budget_policies():
store = MagicMock()
store.list_budget_policies.return_value = [_make_policy()]
store.sum_gateway_trace_cost.return_value = 0.0
maybe_refresh_budget_policies(store)
store.list_budget_policies.assert_called_once()
tracker = get_budget_tracker()
assert tracker._get_window_info("bp-test") is not None
def test_maybe_refresh_skips_when_not_needed():
tracker = get_budget_tracker()
tracker.refresh_policies([_make_policy()])
store = MagicMock()
maybe_refresh_budget_policies(store)
store.list_budget_policies.assert_not_called()
# --- calculate_existing_cost_for_new_windows tests ---
def test_calculate_existing_cost_on_new_windows():
tracker = get_budget_tracker()
new_windows = tracker.refresh_policies([_make_policy(budget_amount=100.0)])
store = MagicMock()
store.sum_gateway_trace_cost.return_value = 42.0
existing_spend = calculate_existing_cost_for_new_windows(store, new_windows)
tracker.backfill_spend(existing_spend)
store.sum_gateway_trace_cost.assert_called_once()
assert tracker._get_window_info("bp-test").cumulative_spend == 42.0
def test_calculate_existing_cost_skipped_when_no_new_windows():
store = MagicMock()
result = calculate_existing_cost_for_new_windows(store, [])
assert result == {}
store.sum_gateway_trace_cost.assert_not_called()
def test_calculate_existing_cost_handles_store_error():
tracker = get_budget_tracker()
new_windows = tracker.refresh_policies([_make_policy(budget_amount=100.0)])
store = MagicMock()
store.sum_gateway_trace_cost.side_effect = Exception("DB error")
existing_spend = calculate_existing_cost_for_new_windows(store, new_windows)
tracker.backfill_spend(existing_spend)
assert tracker._get_window_info("bp-test").cumulative_spend == 0.0
def test_calculate_existing_cost_zero_spend_excluded():
tracker = get_budget_tracker()
new_windows = tracker.refresh_policies([_make_policy(budget_amount=100.0)])
store = MagicMock()
store.sum_gateway_trace_cost.return_value = 0.0
existing_spend = calculate_existing_cost_for_new_windows(store, new_windows)
assert existing_spend == {}
tracker.backfill_spend(existing_spend)
assert tracker._get_window_info("bp-test").cumulative_spend == 0.0
def test_refresh_triggers_backfill():
store = MagicMock()
store.list_budget_policies.return_value = [_make_policy(budget_amount=100.0)]
store.sum_gateway_trace_cost.return_value = 25.0
maybe_refresh_budget_policies(store)
store.sum_gateway_trace_cost.assert_called_once()
tracker = get_budget_tracker()
assert tracker._get_window_info("bp-test").cumulative_spend == 25.0
# --- check_budget_limit tests ---
def test_check_budget_limit_no_policies():
store = _make_store(policies=[])
check_budget_limit(store)
def test_check_budget_limit_not_exceeded():
policy = _make_policy(budget_amount=100.0, budget_action=BudgetAction.REJECT)
store = _make_store(policies=[policy])
tracker = get_budget_tracker()
tracker.refresh_policies([policy])
tracker.record_cost(50.0)
check_budget_limit(store)
def test_check_budget_limit_exceeded_rejects():
policy = _make_policy(budget_amount=100.0, budget_action=BudgetAction.REJECT)
store = _make_store(policies=[policy])
tracker = get_budget_tracker()
tracker.refresh_policies([policy])
tracker.record_cost(150.0)
with pytest.raises(fastapi.HTTPException, match="Request rejected"):
check_budget_limit(store)
def test_check_budget_limit_alert_does_not_reject():
policy = _make_policy(budget_amount=100.0, budget_action=BudgetAction.ALERT)
store = _make_store(policies=[policy])
tracker = get_budget_tracker()
tracker.refresh_policies([policy])
tracker.record_cost(150.0)
check_budget_limit(store)
def test_check_budget_limit_error_message_format():
policy = _make_policy(
budget_policy_id="bp-monthly",
budget_amount=500.0,
budget_action=BudgetAction.REJECT,
)
store = _make_store(policies=[policy])
tracker = get_budget_tracker()
tracker.refresh_policies([policy])
tracker.record_cost(600.0)
with pytest.raises(fastapi.HTTPException, match="Request rejected") as exc_info:
check_budget_limit(store)
detail = exc_info.value.detail
assert "bp-monthly" in detail
assert "$500.00" in detail
assert "1 days" in detail
assert "Request rejected" in detail
def test_check_budget_limit_with_workspace():
policy = GatewayBudgetPolicy(
budget_policy_id="bp-ws",
budget_unit=BudgetUnit.USD,
budget_amount=50.0,
duration_unit=BudgetDurationUnit.DAYS,
duration_value=1,
target_scope=BudgetTargetScope.WORKSPACE,
budget_action=BudgetAction.REJECT,
created_at=0,
last_updated_at=0,
workspace="ws1",
)
store = _make_store(policies=[policy])
tracker = get_budget_tracker()
tracker.refresh_policies([policy])
tracker.record_cost(100.0, workspace="ws1")
with pytest.raises(fastapi.HTTPException, match="Request rejected"):
check_budget_limit(store, workspace="ws1")
check_budget_limit(store, workspace="ws2")
def test_check_budget_limit_multiple_policies():
alert_policy = _make_policy(
budget_policy_id="bp-alert",
budget_amount=50.0,
budget_action=BudgetAction.ALERT,
)
reject_policy = _make_policy(
budget_policy_id="bp-reject",
budget_amount=100.0,
budget_action=BudgetAction.REJECT,
)
store = _make_store(policies=[alert_policy, reject_policy])
tracker = get_budget_tracker()
tracker.refresh_policies([alert_policy, reject_policy])
# 75 exceeds alert (50) but not reject (100) → no rejection
tracker.record_cost(75.0)
check_budget_limit(store)
# Push to 105 → exceeds reject policy → should raise
tracker.record_cost(30.0)
with pytest.raises(fastapi.HTTPException, match="Request rejected"):
check_budget_limit(store)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/gateway/test_gateway_budget.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/gateway/budget_tracker/in_memory.py | """In-memory budget tracker implementation."""
from __future__ import annotations
import threading
from dataclasses import dataclass, field
from datetime import datetime, timezone
from mlflow.entities.gateway_budget_policy import BudgetAction, GatewayBudgetPolicy
from mlflow.gateway.budget_tracker import (
BudgetTracker,
BudgetWindow,
_compute_window_end,
_compute_window_start,
_policy_applies,
)
@dataclass
class InMemoryBudgetTracker(BudgetTracker):
"""Thread-safe in-memory budget tracker.
Tracks cumulative cost per budget policy within fixed time windows.
Policies are periodically refreshed from the database.
Cost accumulation resets on server restart.
"""
_windows: dict[str, BudgetWindow] = field(default_factory=dict)
_lock: threading.Lock = field(default_factory=threading.Lock)
def refresh_policies(self, policies: list[GatewayBudgetPolicy]) -> list[BudgetWindow]:
"""Load or refresh policies from the database.
Preserves accumulated cost for unchanged windows. Removes windows
for policies that no longer exist.
Returns:
List of newly created windows (cumulative_spend=0) that may need
backfilling from historical trace data.
"""
now = datetime.now(timezone.utc)
new_windows: dict[str, BudgetWindow] = {}
fresh_windows: list[BudgetWindow] = []
with self._lock:
for policy in policies:
pid = policy.budget_policy_id
window_start = _compute_window_start(
policy.duration_unit, policy.duration_value, now
)
window_end = _compute_window_end(
policy.duration_unit, policy.duration_value, window_start
)
existing = self._windows.get(pid)
if existing and existing.window_start == window_start:
existing.policy = policy
existing.window_end = window_end
new_windows[pid] = existing
else:
window = BudgetWindow(
policy=policy,
window_start=window_start,
window_end=window_end,
)
new_windows[pid] = window
fresh_windows.append(window)
self._windows = new_windows
self.mark_refreshed()
return fresh_windows
def record_cost(
self,
cost_usd: float,
workspace: str | None = None,
) -> list[BudgetWindow]:
"""Record a cost against all applicable policies.
Args:
cost_usd: The cost in USD to record.
workspace: The workspace the request was made from.
Returns:
List of windows that were newly exceeded (limit exceeded for the first
time in this window). Used to trigger webhook alerts.
"""
newly_exceeded: list[BudgetWindow] = []
now = datetime.now(timezone.utc)
with self._lock:
for window in self._windows.values():
if now >= window.window_end:
window.window_start = _compute_window_start(
window.policy.duration_unit,
window.policy.duration_value,
now,
)
window.window_end = _compute_window_end(
window.policy.duration_unit,
window.policy.duration_value,
window.window_start,
)
window.cumulative_spend = 0.0
window.exceeded = False
if not _policy_applies(window.policy, workspace):
continue
window.cumulative_spend += cost_usd
if not window.exceeded and window.cumulative_spend >= window.policy.budget_amount:
window.exceeded = True
newly_exceeded.append(window)
return newly_exceeded
def should_reject_request(
self,
workspace: str | None = None,
) -> tuple[bool, GatewayBudgetPolicy | None]:
"""Check if any REJECT-capable policy is exceeded.
Args:
workspace: The workspace to check against.
Returns:
Tuple of (exceeded, policy). If exceeded is True, policy is the
first exceeded policy found.
"""
now = datetime.now(timezone.utc)
with self._lock:
for window in self._windows.values():
if now >= window.window_end:
continue
if not _policy_applies(window.policy, workspace):
continue
if window.policy.budget_action != BudgetAction.REJECT:
continue
if window.cumulative_spend >= window.policy.budget_amount:
return True, window.policy
return False, None
def backfill_spend(self, spend_by_policy: dict[str, float]) -> None:
"""Set cumulative spend on windows from historical data."""
with self._lock:
for budget_policy_id, spend in spend_by_policy.items():
window = self._windows.get(budget_policy_id)
if window is None:
continue
window.cumulative_spend = spend
window.exceeded = spend >= window.policy.budget_amount
def _get_window_info(self, budget_policy_id: str) -> BudgetWindow | None:
"""Get the current window info for a policy (for payload construction)."""
with self._lock:
return self._windows.get(budget_policy_id)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/gateway/budget_tracker/in_memory.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/gateway/test_budget_tracker.py | from datetime import datetime, timedelta, timezone
from unittest.mock import patch
import pytest
from mlflow.entities.gateway_budget_policy import (
BudgetAction,
BudgetDurationUnit,
BudgetTargetScope,
BudgetUnit,
GatewayBudgetPolicy,
)
from mlflow.gateway.budget_tracker import (
BudgetTracker,
_compute_window_end,
_compute_window_start,
_policy_applies,
)
from mlflow.gateway.budget_tracker.in_memory import InMemoryBudgetTracker
def _make_policy(
budget_policy_id="bp-test",
budget_amount=100.0,
duration_unit=BudgetDurationUnit.DAYS,
duration_value=1,
target_scope=BudgetTargetScope.GLOBAL,
budget_action=BudgetAction.ALERT,
workspace=None,
):
return GatewayBudgetPolicy(
budget_policy_id=budget_policy_id,
budget_unit=BudgetUnit.USD,
budget_amount=budget_amount,
duration_unit=duration_unit,
duration_value=duration_value,
target_scope=target_scope,
budget_action=budget_action,
created_at=0,
last_updated_at=0,
workspace=workspace,
)
# --- _compute_window_start tests ---
def test_compute_window_start_minutes():
now = datetime(2025, 6, 15, 10, 37, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.MINUTES, 15, now)
assert start == datetime(2025, 6, 15, 10, 30, 0, tzinfo=timezone.utc)
def test_compute_window_start_hours():
now = datetime(2025, 6, 15, 10, 30, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.HOURS, 2, now)
assert start == datetime(2025, 6, 15, 10, 0, 0, tzinfo=timezone.utc)
def test_compute_window_start_days():
now = datetime(2025, 6, 15, 10, 0, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.DAYS, 7, now)
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
days_since_epoch = (now - epoch).days
window_index = days_since_epoch // 7
expected = epoch + timedelta(days=window_index * 7)
assert start == expected
def test_compute_window_start_weeks():
# June 15, 2025 is a Sunday — window should start on that Sunday
now = datetime(2025, 6, 15, 10, 0, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.WEEKS, 1, now)
assert start == datetime(2025, 6, 15, 0, 0, 0, tzinfo=timezone.utc)
assert start.weekday() == 6 # Sunday
# Wednesday mid-week — window should start on preceding Sunday
now = datetime(2025, 6, 18, 14, 30, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.WEEKS, 1, now)
assert start == datetime(2025, 6, 15, 0, 0, 0, tzinfo=timezone.utc)
assert start.weekday() == 6 # Sunday
# Multi-week (2-week) windows also start on Sundays
now = datetime(2025, 6, 20, 0, 0, 0, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.WEEKS, 2, now)
assert start.weekday() == 6 # Sunday
def test_compute_window_start_months():
now = datetime(2025, 8, 15, tzinfo=timezone.utc)
start = _compute_window_start(BudgetDurationUnit.MONTHS, 3, now)
# Total months from epoch: (2025-1970)*12 + (8-1) = 660 + 7 = 667
# Window index: 667 // 3 = 222, window_start_months = 666
# start_year = 1970 + 666//12 = 1970 + 55 = 2025, start_month = (666%12)+1 = 7
assert start == datetime(2025, 7, 1, tzinfo=timezone.utc)
# --- _compute_window_end tests ---
def test_compute_window_end_minutes():
start = datetime(2025, 6, 15, 10, 30, 0, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.MINUTES, 15, start)
assert end == datetime(2025, 6, 15, 10, 45, 0, tzinfo=timezone.utc)
def test_compute_window_end_hours():
start = datetime(2025, 6, 15, 10, 0, 0, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.HOURS, 2, start)
assert end == datetime(2025, 6, 15, 12, 0, 0, tzinfo=timezone.utc)
def test_compute_window_end_days():
start = datetime(2025, 6, 15, 0, 0, 0, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.DAYS, 7, start)
assert end == datetime(2025, 6, 22, 0, 0, 0, tzinfo=timezone.utc)
def test_compute_window_end_weeks():
start = datetime(2025, 6, 12, 0, 0, 0, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.WEEKS, 2, start)
assert end == datetime(2025, 6, 26, 0, 0, 0, tzinfo=timezone.utc)
def test_compute_window_end_months():
start = datetime(2025, 7, 1, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.MONTHS, 3, start)
assert end == datetime(2025, 10, 1, tzinfo=timezone.utc)
def test_compute_window_end_months_crosses_year():
start = datetime(2025, 11, 1, tzinfo=timezone.utc)
end = _compute_window_end(BudgetDurationUnit.MONTHS, 3, start)
assert end == datetime(2026, 2, 1, tzinfo=timezone.utc)
# --- _policy_applies tests ---
def test_policy_applies_global():
policy = _make_policy(target_scope=BudgetTargetScope.GLOBAL)
assert _policy_applies(policy, None) is True
assert _policy_applies(policy, "ws1") is True
def test_policy_applies_workspace_match():
policy = _make_policy(target_scope=BudgetTargetScope.WORKSPACE, workspace="ws1")
assert _policy_applies(policy, "ws1") is True
def test_policy_applies_workspace_no_match():
policy = _make_policy(target_scope=BudgetTargetScope.WORKSPACE, workspace="ws1")
assert _policy_applies(policy, "ws2") is False
def test_policy_applies_workspace_none():
policy = _make_policy(target_scope=BudgetTargetScope.WORKSPACE, workspace="ws1")
assert _policy_applies(policy, None) is False
def test_policy_applies_workspace_none_matches_default():
policy = _make_policy(target_scope=BudgetTargetScope.WORKSPACE)
# policy.workspace resolves to "default" via __post_init__
assert _policy_applies(policy, None) is True
# --- InMemoryBudgetTracker tests ---
def test_in_memory_tracker_is_budget_tracker():
tracker = InMemoryBudgetTracker()
assert isinstance(tracker, BudgetTracker)
def test_record_cost_below_limit():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
newly_exceeded = tracker.record_cost(50.0)
assert newly_exceeded == []
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 50.0
assert window.exceeded is False
def test_record_cost_exceeds_threshold():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
newly_exceeded = tracker.record_cost(150.0)
assert len(newly_exceeded) == 1
assert newly_exceeded[0].policy.budget_policy_id == "bp-test"
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 150.0
assert window.exceeded is True
def test_record_cost_exceeds_only_once():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
exceeded1 = tracker.record_cost(150.0)
assert len(exceeded1) == 1
exceeded2 = tracker.record_cost(50.0)
assert exceeded2 == []
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 200.0
def test_record_cost_incremental_exceeding():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
assert tracker.record_cost(60.0) == []
exceeded = tracker.record_cost(50.0)
assert len(exceeded) == 1
assert tracker._get_window_info("bp-test").cumulative_spend == 110.0
def test_should_reject_request_reject():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0, budget_action=BudgetAction.REJECT)])
tracker.record_cost(150.0)
exceeded, policy = tracker.should_reject_request()
assert exceeded is True
assert policy.budget_policy_id == "bp-test"
def test_should_reject_request_alert_only():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0, budget_action=BudgetAction.ALERT)])
tracker.record_cost(150.0)
exceeded, policy = tracker.should_reject_request()
assert exceeded is False
assert policy is None
def test_should_reject_request_not_yet():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0, budget_action=BudgetAction.REJECT)])
tracker.record_cost(50.0)
exceeded, policy = tracker.should_reject_request()
assert exceeded is False
assert policy is None
def test_window_resets_on_expiry():
tracker = InMemoryBudgetTracker()
policy = _make_policy(
budget_amount=100.0,
duration_unit=BudgetDurationUnit.MINUTES,
duration_value=5,
)
tracker.refresh_policies([policy])
tracker.record_cost(80.0)
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 80.0
# Simulate time passing beyond window end
with patch(
"mlflow.gateway.budget_tracker.in_memory.datetime",
) as mock_dt:
mock_dt.now.return_value = window.window_end + timedelta(seconds=1)
mock_dt.side_effect = lambda *args, **kw: datetime(*args, **kw)
newly_exceeded = tracker.record_cost(10.0)
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 10.0
assert window.exceeded is False
assert newly_exceeded == []
def test_refresh_policies_preserves_spend_in_same_window():
tracker = InMemoryBudgetTracker()
policy = _make_policy(budget_amount=100.0)
tracker.refresh_policies([policy])
tracker.record_cost(60.0)
# Reload same policy — spend should be preserved
tracker.refresh_policies([policy])
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 60.0
def test_refresh_policies_removes_deleted_policy():
tracker = InMemoryBudgetTracker()
policy1 = _make_policy(budget_policy_id="bp-1", budget_amount=100.0)
policy2 = _make_policy(budget_policy_id="bp-2", budget_amount=200.0)
tracker.refresh_policies([policy1, policy2])
tracker.record_cost(50.0)
# Reload with only policy1 — policy2 window should be gone
tracker.refresh_policies([policy1])
assert tracker._get_window_info("bp-1") is not None
assert tracker._get_window_info("bp-2") is None
def test_multiple_policies_independent():
tracker = InMemoryBudgetTracker()
policy_alert = _make_policy(
budget_policy_id="bp-alert",
budget_amount=50.0,
budget_action=BudgetAction.ALERT,
)
policy_reject = _make_policy(
budget_policy_id="bp-reject",
budget_amount=100.0,
budget_action=BudgetAction.REJECT,
)
tracker.refresh_policies([policy_alert, policy_reject])
exceeded = tracker.record_cost(75.0)
# Only the alert policy should be exceeded (50 < 75)
assert len(exceeded) == 1
assert exceeded[0].policy.budget_policy_id == "bp-alert"
# Reject policy should be at 75, not exceeded yet
exceeded, _ = tracker.should_reject_request()
assert exceeded is False
# Push reject over threshold
tracker.record_cost(30.0)
exceeded, policy = tracker.should_reject_request()
assert exceeded is True
assert policy.budget_policy_id == "bp-reject"
def test_workspace_scoped_cost_recording():
tracker = InMemoryBudgetTracker()
policy = _make_policy(
target_scope=BudgetTargetScope.WORKSPACE,
workspace="ws1",
budget_amount=100.0,
)
tracker.refresh_policies([policy])
# Cost from different workspace — should not apply
tracker.record_cost(200.0, workspace="ws2")
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 0.0
# Cost from matching workspace — should apply
tracker.record_cost(50.0, workspace="ws1")
assert window.cumulative_spend == 50.0
@pytest.mark.parametrize("duration_unit", list(BudgetDurationUnit))
def test_all_duration_units_window_consistency(duration_unit):
now = datetime(2025, 6, 15, 10, 30, 0, tzinfo=timezone.utc)
start = _compute_window_start(duration_unit, 1, now)
end = _compute_window_end(duration_unit, 1, start)
assert start < end
assert start <= now < end
# --- refresh_policies return value tests ---
def test_refresh_policies_returns_new_windows():
tracker = InMemoryBudgetTracker()
policy1 = _make_policy(budget_policy_id="bp-1")
policy2 = _make_policy(budget_policy_id="bp-2")
new_windows = tracker.refresh_policies([policy1, policy2])
assert len(new_windows) == 2
ids = {w.policy.budget_policy_id for w in new_windows}
assert ids == {"bp-1", "bp-2"}
def test_refresh_policies_returns_empty_on_reload():
tracker = InMemoryBudgetTracker()
policy = _make_policy()
new_windows = tracker.refresh_policies([policy])
assert len(new_windows) == 1
# Reload same policy within same window — no new windows
new_windows = tracker.refresh_policies([policy])
assert new_windows == []
def test_refresh_policies_returns_only_new_on_mixed():
tracker = InMemoryBudgetTracker()
policy1 = _make_policy(budget_policy_id="bp-1")
tracker.refresh_policies([policy1])
policy2 = _make_policy(budget_policy_id="bp-2")
new_windows = tracker.refresh_policies([policy1, policy2])
assert len(new_windows) == 1
assert new_windows[0].policy.budget_policy_id == "bp-2"
# --- backfill_spend tests ---
def test_backfill_spend_sets_cumulative():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
tracker.backfill_spend({"bp-test": 42.5})
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 42.5
assert window.exceeded is False
def test_backfill_spend_sets_exceeded_when_exceeds():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
tracker.backfill_spend({"bp-test": 150.0})
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 150.0
assert window.exceeded is True
def test_backfill_spend_sets_exceeded_at_exact_limit():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy(budget_amount=100.0)])
tracker.backfill_spend({"bp-test": 100.0})
window = tracker._get_window_info("bp-test")
assert window.cumulative_spend == 100.0
assert window.exceeded is True
def test_backfill_spend_nonexistent_is_noop():
tracker = InMemoryBudgetTracker()
tracker.refresh_policies([_make_policy()])
# Should not raise
tracker.backfill_spend({"nonexistent-policy": 50.0})
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/gateway/test_budget_tracker.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/issue.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from mlflow.entities._mlflow_object import _MlflowObject
from mlflow.protos.issues_pb2 import Issue as ProtoIssue
@dataclass
class Issue(_MlflowObject):
"""
An Issue represents a quality or operational problem discovered in traces.
"""
issue_id: str
"""Unique identifier for the issue."""
experiment_id: str
"""Experiment ID."""
name: str
"""Short descriptive name for the issue."""
description: str
"""Detailed description of the issue."""
status: str
"""Issue status."""
created_timestamp: int
"""Creation timestamp in milliseconds."""
last_updated_timestamp: int
"""Last update timestamp in milliseconds."""
confidence: str | None = None
"""Confidence level indicator."""
root_causes: list[str] | None = None
"""Analysis of the root causes of the issue."""
source_run_id: str | None = None
"""MLflow run ID that discovered this issue."""
created_by: str | None = None
"""Identifier for who created this issue."""
def to_dictionary(self) -> dict[str, Any]:
"""Convert Issue to dictionary representation."""
return {
"issue_id": self.issue_id,
"experiment_id": self.experiment_id,
"name": self.name,
"description": self.description,
"status": self.status,
"confidence": self.confidence,
"root_causes": self.root_causes,
"source_run_id": self.source_run_id,
"created_timestamp": self.created_timestamp,
"last_updated_timestamp": self.last_updated_timestamp,
"created_by": self.created_by,
}
@classmethod
def from_dictionary(cls, issue_dict: dict[str, Any]) -> Issue:
"""Create Issue from dictionary representation."""
return cls(
issue_id=issue_dict["issue_id"],
experiment_id=issue_dict["experiment_id"],
name=issue_dict["name"],
description=issue_dict["description"],
status=issue_dict["status"],
created_timestamp=issue_dict["created_timestamp"],
last_updated_timestamp=issue_dict["last_updated_timestamp"],
confidence=issue_dict.get("confidence"),
root_causes=issue_dict.get("root_causes"),
source_run_id=issue_dict.get("source_run_id"),
created_by=issue_dict.get("created_by"),
)
def to_proto(self) -> ProtoIssue:
"""Convert Issue to protobuf representation."""
proto_issue = ProtoIssue()
proto_issue.issue_id = self.issue_id
proto_issue.experiment_id = self.experiment_id
proto_issue.name = self.name
proto_issue.description = self.description
proto_issue.status = self.status
proto_issue.created_timestamp = self.created_timestamp
proto_issue.last_updated_timestamp = self.last_updated_timestamp
if self.confidence:
proto_issue.confidence = self.confidence
if self.root_causes:
proto_issue.root_causes.extend(self.root_causes)
if self.source_run_id:
proto_issue.source_run_id = self.source_run_id
if self.created_by:
proto_issue.created_by = self.created_by
return proto_issue
@classmethod
def from_proto(cls, proto: ProtoIssue) -> Issue:
"""Create Issue from protobuf representation."""
return cls(
issue_id=proto.issue_id,
experiment_id=proto.experiment_id,
name=proto.name,
description=proto.description,
status=proto.status,
created_timestamp=proto.created_timestamp,
last_updated_timestamp=proto.last_updated_timestamp,
confidence=proto.confidence or None,
root_causes=list(proto.root_causes) or None,
source_run_id=proto.source_run_id or None,
created_by=proto.created_by or None,
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/issue.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/entities/test_issue.py | from mlflow.entities.issue import Issue
from mlflow.protos.issues_pb2 import Issue as ProtoIssue
def test_issue_creation_required_fields():
issue = Issue(
issue_id="iss-123",
experiment_id="exp-123",
name="High latency",
description="API calls are taking too long",
status="draft",
created_timestamp=1234567890,
last_updated_timestamp=1234567890,
)
assert issue.issue_id == "iss-123"
assert issue.experiment_id == "exp-123"
assert issue.name == "High latency"
assert issue.description == "API calls are taking too long"
assert issue.status == "draft"
assert issue.created_timestamp == 1234567890
assert issue.last_updated_timestamp == 1234567890
assert issue.confidence is None
assert issue.root_causes is None
assert issue.source_run_id is None
assert issue.created_by is None
def test_issue_creation_all_fields():
issue = Issue(
issue_id="iss-456",
experiment_id="exp-456",
name="Token limit exceeded",
description="Model is hitting token limits frequently",
status="accepted",
created_timestamp=1234567890,
last_updated_timestamp=1234567900,
confidence="high",
root_causes=["Input prompts are too long", "Context window exceeded"],
source_run_id="run-789",
created_by="user@example.com",
)
assert issue.issue_id == "iss-456"
assert issue.experiment_id == "exp-456"
assert issue.name == "Token limit exceeded"
assert issue.description == "Model is hitting token limits frequently"
assert issue.status == "accepted"
assert issue.created_timestamp == 1234567890
assert issue.last_updated_timestamp == 1234567900
assert issue.confidence == "high"
assert issue.root_causes == ["Input prompts are too long", "Context window exceeded"]
assert issue.source_run_id == "run-789"
assert issue.created_by == "user@example.com"
def test_issue_to_dictionary():
issue = Issue(
issue_id="iss-789",
experiment_id="exp-789",
name="Authentication failure",
description="Users are getting auth errors",
status="rejected",
created_timestamp=9876543210,
last_updated_timestamp=9876543220,
confidence="medium",
root_causes=["API key rotation issue", "Token expired"],
source_run_id="run-abc",
created_by="system",
)
issue_dict = issue.to_dictionary()
assert issue_dict["issue_id"] == "iss-789"
assert issue_dict["experiment_id"] == "exp-789"
assert issue_dict["name"] == "Authentication failure"
assert issue_dict["description"] == "Users are getting auth errors"
assert issue_dict["status"] == "rejected"
assert issue_dict["created_timestamp"] == 9876543210
assert issue_dict["last_updated_timestamp"] == 9876543220
assert issue_dict["confidence"] == "medium"
assert issue_dict["root_causes"] == ["API key rotation issue", "Token expired"]
assert issue_dict["source_run_id"] == "run-abc"
assert issue_dict["created_by"] == "system"
def test_issue_from_dictionary_all_fields():
issue_dict = {
"issue_id": "iss-999",
"experiment_id": "exp-999",
"name": "Low accuracy",
"description": "Model accuracy below threshold",
"status": "draft",
"confidence": "low",
"root_causes": ["Training data quality issues", "Model drift"],
"source_run_id": "run-xyz",
"created_timestamp": 1111111111,
"last_updated_timestamp": 2222222222,
"created_by": "admin@example.com",
}
issue = Issue.from_dictionary(issue_dict)
assert issue.issue_id == "iss-999"
assert issue.experiment_id == "exp-999"
assert issue.name == "Low accuracy"
assert issue.description == "Model accuracy below threshold"
assert issue.status == "draft"
assert issue.confidence == "low"
assert issue.root_causes == ["Training data quality issues", "Model drift"]
assert issue.source_run_id == "run-xyz"
assert issue.created_timestamp == 1111111111
assert issue.last_updated_timestamp == 2222222222
assert issue.created_by == "admin@example.com"
def test_issue_from_dictionary_required_fields_only():
issue_dict = {
"issue_id": "iss-minimal",
"experiment_id": "exp-minimal",
"name": "Minimal issue",
"description": "Issue with only required fields",
"status": "draft",
"created_timestamp": 5555555555,
"last_updated_timestamp": 5555555555,
}
issue = Issue.from_dictionary(issue_dict)
assert issue.issue_id == "iss-minimal"
assert issue.experiment_id == "exp-minimal"
assert issue.name == "Minimal issue"
assert issue.description == "Issue with only required fields"
assert issue.status == "draft"
assert issue.created_timestamp == 5555555555
assert issue.last_updated_timestamp == 5555555555
assert issue.confidence is None
assert issue.root_causes is None
assert issue.source_run_id is None
assert issue.created_by is None
def test_issue_roundtrip_conversion():
original = Issue(
issue_id="iss-roundtrip",
experiment_id="exp-roundtrip",
name="Roundtrip test",
description="Testing dictionary conversion",
status="accepted",
created_timestamp=3333333333,
last_updated_timestamp=4444444444,
confidence="high",
root_causes=["Test root cause", "Another cause"],
source_run_id="run-test",
created_by="test-user",
)
issue_dict = original.to_dictionary()
recovered = Issue.from_dictionary(issue_dict)
assert recovered.issue_id == original.issue_id
assert recovered.experiment_id == original.experiment_id
assert recovered.name == original.name
assert recovered.description == original.description
assert recovered.status == original.status
assert recovered.created_timestamp == original.created_timestamp
assert recovered.last_updated_timestamp == original.last_updated_timestamp
assert recovered.confidence == original.confidence
assert recovered.root_causes == original.root_causes
assert recovered.source_run_id == original.source_run_id
assert recovered.created_by == original.created_by
def test_issue_to_proto_required_fields():
issue = Issue(
issue_id="iss-proto-1",
experiment_id="exp-proto-1",
name="Proto test",
description="Testing proto conversion",
status="draft",
created_timestamp=1000000000,
last_updated_timestamp=1000000001,
)
proto = issue.to_proto()
assert proto.issue_id == "iss-proto-1"
assert proto.experiment_id == "exp-proto-1"
assert proto.name == "Proto test"
assert proto.description == "Testing proto conversion"
assert proto.status == "draft"
assert proto.created_timestamp == 1000000000
assert proto.last_updated_timestamp == 1000000001
assert proto.confidence == ""
assert len(proto.root_causes) == 0
assert proto.source_run_id == ""
assert proto.created_by == ""
def test_issue_to_proto_all_fields():
issue = Issue(
issue_id="iss-proto-2",
experiment_id="exp-proto-2",
name="Full proto test",
description="Testing proto conversion with all fields",
status="accepted",
created_timestamp=2000000000,
last_updated_timestamp=2000000010,
confidence="very_high",
root_causes=["Proto test root cause", "Another root cause"],
source_run_id="run-proto-2",
created_by="proto-user@example.com",
)
proto = issue.to_proto()
assert proto.issue_id == "iss-proto-2"
assert proto.experiment_id == "exp-proto-2"
assert proto.name == "Full proto test"
assert proto.description == "Testing proto conversion with all fields"
assert proto.status == "accepted"
assert proto.created_timestamp == 2000000000
assert proto.last_updated_timestamp == 2000000010
assert proto.confidence == "very_high"
assert list(proto.root_causes) == ["Proto test root cause", "Another root cause"]
assert proto.source_run_id == "run-proto-2"
assert proto.created_by == "proto-user@example.com"
def test_issue_from_proto_required_fields():
proto = ProtoIssue(
issue_id="iss-from-proto-1",
experiment_id="exp-from-proto-1",
name="From proto test",
description="Testing conversion from proto",
status="draft",
created_timestamp=3000000000,
last_updated_timestamp=3000000001,
)
issue = Issue.from_proto(proto)
assert issue.issue_id == "iss-from-proto-1"
assert issue.experiment_id == "exp-from-proto-1"
assert issue.name == "From proto test"
assert issue.description == "Testing conversion from proto"
assert issue.status == "draft"
assert issue.created_timestamp == 3000000000
assert issue.last_updated_timestamp == 3000000001
assert issue.confidence is None
assert issue.root_causes is None
assert issue.source_run_id is None
assert issue.created_by is None
def test_issue_from_proto_all_fields():
proto = ProtoIssue(
issue_id="iss-from-proto-2",
experiment_id="exp-from-proto-2",
name="Full from proto test",
description="Testing conversion from proto with all fields",
status="rejected",
created_timestamp=4000000000,
last_updated_timestamp=4000000020,
confidence="low",
source_run_id="run-from-proto-2",
created_by="from-proto-user@example.com",
)
proto.root_causes.extend(["From proto root cause", "Another cause"])
issue = Issue.from_proto(proto)
assert issue.issue_id == "iss-from-proto-2"
assert issue.experiment_id == "exp-from-proto-2"
assert issue.name == "Full from proto test"
assert issue.description == "Testing conversion from proto with all fields"
assert issue.status == "rejected"
assert issue.created_timestamp == 4000000000
assert issue.last_updated_timestamp == 4000000020
assert issue.confidence == "low"
assert issue.root_causes == ["From proto root cause", "Another cause"]
assert issue.source_run_id == "run-from-proto-2"
assert issue.created_by == "from-proto-user@example.com"
def test_issue_proto_roundtrip_required_fields():
original = Issue(
issue_id="iss-proto-roundtrip-1",
experiment_id="exp-proto-roundtrip-1",
name="Proto roundtrip test",
description="Testing proto roundtrip conversion",
status="accepted",
created_timestamp=5000000000,
last_updated_timestamp=5000000005,
)
proto = original.to_proto()
recovered = Issue.from_proto(proto)
assert recovered.issue_id == original.issue_id
assert recovered.experiment_id == original.experiment_id
assert recovered.name == original.name
assert recovered.description == original.description
assert recovered.status == original.status
assert recovered.created_timestamp == original.created_timestamp
assert recovered.last_updated_timestamp == original.last_updated_timestamp
assert recovered.confidence == original.confidence
assert recovered.root_causes == original.root_causes
assert recovered.source_run_id == original.source_run_id
assert recovered.created_by == original.created_by
def test_issue_proto_roundtrip_all_fields():
original = Issue(
issue_id="iss-proto-roundtrip-2",
experiment_id="exp-proto-roundtrip-2",
name="Full proto roundtrip test",
description="Testing proto roundtrip with all fields",
status="draft",
created_timestamp=6000000000,
last_updated_timestamp=6000000030,
confidence="medium",
root_causes=["Proto roundtrip root cause", "Secondary cause", "Tertiary cause"],
source_run_id="run-proto-roundtrip-2",
created_by="roundtrip-user@example.com",
)
proto = original.to_proto()
recovered = Issue.from_proto(proto)
assert recovered.issue_id == original.issue_id
assert recovered.experiment_id == original.experiment_id
assert recovered.name == original.name
assert recovered.description == original.description
assert recovered.status == original.status
assert recovered.created_timestamp == original.created_timestamp
assert recovered.last_updated_timestamp == original.last_updated_timestamp
assert recovered.confidence == original.confidence
assert recovered.root_causes == original.root_causes
assert recovered.source_run_id == original.source_run_id
assert recovered.created_by == original.created_by
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/entities/test_issue.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/prefer_os_environ.py | import ast
from typing import Literal
from typing_extensions import Self
from clint.resolver import Resolver
from clint.rules.base import Rule
# See https://github.com/astral-sh/ruff/issues/3608
class PreferOsEnviron(Rule):
def __init__(self, func: Literal["getenv", "putenv"]) -> None:
self.func = func
def _message(self) -> str:
if self.func == "putenv":
return "Use `os.environ[key] = value` instead of `os.putenv()`."
return "Use `os.environ.get()` instead of `os.getenv()`."
@classmethod
def check(cls, node: ast.Call, resolver: Resolver) -> Self | None:
match resolver.resolve(node.func):
case ["os", ("getenv" | "putenv") as func]:
return cls(func)
return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/prefer_os_environ.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_prefer_os_environ.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules.prefer_os_environ import PreferOsEnviron
@pytest.mark.parametrize(
"code",
[
pytest.param('import os\n\nval = os.getenv("FOO")', id="os.getenv"),
pytest.param('import os\n\nval = os.getenv("FOO", "default")', id="os.getenv with default"),
pytest.param('import os\n\nos.putenv("FOO", "bar")', id="os.putenv"),
pytest.param('from os import getenv\n\nval = getenv("FOO")', id="from os import getenv"),
pytest.param('from os import putenv\n\nputenv("FOO", "bar")', id="from os import putenv"),
],
)
def test_violation(code: str, index_path: Path) -> None:
config = Config(select={PreferOsEnviron.name})
violations = lint_file(Path("file.py"), code, config, index_path)
assert len(violations) == 1
assert isinstance(violations[0].rule, PreferOsEnviron)
@pytest.mark.parametrize(
"code",
[
pytest.param('import os\n\nval = os.environ.get("FOO")', id="os.environ.get"),
pytest.param('import os\n\nval = os.environ["FOO"]', id="os.environ subscript"),
pytest.param('import os\n\nos.environ["FOO"] = "bar"', id="os.environ set"),
],
)
def test_no_violation(code: str, index_path: Path) -> None:
config = Config(select={PreferOsEnviron.name})
violations = lint_file(Path("file.py"), code, config, index_path)
assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_prefer_os_environ.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/utils/test_otlp_auth.py | import base64
from collections.abc import Generator
from contextlib import contextmanager
from unittest.mock import patch
from mlflow.tracing.utils.otlp import MLFLOW_EXPERIMENT_ID_HEADER, build_otlp_headers
from mlflow.utils.credentials import MlflowCreds
@contextmanager
def mock_creds(username: str | None = None, password: str | None = None) -> Generator[None]:
with patch(
"mlflow.tracing.utils.otlp.read_mlflow_creds",
return_value=MlflowCreds(username=username, password=password),
) as m:
yield
m.assert_called_once()
def test_build_otlp_headers_no_credentials(monkeypatch):
monkeypatch.delenv("MLFLOW_TRACKING_TOKEN", raising=False)
with mock_creds():
headers = build_otlp_headers("42")
assert headers == {MLFLOW_EXPERIMENT_ID_HEADER: "42"}
assert "Authorization" not in headers
def test_build_otlp_headers_basic_auth(monkeypatch):
monkeypatch.delenv("MLFLOW_TRACKING_TOKEN", raising=False)
with mock_creds(username="admin", password="s3cret"):
headers = build_otlp_headers("7")
expected = base64.standard_b64encode(b"admin:s3cret").decode()
assert headers[MLFLOW_EXPERIMENT_ID_HEADER] == "7"
assert headers["Authorization"] == f"Basic {expected}"
def test_build_otlp_headers_bearer_token(monkeypatch):
monkeypatch.setenv("MLFLOW_TRACKING_TOKEN", "tok-abc")
with mock_creds():
headers = build_otlp_headers("1")
assert headers[MLFLOW_EXPERIMENT_ID_HEADER] == "1"
assert headers["Authorization"] == "Bearer tok-abc"
def test_build_otlp_headers_basic_auth_takes_precedence_over_token(monkeypatch):
monkeypatch.setenv("MLFLOW_TRACKING_TOKEN", "tok-xyz")
with mock_creds(username="admin", password="pass"):
headers = build_otlp_headers("5")
expected = base64.standard_b64encode(b"admin:pass").decode()
assert headers[MLFLOW_EXPERIMENT_ID_HEADER] == "5"
assert headers["Authorization"] == f"Basic {expected}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/utils/test_otlp_auth.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/transformers/version.py | import transformers
from packaging.version import Version
transformers_version = Version(transformers.__version__)
IS_NEW_FEATURE_EXTRACTION_API = transformers_version >= Version("4.27.0")
IS_TRANSFORMERS_V5_OR_LATER = transformers_version.major >= 5
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/transformers/version.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/prefer_next.py | import ast
from clint.rules.base import Rule
class PreferNext(Rule):
def _message(self) -> str:
return (
"Use `next(x for x in items if condition)` instead of "
"`[x for x in items if condition][0]` for finding the first matching element."
)
@staticmethod
def check(node: ast.Subscript) -> bool:
"""
Returns True if the node is a list comprehension with an `if` clause
subscripted with `[0]`.
Examples that should be flagged:
- [x for x in items if f(x)][0]
- [x.name for x in items if x.active][0]
Examples that should NOT be flagged:
- [x for x in items][0] (no if clause)
- [x for x in items if f(x)][1] (not [0])
- [x for x in items if f(x)][-1] (not [0])
- (x for x in items if f(x)) (already a generator)
"""
match node:
case ast.Subscript(
value=ast.ListComp(generators=generators),
slice=ast.Constant(value=0),
) if any(gen.ifs for gen in generators):
return True
case _:
return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/prefer_next.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/tests/rules/test_prefer_next.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules import PreferNext
@pytest.mark.parametrize(
"code",
[
pytest.param("[x for x in items if f(x)][0]", id="basic_pattern"),
],
)
def test_flag(index_path: Path, code: str) -> None:
config = Config(select={PreferNext.name})
results = lint_file(Path("test.py"), code, config, index_path)
assert len(results) == 1
assert isinstance(results[0].rule, PreferNext)
@pytest.mark.parametrize(
"code",
[
pytest.param("[x for x in items][0]", id="no_if_clause"),
pytest.param("[x for x in items if f(x)][1]", id="not_zero_index"),
pytest.param("[x for x in items if f(x)][-1]", id="negative_index"),
pytest.param("(x for x in items if f(x))", id="already_generator"),
pytest.param("next(x for x in items if f(x))", id="already_using_next"),
pytest.param("[x for x in items if f(x)]", id="no_subscript"),
pytest.param("items[0]", id="simple_subscript"),
],
)
def test_no_flag(index_path: Path, code: str) -> None:
config = Config(select={PreferNext.name})
results = lint_file(Path("test.py"), code, config, index_path)
assert len(results) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_prefer_next.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/otel/translation/langfuse.py | """
Translation utilities for Langfuse observation attributes.
Maps ``langfuse.observation.*`` attributes to MLflow span semantics so that
spans forwarded from Langfuse via the generic OTEL processor are stored with
correct span types, inputs, and outputs.
"""
from mlflow.entities.span import SpanType
from mlflow.tracing.otel.translation.base import OtelSchemaTranslator
class LangfuseTranslator(OtelSchemaTranslator):
SPAN_KIND_ATTRIBUTE_KEY = "langfuse.observation.type"
SPAN_KIND_TO_MLFLOW_TYPE = {
"generation": SpanType.LLM,
"embedding": SpanType.EMBEDDING,
"tool": SpanType.TOOL,
"retriever": SpanType.RETRIEVER,
"agent": SpanType.AGENT,
"chain": SpanType.CHAIN,
"evaluator": SpanType.EVALUATOR,
"guardrail": SpanType.GUARDRAIL,
"span": SpanType.UNKNOWN,
}
INPUT_VALUE_KEYS = ["langfuse.observation.input"]
OUTPUT_VALUE_KEYS = ["langfuse.observation.output"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/otel/translation/langfuse.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/otel/test_otel_autolog.py | import asyncio
import pytest
langfuse = pytest.importorskip("langfuse", reason="langfuse is not installed")
from langfuse import observe
from langfuse._client.resource_manager import LangfuseResourceManager
from opentelemetry import trace as otel_trace_api
from opentelemetry.sdk.trace import TracerProvider as SdkTracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter, SpanExportResult
import mlflow
import mlflow.otel
from mlflow.entities.span import SpanType
from mlflow.server import handlers
from mlflow.server.fastapi_app import app
from mlflow.server.handlers import initialize_backend_stores
from tests.helper_functions import get_safe_port
from tests.tracing.helper import get_traces
from tests.tracking.integration_test_utils import ServerThread
@pytest.fixture(autouse=True)
def otel_env(monkeypatch, tmp_path):
"""Reset OTEL state, start a local MLflow server, and configure the Langfuse test driver.
A local MLflow server is required because the OTLP exporter sends spans
over HTTP to the ``/v1/traces`` endpoint.
"""
monkeypatch.setenv("LANGFUSE_PUBLIC_KEY", "pk-test-dummy")
monkeypatch.setenv("LANGFUSE_SECRET_KEY", "sk-test-dummy")
monkeypatch.setenv("LANGFUSE_HOST", "http://localhost:9999")
# Default to synchronous export so tests don't need force_flush().
# test_batch_export explicitly passes batch=True.
monkeypatch.setenv("MLFLOW_ENABLE_ASYNC_TRACE_LOGGING", "false")
# Set up a local MLflow server backed by SQLite.
backend_uri = f"sqlite:///{tmp_path / 'mlflow.db'}"
handlers._tracking_store = None
handlers._model_registry_store = None
initialize_backend_stores(backend_uri, default_artifact_root=tmp_path.as_uri())
with ServerThread(app, get_safe_port()) as url:
mlflow.set_tracking_uri(url)
# Start each test with a fresh global TracerProvider so processors from
# previous tests don't interfere.
otel_trace_api.set_tracer_provider(SdkTracerProvider())
mlflow.otel._active_processor = None
yield
mlflow.otel.autolog(disable=True)
mlflow.otel._active_processor = None
LangfuseResourceManager.reset()
def test_sync_observe_autolog():
mlflow.otel.autolog()
@observe()
def add(x, y):
return x + y
result = add(2, 3)
assert result == 5
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "OK"
assert len(traces[0].data.spans) == 1
span = traces[0].data.spans[0]
assert span.name == "add"
assert span.inputs == {"args": [2, 3], "kwargs": {}}
# The OTLP proto round-trip double-serializes the integer return value
# into a JSON string (Span.from_otel_proto applies dump_span_attribute_value
# on an already-serialized value).
assert span.outputs == "5"
def test_sync_observe_with_custom_name():
mlflow.otel.autolog()
@observe(name="custom-add")
def add(x, y):
return x + y
result = add(2, 3)
assert result == 5
traces = get_traces()
assert len(traces) == 1
span = traces[0].data.spans[0]
assert span.name == "custom-add"
def test_async_observe_autolog():
mlflow.otel.autolog()
@observe()
async def async_add(x, y):
return x + y
result = asyncio.run(async_add(10, 20))
assert result == 30
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "OK"
assert len(traces[0].data.spans) == 1
span = traces[0].data.spans[0]
assert span.name == "async_add"
def test_nested_observe_autolog():
mlflow.otel.autolog()
@observe()
def inner(x):
return x * 2
@observe()
def outer(x):
return inner(x) + 1
result = outer(10)
assert result == 21
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "OK"
assert len(traces[0].data.spans) == 2
span_names = sorted(s.name for s in traces[0].data.spans)
assert span_names == ["inner", "outer"]
# Verify parent-child relationship
spans_by_name = {s.name: s for s in traces[0].data.spans}
outer_span = spans_by_name["outer"]
inner_span = spans_by_name["inner"]
assert outer_span.parent_id is None
assert inner_span.parent_id == outer_span.span_id
def test_disable_autolog():
mlflow.otel.autolog()
@observe()
def add(x, y):
return x + y
add(1, 2)
traces = get_traces()
assert len(traces) == 1
mlflow.otel.autolog(disable=True)
result = add(3, 4)
assert result == 7
# No new trace should be created
traces = get_traces()
assert len(traces) == 1
def test_exception_propagation():
mlflow.otel.autolog()
@observe()
def fail():
raise ValueError("test error")
with pytest.raises(ValueError, match="test error"):
fail()
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "ERROR"
def test_observe_without_parentheses():
mlflow.otel.autolog()
@observe
def add(x, y):
return x + y
result = add(2, 3)
assert result == 5
traces = get_traces()
assert len(traces) == 1
span = traces[0].data.spans[0]
assert span.name == "add"
def test_autolog_is_additive():
exported_spans: list[object] = []
class RecordingExporter(SpanExporter):
def export(self, spans):
exported_spans.extend(spans)
return SpanExportResult.SUCCESS
mlflow.otel.autolog()
provider = otel_trace_api.get_tracer_provider()
provider.add_span_processor(SimpleSpanProcessor(RecordingExporter()))
@observe()
def add(x, y):
return x + y
result = add(2, 3)
assert result == 5
# MLflow received the trace
traces = get_traces()
assert len(traces) == 1
# The recording exporter also received the span, proving dispatch to
# all processors on the shared TracerProvider.
assert any(s.name == "add" for s in exported_spans)
def test_batch_export():
mlflow.otel.autolog(batch=True)
@observe()
def add(x, y):
return x + y
result = add(2, 3)
assert result == 5
# BatchSpanProcessor exports asynchronously; flush to ensure export completes.
processor = mlflow.otel._active_processor
processor.force_flush()
traces = get_traces()
assert len(traces) == 1
assert traces[0].info.status == "OK"
assert len(traces[0].data.spans) == 1
span = traces[0].data.spans[0]
assert span.name == "add"
@pytest.mark.parametrize(
("source_type", "expected_mlflow_type"),
[
("generation", SpanType.LLM),
("tool", SpanType.TOOL),
("retriever", SpanType.RETRIEVER),
("embedding", SpanType.EMBEDDING),
("agent", SpanType.AGENT),
("chain", SpanType.CHAIN),
("evaluator", SpanType.EVALUATOR),
("guardrail", SpanType.GUARDRAIL),
("span", SpanType.UNKNOWN),
],
)
def test_span_type_mapping(source_type, expected_mlflow_type):
mlflow.otel.autolog()
@observe(as_type=source_type)
def func(x):
return x
func("test")
traces = get_traces()
assert len(traces) == 1
span = traces[0].data.spans[0]
assert span.span_type == expected_mlflow_type
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/otel/test_otel_autolog.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/gateway_budget_policy.py | from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from mlflow.entities._mlflow_object import _MlflowObject
from mlflow.protos.service_pb2 import BudgetAction as ProtoBudgetAction
from mlflow.protos.service_pb2 import BudgetDurationUnit as ProtoBudgetDurationUnit
from mlflow.protos.service_pb2 import BudgetTargetScope as ProtoBudgetTargetScope
from mlflow.protos.service_pb2 import BudgetUnit as ProtoBudgetUnit
from mlflow.protos.service_pb2 import GatewayBudgetPolicy as ProtoGatewayBudgetPolicy
from mlflow.utils.workspace_utils import resolve_entity_workspace_name
class BudgetDurationUnit(str, Enum):
"""Duration unit for budget policy fixed windows."""
MINUTES = "MINUTES"
HOURS = "HOURS"
DAYS = "DAYS"
WEEKS = "WEEKS"
MONTHS = "MONTHS"
@classmethod
def from_proto(cls, proto: ProtoBudgetDurationUnit) -> BudgetDurationUnit | None:
try:
return cls(ProtoBudgetDurationUnit.Name(proto))
except ValueError:
return None
def to_proto(self) -> ProtoBudgetDurationUnit:
return ProtoBudgetDurationUnit.Value(self.value)
class BudgetTargetScope(str, Enum):
"""Target scope for a budget policy."""
GLOBAL = "GLOBAL"
WORKSPACE = "WORKSPACE"
@classmethod
def from_proto(cls, proto: ProtoBudgetTargetScope) -> BudgetTargetScope | None:
try:
return cls(ProtoBudgetTargetScope.Name(proto))
except ValueError:
return None
def to_proto(self) -> ProtoBudgetTargetScope:
return ProtoBudgetTargetScope.Value(self.value)
class BudgetAction(str, Enum):
"""Action to take when a budget is exceeded."""
ALERT = "ALERT"
REJECT = "REJECT"
@classmethod
def from_proto(cls, proto: ProtoBudgetAction) -> BudgetAction | None:
try:
return cls(ProtoBudgetAction.Name(proto))
except ValueError:
return None
def to_proto(self) -> ProtoBudgetAction:
return ProtoBudgetAction.Value(self.value)
class BudgetUnit(str, Enum):
"""Budget measurement unit."""
USD = "USD"
@classmethod
def from_proto(cls, proto: ProtoBudgetUnit) -> BudgetUnit | None:
try:
return cls(ProtoBudgetUnit.Name(proto))
except ValueError:
return None
def to_proto(self) -> ProtoBudgetUnit:
return ProtoBudgetUnit.Value(self.value)
@dataclass
class GatewayBudgetPolicy(_MlflowObject):
"""
Represents a budget policy for the AI Gateway.
Budget policies set limits with fixed time windows,
supporting global or per-workspace scoping.
Args:
budget_policy_id: Unique identifier for this budget policy.
budget_unit: Budget measurement unit (e.g. USD).
budget_amount: Budget limit amount.
duration_unit: Unit of time window (MINUTES, HOURS, DAYS, WEEKS, MONTHS).
duration_value: Length of the window in units of duration_unit.
target_scope: Scope of the budget (GLOBAL or WORKSPACE).
budget_action: Action when budget is exceeded (ALERT, REJECT).
created_at: Timestamp (milliseconds) when the policy was created.
last_updated_at: Timestamp (milliseconds) when the policy was last updated.
created_by: User ID who created the policy.
last_updated_by: User ID who last updated the policy.
workspace: Workspace that owns the policy.
"""
budget_policy_id: str
budget_unit: BudgetUnit
budget_amount: float
duration_unit: BudgetDurationUnit
duration_value: int
target_scope: BudgetTargetScope
budget_action: BudgetAction
created_at: int
last_updated_at: int
created_by: str | None = None
last_updated_by: str | None = None
workspace: str | None = None
def __post_init__(self):
self.workspace = resolve_entity_workspace_name(self.workspace)
if isinstance(self.budget_unit, str):
self.budget_unit = BudgetUnit(self.budget_unit)
if isinstance(self.duration_unit, str):
self.duration_unit = BudgetDurationUnit(self.duration_unit)
if isinstance(self.target_scope, str):
self.target_scope = BudgetTargetScope(self.target_scope)
if isinstance(self.budget_action, str):
self.budget_action = BudgetAction(self.budget_action)
def to_proto(self):
proto = ProtoGatewayBudgetPolicy()
proto.budget_policy_id = self.budget_policy_id
proto.budget_unit = self.budget_unit.to_proto()
proto.budget_amount = self.budget_amount
proto.duration_unit = self.duration_unit.to_proto()
proto.duration_value = self.duration_value
proto.target_scope = self.target_scope.to_proto()
proto.budget_action = self.budget_action.to_proto()
proto.created_by = self.created_by or ""
proto.created_at = self.created_at
proto.last_updated_by = self.last_updated_by or ""
proto.last_updated_at = self.last_updated_at
return proto
@classmethod
def from_proto(cls, proto):
return cls(
budget_policy_id=proto.budget_policy_id,
budget_unit=BudgetUnit.from_proto(proto.budget_unit),
budget_amount=proto.budget_amount,
duration_unit=BudgetDurationUnit.from_proto(proto.duration_unit),
duration_value=proto.duration_value,
target_scope=BudgetTargetScope.from_proto(proto.target_scope),
budget_action=BudgetAction.from_proto(proto.budget_action),
created_by=proto.created_by or None,
created_at=proto.created_at,
last_updated_by=proto.last_updated_by or None,
last_updated_at=proto.last_updated_at,
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/gateway_budget_policy.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.