sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
docling-project/docling:docs/examples/gpu_vlm_pipeline.py | # %% [markdown]
#
# What this example does
# - Run a conversion using the best setup for GPU using VLM models
# - Demonstrates using presets with API runtime (vLLM) for high-throughput GPU processing
#
# Requirements
# - Python 3.10+
# - Install Docling: `pip install docling`
# - Install vLLM: `pip install vllm`
#
# How to run
# - `python docs/examples/gpu_vlm_pipeline.py`
#
# This example is part of a set of GPU optimization strategies. Read more about it in [GPU support](../../usage/gpu/)
#
# ### Start models with vllm
#
# ```console
# vllm serve ibm-granite/granite-docling-258M \
# --host 127.0.0.1 --port 8000 \
# --max-num-seqs 512 \
# --max-num-batched-tokens 8192 \
# --enable-chunked-prefill \
# --gpu-memory-utilization 0.9
# ```
#
# ## Example code
# %%
import datetime
import logging
import time
from pathlib import Path
import numpy as np
from pydantic import TypeAdapter
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import (
VlmConvertOptions,
VlmPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.datamodel.vlm_engine_options import (
ApiVlmEngineOptions,
VlmEngineType,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
from docling.utils.profiling import ProfilingItem
_log = logging.getLogger(__name__)
def main():
    """Convert a sample PDF with the VLM pipeline backed by a vLLM API server."""
    logging.getLogger("docling").setLevel(logging.WARNING)
    _log.setLevel(logging.INFO)

    # Pages kept in flight at once; should match the server's --max-num-seqs.
    BATCH_SIZE = 64
    settings.perf.page_batch_size = BATCH_SIZE
    settings.debug.profile_pipeline_timings = True

    data_folder = Path(__file__).parent / "../../tests/data"
    # input_doc_path = data_folder / "pdf" / "2305.03393v1.pdf" # 14 pages
    input_doc_path = data_folder / "pdf" / "redp5110_sampled.pdf" # 18 pages

    # Start from the granite_docling preset, overriding the engine so requests
    # go to the locally running vLLM endpoint.
    vlm_options = VlmConvertOptions.from_preset(
        "granite_docling",
        engine_options=ApiVlmEngineOptions(
            runtime_type=VlmEngineType.API,
            url="http://localhost:8000/v1/chat/completions",
            concurrency=BATCH_SIZE,
        ),
    )
    pipeline_options = VlmPipelineOptions(
        vlm_options=vlm_options,
        enable_remote_services=True,  # required when using a remote inference service.
    )
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=VlmPipeline,
                pipeline_options=pipeline_options,
            ),
        }
    )

    # Initialize the pipeline up front so model/connection setup is not
    # counted inside the conversion timings.
    init_started = time.time()
    doc_converter.initialize_pipeline(InputFormat.PDF)
    init_elapsed = time.time() - init_started
    _log.info(f"Pipeline initialized in {init_elapsed:.2f} seconds.")

    now = datetime.datetime.now()
    conv_result = doc_converter.convert(input_doc_path)
    assert conv_result.status == ConversionStatus.SUCCESS

    num_pages = len(conv_result.pages)
    pipeline_runtime = conv_result.timings["pipeline_total"].times[0]
    _log.info(f"Document converted in {pipeline_runtime:.2f} seconds.")
    _log.info(f" [efficiency]: {num_pages / pipeline_runtime:.2f} pages/second.")

    # Per-stage min / median / max across pages.
    for stage in ("page_init", "vlm"):
        values = np.array(conv_result.timings[stage].times)
        _log.info(
            f" [{stage}]: {np.min(values):.2f} / {np.median(values):.2f} / {np.max(values):.2f} seconds/page"
        )

    # Persist the raw profiling data for later inspection.
    TimingsT = TypeAdapter(dict[str, ProfilingItem])
    timings_file = Path(f"result-timings-gpu-vlm-{now:%Y-%m-%d_%H-%M-%S}.json")
    with timings_file.open("wb") as fp:
        fp.write(TimingsT.dump_json(conv_result.timings, indent=2))
    _log.info(f"Profile details in {timings_file}.")


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/gpu_vlm_pipeline.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:tests/test_pdf_password.py | from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
import pytest
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.backend.pypdfium2_backend import (
PyPdfiumDocumentBackend,
)
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
@pytest.fixture
def test_doc_path():
    """Return the path of the password-protected sample PDF used below."""
    return Path("./tests/data/pdf_password/2206.01062_pg3.pdf")
@dataclass
class TestOption:
    """Bundle a converter configuration with a human-readable parametrize id."""

    # The class name starts with "Test" but this is a plain parameter holder;
    # without this flag pytest tries to collect it and emits a
    # PytestCollectWarning because the dataclass has a generated __init__.
    __test__ = False

    options: PdfFormatOption  # format option (backend + pipeline) under test
    name: str  # id shown in the pytest output
def converter_opts_gen() -> Iterable[TestOption]:
    """Yield one converter configuration per PDF backend under test.

    Both configurations share the same pipeline options (OCR and table
    structure disabled) and the same password-carrying backend options.
    """
    shared_pipeline = PdfPipelineOptions(
        do_ocr=False,
        do_table_structure=False,
    )
    shared_backend_opts = PdfBackendOptions(password="1234")
    for backend_cls, label in (
        (PyPdfiumDocumentBackend, "PyPdfium"),
        (DoclingParseDocumentBackend, "DoclingParse"),
    ):
        yield TestOption(
            options=PdfFormatOption(
                pipeline_options=shared_pipeline,
                backend=backend_cls,
                backend_options=shared_backend_opts,
            ),
            name=label,
        )
@pytest.mark.asyncio
@pytest.mark.parametrize("test_options", converter_opts_gen(), ids=lambda o: o.name)
def test_get_text_from_rect(test_doc_path: Path, test_options: TestOption):
    """Each configured backend must decrypt and convert the protected PDF."""
    converter = DocumentConverter(
        format_options={InputFormat.PDF: test_options.options}
    )
    outcome = converter.convert(test_doc_path)
    assert outcome.status == ConversionStatus.SUCCESS
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_pdf_password.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docling/datamodel/backend_options.py | from pathlib import Path, PurePath
from typing import Annotated, Literal, Optional, Union
from pydantic import AnyUrl, BaseModel, Field, SecretStr
class BaseBackendOptions(BaseModel):
    """Common options for all declarative document backends."""

    # Fetching external resources is opt-in: both flags default to off so a
    # backend never touches the network or filesystem unless asked to.
    enable_remote_fetch: bool = Field(
        default=False, description="Enable remote resource fetching."
    )
    enable_local_fetch: bool = Field(
        default=False, description="Enable local resource fetching."
    )
class DeclarativeBackendOptions(BaseBackendOptions):
    """Default backend options for a declarative document backend."""

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["declarative"] = Field(
        default="declarative", exclude=True, repr=False
    )
class HTMLBackendOptions(BaseBackendOptions):
    """Options specific to the HTML backend.

    This class can be extended to include options specific to HTML processing.
    """

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["html"] = Field(default="html", exclude=True, repr=False)
    fetch_images: bool = Field(
        default=False,
        description=(
            "Whether the backend should access remote or local resources to parse "
            "images in an HTML document."
        ),
    )
    source_uri: Optional[Union[AnyUrl, PurePath]] = Field(
        default=None,
        description=(
            "The URI that originates the HTML document. If provided, the backend "
            "will use it to resolve relative paths in the HTML document."
        ),
    )
    add_title: bool = Field(
        default=True,
        description="Add the HTML title tag as furniture in the DoclingDocument.",
    )
    infer_furniture: bool = Field(
        default=True,
        description="Infer all the content before the first header as furniture.",
    )
class MarkdownBackendOptions(BaseBackendOptions):
    """Options specific to the Markdown backend."""

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["md"] = Field(default="md", exclude=True, repr=False)
    fetch_images: bool = Field(
        default=False,
        description=(
            "Whether the backend should access remote or local resources to parse "
            "images in the markdown document."
        ),
    )
    source_uri: Optional[Union[AnyUrl, PurePath]] = Field(
        default=None,
        description=(
            "The URI that originates the markdown document. If provided, the backend "
            "will use it to resolve relative paths in the markdown document."
        ),
    )
class PdfBackendOptions(BaseBackendOptions):
    """Backend options for pdf document backends."""

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["pdf"] = Field(default="pdf", exclude=True, repr=False)
    # Password for encrypted PDFs; SecretStr keeps it masked in reprs/logs.
    password: Optional[SecretStr] = None
class MsExcelBackendOptions(BaseBackendOptions):
    """Options specific to the MS Excel backend."""

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["xlsx"] = Field(default="xlsx", exclude=True, repr=False)
    treat_singleton_as_text: bool = Field(
        default=False,
        description=(
            "Whether to treat singleton cells (1x1 tables with empty neighboring "
            "cells) as TextItem instead of TableItem."
        ),
    )
    gap_tolerance: int = Field(
        default=0,
        description=(
            "The tolerance (in number of empty rows/columns) for merging nearby "
            "data clusters into a single table. Default is 0 (strict)."
        ),
    )
class LatexBackendOptions(BaseBackendOptions):
    """Options specific to the LaTeX backend."""

    # Discriminator tag for the BackendOptions union; excluded from
    # serialization and repr.
    kind: Literal["latex"] = Field(default="latex", exclude=True, repr=False)
class XBRLBackendOptions(BaseBackendOptions):
    """Options specific to the XBRL backend."""

    # Discriminator tag for the BackendOptions union; declared with a plain
    # Field default (not Annotated) and Optional[...] (not `| None`) for
    # consistency with every sibling option class in this module.
    kind: Literal["xbrl"] = Field("xbrl", exclude=True, repr=False)
    taxonomy: Optional[Path] = Field(
        None,
        description=(
            "Path to a folder with the taxonomy required by the XBRL instance"
            " reports. It should include schemas (`.xsd`) and linkbases (`.xml`)"
            " referenced by the XBRL reports in their relative locations."
            " Optionally, it can also include taxonomy packages (`.zip`)"
            " referenced by the reports with absolute URLs and mapped to files"
            " with a taxonomy catalog (`catalog.xml`) for offline parsing."
        ),
    )
# Discriminated union over every backend-option flavor; pydantic picks the
# concrete class by the (serialization-excluded) `kind` tag.
BackendOptions = Annotated[
    Union[
        DeclarativeBackendOptions,
        HTMLBackendOptions,
        MarkdownBackendOptions,
        PdfBackendOptions,
        MsExcelBackendOptions,
        LatexBackendOptions,
        XBRLBackendOptions,
    ],
    Field(discriminator="kind"),
]
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/backend_options.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docs/examples/asr_pipeline_performance_comparison.py | #!/usr/bin/env python3
"""
Performance comparison between CPU and MLX Whisper on Apple Silicon.
This script compares the performance of:
1. Native Whisper (forced to CPU)
2. MLX Whisper (Apple Silicon optimized)
Both use the same model size for fair comparison.
"""
import argparse
import sys
import time
from pathlib import Path
# Add the repository root to the path so we can import docling
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
)
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def create_cpu_whisper_options(model_size: str = "turbo"):
    """Build native-Whisper options for the CPU baseline run.

    For native whisper the repo_id is simply the model-size alias.
    """
    decoding_defaults = {
        "verbose": True,
        "timestamps": True,
        "word_timestamps": True,
        "temperature": 0.0,
        "max_new_tokens": 256,
        "max_time_chunk": 30.0,
    }
    return InlineAsrNativeWhisperOptions(
        repo_id=model_size,
        inference_framework=InferenceAsrFramework.WHISPER,
        **decoding_defaults,
    )
def create_mlx_whisper_options(model_size: str = "turbo"):
    """Create MLX Whisper options for Apple Silicon.

    Args:
        model_size: One of "tiny", "small", "base", "medium", "large", "turbo".

    Returns:
        InlineAsrMlxWhisperOptions pointing at the matching mlx-community repo.

    Raises:
        ValueError: If model_size is not a recognized size (instead of a bare
            KeyError, mirroring the validation in mlx_whisper_example.py).
    """
    model_map = {
        "tiny": "mlx-community/whisper-tiny-mlx",
        "small": "mlx-community/whisper-small-mlx",
        "base": "mlx-community/whisper-base-mlx",
        "medium": "mlx-community/whisper-medium-mlx-8bit",
        "large": "mlx-community/whisper-large-mlx-8bit",
        "turbo": "mlx-community/whisper-turbo",
    }
    if model_size not in model_map:
        raise ValueError(
            f"Invalid model size: {model_size}. Choose from: {list(model_map.keys())}"
        )
    return InlineAsrMlxWhisperOptions(
        repo_id=model_map[model_size],
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
def run_transcription_test(
    audio_file: Path, asr_options, device: AcceleratorDevice, test_name: str
):
    """Run one transcription and return (elapsed_seconds, success_flag)."""
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"Running {test_name}")
    print(f"Device: {device}")
    print(f"Model: {asr_options.repo_id}")
    print(f"Framework: {asr_options.inference_framework}")
    print(banner)

    # Wire the requested device and ASR options into an audio converter.
    pipeline_options = AsrPipelineOptions(
        accelerator_options=AcceleratorOptions(device=device),
        asr_options=asr_options,
    )
    converter = DocumentConverter(
        format_options={
            InputFormat.AUDIO: AudioFormatOption(
                pipeline_cls=AsrPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )

    started = time.time()
    try:
        result = converter.convert(audio_file)
        elapsed = time.time() - started
        if result.status.value == "success":
            transcript = [item.text for item in result.document.texts]
            print(f"✅ Success! Duration: {elapsed:.2f} seconds")
            print(f"Transcribed text: {''.join(transcript)[:100]}...")
            return elapsed, True
        print(f"❌ Failed! Status: {result.status}")
        return elapsed, False
    except Exception as e:
        # Report failures with the time spent so the summary stays complete.
        elapsed = time.time() - started
        print(f"❌ Error: {e}")
        return elapsed, False
def parse_args():
    """Parse command line arguments (only --audio is supported)."""
    parser = argparse.ArgumentParser(
        description="Performance comparison between CPU and MLX Whisper on Apple Silicon",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Use default test audio file
python asr_pipeline_performance_comparison.py
# Use your own audio file
python asr_pipeline_performance_comparison.py --audio /path/to/your/audio.mp3
# Use a different audio file from the tests directory
python asr_pipeline_performance_comparison.py --audio tests/data/audio/another_sample.wav
""",
    )
    parser.add_argument(
        "--audio",
        type=str,
        help="Path to audio file for testing (default: tests/data/audio/sample_10s.mp3)",
    )
    return parser.parse_args()
def main():
    """Run performance comparison between CPU and MLX Whisper."""
    args = parse_args()

    # Probe for Apple Silicon (MPS) without hard-depending on torch.
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False

    # Probe for the optional mlx-whisper package.
    try:
        import mlx_whisper  # noqa: F401

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False

    print("ASR Pipeline Performance Comparison")
    print("=" * 50)
    print(f"Apple Silicon (MPS) available: {has_mps}")
    print(f"MLX Whisper available: {has_mlx_whisper}")
    if not has_mps:
        print("⚠️ Apple Silicon (MPS) not available - running CPU-only comparison")
        print(" For MLX Whisper performance benefits, run on Apple Silicon devices")
        print(" MLX Whisper is optimized for Apple Silicon devices.")
    if not has_mlx_whisper:
        print("⚠️ MLX Whisper not installed - running CPU-only comparison")
        print(" Install with: pip install mlx-whisper")
        print(" Or: uv sync --extra asr")
        print(" For MLX Whisper performance benefits, install the dependency")

    # Resolve the audio file: CLI value (relative paths anchored at the repo
    # root) or the bundled 10-second sample.
    repo_root = Path(__file__).parent.parent.parent
    if args.audio:
        audio_file = Path(args.audio)
        if not audio_file.is_absolute():
            audio_file = repo_root / audio_file
    else:
        audio_file = repo_root / "tests" / "data" / "audio" / "sample_10s.mp3"
    if not audio_file.exists():
        print(f"❌ Audio file not found: {audio_file}")
        print(" Please check the path and try again.")
        sys.exit(1)
    print(f"Using test audio: {audio_file}")
    print(f"File size: {audio_file.stat().st_size / 1024:.1f} KB")

    hash_banner = "#" * 80
    eq_banner = "=" * 60
    results = {}
    for model_size in ["tiny", "base", "turbo"]:
        print(f"\n{hash_banner}")
        print(f"Testing model size: {model_size}")
        print(hash_banner)
        model_results = {}

        # Baseline: native Whisper forced onto the CPU.
        cpu_duration, cpu_success = run_transcription_test(
            audio_file,
            create_cpu_whisper_options(model_size),
            AcceleratorDevice.CPU,
            f"Native Whisper {model_size} (CPU)",
        )
        model_results["cpu"] = {"duration": cpu_duration, "success": cpu_success}

        # Contender: MLX Whisper on MPS, only when both probes succeeded.
        if has_mps and has_mlx_whisper:
            mlx_duration, mlx_success = run_transcription_test(
                audio_file,
                create_mlx_whisper_options(model_size),
                AcceleratorDevice.MPS,
                f"MLX Whisper {model_size} (MPS)",
            )
            model_results["mlx"] = {"duration": mlx_duration, "success": mlx_success}
        else:
            print(f"\n{eq_banner}")
            print(f"Skipping MLX Whisper {model_size} (MPS) - not available")
            print(eq_banner)
            model_results["mlx"] = {"duration": 0.0, "success": False}
        results[model_size] = model_results

    # Summary table.
    print(f"\n{hash_banner}")
    print("PERFORMANCE COMPARISON SUMMARY")
    print(hash_banner)
    print(
        f"{'Model':<10} {'CPU (sec)':<12} {'MLX (sec)':<12} {'Speedup':<12} {'Status':<10}"
    )
    print("-" * 80)
    for model_size, model_results in results.items():
        cpu_duration = model_results["cpu"]["duration"]
        mlx_duration = model_results["mlx"]["duration"]
        cpu_success = model_results["cpu"]["success"]
        mlx_success = model_results["mlx"]["success"]
        if cpu_success and mlx_success:
            speedup = cpu_duration / mlx_duration
            status = "✅ Both OK"
        elif cpu_success:
            speedup = float("inf")
            status = "❌ MLX Failed"
        elif mlx_success:
            speedup = 0
            status = "❌ CPU Failed"
        else:
            speedup = 0
            status = "❌ Both Failed"
        print(
            f"{model_size:<10} {cpu_duration:<12.2f} {mlx_duration:<12.2f} {speedup:<12.2f}x {status:<10}"
        )

    # Average speedup over the runs where both variants succeeded.
    successful_tests = [
        (r["cpu"]["duration"], r["mlx"]["duration"])
        for r in results.values()
        if r["cpu"]["success"] and r["mlx"]["success"]
    ]
    if successful_tests:
        avg_cpu = sum(cpu for cpu, mlx in successful_tests) / len(successful_tests)
        avg_mlx = sum(mlx for cpu, mlx in successful_tests) / len(successful_tests)
        avg_speedup = avg_cpu / avg_mlx
        print("-" * 80)
        print(
            f"{'AVERAGE':<10} {avg_cpu:<12.2f} {avg_mlx:<12.2f} {avg_speedup:<12.2f}x {'Overall':<10}"
        )
        print(f"\n🎯 MLX Whisper provides {avg_speedup:.1f}x average speedup over CPU!")
    else:
        if has_mps and has_mlx_whisper:
            print("\n❌ No successful comparisons available.")
        else:
            print("\n⚠️ MLX Whisper not available - only CPU results shown.")
            print(
                " Install MLX Whisper and run on Apple Silicon for performance comparison."
            )


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/asr_pipeline_performance_comparison.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docs/examples/mlx_whisper_example.py | #!/usr/bin/env python3
"""
Example script demonstrating MLX Whisper integration for Apple Silicon.
This script shows how to use the MLX Whisper models for speech recognition
on Apple Silicon devices with optimized performance.
"""
import argparse
import sys
from pathlib import Path
# Add the repository root to the path so we can import docling
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.asr_model_specs import (
WHISPER_BASE,
WHISPER_LARGE,
WHISPER_MEDIUM,
WHISPER_SMALL,
WHISPER_TINY,
WHISPER_TURBO,
)
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def transcribe_audio_with_mlx_whisper(audio_file_path: str, model_size: str = "base"):
    """Transcribe an audio file with Whisper (MLX-accelerated when available).

    Args:
        audio_file_path: Path to the audio file to transcribe.
        model_size: One of "tiny", "base", "small", "medium", "large", "turbo".
            MLX optimization is picked automatically on Apple Silicon.

    Returns:
        The transcribed text, one text item per line.

    Raises:
        ValueError: If model_size is not a recognized size.
        RuntimeError: If the conversion does not finish successfully.
    """
    model_map = {
        "tiny": WHISPER_TINY,
        "base": WHISPER_BASE,
        "small": WHISPER_SMALL,
        "medium": WHISPER_MEDIUM,
        "large": WHISPER_LARGE,
        "turbo": WHISPER_TURBO,
    }
    try:
        asr_options = model_map[model_size]
    except KeyError:
        raise ValueError(
            f"Invalid model size: {model_size}. Choose from: {list(model_map.keys())}"
        ) from None

    # Target the MPS accelerator and build the audio conversion pipeline.
    pipeline_options = AsrPipelineOptions(
        asr_options=asr_options,
        accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
    )
    converter = DocumentConverter(
        format_options={
            InputFormat.AUDIO: AudioFormatOption(
                pipeline_cls=AsrPipeline,
                pipeline_options=pipeline_options,
            )
        }
    )

    result = converter.convert(Path(audio_file_path))
    if result.status.value != "success":
        raise RuntimeError(f"Transcription failed: {result.status}")
    return "\n".join(item.text for item in result.document.texts)
def parse_args():
    """Parse command line arguments (--audio path and --model size)."""
    parser = argparse.ArgumentParser(
        description="MLX Whisper example for Apple Silicon speech recognition",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Use default test audio file
python mlx_whisper_example.py
# Use your own audio file
python mlx_whisper_example.py --audio /path/to/your/audio.mp3
# Use specific model size
python mlx_whisper_example.py --audio audio.wav --model tiny
# Use default test file with specific model
python mlx_whisper_example.py --model turbo
""",
    )
    parser.add_argument(
        "--audio",
        type=str,
        help="Path to audio file for transcription (default: tests/data/audio/sample_10s.mp3)",
    )
    parser.add_argument(
        "--model",
        type=str,
        choices=["tiny", "base", "small", "medium", "large", "turbo"],
        default="base",
        help="Whisper model size to use (default: base)",
    )
    return parser.parse_args()
def main():
    """Demonstrate MLX Whisper transcription from the command line."""
    args = parse_args()

    # Resolve the audio file: CLI value or the bundled 10-second sample.
    if args.audio:
        audio_file_path = args.audio
    else:
        default_audio = (
            Path(__file__).parent.parent.parent
            / "tests"
            / "data"
            / "audio"
            / "sample_10s.mp3"
        )
        if not default_audio.exists():
            print("Error: No audio file specified and default test file not found.")
            print(
                "Please specify an audio file with --audio or ensure tests/data/audio/sample_10s.mp3 exists."
            )
            sys.exit(1)
        audio_file_path = str(default_audio)
        print("No audio file specified, using default test file:")
        print(f" Audio file: {audio_file_path}")
        print(f" Model size: {args.model}")
        print()

    if not Path(audio_file_path).exists():
        print(f"Error: Audio file '{audio_file_path}' not found.")
        sys.exit(1)

    try:
        print(f"Transcribing '{audio_file_path}' using Whisper {args.model} model...")
        print(
            "Note: MLX optimization is automatically used on Apple Silicon when available."
        )
        print()
        transcribed_text = transcribe_audio_with_mlx_whisper(
            audio_file_path, args.model
        )
        print("Transcription Result:")
        print("=" * 50)
        print(transcribed_text)
        print("=" * 50)
    except ImportError as e:
        # The ASR pipeline raises ImportError when mlx-whisper is missing.
        print(f"Error: {e}")
        print("Please install mlx-whisper: pip install mlx-whisper")
        print("Or install with uv: uv sync --extra asr")
        sys.exit(1)
    except Exception as e:
        print(f"Error during transcription: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/mlx_whisper_example.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:tests/test_asr_mlx_whisper.py | """
Test MLX Whisper integration for Apple Silicon ASR pipeline.
"""
import sys
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.asr_model_specs import (
WHISPER_BASE,
WHISPER_BASE_MLX,
WHISPER_LARGE,
WHISPER_LARGE_MLX,
WHISPER_MEDIUM,
WHISPER_SMALL,
WHISPER_TINY,
WHISPER_TURBO,
)
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
)
from docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel
class TestMlxWhisperIntegration:
"""Test MLX Whisper model integration."""
def test_mlx_whisper_options_creation(self):
"""Test that MLX Whisper options are created correctly."""
options = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
language="en",
task="transcribe",
)
assert options.inference_framework == InferenceAsrFramework.MLX
assert options.repo_id == "mlx-community/whisper-tiny-mlx"
assert options.language == "en"
assert options.task == "transcribe"
assert options.word_timestamps is True
assert AcceleratorDevice.MPS in options.supported_devices
def test_whisper_models_auto_select_mlx(self):
"""Test that Whisper models automatically select MLX when MPS and mlx-whisper are available."""
# This test verifies that the models are correctly configured
# In a real Apple Silicon environment with mlx-whisper installed,
# these models would automatically use MLX
# Check that the models exist and have the correct structure
assert hasattr(WHISPER_TURBO, "inference_framework")
assert hasattr(WHISPER_TURBO, "repo_id")
assert hasattr(WHISPER_BASE, "inference_framework")
assert hasattr(WHISPER_BASE, "repo_id")
assert hasattr(WHISPER_SMALL, "inference_framework")
assert hasattr(WHISPER_SMALL, "repo_id")
def test_explicit_mlx_models_shape(self):
"""Explicit MLX options should have MLX framework and valid repos."""
assert WHISPER_BASE_MLX.inference_framework.name == "MLX"
assert WHISPER_LARGE_MLX.inference_framework.name == "MLX"
assert WHISPER_BASE_MLX.repo_id.startswith("mlx-community/")
def test_model_selectors_mlx_and_native_paths(self, monkeypatch):
"""Cover MLX/native selection branches in asr_model_specs getters."""
from docling.datamodel import asr_model_specs as specs
# Force MLX path
class _Mps:
def is_built(self):
return True
def is_available(self):
return True
class _Torch:
class backends:
mps = _Mps()
monkeypatch.setitem(sys.modules, "torch", _Torch())
monkeypatch.setitem(sys.modules, "mlx_whisper", object())
m_tiny = specs._get_whisper_tiny_model()
m_small = specs._get_whisper_small_model()
m_base = specs._get_whisper_base_model()
m_medium = specs._get_whisper_medium_model()
m_large = specs._get_whisper_large_model()
m_turbo = specs._get_whisper_turbo_model()
assert (
m_tiny.inference_framework == InferenceAsrFramework.MLX
and m_tiny.repo_id.startswith("mlx-community/whisper-tiny")
)
assert (
m_small.inference_framework == InferenceAsrFramework.MLX
and m_small.repo_id.startswith("mlx-community/whisper-small")
)
assert (
m_base.inference_framework == InferenceAsrFramework.MLX
and m_base.repo_id.startswith("mlx-community/whisper-base")
)
assert (
m_medium.inference_framework == InferenceAsrFramework.MLX
and "medium" in m_medium.repo_id
)
assert (
m_large.inference_framework == InferenceAsrFramework.MLX
and "large" in m_large.repo_id
)
assert (
m_turbo.inference_framework == InferenceAsrFramework.MLX
and m_turbo.repo_id.endswith("whisper-turbo")
)
# Force native path (no mlx or no mps)
if "mlx_whisper" in sys.modules:
del sys.modules["mlx_whisper"]
class _MpsOff:
def is_built(self):
return False
def is_available(self):
return False
class _TorchOff:
class backends:
mps = _MpsOff()
monkeypatch.setitem(sys.modules, "torch", _TorchOff())
n_tiny = specs._get_whisper_tiny_model()
n_small = specs._get_whisper_small_model()
n_base = specs._get_whisper_base_model()
n_medium = specs._get_whisper_medium_model()
n_large = specs._get_whisper_large_model()
n_turbo = specs._get_whisper_turbo_model()
assert (
n_tiny.inference_framework == InferenceAsrFramework.WHISPER
and n_tiny.repo_id == "tiny"
)
assert (
n_small.inference_framework == InferenceAsrFramework.WHISPER
and n_small.repo_id == "small"
)
assert (
n_base.inference_framework == InferenceAsrFramework.WHISPER
and n_base.repo_id == "base"
)
assert (
n_medium.inference_framework == InferenceAsrFramework.WHISPER
and n_medium.repo_id == "medium"
)
assert (
n_large.inference_framework == InferenceAsrFramework.WHISPER
and n_large.repo_id == "large"
)
assert (
n_turbo.inference_framework == InferenceAsrFramework.WHISPER
and n_turbo.repo_id == "turbo"
)
def test_selector_import_errors_force_native(self, monkeypatch):
"""If torch import fails, selector must return native."""
from docling.datamodel import asr_model_specs as specs
# Simulate environment where MPS is unavailable and mlx_whisper missing
class _MpsOff:
def is_built(self):
return False
def is_available(self):
return False
class _TorchOff:
class backends:
mps = _MpsOff()
monkeypatch.setitem(sys.modules, "torch", _TorchOff())
if "mlx_whisper" in sys.modules:
del sys.modules["mlx_whisper"]
model = specs._get_whisper_base_model()
assert model.inference_framework == InferenceAsrFramework.WHISPER
@patch("builtins.__import__")
def test_mlx_whisper_model_initialization(self, mock_import):
"""Test MLX Whisper model initialization."""
# Mock the mlx_whisper import
mock_mlx_whisper = Mock()
mock_import.return_value = mock_mlx_whisper
accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)
asr_options = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
model = _MlxWhisperModel(
enabled=True,
artifacts_path=None,
accelerator_options=accelerator_options,
asr_options=asr_options,
)
assert model.enabled is True
assert model.model_path == "mlx-community/whisper-tiny-mlx"
assert model.language == "en"
assert model.task == "transcribe"
assert model.word_timestamps is True
def test_mlx_whisper_model_import_error(self):
"""Test that ImportError is raised when mlx-whisper is not available."""
accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)
asr_options = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
with patch(
"builtins.__import__",
side_effect=ImportError("No module named 'mlx_whisper'"),
):
with pytest.raises(ImportError, match="mlx-whisper is not installed"):
_MlxWhisperModel(
enabled=True,
artifacts_path=None,
accelerator_options=accelerator_options,
asr_options=asr_options,
)
@patch("builtins.__import__")
def test_mlx_whisper_transcribe(self, mock_import):
"""Test MLX Whisper transcription method."""
# Mock the mlx_whisper module and its transcribe function
mock_mlx_whisper = Mock()
mock_import.return_value = mock_mlx_whisper
# Mock the transcribe result
mock_result = {
"segments": [
{
"start": 0.0,
"end": 2.5,
"text": "Hello world",
"words": [
{"start": 0.0, "end": 0.5, "word": "Hello"},
{"start": 0.5, "end": 1.0, "word": "world"},
],
}
]
}
mock_mlx_whisper.transcribe.return_value = mock_result
accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)
asr_options = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
model = _MlxWhisperModel(
enabled=True,
artifacts_path=None,
accelerator_options=accelerator_options,
asr_options=asr_options,
)
# Test transcription
audio_path = Path("test_audio.wav")
result = model.transcribe(audio_path)
# Verify the result
assert len(result) == 1
assert result[0].start_time == 0.0
assert result[0].end_time == 2.5
assert result[0].text == "Hello world"
assert len(result[0].words) == 2
assert result[0].words[0].text == "Hello"
assert result[0].words[1].text == "world"
# Verify mlx_whisper.transcribe was called with correct parameters
mock_mlx_whisper.transcribe.assert_called_once_with(
str(audio_path),
path_or_hf_repo="mlx-community/whisper-tiny-mlx",
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
@patch("builtins.__import__")
def test_asr_pipeline_with_mlx_whisper(self, mock_import):
    """AsrPipeline wires MLX Whisper options through to the MLX model."""
    # Pretend mlx_whisper is importable; the pipeline only needs the module object.
    mock_import.return_value = Mock()

    options = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )
    pipeline = AsrPipeline(
        AsrPipelineOptions(
            asr_options=options,
            accelerator_options=AcceleratorOptions(device=AcceleratorDevice.MPS),
        )
    )

    # The pipeline must select the MLX model and point it at the repo.
    assert isinstance(pipeline._model, _MlxWhisperModel)
    assert pipeline._model.model_path == "mlx-community/whisper-tiny-mlx"
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_asr_mlx_whisper.py",
"license": "MIT License",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docling/backend/docx/drawingml/utils.py | import os
import shutil
import subprocess
from pathlib import Path
from tempfile import mkdtemp
from typing import Callable, Optional
import pypdfium2
from docx.document import Document
from PIL import Image, ImageChops
def get_libreoffice_cmd(raise_if_unavailable: bool = False) -> Optional[str]:
    """Locate the LibreOffice executable, optionally verifying it runs.

    Looks up ``libreoffice`` and ``soffice`` on PATH, then falls back to the
    standard macOS bundle location. When *raise_if_unavailable* is True, a
    missing binary raises RuntimeError and a found binary is smoke-tested by
    invoking it with ``-h`` (raises CalledProcessError on failure).
    """
    candidates = [shutil.which("libreoffice"), shutil.which("soffice")]
    mac_binary = "/Applications/LibreOffice.app/Contents/MacOS/soffice"
    if os.path.isfile(mac_binary):
        candidates.append(mac_binary)
    cmd = next((c for c in candidates if c), None)

    if raise_if_unavailable:
        if cmd is None:
            raise RuntimeError("Libreoffice not found")
        # The following test will raise if the binary cannot actually be used.
        subprocess.run(
            [cmd, "-h"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=True,
        )
    return cmd
def get_docx_to_pdf_converter() -> Optional[Callable]:
    """
    Detects the best available DOCX to PDF tool and returns a conversion function.
    The returned function accepts (input_path, output_path).
    Returns None if no tool is available.
    """
    # Try LibreOffice first.
    soffice = get_libreoffice_cmd()
    if not soffice:
        ## Space for other DOCX to PDF converters if available
        # No tools found
        return None

    def convert_with_libreoffice(input_path, output_path):
        # LibreOffice always names the output after the input file, inside
        # --outdir, so we may need to rename it afterwards.
        out_dir = os.path.dirname(output_path)
        subprocess.run(
            [
                soffice,
                "--headless",
                "--convert-to",
                "pdf",
                "--outdir",
                out_dir,
                input_path,
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=True,
        )
        produced = os.path.join(
            out_dir,
            os.path.splitext(os.path.basename(input_path))[0] + ".pdf",
        )
        if produced != output_path:
            os.rename(produced, output_path)

    return convert_with_libreoffice
def crop_whitespace(image: Image.Image, bg_color=None, padding=0) -> Image.Image:
    """Crop uniform background borders from *image*, keeping *padding* pixels.

    When *bg_color* is None, the top-left pixel is taken as the background
    color. If the whole image matches the background, it is returned as-is.
    """
    reference = image.getpixel((0, 0)) if bg_color is None else bg_color
    backdrop = Image.new(image.mode, image.size, reference)
    bbox = ImageChops.difference(image, backdrop).getbbox()
    if not bbox:
        return image
    left, upper, right, lower = bbox
    return image.crop(
        (
            max(0, left - padding),
            max(0, upper - padding),
            min(image.width, right + padding),
            min(image.height, lower + padding),
        )
    )
def get_pil_from_dml_docx(
    docx: Document, converter: Optional[Callable]
) -> Optional[Image.Image]:
    """Render a DOCX containing a drawing to a cropped PIL image.

    Saves *docx* to a temp file, converts it to PDF with *converter*, renders
    the first page via pdfium at 2x scale, and crops surrounding whitespace.
    Returns None when no converter is available.

    Fix: the temporary directory (and open pdfium handles) were previously
    leaked when conversion or rendering raised; cleanup now runs in finally.
    """
    if converter is None:
        return None
    temp_dir = Path(mkdtemp())
    try:
        temp_docx = temp_dir / "drawing_only.docx"
        temp_pdf = temp_dir / "drawing_only.pdf"
        # 1) Save docx temporarily
        docx.save(str(temp_docx))
        # 2) Export to PDF
        converter(temp_docx, temp_pdf)
        # 3) Load PDF as PNG
        pdf = pypdfium2.PdfDocument(temp_pdf)
        try:
            page = pdf[0]
            image = crop_whitespace(page.render(scale=2).to_pil())
            page.close()
        finally:
            pdf.close()
        return image
    finally:
        # Temp files must be removed even when conversion/rendering fails.
        shutil.rmtree(temp_dir, ignore_errors=True)
| {
"repo_id": "docling-project/docling",
"file_path": "docling/backend/docx/drawingml/utils.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docs/examples/pii_obfuscate.py | # %% [markdown]
# Detect and obfuscate PII using a Hugging Face NER model.
#
# What this example does
# - Converts a PDF and saves original Markdown with embedded images.
# - Runs a HF token-classification pipeline (NER) to detect PII-like entities.
# - Obfuscates occurrences in TextItem and TableItem by stable, type-based IDs.
#
# Prerequisites
# - Install Docling. Install Transformers: `pip install transformers`.
# - Optional (advanced): Install GLiNER for richer PII labels:
# `pip install gliner`
# If needed for CPU-only envs:
# `pip install torch --extra-index-url https://download.pytorch.org/whl/cpu`
# - Optionally, set `HF_MODEL` to a different NER/PII model.
#
# How to run
# - From the repo root: `python docs/examples/pii_obfuscate.py`.
# - To use GLiNER instead of HF pipeline:
# python docs/examples/pii_obfuscate.py --engine gliner
# or set env var `PII_ENGINE=gliner`.
# - The script writes original and obfuscated Markdown to `scratch/`.
#
# Notes
# - This is a simple demonstration. For production PII detection, consider
# specialized models/pipelines and thorough evaluation.
# %%
import argparse
import logging
import os
import re
from pathlib import Path
from typing import Dict, List, Tuple
from docling_core.types.doc import ImageRefMode, TableItem, TextItem
from tabulate import tabulate
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
_log = logging.getLogger(__name__)
# Rendering scale applied when exporting page/picture images (2.0 ~ 144 DPI).
IMAGE_RESOLUTION_SCALE = 2.0
HF_MODEL = "dslim/bert-base-NER"  # Swap with another HF NER/PII model if desired, eg https://huggingface.co/urchade/gliner_multi_pii-v1 looks very promising too!
# GLiNER checkpoint used by the --engine gliner path.
GLINER_MODEL = "urchade/gliner_multi_pii-v1"
def _build_simple_ner_pipeline():
    """Create a Hugging Face token-classification pipeline for NER.

    Returns a callable like: ner(text) -> List[dict]

    Raises:
        ImportError: if the `transformers` package is not installed.
    """
    try:
        from transformers import (
            AutoModelForTokenClassification,
            AutoTokenizer,
            pipeline,
        )
    except ImportError:
        # Narrowed from `except Exception`: only a missing package warrants
        # the install hint; other failures should surface with their own
        # traceback instead of a misleading "not installed" message.
        _log.error("Transformers not installed. Please run: pip install transformers")
        raise
    tokenizer = AutoTokenizer.from_pretrained(HF_MODEL)
    model = AutoModelForTokenClassification.from_pretrained(HF_MODEL)
    ner = pipeline(
        "token-classification",
        model=model,
        tokenizer=tokenizer,
        aggregation_strategy="simple",  # groups subwords into complete entities
        # Note: modern Transformers returns `start`/`end` when possible with aggregation
    )
    return ner
class SimplePiiObfuscator:
    """Tracks PII strings and replaces them with stable IDs per entity type."""

    def __init__(self, ner_callable):
        self.ner = ner_callable
        # Global surface-form -> replacement-ID mapping, stable across calls.
        self.entity_map: Dict[str, str] = {}
        self.counters: Dict[str, int] = {
            "person": 0,
            "org": 0,
            "location": 0,
            "misc": 0,
        }
        # Map model labels to our coarse types
        self.label_map = {
            "PER": "person",
            "PERSON": "person",
            "ORG": "org",
            "ORGANIZATION": "org",
            "LOC": "location",
            "LOCATION": "location",
            "GPE": "location",
            # Fallbacks
            "MISC": "misc",
            "O": "misc",
        }
        # Only obfuscate these by default. Adjust as needed.
        self.allowed_types = {"person", "org", "location"}

    def _next_id(self, typ: str) -> str:
        """Mint the next sequential ID for *typ*, e.g. 'person-3'."""
        self.counters[typ] += 1
        return f"{typ}-{self.counters[typ]}"

    def _normalize(self, s: str) -> str:
        """Collapse whitespace runs to single spaces and trim the ends."""
        return re.sub(r"\s+", " ", s).strip()

    def _extract_entities(self, text: str) -> List[Tuple[str, str]]:
        """Run NER and return a list of (surface_text, type) to obfuscate."""
        if not text:
            return []

        # Normalize every raw hit into {label, start, end, word}.
        candidates = []
        for hit in self.ner(text):
            raw = hit.get("entity_group") or hit.get("entity") or "MISC"
            label = self.label_map.get(raw, "misc")
            if label not in self.allowed_types:
                continue
            candidates.append(
                {
                    "label": label,
                    "start": hit.get("start"),
                    "end": hit.get("end"),
                    "word": self._normalize(hit.get("word") or hit.get("text") or ""),
                }
            )

        def _spanned(entry) -> bool:
            return entry["start"] is not None and entry["end"] is not None

        found: List[Tuple[str, str]] = []
        if any(_spanned(c) for c in candidates):
            # Merge touching/overlapping spans of the same type so subword
            # fragments of one named entity collapse into a single surface
            # form taken from the original text.
            ordered = sorted(
                (dict(c) for c in candidates if _spanned(c)),
                key=lambda c: (c["start"], c["end"]),
            )
            merged: List[dict] = []
            for span in ordered:
                if (
                    merged
                    and span["label"] == merged[-1]["label"]
                    and span["start"] <= merged[-1]["end"]
                ):
                    merged[-1]["start"] = min(merged[-1]["start"], span["start"])
                    merged[-1]["end"] = max(merged[-1]["end"], span["end"])
                else:
                    merged.append(span)
            for span in merged:
                surface = self._normalize(text[span["start"] : span["end"]])
                if surface:
                    found.append((surface, span["label"]))
            # Hits lacking offsets fall back to their (normalized) word form.
            for c in candidates:
                if not _spanned(c) and c["word"]:
                    found.append((c["word"], c["label"]))
        else:
            # No character spans at all: use the word forms directly.
            found.extend((c["word"], c["label"]) for c in candidates if c["word"])
        return found

    def obfuscate_text(self, text: str) -> str:
        """Replace every detected entity in *text* with its stable ID."""
        if not text:
            return text
        detected = self._extract_entities(text)
        if not detected:
            return text

        # Deduplicate per text while keeping the global mapping stable.
        local_map: Dict[str, str] = {}
        for surface, label in detected:
            if surface not in self.entity_map:
                self.entity_map[surface] = self._next_id(label)
            local_map[surface] = self.entity_map[surface]

        # Longest surfaces first so shorter substrings cannot clobber them.
        result = text
        for surface in sorted(local_map, key=len, reverse=True):
            result = re.sub(re.escape(surface), local_map[surface], result)
        return result
def _build_gliner_model():
    """Create a GLiNER model for PII-like entity extraction.

    Returns a tuple (model, labels) where model.predict_entities(text, labels)
    yields entities with "text" and "label" fields.

    Raises:
        ImportError: if the `gliner` package is not installed.
    """
    try:
        from gliner import GLiNER  # type: ignore
    except ImportError:
        # Narrowed from `except Exception`: only a missing package warrants
        # the install hint; other failures should keep their own traceback.
        _log.error(
            "GLiNER not installed. Please run: pip install gliner torch --extra-index-url https://download.pytorch.org/whl/cpu"
        )
        raise
    model = GLiNER.from_pretrained(GLINER_MODEL)
    # Curated set of labels for PII detection. Adjust as needed.
    labels = [
        # "work",
        "booking number",
        "personally identifiable information",
        "driver licence",
        "person",
        "full address",
        "company",
        # "actor",
        # "character",
        "email",
        "passport number",
        "Social Security Number",
        "phone number",
    ]
    return model, labels
class AdvancedPIIObfuscator:
    """PII obfuscator powered by GLiNER with fine-grained labels.

    - Uses GLiNER's `predict_entities(text, labels)` to detect entities.
    - Obfuscates with stable IDs per fine-grained label, e.g. `email-1`.
    """

    def __init__(self, gliner_model, labels: List[str]):
        self.model = gliner_model
        self.labels = labels
        # Global surface-form -> replacement-ID mapping, stable across calls.
        self.entity_map: Dict[str, str] = {}
        # Counters are created lazily, one per normalized label.
        self.counters: Dict[str, int] = {}

    def _normalize(self, s: str) -> str:
        """Collapse internal whitespace runs and trim the ends."""
        return re.sub(r"\s+", " ", s).strip()

    def _norm_label(self, label: str) -> str:
        """Slug-ify a free-form label; fall back to 'pii' if nothing remains."""
        slug = label.lower().replace(" ", "_").replace("-", "_")
        slug = re.sub(r"[^a-z0-9_]+", "_", slug).strip("_")
        return slug or "pii"

    def _next_id(self, typ: str) -> str:
        """Mint the next sequential replacement ID for *typ*."""
        self.cc(typ)
        self.counters[typ] += 1
        return f"{typ}-{self.counters[typ]}"

    def cc(self, typ: str) -> None:
        """Lazily create the counter for *typ*."""
        self.counters.setdefault(typ, 0)

    def _extract_entities(self, text: str) -> List[Tuple[str, str]]:
        """Return (surface, label) pairs for every entity GLiNER reports."""
        if not text:
            return []
        predictions = self.model.predict_entities(
            text, self.labels
        )  # expects dicts with text/label
        pairs: List[Tuple[str, str]] = []
        for pred in predictions:
            surface = self._normalize(str(pred.get("text", "")))
            if surface:
                pairs.append(
                    (surface, self._norm_label(str(pred.get("label", "pii"))))
                )
        return pairs

    def obfuscate_text(self, text: str) -> str:
        """Replace each detected entity with its stable per-label ID."""
        if not text:
            return text
        detected = self._extract_entities(text)
        if not detected:
            return text

        local_map: Dict[str, str] = {}
        for surface, label in detected:
            if surface not in self.entity_map:
                self.entity_map[surface] = self._next_id(label)
            local_map[surface] = self.entity_map[surface]

        # Longest surfaces first so shorter substrings cannot clobber them.
        result = text
        for surface in sorted(local_map, key=len, reverse=True):
            result = re.sub(re.escape(surface), local_map[surface], result)
        return result
def main():
    """Convert a sample PDF, obfuscate detected PII, and save both versions."""
    logging.basicConfig(level=logging.INFO)

    data_folder = Path(__file__).parent / "../../tests/data"
    input_doc_path = data_folder / "pdf/2206.01062.pdf"
    output_dir = Path("scratch")

    # Engine selection: CLI flag wins, then the PII_ENGINE env var, else 'hf'.
    parser = argparse.ArgumentParser(description="PII obfuscation example")
    parser.add_argument(
        "--engine",
        choices=["hf", "gliner"],
        default=os.getenv("PII_ENGINE", "hf"),
        help="NER engine: 'hf' (Transformers) or 'gliner' (GLiNER)",
    )
    args = parser.parse_args()

    output_dir.mkdir(parents=True, exist_ok=True)

    # Keep and generate images so the Markdown export can embed them.
    pipeline_options = PdfPipelineOptions()
    pipeline_options.images_scale = IMAGE_RESOLUTION_SCALE
    pipeline_options.generate_page_images = True
    pipeline_options.generate_picture_images = True

    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )
    conversion = converter.convert(input_doc_path)
    document = conversion.document
    doc_filename = conversion.input.file.name

    # Save the untouched conversion first, pictures embedded.
    document.save_as_markdown(
        output_dir / f"{doc_filename}-with-images-orig.md",
        image_mode=ImageRefMode.EMBEDDED,
    )

    # Build the requested obfuscator.
    if args.engine == "gliner":
        _log.info("Using GLiNER-based AdvancedPIIObfuscator")
        gliner_model, gliner_labels = _build_gliner_model()
        obfuscator = AdvancedPIIObfuscator(gliner_model, gliner_labels)
    else:
        _log.info("Using HF Transformers-based SimplePiiObfuscator")
        obfuscator = SimplePiiObfuscator(_build_simple_ner_pipeline())

    # Rewrite text in place; tables are handled cell by cell.
    for element, _level in conversion.document.iterate_items():
        if isinstance(element, TextItem):
            element.orig = element.text
            element.text = obfuscator.obfuscate_text(element.text)
        elif isinstance(element, TableItem):
            for cell in element.data.table_cells:
                cell.text = obfuscator.obfuscate_text(cell.text)

    # Save the obfuscated variant, pictures embedded.
    document.save_as_markdown(
        output_dir / f"{doc_filename}-with-images-pii-obfuscated.md",
        image_mode=ImageRefMode.EMBEDDED,
    )

    # Optional: log which surface forms were replaced by which IDs.
    if obfuscator.entity_map:
        rows = [[surface, replacement] for surface, replacement in obfuscator.entity_map.items()]
        _log.info(
            f"Obfuscated entities:\n\n{tabulate(rows)}",
        )


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/pii_obfuscate.py",
"license": "MIT License",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/models/utils/generation_utils.py | import itertools
import logging
import re
import sys
from abc import abstractmethod
from typing import List
from transformers import StoppingCriteria
_log = logging.getLogger(__name__)
class GenerationStopper:
"""
Base interface for stopping logic.
- should_stop(s): True to stop given the current decoded text window.
- lookback_tokens(): how many tokens should be considered (default: sys.maxsize).
"""
@abstractmethod
def should_stop(self, s: str) -> bool:
pass
def lookback_tokens(self) -> int:
return sys.maxsize
class DocTagsRepetitionStopper(GenerationStopper):
"""
Detects repetitive <tag>...<loc_x><loc_y><loc_w><loc_h>text</tag> blocks,
but only when repeats are **consecutive** and both tag & inner text are identical.
Performance:
- Heavy check runs every N calls (default 32).
- Only decodes the last LOOKBACK_TOKENS tokens per sequence (default 200).
"""
def __init__(self, *, N: int = 32, lookback_tokens: int = 200):
self.N = max(1, int(N))
self._lookback_tokens = max(1, int(lookback_tokens))
self._call_count = 0
# <tag> ... <loc_x><loc_y><loc_w><loc_h> text ... </tag>
self._PATTERN = re.compile(
r"""
<(?P<tag>[a-zA-Z0-9_]+)>\s*
(?P<prefix>.*?)?
<loc_(?P<x>\d+)><loc_(?P<y>\d+)><loc_(?P<w>\d+)><loc_(?P<h>\d+)>
(?P<text>.*?)
</(?P=tag)>
""",
re.DOTALL | re.VERBOSE,
)
# --- small helper ---
def _regular(self, vals: List[int]) -> bool:
"""3+ strictly increasing values with ~regular spacing (±20%)."""
if len(vals) < 3:
return False
diffs = [b - a for a, b in itertools.pairwise(vals)]
if any(d <= 0 for d in diffs):
return False
mean = sum(diffs) / len(diffs)
tol = 0.2 * mean
return all(abs(d - mean) <= tol for d in diffs)
def should_stop(self, s: str) -> bool:
"""
Trip only on **consecutive** runs (no other matched blocks between) of ≥3 items
with the same <tag> and identical inner text, where within that run we see:
- any exact duplicate (x,y,w,h), or
- stable X/W with regular Y progression, or
- stable Y/H with regular X progression.
"""
# Stream matches and evaluate runs on-the-fly to stay compact and fast.
prev_tag = prev_text = None
run = [] # list of (x,y,w,h)
def run_repetitive(boxes: List[tuple]) -> bool:
if len(boxes) < 3:
return False
# duplicates?
if len(set(boxes)) < len(boxes):
return True
xs, ys, ws, hs = zip(*boxes)
x_stable = all(x == xs[0] for x in xs)
y_stable = all(y == ys[0] for y in ys)
w_stable = all(w == ws[0] for w in ws)
h_stable = all(h == hs[0] for h in hs)
# horizontal (down the page): X/W stable, Y regular
if (x_stable or w_stable) and self._regular(list(ys)):
return True
# vertical (across): Y/H stable, X regular
if (y_stable or h_stable) and self._regular(list(xs)):
return True
return False
for m in self._PATTERN.finditer(s):
tag, text = m.group("tag"), m.group("text")
box = (
int(m.group("x")),
int(m.group("y")),
int(m.group("w")),
int(m.group("h")),
)
if prev_tag == tag and prev_text == text:
run.append(box) # consecutive same-tag+text
else:
# evaluate previous run before starting a new one
if run_repetitive(run):
return True
prev_tag, prev_text = tag, text
run = [box]
# check the last run
return run_repetitive(run)
class HFStoppingCriteriaWrapper(StoppingCriteria):
    """
    Adapts any GenerationStopper to HuggingFace Transformers.
    Decodes exactly min(seq_len, stopper.lookback_tokens()) tokens from the end.
    """

    def __init__(
        self,
        tokenizer,
        stopper: GenerationStopper,
        *,
        skip_special_tokens: bool = False,
    ):
        self.tokenizer = tokenizer
        self.stopper = stopper
        self.skip_special_tokens = skip_special_tokens

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        window_size = max(1, int(self.stopper.lookback_tokens()))
        # input_ids is (batch, seq_len); any sequence may trigger the stop.
        for sequence in input_ids:
            tail = sequence[-window_size:]  # slicing handles window_size > len
            try:
                decoded = self.tokenizer.decode(
                    tail, skip_special_tokens=self.skip_special_tokens
                )
            except Exception as err:
                _log.info(f"Decoding failed for stopping check: {err}")
                continue
            try:
                if self.stopper.should_stop(decoded):
                    _log.info(
                        "HF wrapper: stopping due to TextStopper.should_stop==True"
                    )
                    return True
            except Exception as err:
                _log.info(f"Error in TextStopper.should_stop: {err}")
                continue
        return False
| {
"repo_id": "docling-project/docling",
"file_path": "docling/models/utils/generation_utils.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docs/examples/granitedocling_repetition_stopping.py | # %% [markdown]
# Experimental VLM pipeline with custom repetition stopping criteria (LEGACY).
#
# **NOTE:** This example uses the LEGACY vlm_model_specs approach because
# custom_stopping_criteria is a feature of the old InlineVlmOptions system.
# This feature is not yet migrated to the new preset/runtime system.
#
# This script demonstrates the use of custom stopping criteria that detect
# repetitive location coordinate patterns in generated text and stop generation
# when such patterns are found.
#
# What this example does
# - Uses the GraniteDocling model with custom repetition stopping criteria injected
# - Processes a PDF document or image and monitors for repetitive coordinate patterns
# - Stops generation early when repetitive patterns are detected
# %%
import logging
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.models.utils.generation_utils import (
DocTagsRepetitionStopper,
)
from docling.pipeline.vlm_pipeline import VlmPipeline
# Set up logging once so repetition-stopping events are visible. Fix: the
# script previously called logging.basicConfig twice; the second call was a
# silent no-op (basicConfig does nothing once handlers are configured).
logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s")

# Replace with a local path if preferred.
# source = "https://ibm.biz/docling-page-with-table" # Example that shows no repetitions.
source = "tests/data_scanned/old_newspaper.png"  # Example that creates repetitions.

print(f"Processing document: {source}")

###### USING GRANITEDOCLING WITH CUSTOM REPETITION STOPPING (LEGACY)

## Using standard Huggingface Transformers (most portable, slowest)
custom_vlm_options = vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.model_copy()
# Uncomment this to use MLX-accelerated version on Apple Silicon
# custom_vlm_options = vlm_model_specs.GRANITEDOCLING_MLX.model_copy() # use this for Apple Silicon

# Create custom VLM options with repetition stopping criteria
custom_vlm_options.custom_stopping_criteria = [
    DocTagsRepetitionStopper(N=32)
]  # check for repetitions for every 32 new tokens decoded.

pipeline_options = VlmPipelineOptions(
    vlm_options=custom_vlm_options,
)

converter = DocumentConverter(
    format_options={
        InputFormat.IMAGE: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        ),
    }
)

doc = converter.convert(source=source).document

print(doc.export_to_markdown())

###### ALTERNATIVE: USING A REMOTE VLM INFERENCE SERVICE (e.g., VLLM) - LEGACY

# from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
#
# custom_vlm_options = ApiVlmOptions(
#     url="http://localhost:8000/v1/chat/completions",  # LM studio defaults to port 1234, VLLM to 8000
#     params=dict(
#         model=vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.repo_id,
#         max_tokens=8192,
#         seed=42,
#     ),
#     response_format=ResponseFormat.DOCTAGS,
#     headers={
#         # "Authorization": "Bearer YOUR_API_KEY",  # if needed
#     },
#     prompt=vlm_model_specs.GRANITEDOCLING_TRANSFORMERS.prompt,
#     timeout=90,
#     # Note: Custom stopping criteria work differently with API runtimes
#     # They are applied client-side after receiving tokens from the API
#     custom_stopping_criteria=[DocTagsRepetitionStopper(N=32)],
# )
#
# pipeline_options = VlmPipelineOptions(
#     vlm_options=custom_vlm_options,
#     enable_remote_services=True,  # required when using a remote inference service.
# )
#
# converter = DocumentConverter(
#     format_options={
#         InputFormat.IMAGE: PdfFormatOption(
#             pipeline_cls=VlmPipeline,
#             pipeline_options=pipeline_options,
#         ),
#     }
# )
#
# doc = converter.convert(source=source).document
# print(doc.export_to_markdown())
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/granitedocling_repetition_stopping.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/backend/webvtt_backend.py | import logging
from dataclasses import dataclass, field
from io import BytesIO
from pathlib import Path
from docling_core.types.doc import (
ContentLayer,
DocItemLabel,
DoclingDocument,
DocumentOrigin,
Formatting,
TrackSource,
)
from docling_core.types.doc.webvtt import (
WebVTTCueBoldSpan,
WebVTTCueComponent,
WebVTTCueComponentWithTerminator,
WebVTTCueItalicSpan,
WebVTTCueTextSpan,
WebVTTCueUnderlineSpan,
WebVTTCueVoiceSpan,
WebVTTFile,
)
from typing_extensions import override
from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
@dataclass
class AnnotatedText:
    """A text fragment plus the cue metadata (voice, formatting) it carries."""

    text: str
    voice: str | None = None
    formatting: Formatting | None = None

    def copy_meta(self, text):
        """Return a new fragment with *text* but this fragment's metadata."""
        cloned_formatting = None
        if self.formatting:
            cloned_formatting = self.formatting.model_copy()
        return AnnotatedText(
            text=text,
            voice=self.voice,
            formatting=cloned_formatting,
        )
@dataclass
class AnnotatedPar:
    """One paragraph of cue text, as an ordered list of annotated fragments."""

    # Each fragment carries its own voice/formatting metadata.
    items: list[AnnotatedText]
class WebVTTDocumentBackend(DeclarativeDocumentBackend):
    """Declarative backend for WebVTT (.vtt) files.

    This parser reads the content of a WebVTT file and converts
    it to a DoclingDocument, following the W3C specs on https://www.w3.org/TR/webvtt1

    Each cue becomes a TextItem and the items are appended to the
    document body by the cue's start time.
    """

    @override
    def __init__(self, in_doc: InputDocument, path_or_stream: BytesIO | Path):
        """Load the whole .vtt payload into ``self.content`` as UTF-8 text."""
        super().__init__(in_doc, path_or_stream)
        self.content: str = ""
        try:
            if isinstance(self.path_or_stream, BytesIO):
                self.content = self.path_or_stream.getvalue().decode("utf-8")
            if isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as f:
                    self.content = f.read()
        except Exception as e:
            # Any read/decode failure is surfaced as a backend init error.
            raise RuntimeError(
                "Could not initialize the WebVTT backend for file with hash "
                f"{self.document_hash}."
            ) from e

    @override
    def is_valid(self) -> bool:
        # Delegates the "WEBVTT" signature check to docling-core.
        return WebVTTFile.verify_signature(self.content)

    @classmethod
    @override
    def supports_pagination(cls) -> bool:
        # Subtitle files have no page structure.
        return False

    @override
    def unload(self):
        # Close the in-memory stream; Path-based inputs need no cleanup.
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()
        self.path_or_stream = None

    @classmethod
    @override
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.VTT}

    @override
    def convert(self) -> DoclingDocument:
        """Parse the WebVTT content and build a DoclingDocument.

        Each cue paragraph becomes a TextItem (or an inline group of
        TextItems when a cue mixes several voices/formattings), tagged with
        a TrackSource carrying the cue timings, identifier and voice.

        Raises:
            RuntimeError: if the content does not carry a WebVTT signature.
        """
        _log.debug("Starting WebVTT conversion...")
        if not self.is_valid():
            raise RuntimeError("Invalid WebVTT document.")

        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="text/vtt",
            binary_hash=self.document_hash,
        )
        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)
        vtt: WebVTTFile = WebVTTFile.parse(self.content)

        # Per-cue working state: paragraphs of annotated fragments, and the
        # stack of enclosing (voice/formatting) spans while recursing.
        cue_text: list[AnnotatedPar] = []
        parents: list[AnnotatedText] = []

        def _extract_components(
            payload: list[WebVTTCueComponentWithTerminator],
        ) -> None:
            # Recursively flatten the cue component tree into cue_text,
            # inheriting voice/formatting metadata from the span stack.
            nonlocal cue_text, parents
            if not cue_text:
                cue_text.append(AnnotatedPar(items=[]))
            par = cue_text[-1]
            for comp in payload:
                # Start from the nearest enclosing span's metadata (if any).
                item: AnnotatedText = (
                    parents[-1].copy_meta("") if parents else AnnotatedText(text="")
                )
                component: WebVTTCueComponent = comp.component
                if isinstance(component, WebVTTCueTextSpan):
                    # Leaf: plain text fragment joins the current paragraph.
                    item.text = component.text
                    par.items.append(item)
                else:
                    # configure metadata based on span type
                    if isinstance(component, WebVTTCueBoldSpan):
                        item.formatting = item.formatting or Formatting()
                        item.formatting.bold = True
                    elif isinstance(component, WebVTTCueItalicSpan):
                        item.formatting = item.formatting or Formatting()
                        item.formatting.italic = True
                    elif isinstance(component, WebVTTCueUnderlineSpan):
                        item.formatting = item.formatting or Formatting()
                        item.formatting.underline = True
                    elif isinstance(component, WebVTTCueVoiceSpan):
                        # voice spans cannot be embedded
                        item.voice = component.start_tag.annotation
                    # Recurse into the span's children with this span on the stack.
                    parents.append(item)
                    _extract_components(component.internal_text.components)
                    parents.pop()
                if comp.terminator is not None:
                    # A terminator (line break) ends the current paragraph.
                    cue_text.append(AnnotatedPar(items=[]))
                    par = cue_text[-1]

        def _add_text_item(
            text: str,
            formatting: Formatting | None,
            item: AnnotatedText,
            parent=None,
        ):
            # NOTE: reads `block` and `identifier` from the enclosing loop at
            # call time, so it must only be invoked inside the cue loop below.
            track = TrackSource(
                start_time=block.timings.start.seconds,
                end_time=block.timings.end.seconds,
                identifier=identifier,
                voice=item.voice or None,
            )
            doc.add_text(
                label=DocItemLabel.TEXT,
                text=text,
                content_layer=ContentLayer.BODY,
                formatting=formatting,
                parent=parent,
                source=track,
            )

        if vtt.title:
            doc.add_title(vtt.title, content_layer=ContentLayer.BODY)

        for block in vtt.cue_blocks:
            # Reset per-cue state before flattening this cue's payload.
            cue_text = []
            parents = []
            identifier = str(block.identifier) if block.identifier else None
            _extract_components(block.payload)
            for par in cue_text:
                if not par.items:
                    continue
                if len(par.items) == 1:
                    # Single fragment: emit it directly on the body.
                    item = par.items[0]
                    _add_text_item(
                        text=item.text,
                        formatting=item.formatting,
                        item=item,
                    )
                else:
                    # Mixed fragments: wrap them in an inline group.
                    group = doc.add_inline_group(
                        "WebVTT cue span", content_layer=ContentLayer.BODY
                    )
                    for item in par.items:
                        _add_text_item(
                            text=item.text,
                            formatting=item.formatting,
                            item=item,
                            parent=group,
                        )

        return doc
| {
"repo_id": "docling-project/docling",
"file_path": "docling/backend/webvtt_backend.py",
"license": "MIT License",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:tests/test_backend_vtt.py | import warnings
from io import BytesIO
from pathlib import Path
import pytest
from docling_core.types.doc import DoclingDocument, GroupItem, TextItem
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.datamodel.document import ConversionResult, _DocumentConversionInput
from docling.document_converter import DocumentConverter
from .test_data_gen_flag import GEN_TEST_DATA
from .verify_utils import verify_document, verify_export
GENERATE = GEN_TEST_DATA
def test_e2e_vtt_conversions():
    """Convert every sample .vtt file and compare against ground truth."""
    source_dir = Path("./tests/data/webvtt/")
    converter = DocumentConverter(allowed_formats=[InputFormat.VTT])

    for vtt_path in sorted(source_dir.rglob("*.vtt")):
        gt_path = vtt_path.parent.parent / "groundtruth" / "docling_v2" / vtt_path.name

        conv_result: ConversionResult = converter.convert(vtt_path)
        document: DoclingDocument = conv_result.document

        markdown = document.export_to_markdown(escape_html=False)
        assert verify_export(markdown, str(gt_path) + ".md", generate=GENERATE), (
            "export to md"
        )

        indented = document._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(indented, str(gt_path) + ".itxt", generate=GENERATE), (
            "export to indented-text"
        )

        assert verify_document(document, str(gt_path) + ".json", GENERATE)
def _create_vtt_stream(content: str) -> DocumentStream:
    """Wrap raw WebVTT text in a DocumentStream and check format detection."""
    payload = BytesIO(content.strip().encode())
    stream = DocumentStream(name="test.vtt", stream=payload)
    probe = _DocumentConversionInput(path_or_stream_iterator=[])
    assert probe._guess_format(stream) == InputFormat.VTT
    return stream
def _process_vtt_doc(doc: DoclingDocument) -> str:
    """Concatenate cue-track text items, space-separating grouped spans."""
    pieces: list[str] = []
    for item in doc.texts:
        is_track_text = (
            isinstance(item, TextItem)
            and item.source
            and item.source[0].kind == "track"
        )
        if not is_track_text:
            continue
        parent = item.parent.resolve(doc)
        if parent and isinstance(parent, GroupItem):
            pieces.append(" ")
        pieces.append(item.text)
    return "".join(pieces).strip()
@pytest.fixture(scope="module")
def converter() -> DocumentConverter:
    """Module-scoped DocumentConverter shared by the cue-parsing tests below."""
    return DocumentConverter()
def test_simple_two_cues_basic(converter):
    """Two plain cues end up concatenated in document order."""
    content = """
WEBVTT

00:00:00.000 --> 00:00:02.000
Hello world!

00:00:02.500 --> 00:00:04.000
Second cue.
"""
    document = converter.convert(_create_vtt_stream(content)).document
    assert _process_vtt_doc(document) == "Hello world! Second cue."
def test_cue_ids_present_are_ignored_in_output(converter):
    """Cue identifier lines ("1", "2") do not leak into the extracted text."""
    vtt = """
WEBVTT
1
00:00:00.000 --> 00:00:01.000
First with ID.
2
00:00:01.250 --> 00:00:02.000
Second with ID.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "First with ID. Second with ID."
    assert _process_vtt_doc(doc) == expected
def test_multi_line_cue_text_preserved(converter):
    """All payload lines of a multi-line cue are kept, joined by spaces."""
    vtt = """
WEBVTT
00:00:00.000 --> 00:00:03.000
This is line one.
This is line two.
00:00:03.500 --> 00:00:05.000
Another cue line one.
Another cue line two.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "This is line one. This is line two. Another cue line one. Another cue line two."
    assert _process_vtt_doc(doc) == expected
def test_styling_and_voice_tags_stripped(converter):
    """Voice (<v>) and styling (<b>/<i>/<u>/<c>) tags are stripped; inner text kept."""
    vtt = """
WEBVTT
00:00:00.000 --> 00:00:02.000
<v Roger><b>Hello</b> <i>there</i><u>!</u></v>
00:00:02.200 --> 00:00:04.000
<c.red>Styled</c> and <v Ann>voiced</v> text.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    # Expect tags removed but inner text retained, spacing preserved.
    # expected = "Hello there! Styled and voiced text."
    # TODO: temporary ground truth (issue docling-project/docling-core/#371)
    expected = "Hello there ! Styled and voiced text."
    assert _process_vtt_doc(doc) == expected
def test_blank_cue_contributes_no_text(converter):
    """A cue without payload lines contributes nothing to the output."""
    # First cue has text; second cue is intentionally blank (zero transcript lines).
    vtt = """
WEBVTT
00:00:00.000 --> 00:00:02.000
Visible text.
00:00:02.500 --> 00:00:04.000
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "Visible text."
    assert _process_vtt_doc(doc) == expected
def test_note_blocks_are_ignored(converter):
    """NOTE blocks — file-level and between cues — are skipped entirely."""
    vtt = """
WEBVTT
NOTE This is a file-level note
It can span multiple lines.
00:00:00.000 --> 00:00:02.000
First cue text.
NOTE Another note between cues
00:00:02.500 --> 00:00:04.000
Second cue text.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "First cue text. Second cue text."
    assert _process_vtt_doc(doc) == expected
def test_region_block_ignored_but_region_reference_ok(converter):
    """REGION definition blocks are skipped; cues referencing a region still parse."""
    vtt = """
WEBVTT
REGION
id:top
width:40%
lines:3
00:00:00.000 --> 00:00:02.000 region:top line:90% position:50% size:35% align:start
Top region text.
00:00:02.500 --> 00:00:04.000
Normal region text.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "Top region text. Normal region text."
    assert _process_vtt_doc(doc) == expected
def test_varied_timestamp_formats_and_settings_ignored(converter):
    """Both MM:SS.mmm and HH:MM:SS.mmm timestamps parse; cue settings are ignored."""
    # First cue uses MM:SS.mmm; second uses HH:MM:SS.mmm and includes settings.
    vtt = """
WEBVTT
00:01.000 --> 00:03.000
Under one minute format.
01:00:00.000 --> 01:00:02.000 line:0 position:10% align:end
Hour format with settings.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    expected = "Under one minute format. Hour format with settings."
    assert _process_vtt_doc(doc) == expected
def test_cue_ids_plus_multiline_with_voice_and_style(converter):
    """Named cue IDs, multi-line payloads, and voice/style tags combine correctly."""
    # Mix multiple concepts: cue IDs, multi-line text, voice tags, style tags.
    vtt = """
WEBVTT
intro
00:00:00.000 --> 00:00:02.000
<v Narrator><i>Welcome</i> to the show.</v>
<b>Enjoy</b> your time.
outro
00:00:02.500 --> 00:00:04.000
<v Host>Goodbye</v>, see you <u>soon</u>.
"""
    stream = _create_vtt_stream(vtt)
    doc = converter.convert(stream).document
    # expected = "Welcome to the show. Enjoy your time. Goodbye, see you soon."
    # TODO: temporary ground truth (issue docling-project/docling-core/#371)
    expected = "Welcome to the show. Enjoy your time. Goodbye , see you soon ."
    assert _process_vtt_doc(doc) == expected
def test_style_blocks_and_note_between_styles_are_ignored(converter):
    """STYLE blocks (and NOTEs between them) are skipped without emitting warnings."""
    vtt = """
WEBVTT
STYLE
::cue {
background-image: linear-gradient(to bottom, dimgray, lightgray);
color: papayawhip;
}
/* Style blocks cannot use blank lines nor "dash dash greater than" */
NOTE comment blocks can be used between style blocks.
STYLE
::cue(b) {
color: peachpuff;
}
hello
00:00:00.000 --> 00:00:10.000
Hello <b>world</b>.
"""
    stream = _create_vtt_stream(vtt)
    with warnings.catch_warnings():
        # STYLE and NOTE blocks should be ignored without warnings
        warnings.simplefilter("error")
        doc = converter.convert(stream).document
    # expected = "Hello world."
    # TODO: temporary ground truth (issue docling-project/docling-core/#371)
    expected = "Hello world ."
    assert _process_vtt_doc(doc) == expected
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_backend_vtt.py",
"license": "MIT License",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docs/examples/enrich_simple_pipeline.py | import logging
from pathlib import Path
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import ConvertPipelineOptions
from docling.document_converter import (
DocumentConverter,
HTMLFormatOption,
WordFormatOption,
)
_log = logging.getLogger(__name__)
def main():
    """Convert a sample DOCX with picture enrichment enabled and print Markdown."""
    source = Path("tests/data/docx/word_sample.docx")

    # Enable the picture classification/description enrichment models.
    options = ConvertPipelineOptions()
    options.do_picture_classification = True
    options.do_picture_description = True

    # The same pipeline options are shared by the DOCX and HTML formats.
    word_option = WordFormatOption(pipeline_options=options)
    html_option = HTMLFormatOption(pipeline_options=options)
    converter = DocumentConverter(
        format_options={
            InputFormat.DOCX: word_option,
            InputFormat.HTML: html_option,
        },
    )

    print(converter.convert(source).document.export_to_markdown())


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/enrich_simple_pipeline.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/datamodel/extraction.py | """Data models for document extraction functionality."""
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, Field
from docling.datamodel.base_models import ConversionStatus, ErrorItem, VlmStopReason
from docling.datamodel.document import InputDocument
class ExtractedPageData(BaseModel):
    """Data model for extracted content from a single page.

    One instance is produced per processed page; `extracted_data` is populated
    when the model output parses as JSON, while `raw_text` always carries the
    raw output.
    """

    # 1-indexed to match user-facing page numbering (and page-range limits).
    page_no: int = Field(..., description="1-indexed page number")
    extracted_data: Optional[Dict[str, Any]] = Field(
        None, description="Extracted structured data from the page"
    )
    raw_text: Optional[str] = Field(None, description="Raw extracted text")
    errors: List[str] = Field(
        default_factory=list,
        description="Any errors encountered during extraction for this page",
    )
class ExtractionResult(BaseModel):
    """Result of document extraction.

    Carries the originating input document, the overall status, any
    document-level errors, and the per-page extracted payloads.
    """

    input: InputDocument
    status: ConversionStatus = ConversionStatus.PENDING
    # Use default_factory (not a literal `[]`) for the mutable default, keeping
    # the declaration style consistent with `pages` below.
    errors: List[ErrorItem] = Field(default_factory=list)

    # Pages field - always a list for consistency
    pages: List[ExtractedPageData] = Field(
        default_factory=list, description="Extracted data from each page"
    )
# Type alias for template parameters that can be string, dict, or BaseModel
# (an instance or the model class itself); pipelines serialize whichever form
# is given into the extraction prompt.
ExtractionTemplateType = Union[str, Dict[str, Any], BaseModel, Type[BaseModel]]
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/extraction.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/document_extractor.py | import hashlib
import logging
import sys
import threading
import time
import warnings
from collections.abc import Iterable, Iterator
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from pathlib import Path
from typing import Optional, Type, Union
from pydantic import ConfigDict, model_validator, validate_call
from typing_extensions import Self
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.image_backend import ImageDocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import (
BaseFormatOption,
ConversionStatus,
DoclingComponentType,
DocumentStream,
ErrorItem,
InputFormat,
)
from docling.datamodel.document import (
InputDocument,
_DocumentConversionInput, # intentionally reused builder
)
from docling.datamodel.extraction import ExtractionResult, ExtractionTemplateType
from docling.datamodel.pipeline_options import PipelineOptions
from docling.datamodel.settings import (
DEFAULT_PAGE_RANGE,
DocumentLimits,
PageRange,
settings,
)
from docling.exceptions import ConversionError
from docling.pipeline.base_extraction_pipeline import BaseExtractionPipeline
from docling.pipeline.extraction_vlm_pipeline import ExtractionVlmPipeline
from docling.utils.utils import chunkify
_log = logging.getLogger(__name__)
_PIPELINE_CACHE_LOCK = threading.Lock()
class ExtractionFormatOption(BaseFormatOption):
    """Per-format configuration for extraction.
    Notes:
    - `pipeline_cls` must subclass `BaseExtractionPipeline`.
    - `pipeline_options` is typed as `PipelineOptions` which MUST inherit from
      `BaseOptions` (as used by `BaseExtractionPipeline`).
    - `backend` is the document-opening backend used by `_DocumentConversionInput`.
    """

    # Pipeline implementation instantiated for documents of this format.
    pipeline_cls: Type[BaseExtractionPipeline]

    @model_validator(mode="after")
    def set_optional_field_default(self) -> Self:
        """Default `pipeline_options` from the pipeline class when omitted."""
        if self.pipeline_options is None:
            # `get_default_options` comes from BaseExtractionPipeline
            self.pipeline_options = self.pipeline_cls.get_default_options()  # type: ignore[assignment]
        return self
def _get_default_extraction_option(fmt: InputFormat) -> ExtractionFormatOption:
    """Return the default extraction option for a given input format.

    The backend choice mirrors the converter defaults, while the pipeline is
    always the VLM extractor. This duplication will be removed when the format
    registry is deduplicated between convert and extract.
    """
    default_backends: dict[InputFormat, Type[AbstractDocumentBackend]] = {
        InputFormat.IMAGE: ImageDocumentBackend,
        InputFormat.PDF: PyPdfiumDocumentBackend,
    }
    try:
        backend = default_backends[fmt]
    except KeyError:
        raise RuntimeError(
            f"No default extraction backend configured for {fmt}"
        ) from None
    return ExtractionFormatOption(
        pipeline_cls=ExtractionVlmPipeline,
        backend=backend,
    )
class DocumentExtractor:
    """Standalone extractor class.
    Public API:
    - `extract(...) -> ExtractionResult`
    - `extract_all(...) -> Iterator[ExtractionResult]`
    Implementation intentionally reuses `_DocumentConversionInput` to build
    `InputDocument` with the correct backend per format.
    """

    def __init__(
        self,
        allowed_formats: Optional[list[InputFormat]] = None,
        extraction_format_options: Optional[
            dict[InputFormat, ExtractionFormatOption]
        ] = None,
    ) -> None:
        """Initialize the extractor.

        Args:
            allowed_formats: Formats accepted by this extractor; defaults to
                every member of `InputFormat`.
            extraction_format_options: Per-format overrides; formats without
                an override fall back to the built-in defaults.
        """
        self.allowed_formats: list[InputFormat] = (
            allowed_formats if allowed_formats is not None else list(InputFormat)
        )
        # Build per-format options with defaults, then apply any user overrides.
        # Defaults are resolved lazily (NOT via `overrides.get(fmt, default)`)
        # because `_get_default_extraction_option` raises for formats without a
        # configured default backend, and `dict.get`'s default argument is
        # evaluated eagerly: that made a plain `DocumentExtractor()` (which
        # allows all formats) raise at construction time, even for formats the
        # caller had explicitly overridden.
        overrides = extraction_format_options or {}
        self.extraction_format_to_options: dict[
            InputFormat, ExtractionFormatOption
        ] = {}
        for fmt in self.allowed_formats:
            if fmt in overrides:
                self.extraction_format_to_options[fmt] = overrides[fmt]
            else:
                try:
                    self.extraction_format_to_options[fmt] = (
                        _get_default_extraction_option(fmt)
                    )
                except RuntimeError:
                    # No built-in extraction support for this format: leave it
                    # without options; `_get_pipeline` then returns None and the
                    # document is reported as failed at extraction time.
                    _log.debug("No default extraction option for %s", fmt)
        # Cache pipelines by (class, options-hash)
        self._initialized_pipelines: dict[
            tuple[Type[BaseExtractionPipeline], str], BaseExtractionPipeline
        ] = {}

    # ---------------------------- Public API ---------------------------------
    @validate_call(config=ConfigDict(strict=True))
    def extract(
        self,
        source: Union[Path, str, DocumentStream],
        template: ExtractionTemplateType,
        headers: Optional[dict[str, str]] = None,
        raises_on_error: bool = True,
        max_num_pages: int = sys.maxsize,
        max_file_size: int = sys.maxsize,
        page_range: PageRange = DEFAULT_PAGE_RANGE,
    ) -> ExtractionResult:
        """Extract structured data from a single source.

        Thin wrapper over `extract_all` returning its first (only) result.
        """
        all_res = self.extract_all(
            source=[source],
            headers=headers,
            raises_on_error=raises_on_error,
            max_num_pages=max_num_pages,
            max_file_size=max_file_size,
            page_range=page_range,
            template=template,
        )
        return next(all_res)

    @validate_call(config=ConfigDict(strict=True))
    def extract_all(
        self,
        source: Iterable[Union[Path, str, DocumentStream]],
        template: ExtractionTemplateType,
        headers: Optional[dict[str, str]] = None,
        raises_on_error: bool = True,
        max_num_pages: int = sys.maxsize,
        max_file_size: int = sys.maxsize,
        page_range: PageRange = DEFAULT_PAGE_RANGE,
    ) -> Iterator[ExtractionResult]:
        """Lazily extract structured data from multiple sources.

        Yields one `ExtractionResult` per recognized input. With
        `raises_on_error=True`, a failed document raises `ConversionError`
        instead of being yielded, and producing no result at all also raises.
        """
        warnings.warn(
            "The extract API is currently experimental and may change without prior notice.\n"
            "Only PDF and image formats are supported.",
            UserWarning,
            stacklevel=2,
        )
        limits = DocumentLimits(
            max_num_pages=max_num_pages,
            max_file_size=max_file_size,
            page_range=page_range,
        )
        conv_input = _DocumentConversionInput(
            path_or_stream_iterator=source, limits=limits, headers=headers
        )
        ext_res_iter = self._extract(
            conv_input, raises_on_error=raises_on_error, template=template
        )
        had_result = False
        for ext_res in ext_res_iter:
            had_result = True
            if raises_on_error and ext_res.status not in {
                ConversionStatus.SUCCESS,
                ConversionStatus.PARTIAL_SUCCESS,
            }:
                raise ConversionError(
                    f"Extraction failed for: {ext_res.input.file} with status: {ext_res.status}"
                )
            else:
                yield ext_res
        if not had_result and raises_on_error:
            raise ConversionError(
                "Extraction failed because the provided file has no recognizable format or it wasn't in the list of allowed formats."
            )

    # --------------------------- Internal engine ------------------------------
    def _extract(
        self,
        conv_input: _DocumentConversionInput,
        raises_on_error: bool,
        template: ExtractionTemplateType,
    ) -> Iterator[ExtractionResult]:
        """Batch the inputs and run each document through its pipeline.

        Uses a thread pool when both batch size and concurrency settings
        exceed one; otherwise processes sequentially with per-document timing.
        """
        start_time = time.monotonic()
        for input_batch in chunkify(
            conv_input.docs(self.extraction_format_to_options),
            settings.perf.doc_batch_size,
        ):
            _log.info("Going to extract document batch...")
            process_func = partial(
                self._process_document_extraction,
                raises_on_error=raises_on_error,
                template=template,
            )
            if (
                settings.perf.doc_batch_concurrency > 1
                and settings.perf.doc_batch_size > 1
            ):
                with ThreadPoolExecutor(
                    max_workers=settings.perf.doc_batch_concurrency
                ) as pool:
                    for item in pool.map(
                        process_func,
                        input_batch,
                    ):
                        yield item
            else:
                for item in map(
                    process_func,
                    input_batch,
                ):
                    elapsed = time.monotonic() - start_time
                    start_time = time.monotonic()
                    _log.info(
                        f"Finished extracting document {item.input.file.name} in {elapsed:.2f} sec."
                    )
                    yield item

    def _process_document_extraction(
        self,
        in_doc: InputDocument,
        raises_on_error: bool,
        template: ExtractionTemplateType,
    ) -> ExtractionResult:
        """Validate the document's format, then delegate to the pipeline.

        Disallowed formats either raise (raises_on_error) or produce a
        SKIPPED result carrying a user-input error item.
        """
        valid = (
            self.allowed_formats is not None and in_doc.format in self.allowed_formats
        )
        if valid:
            return self._execute_extraction_pipeline(
                in_doc, raises_on_error=raises_on_error, template=template
            )
        else:
            error_message = f"File format not allowed: {in_doc.file}"
            if raises_on_error:
                raise ConversionError(error_message)
            else:
                error_item = ErrorItem(
                    component_type=DoclingComponentType.USER_INPUT,
                    module_name="",
                    error_message=error_message,
                )
                return ExtractionResult(
                    input=in_doc, status=ConversionStatus.SKIPPED, errors=[error_item]
                )

    def _execute_extraction_pipeline(
        self,
        in_doc: InputDocument,
        raises_on_error: bool,
        template: ExtractionTemplateType,
    ) -> ExtractionResult:
        """Run the cached pipeline for the document's format.

        Invalid documents and missing pipelines yield FAILURE results (or
        raise, depending on `raises_on_error`).
        """
        if not in_doc.valid:
            if raises_on_error:
                raise ConversionError(f"Input document {in_doc.file} is not valid.")
            else:
                return ExtractionResult(input=in_doc, status=ConversionStatus.FAILURE)
        pipeline = self._get_pipeline(in_doc.format)
        if pipeline is None:
            if raises_on_error:
                raise ConversionError(
                    f"No extraction pipeline could be initialized for {in_doc.file}."
                )
            else:
                return ExtractionResult(input=in_doc, status=ConversionStatus.FAILURE)
        return pipeline.execute(
            in_doc, raises_on_error=raises_on_error, template=template
        )

    def _get_pipeline(
        self, doc_format: InputFormat
    ) -> Optional[BaseExtractionPipeline]:
        """Retrieve or initialize a pipeline, reusing instances based on class and options."""
        fopt = self.extraction_format_to_options.get(doc_format)
        if fopt is None or fopt.pipeline_options is None:
            return None
        pipeline_class = fopt.pipeline_cls
        pipeline_options = fopt.pipeline_options
        options_hash = self._get_pipeline_options_hash(pipeline_options)
        cache_key = (pipeline_class, options_hash)
        # Lock guards the shared cache: pipelines may be requested from the
        # thread pool in `_extract`.
        with _PIPELINE_CACHE_LOCK:
            if cache_key not in self._initialized_pipelines:
                _log.info(
                    f"Initializing extraction pipeline for {pipeline_class.__name__} with options hash {options_hash}"
                )
                self._initialized_pipelines[cache_key] = pipeline_class(
                    pipeline_options=pipeline_options  # type: ignore[arg-type]
                )
            else:
                _log.debug(
                    f"Reusing cached extraction pipeline for {pipeline_class.__name__} with options hash {options_hash}"
                )
            return self._initialized_pipelines[cache_key]

    @staticmethod
    def _get_pipeline_options_hash(pipeline_options: PipelineOptions) -> str:
        """Generate a stable hash of pipeline options to use as part of the cache key."""
        options_str = str(pipeline_options.model_dump())
        return hashlib.md5(
            options_str.encode("utf-8"), usedforsecurity=False
        ).hexdigest()
| {
"repo_id": "docling-project/docling",
"file_path": "docling/document_extractor.py",
"license": "MIT License",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/pipeline/base_extraction_pipeline.py | import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional
from docling.datamodel.base_models import ConversionStatus, ErrorItem
from docling.datamodel.document import InputDocument
from docling.datamodel.extraction import ExtractionResult, ExtractionTemplateType
from docling.datamodel.pipeline_options import BaseOptions, PipelineOptions
from docling.datamodel.settings import settings
_log = logging.getLogger(__name__)
class BaseExtractionPipeline(ABC):
    """Common scaffolding for extraction pipelines.

    Resolves the optional model-artifacts folder and implements the
    `execute()` template method; subclasses supply the actual extraction
    logic and the status policy.
    """

    def __init__(self, pipeline_options: PipelineOptions):
        """Store options and resolve `artifacts_path`.

        The explicit pipeline option wins over the global
        `settings.artifacts_path`; when set, the path must be an existing
        directory.
        """
        self.pipeline_options = pipeline_options
        self.artifacts_path: Optional[Path] = None
        if pipeline_options.artifacts_path is not None:
            self.artifacts_path = Path(pipeline_options.artifacts_path).expanduser()
        elif settings.artifacts_path is not None:
            self.artifacts_path = Path(settings.artifacts_path).expanduser()
        if self.artifacts_path is not None and not self.artifacts_path.is_dir():
            raise RuntimeError(
                f"The value of {self.artifacts_path=} is not valid. "
                "When defined, it must point to a folder containing all models required by the pipeline."
            )

    def execute(
        self,
        in_doc: InputDocument,
        raises_on_error: bool,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Run the pipeline on `in_doc` and return an `ExtractionResult`.

        Delegates to `_extract_data()` and `_determine_status()`. Any
        exception is recorded as an `ErrorItem` with FAILURE status and is
        re-raised only when `raises_on_error` is True.
        """
        ext_res = ExtractionResult(input=in_doc)
        try:
            ext_res = self._extract_data(ext_res, template)
            ext_res.status = self._determine_status(ext_res)
        except Exception as e:
            ext_res.status = ConversionStatus.FAILURE
            error_item = ErrorItem(
                component_type="extraction_pipeline",
                module_name=self.__class__.__name__,
                error_message=str(e),
            )
            ext_res.errors.append(error_item)
            if raises_on_error:
                raise e
        return ext_res

    @abstractmethod
    def _extract_data(
        self,
        ext_res: ExtractionResult,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Subclass must populate ext_res.pages/errors and return the result."""
        raise NotImplementedError

    @abstractmethod
    def _determine_status(self, ext_res: ExtractionResult) -> ConversionStatus:
        """Subclass must decide SUCCESS/PARTIAL_SUCCESS/FAILURE based on ext_res."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def get_default_options(cls) -> PipelineOptions:
        """Return the default options instance for this pipeline class."""
        pass
| {
"repo_id": "docling-project/docling",
"file_path": "docling/pipeline/base_extraction_pipeline.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/pipeline/extraction_vlm_pipeline.py | import inspect
import json
import logging
from typing import Optional
from PIL.Image import Image
from pydantic import BaseModel
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import ConversionStatus, ErrorItem, VlmStopReason
from docling.datamodel.document import InputDocument
from docling.datamodel.extraction import (
ExtractedPageData,
ExtractionResult,
ExtractionTemplateType,
)
from docling.datamodel.pipeline_options import (
PipelineOptions,
VlmExtractionPipelineOptions,
)
from docling.datamodel.settings import settings
from docling.models.extraction.nuextract_transformers_model import (
NuExtractTransformersModel,
)
from docling.pipeline.base_extraction_pipeline import BaseExtractionPipeline
from docling.utils.accelerator_utils import decide_device
_log = logging.getLogger(__name__)
class ExtractionVlmPipeline(BaseExtractionPipeline):
    """Extraction pipeline that prompts a VLM (NuExtract) with page images.

    Each page of the input document is rendered to an image and sent to the
    model together with a serialized template; the model output is stored as
    raw text and, when it parses as JSON, as structured data.
    """

    def __init__(self, pipeline_options: VlmExtractionPipelineOptions):
        """Create the pipeline and eagerly instantiate the NuExtract model."""
        super().__init__(pipeline_options)
        # Initialize VLM model with default options
        self.accelerator_options = pipeline_options.accelerator_options
        self.pipeline_options: VlmExtractionPipelineOptions
        # Create VLM model instance
        self.vlm_model = NuExtractTransformersModel(
            enabled=True,
            artifacts_path=self.artifacts_path,  # Will download automatically
            accelerator_options=self.accelerator_options,
            vlm_options=pipeline_options.vlm_options,
        )

    def _extract_data(
        self,
        ext_res: ExtractionResult,
        template: Optional[ExtractionTemplateType] = None,
    ) -> ExtractionResult:
        """Extract data using the VLM model."""
        try:
            # Get images from input document using the backend
            images = self._get_images_from_input(ext_res.input)
            if not images:
                ext_res.status = ConversionStatus.FAILURE
                ext_res.errors.append(
                    ErrorItem(
                        component_type="extraction_pipeline",
                        module_name=self.__class__.__name__,
                        error_message="No images found in document",
                    )
                )
                return ext_res
            # Use provided template or default prompt
            if template is not None:
                prompt = self._serialize_template(template)
            else:
                prompt = "Extract all text and structured information from this document. Return as JSON."
            # Process all images with VLM model
            start_page, end_page = ext_res.input.limits.page_range
            for i, image in enumerate(images):
                # Calculate the actual page number based on the filtered range
                page_number = start_page + i
                try:
                    predictions = list(self.vlm_model.process_images([image], prompt))
                    if predictions:
                        # Parse the extracted text as JSON if possible, otherwise use as-is
                        extracted_text = predictions[0].text
                        extracted_data = None
                        vlm_stop_reason: VlmStopReason = predictions[0].stop_reason
                        # Truncated generations downgrade the whole result to
                        # PARTIAL_SUCCESS (see _determine_status).
                        if (
                            vlm_stop_reason == VlmStopReason.LENGTH
                            or vlm_stop_reason == VlmStopReason.STOP_SEQUENCE
                        ):
                            ext_res.status = ConversionStatus.PARTIAL_SUCCESS
                        try:
                            extracted_data = json.loads(extracted_text)
                        except (json.JSONDecodeError, ValueError):
                            # If not valid JSON, keep extracted_data as None
                            pass
                        # Create page data with proper structure
                        page_data = ExtractedPageData(
                            page_no=page_number,
                            extracted_data=extracted_data,
                            raw_text=extracted_text,  # Always populate raw_text
                        )
                        ext_res.pages.append(page_data)
                    else:
                        # Add error page data
                        page_data = ExtractedPageData(
                            page_no=page_number,
                            extracted_data=None,
                            errors=["No extraction result from VLM model"],
                        )
                        ext_res.pages.append(page_data)
                except Exception as e:
                    # Per-page failures are recorded and extraction continues
                    # with the remaining pages.
                    _log.error(f"Error processing page {page_number}: {e}")
                    page_data = ExtractedPageData(
                        page_no=page_number, extracted_data=None, errors=[str(e)]
                    )
                    ext_res.pages.append(page_data)
        except Exception as e:
            _log.error(f"Error during extraction: {e}")
            ext_res.errors.append(
                ErrorItem(
                    component_type="extraction_pipeline",
                    module_name=self.__class__.__name__,
                    error_message=str(e),
                )
            )
        return ext_res

    def _determine_status(self, ext_res: ExtractionResult) -> ConversionStatus:
        """Determine the status based on extraction results."""
        # Any per-page error means FAILURE; otherwise keep a PARTIAL_SUCCESS
        # set during extraction (truncated output), else SUCCESS.
        if ext_res.pages and not any(page.errors for page in ext_res.pages):
            return (
                ConversionStatus.PARTIAL_SUCCESS
                if ext_res.status == ConversionStatus.PARTIAL_SUCCESS
                else ConversionStatus.SUCCESS
            )
        else:
            return ConversionStatus.FAILURE

    def _get_images_from_input(self, input_doc: InputDocument) -> list[Image]:
        """Extract images from input document using the backend.

        Only pages inside `input_doc.limits.page_range` are rendered; page
        load/render failures are logged and skipped.
        """
        images = []
        try:
            backend = input_doc._backend
            # NOTE(review): image inputs must also satisfy this assert —
            # presumably ImageDocumentBackend derives from PdfDocumentBackend;
            # confirm before extending to other backends.
            assert isinstance(backend, PdfDocumentBackend)
            # Use the backend's pagination interface
            page_count = backend.page_count()
            # Respect page range limits, following the same pattern as PaginatedPipeline
            start_page, end_page = input_doc.limits.page_range
            _log.info(
                f"Processing pages {start_page}-{end_page} of {page_count} total pages for extraction"
            )
            for page_num in range(page_count):
                # Only process pages within the specified range (0-based indexing)
                if start_page - 1 <= page_num <= end_page - 1:
                    try:
                        page_backend = backend.load_page(page_num)
                        if page_backend.is_valid():
                            # Get page image at a reasonable scale
                            page_image = page_backend.get_page_image(
                                scale=self.pipeline_options.vlm_options.scale
                            )
                            images.append(page_image)
                        else:
                            _log.warning(f"Page {page_num + 1} backend is not valid")
                    except Exception as e:
                        _log.error(f"Error loading page {page_num + 1}: {e}")
        except Exception as e:
            _log.error(f"Error getting images from input document: {e}")
        return images

    def _serialize_template(self, template: ExtractionTemplateType) -> str:
        """Serialize template to string based on its type.

        Strings pass through; dicts and BaseModel instances become indented
        JSON; a BaseModel *class* is expanded into an example instance via
        polyfactory (preferring field examples and defaults).
        """
        if isinstance(template, str):
            return template
        elif isinstance(template, dict):
            return json.dumps(template, indent=2)
        elif isinstance(template, BaseModel):
            return template.model_dump_json(indent=2)
        elif inspect.isclass(template) and issubclass(template, BaseModel):
            from polyfactory.factories.pydantic_factory import ModelFactory

            class ExtractionTemplateFactory(ModelFactory[template]):  # type: ignore
                __use_examples__ = True  # prefer Field(examples=...) when present
                __use_defaults__ = True  # use field defaults instead of random values
                __check_model__ = (
                    True  # setting the value to avoid deprecation warnings
                )

            return ExtractionTemplateFactory.build().model_dump_json(indent=2)  # type: ignore
        else:
            raise ValueError(f"Unsupported template type: {type(template)}")

    @classmethod
    def get_default_options(cls) -> PipelineOptions:
        """Return fresh default options for this pipeline."""
        return VlmExtractionPipelineOptions()
| {
"repo_id": "docling-project/docling",
"file_path": "docling/pipeline/extraction_vlm_pipeline.py",
"license": "MIT License",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:tests/test_extraction.py | """
Test unit for document extraction functionality.
"""
import os
from pathlib import Path
import pytest
from pydantic import BaseModel, Field
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter
from docling.document_extractor import DocumentExtractor
IS_CI = bool(os.getenv("CI"))
class ExampleTemplate(BaseModel):
    """Extraction template whose examples/defaults deliberately differ from
    the ground-truth values of the test sample."""

    bill_no: str = Field(
        examples=["A123", "5414"]
    )  # provide some examples, but not the actual value of the test sample
    total: float = Field(
        default=10.0, examples=[20.0]
    )  # provide a default value and some examples
@pytest.fixture
def extractor() -> DocumentExtractor:
    """Create a document extractor instance (PDF and image inputs) for testing."""
    return DocumentExtractor(allowed_formats=[InputFormat.IMAGE, InputFormat.PDF])
@pytest.fixture
def test_file_path() -> Path:
    """Path of the scanned QR-bill sample image used by the extraction tests."""
    return Path(__file__).parent.joinpath("data_scanned", "qr_bill_example.jpg")
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_string_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """A JSON-string template yields the known bill number and total."""
    template = '{"bill_no": "string", "total": "number"}'
    result = extractor.extract(test_file_path, template=template)
    print(result.pages)
    assert result.status is not None
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_dict_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """A plain-dict template yields the known bill number and total."""
    template = {
        "bill_no": "string",
        "total": "number",
    }
    result = extractor.extract(test_file_path, template=template)
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_pydantic_instance_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """A pydantic *instance* template yields the known bill number and total."""
    template = ExampleTemplate(bill_no="4321")
    result = extractor.extract(test_file_path, template=template)
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
@pytest.mark.skipif(
    IS_CI, reason="Skipping test in CI because the dataset is too heavy."
)
def test_extraction_with_pydantic_class_template(
    extractor: DocumentExtractor, test_file_path: Path
) -> None:
    """A pydantic *class* template yields the known bill number and total."""
    result = extractor.extract(test_file_path, template=ExampleTemplate)
    assert len(result.pages) == 1
    page = result.pages[0]
    assert page.extracted_data["bill_no"] == "3139"
    assert page.extracted_data["total"] == 3949.75
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_extraction.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:tests/test_backend_mets_gbs.py | from pathlib import Path
import pytest
from docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend
from docling.datamodel.base_models import BoundingBox, InputFormat
from docling.datamodel.document import InputDocument
@pytest.fixture
def test_doc_path():
    """Path to the METS-GBS sample archive used by all tests in this module."""
    return Path("tests/data/mets_gbs") / "32044009881525_select.tar.gz"
def _get_backend(pdf_doc):
    """Open *pdf_doc* as a METS-GBS input document and return its backend."""
    input_doc = InputDocument(
        path_or_stream=pdf_doc,
        format=InputFormat.METS_GBS,
        backend=MetsGbsDocumentBackend,
    )
    return input_doc._backend
def test_process_pages(test_doc_path):
    """Every page can be loaded, its text cells materialized, and unloaded."""
    backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
    for idx in range(backend.page_count()):
        page: MetsGbsPageBackend = backend.load_page(idx)
        list(page.get_text_cells())
        # Release per-page resources before loading the next page.
        page.unload()
    # Explicit teardown avoids race conditions observed in CI.
    backend.unload()
def test_get_text_from_rect(test_doc_path):
    """Text extracted from a fixed rectangle matches the known snippet."""
    backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
    page: MetsGbsPageBackend = backend.load_page(0)
    region = BoundingBox(l=275, t=263, r=1388, b=311)
    snippet = page.get_text_in_rect(bbox=region)
    assert snippet.strip() == "recently become prevalent that he who speaks"
    # Explicit teardown avoids race conditions observed in CI.
    page.unload()
    backend.unload()
def test_crop_page_image(test_doc_path):
    """Rendering a cropped page image at 2x scale succeeds."""
    backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
    page: MetsGbsPageBackend = backend.load_page(0)
    crop = BoundingBox(l=270, t=587, r=1385, b=1995)
    page.get_page_image(scale=2, cropbox=crop)
    # Explicit teardown avoids race conditions observed in CI.
    page.unload()
    backend.unload()
def test_num_pages(test_doc_path):
    """The sample archive is valid and contains exactly three pages."""
    backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)
    assert backend.is_valid()
    assert backend.page_count() == 3
    # Explicit cleanup prevents race conditions in CI
    backend.unload()
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_backend_mets_gbs.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:tests/test_threaded_pipeline.py | import logging
import time
from pathlib import Path
from typing import List
import pytest
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
ThreadedPdfPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
from docling.pipeline.threaded_standard_pdf_pipeline import ThreadedStandardPdfPipeline
def _convert_and_count(
    converter: DocumentConverter, test_files: List[str], label: str
):
    """Convert *test_files* with *converter* and tally outcomes.

    Returns ``(success_count, failure_count, elapsed_seconds)``. Shared by
    the threaded and the standard pipeline runs below so the two previously
    duplicated loops cannot drift apart.
    """
    success_count = 0
    failure_count = 0
    start_time = time.perf_counter()
    for result in converter.convert_all(test_files, raises_on_error=True):
        print(
            f"Finished converting document with {label} pipeline:",
            result.input.file.name,
        )
        if result.status == ConversionStatus.SUCCESS:
            success_count += 1
        else:
            failure_count += 1
    elapsed = time.perf_counter() - start_time
    return success_count, failure_count, elapsed


def test_threaded_pipeline_multiple_documents():
    """Test threaded pipeline with multiple documents and compare with standard pipeline"""
    test_files = [
        "tests/data/pdf/2203.01017v2.pdf",
        "tests/data/pdf/2206.01062.pdf",
        "tests/data/pdf/2305.03393v1.pdf",
    ]
    # test_files = [str(f) for f in Path("test/data/pdf").rglob("*.pdf")]
    do_ts = False
    do_ocr = False
    run_threaded = True
    run_serial = True

    if run_threaded:
        # Threaded pipeline: batch sizes of 1 exercise the per-stage queues.
        threaded_converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=ThreadedStandardPdfPipeline,
                    pipeline_options=ThreadedPdfPipelineOptions(
                        layout_batch_size=1,
                        table_batch_size=1,
                        ocr_batch_size=1,
                        batch_polling_interval_seconds=1.0,
                        do_table_structure=do_ts,
                        do_ocr=do_ocr,
                    ),
                )
            }
        )
        threaded_converter.initialize_pipeline(InputFormat.PDF)
        (
            threaded_success_count,
            threaded_failure_count,
            threaded_time,
        ) = _convert_and_count(threaded_converter, test_files, "threaded")
        del threaded_converter
        print(f"Threaded pipeline: {threaded_time:.2f} seconds")

    if run_serial:
        # Standard (serial) pipeline with the same feature flags.
        standard_converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=StandardPdfPipeline,
                    pipeline_options=PdfPipelineOptions(
                        do_table_structure=do_ts,
                        do_ocr=do_ocr,
                    ),
                )
            }
        )
        standard_converter.initialize_pipeline(InputFormat.PDF)
        (
            standard_success_count,
            standard_failure_count,
            standard_time,
        ) = _convert_and_count(standard_converter, test_files, "standard")
        del standard_converter
        print(f"Standard pipeline: {standard_time:.2f} seconds")

    # Verify results: both pipelines must agree and fully succeed.
    if run_threaded and run_serial:
        assert standard_success_count == threaded_success_count
        assert standard_failure_count == threaded_failure_count
    if run_serial:
        assert standard_success_count == len(test_files)
        assert standard_failure_count == 0
    if run_threaded:
        assert threaded_success_count == len(test_files)
        assert threaded_failure_count == 0
def test_pipeline_comparison():
    """Compare all three pipeline implementations"""
    test_file = "tests/data/pdf/2206.01062.pdf"

    # Sync pipeline
    sync_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=StandardPdfPipeline,
            )
        }
    )
    t0 = time.perf_counter()
    sync_results = list(sync_converter.convert_all([test_file]))
    sync_time = time.perf_counter() - t0

    # Threaded pipeline with minimal batch sizes
    threaded_options = ThreadedPdfPipelineOptions(
        layout_batch_size=1,
        ocr_batch_size=1,
        table_batch_size=1,
    )
    threaded_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedStandardPdfPipeline,
                pipeline_options=threaded_options,
            )
        }
    )
    t0 = time.perf_counter()
    threaded_results = list(threaded_converter.convert_all([test_file]))
    threaded_time = time.perf_counter() - t0

    print("\nPipeline Comparison:")
    print(f"Sync pipeline:     {sync_time:.2f} seconds")
    print(f"Threaded pipeline: {threaded_time:.2f} seconds")
    print(f"Speedup:           {sync_time / threaded_time:.2f}x")

    # Verify results are equivalent
    assert len(sync_results) == len(threaded_results) == 1
    assert (
        sync_results[0].status == threaded_results[0].status == ConversionStatus.SUCCESS
    )

    # Basic content comparison: same page and text counts
    sync_doc = sync_results[0].document
    threaded_doc = threaded_results[0].document
    assert len(sync_doc.pages) == len(threaded_doc.pages)
    assert len(sync_doc.texts) == len(threaded_doc.texts)
def test_pypdfium_threaded_pipeline():
    """Repeated conversions with the pypdfium backend all succeed."""
    # All of the below is optional; the converter has internal defaults.
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=ThreadedStandardPdfPipeline,
                backend=PyPdfiumDocumentBackend,
            ),
        },
    )
    test_file = "tests/data/pdf/2206.01062.pdf"
    # Convert the same file several times to surface lifecycle issues.
    for i in range(6):
        print(f"iteration {i=}")
        conv_result = doc_converter.convert(test_file)
        assert conv_result.status == ConversionStatus.SUCCESS
        print(f"[{i=}] Success")
    print("All done!")
if __name__ == "__main__":
    # Run basic performance test (sync vs threaded pipeline comparison)
    test_pipeline_comparison()
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_threaded_pipeline.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docs/examples/enrich_doclingdocument.py | # %% [markdown]
# Enrich an existing DoclingDocument JSON with a custom model (post-conversion).
#
# What this example does
# - Loads a previously converted DoclingDocument from JSON (no reconversion).
# - Uses a backend to crop images for items and runs an enrichment model in batches.
# - Prints a few example annotations to stdout.
#
# Prerequisites
# - A DoclingDocument JSON produced by another conversion (path configured below).
# - Install Docling and dependencies for the chosen enrichment model.
# - Ensure the JSON and the referenced PDF match (same document/version), so
# provenance bounding boxes line up for accurate cropping.
#
# How to run
# - From the repo root: `python docs/examples/enrich_doclingdocument.py`.
# - Adjust `input_doc_path` and `input_pdf_path` if your data is elsewhere.
#
# Notes
# - `BATCH_SIZE` controls how many elements are passed to the model at once.
# - `prepare_element()` crops context around elements based on the model's expansion.
# %%
### Load modules
from pathlib import Path
from typing import Iterable, Optional
from docling_core.types.doc import BoundingBox, DocItem, DoclingDocument, NodeItem
from rich.pretty import pprint
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import InputFormat, ItemAndImageEnrichmentElement
from docling.datamodel.document import InputDocument
from docling.models.base_model import BaseItemAndImageEnrichmentModel
from docling.models.stages.picture_classifier.document_picture_classifier import (
DocumentPictureClassifier,
DocumentPictureClassifierOptions,
)
from docling.utils.utils import chunkify
### Define batch size used for processing
BATCH_SIZE = 4
# Trade-off: larger batches improve throughput but increase memory usage.
### From DocItem to the model inputs
# The following function is responsible for taking an item and applying the required pre-processing for the model.
# In this case we generate a cropped image from the document backend.
def prepare_element(
    doc: DoclingDocument,
    backend: PyPdfiumDocumentBackend,
    model: BaseItemAndImageEnrichmentModel,
    element: NodeItem,
) -> Optional[ItemAndImageEnrichmentElement]:
    """Crop a context image around *element* for the enrichment *model*.

    Returns ``None`` when the model does not process this element, or when
    the element carries no provenance (robustness fix: the original code
    indexed ``element.prov[0]`` unconditionally and would raise IndexError).
    """
    if not model.is_processable(doc=doc, element=element):
        return None

    assert isinstance(element, DocItem)
    if not element.prov:
        # Without provenance there is no bbox/page to crop from.
        return None
    element_prov = element.prov[0]

    bbox = element_prov.bbox
    width = bbox.r - bbox.l
    # NOTE(review): height as t - b assumes a bottom-left coordinate
    # origin for the provenance bbox — confirm against the backend.
    height = bbox.t - bbox.b

    # Expand the crop symmetrically so the model sees visual context
    # around the element, scaled by its expansion factor.
    expanded_bbox = BoundingBox(
        l=bbox.l - width * model.expansion_factor,
        t=bbox.t + height * model.expansion_factor,
        r=bbox.r + width * model.expansion_factor,
        b=bbox.b - height * model.expansion_factor,
        coord_origin=bbox.coord_origin,
    )

    # Provenance page numbers are 1-based; backend pages are 0-based.
    page_ix = element_prov.page_no - 1
    page_backend = backend.load_page(page_no=page_ix)
    cropped_image = page_backend.get_page_image(
        scale=model.images_scale, cropbox=expanded_bbox
    )
    return ItemAndImageEnrichmentElement(item=element, image=cropped_image)
### Iterate through the document
# This block defines the `enrich_document()` which is responsible for iterating through the document
# and batch the selected document items for running through the model.
def enrich_document(
    doc: DoclingDocument,
    backend: PyPdfiumDocumentBackend,
    model: BaseItemAndImageEnrichmentModel,
) -> DoclingDocument:
    """Run *model* over all processable items of *doc*, in batches.

    Iterates the document, prepares each processable item (cropped image
    plus item), and feeds batches of ``BATCH_SIZE`` to the model.
    """

    def _prepare_elements(
        doc: DoclingDocument,
        backend: PyPdfiumDocumentBackend,
        model: BaseItemAndImageEnrichmentModel,
    ) -> Iterable[ItemAndImageEnrichmentElement]:
        # Annotation fix: this generator yields prepared enrichment
        # elements, not raw NodeItems as previously declared.
        for doc_element, _level in doc.iterate_items():
            prepared_element = prepare_element(
                doc=doc, backend=backend, model=model, element=doc_element
            )
            if prepared_element is not None:
                yield prepared_element

    for element_batch in chunkify(
        _prepare_elements(doc, backend, model),
        BATCH_SIZE,
    ):
        # The model returns a lazy iterable; it must be fully exhausted
        # for the enrichment side effects to be applied to the document.
        for element in model(doc=doc, element_batch=element_batch):
            pass

    return doc
### Open and process
# The `main()` function which initializes the document and model objects for calling `enrich_document()`.
def main():
    """Load a converted document, enrich its pictures, and print results."""
    data_folder = Path(__file__).parent / "../../tests/data"
    input_pdf_path = data_folder / "pdf/2206.01062.pdf"
    input_doc_path = data_folder / "groundtruth/docling_v2/2206.01062.json"

    # Reload the previously converted document (no reconversion needed).
    doc = DoclingDocument.load_from_json(input_doc_path)

    # Open the matching PDF so images can be cropped from its pages.
    in_pdf_doc = InputDocument(
        input_pdf_path,
        format=InputFormat.PDF,
        backend=PyPdfiumDocumentBackend,
        filename=input_pdf_path.name,
    )
    backend = in_pdf_doc._backend

    classifier_options = DocumentPictureClassifierOptions.from_preset(
        "document_figure_classifier_v2"
    )
    model = DocumentPictureClassifier(
        enabled=True,
        artifacts_path=None,
        options=classifier_options,
        accelerator_options=AcceleratorOptions(),
    )

    doc = enrich_document(doc=doc, backend=backend, model=model)

    # Show a few of the resulting picture annotations.
    for pic in doc.pictures[:5]:
        print(pic.self_ref)
        pprint(pic.meta)


if __name__ == "__main__":
    main()
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/enrich_doclingdocument.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/datamodel/layout_model_specs.py | import logging
from enum import Enum
from pathlib import Path
from typing import Annotated, Optional
from pydantic import BaseModel, Field
from docling.datamodel.accelerator_options import AcceleratorDevice
_log = logging.getLogger(__name__)
class LayoutModelConfig(BaseModel):
    """Configuration for document layout analysis models from HuggingFace."""

    # Human-readable identifier used in logs and model selection.
    name: Annotated[
        str,
        Field(
            description=(
                "Human-readable name identifier for the layout model. Used for "
                "logging, debugging, and model selection."
            ),
            examples=["docling_layout_heron", "docling_layout_egret_large"],
        ),
    ]
    # HuggingFace Hub repository hosting the weights and config.
    repo_id: Annotated[
        str,
        Field(
            description=(
                "HuggingFace repository ID where the model is hosted. Used to "
                "download model weights and configuration files from "
                "HuggingFace Hub."
            ),
            examples=[
                "docling-project/docling-layout-heron",
                "docling-project/docling-layout-egret-large",
            ],
        ),
    ]
    # Git ref (branch/tag/commit) pinning the model version.
    revision: Annotated[
        str,
        Field(
            description=(
                "Git revision (branch, tag, or commit hash) of the model "
                "repository to use. Allows pinning to specific model versions "
                "for reproducibility."
            ),
            examples=["main", "v1.0.0"],
        ),
    ]
    # Path within the repo to the artifacts; "" means repository root.
    model_path: Annotated[
        str,
        Field(
            description=(
                "Relative path within the repository to model artifacts. Empty "
                "string indicates artifacts are in the repository root. Used "
                "for repositories with multiple models or nested structures."
            ),
        ),
    ]
    # Defaults to all known accelerators.
    supported_devices: Annotated[
        list[AcceleratorDevice],
        Field(
            description=(
                "List of hardware accelerators supported by this model. The "
                "model can only run on devices in this list."
            )
        ),
    ] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
        AcceleratorDevice.MPS,
        AcceleratorDevice.XPU,
    ]

    @property
    def model_repo_folder(self) -> str:
        """Repo id mapped to a filesystem-safe folder name ("/" -> "--")."""
        return self.repo_id.replace("/", "--")
# HuggingFace Layout Models
# Predefined configurations; all track the "main" revision with artifacts
# at the repository root. Names mirror the LayoutModelType enum below.

# Default Docling Layout Model
DOCLING_LAYOUT_V2 = LayoutModelConfig(
    name="docling_layout_v2",
    repo_id="docling-project/docling-layout-old",
    revision="main",
    model_path="",
)

DOCLING_LAYOUT_HERON = LayoutModelConfig(
    name="docling_layout_heron",
    repo_id="docling-project/docling-layout-heron",
    revision="main",
    model_path="",
)

DOCLING_LAYOUT_HERON_101 = LayoutModelConfig(
    name="docling_layout_heron_101",
    repo_id="docling-project/docling-layout-heron-101",
    revision="main",
    model_path="",
)

DOCLING_LAYOUT_EGRET_MEDIUM = LayoutModelConfig(
    name="docling_layout_egret_medium",
    repo_id="docling-project/docling-layout-egret-medium",
    revision="main",
    model_path="",
)

DOCLING_LAYOUT_EGRET_LARGE = LayoutModelConfig(
    name="docling_layout_egret_large",
    repo_id="docling-project/docling-layout-egret-large",
    revision="main",
    model_path="",
)

DOCLING_LAYOUT_EGRET_XLARGE = LayoutModelConfig(
    name="docling_layout_egret_xlarge",
    repo_id="docling-project/docling-layout-egret-xlarge",
    revision="main",
    model_path="",
)

# Example for a hypothetical alternative model
# ALTERNATIVE_LAYOUT = LayoutModelConfig(
#     name="alternative_layout",
#     repo_id="someorg/alternative-layout",
#     revision="main",
#     model_path="model_artifacts/layout_alt",
# )
class LayoutModelType(str, Enum):
    """String identifiers for the predefined layout model configs above."""

    DOCLING_LAYOUT_V2 = "docling_layout_v2"
    DOCLING_LAYOUT_HERON = "docling_layout_heron"
    DOCLING_LAYOUT_HERON_101 = "docling_layout_heron_101"
    DOCLING_LAYOUT_EGRET_MEDIUM = "docling_layout_egret_medium"
    DOCLING_LAYOUT_EGRET_LARGE = "docling_layout_egret_large"
    DOCLING_LAYOUT_EGRET_XLARGE = "docling_layout_egret_xlarge"
    # ALTERNATIVE_LAYOUT = "alternative_layout"
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/layout_model_specs.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:tests/test_ocr_utils.py | from typing import Tuple
import pytest
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle
from docling.utils.orientation import rotate_bounding_box
# Image size (width, height) shared by all rotation cases below.
IM_SIZE = (4, 5)
# Axis-aligned input box in top-left coordinates.
BBOX = BoundingBox(l=1, t=3, r=3, b=4, coord_origin=CoordOrigin.TOPLEFT)
# Rectangle form of BBOX (corner points r_x0..r_y3), i.e. rotation by 0°.
RECT = BoundingRectangle(
    r_x0=1,
    r_y0=4,
    r_x1=3,
    r_y1=4,
    r_x2=3,
    r_y2=3,
    r_x3=1,
    r_y3=3,
    coord_origin=CoordOrigin.TOPLEFT,
)
# Expected rectangle after rotating BBOX by 90° within IM_SIZE.
RECT_90 = BoundingRectangle(
    r_x0=4,
    r_y0=3,
    r_x1=4,
    r_y1=1,
    r_x2=3,
    r_y2=1,
    r_x3=3,
    r_y3=3,
    coord_origin=CoordOrigin.TOPLEFT,
)
# Expected rectangle after rotating BBOX by 180° within IM_SIZE.
RECT_180 = BoundingRectangle(
    r_x0=3,
    r_y0=1,
    r_x1=1,
    r_y1=1,
    r_x2=1,
    r_y2=2,
    r_x3=3,
    r_y3=2,
    coord_origin=CoordOrigin.TOPLEFT,
)
# Expected rectangle after rotating BBOX by 270° within IM_SIZE.
RECT_270 = BoundingRectangle(
    r_x0=1,
    r_y0=1,
    r_x1=1,
    r_y1=3,
    r_x2=2,
    r_y2=3,
    r_x3=2,
    r_y3=1,
    coord_origin=CoordOrigin.TOPLEFT,
)
@pytest.mark.parametrize(
    ["bbox", "im_size", "angle", "expected_rectangle"],
    [
        # (BBOX, IM_SIZE, 0, RECT),
        # (BBOX, IM_SIZE, 90, RECT_90),
        (BBOX, IM_SIZE, 180, RECT_180),
        # (BBOX, IM_SIZE, 270, RECT_270),
        # (BBOX, IM_SIZE, 360, RECT),
        # (BBOX, IM_SIZE, -90, RECT_270),
        (BBOX, IM_SIZE, -180, RECT_180),
        # (BBOX, IM_SIZE, -270, RECT_90),
    ],
)
def test_rotate_bounding_box(
    bbox: BoundingBox,
    im_size: Tuple[int, int],
    angle: int,
    expected_rectangle: BoundingRectangle,
):
    """rotate_bounding_box yields the expected rectangle and angle_360.

    NOTE(review): only the ±180° cases are enabled; the commented-out
    cases are presumably pending or failing — confirm before re-enabling.
    """
    rotated = rotate_bounding_box(bbox, angle, im_size)
    assert rotated == expected_rectangle
    # angle_360 normalizes the input angle into [0, 360)
    expected_angle_360 = angle % 360
    assert rotated.angle_360 == expected_angle_360
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_ocr_utils.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docs/examples/minimal_asr_pipeline.py | # %% [markdown]
# Minimal ASR pipeline example: transcribe an audio file to Markdown text.
#
# What this example does
# - Configures the ASR pipeline with a default model spec and converts one audio file.
# - Prints the recognized speech segments in Markdown with timestamps.
#
# Prerequisites
# - Install Docling with ASR extras and any audio dependencies (ffmpeg, etc.).
# - Ensure your environment can download or access the configured ASR model.
# - Some formats require ffmpeg codecs; install ffmpeg and ensure it's on PATH.
#
# How to run
# - From the repository root, run: `python docs/examples/minimal_asr_pipeline.py`.
# - The script prints the transcription to stdout.
#
# Customizing the model
# - The script automatically selects the best model for your hardware (MLX Whisper for Apple Silicon, native Whisper otherwise).
# - Edit `get_asr_converter()` to manually override `pipeline_options.asr_options` with any model from `asr_model_specs`.
# - Keep `InputFormat.AUDIO` and `AsrPipeline` unchanged for a minimal setup.
#
# Input audio
# - Defaults to `tests/data/audio/sample_10s.mp3`. Update `audio_path` to your own file if needed.
# %%
from pathlib import Path
from docling_core.types.doc import DoclingDocument
from docling.datamodel import asr_model_specs
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
def get_asr_converter():
    """Build a DocumentConverter wired up for the ASR pipeline.

    The `WHISPER_TURBO` spec automatically picks the best implementation
    for the local hardware: MLX Whisper Turbo on Apple Silicon (M1/M2/M3)
    with mlx-whisper installed, native Whisper Turbo otherwise. Swap in a
    different spec from `docling.datamodel.asr_model_specs` to try other
    model sizes.
    """
    options = AsrPipelineOptions()
    options.asr_options = asr_model_specs.WHISPER_TURBO

    audio_format_option = AudioFormatOption(
        pipeline_cls=AsrPipeline,
        pipeline_options=options,
    )
    return DocumentConverter(
        format_options={InputFormat.AUDIO: audio_format_option}
    )
def asr_pipeline_conversion(audio_path: Path) -> DoclingDocument:
    """Run the ASR pipeline and return a `DoclingDocument` transcript."""
    # Fail early if the sample audio is missing.
    assert audio_path.exists(), f"Test audio file not found: {audio_path}"

    result: ConversionResult = get_asr_converter().convert(audio_path)

    # Verify conversion was successful
    assert result.status == ConversionStatus.SUCCESS, (
        f"Conversion failed with status: {result.status}"
    )
    return result.document


if __name__ == "__main__":
    sample = Path("tests/data/audio/sample_10s.mp3")
    doc = asr_pipeline_conversion(audio_path=sample)
    print(doc.export_to_markdown())

    # Expected output:
    #
    # [time: 0.0-4.0] Shakespeare on Scenery by Oscar Wilde
    #
    # [time: 5.28-9.96] This is a LibriVox recording. All LibriVox recordings are in the public domain.
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/minimal_asr_pipeline.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/backend/noop_backend.py | import logging
from io import BytesIO
from pathlib import Path
from typing import Set, Union
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class NoOpBackend(AbstractDocumentBackend):
    """
    A no-op backend that only validates input existence.
    Used e.g. for audio files where actual processing is handled by the ASR pipeline.
    """

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)

        _log.debug(f"NoOpBackend initialized for: {path_or_stream}")

        # Validate input
        try:
            if isinstance(self.path_or_stream, BytesIO):
                # Perf fix: getbuffer().nbytes reads the size without
                # copying the stream; the original called getvalue()
                # twice, copying the entire buffer each time.
                size = self.path_or_stream.getbuffer().nbytes
                self.valid = size > 0
                _log.debug(f"BytesIO stream length: {size}")
            elif isinstance(self.path_or_stream, Path):
                # Check if file exists
                self.valid = self.path_or_stream.exists()
                _log.debug(f"File exists: {self.valid}")
            else:
                self.valid = False
        except Exception as e:
            _log.error(f"NoOpBackend validation failed: {e}")
            self.valid = False

    def is_valid(self) -> bool:
        """Return True when the stream was non-empty / the file exists."""
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        # No parsing happens here, so pagination is meaningless.
        return False

    @classmethod
    def supported_formats(cls) -> Set[InputFormat]:
        # Accept every input format; validation is existence-only.
        return set(InputFormat)
| {
"repo_id": "docling-project/docling",
"file_path": "docling/backend/noop_backend.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/datamodel/pipeline_options_asr_model.py | from enum import Enum
from typing import Annotated, Any, Literal, Optional, Union
from pydantic import AnyUrl, BaseModel, Field
from typing_extensions import deprecated
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_vlm_model import (
# InferenceFramework,
TransformersModelType,
)
class BaseAsrOptions(BaseModel):
    """Base configuration for automatic speech recognition models."""

    # Discriminator field used to tell ASR option flavors apart.
    kind: Annotated[
        str,
        Field(
            description=(
                "Type identifier for the ASR options. Used for discriminating "
                "between different ASR configurations."
            ),
        ),
    ]
class InferenceAsrFramework(str, Enum):
    """Inference frameworks available for running ASR models."""

    MLX = "mlx"
    # TRANSFORMERS = "transformers" # disabled for now
    WHISPER = "whisper"
class InlineAsrOptions(BaseAsrOptions):
    """Configuration for inline ASR models running locally."""

    # Fixed discriminator value for inline (local) model options.
    kind: Literal["inline_model_options"] = "inline_model_options"

    repo_id: Annotated[
        str,
        Field(
            description=(
                "HuggingFace model repository ID for the ASR model. Must be a "
                "Whisper-compatible model for automatic speech recognition."
            ),
            examples=["openai/whisper-tiny", "openai/whisper-base"],
        ),
    ]
    verbose: Annotated[
        bool,
        Field(
            description=(
                "Enable verbose logging output from the ASR model for debugging "
                "purposes."
            )
        ),
    ] = False
    timestamps: Annotated[
        bool,
        Field(
            description=(
                "Generate timestamps for transcribed segments. When enabled, "
                "each transcribed segment includes start and end times for "
                "temporal alignment with the audio."
            )
        ),
    ] = True
    # 0.0 = greedy decoding (deterministic); higher values add randomness.
    temperature: Annotated[
        float,
        Field(
            description=(
                "Sampling temperature for text generation. 0.0 uses greedy "
                "decoding (deterministic), higher values (e.g., 0.7-1.0) "
                "increase randomness. Recommended: 0.0 for consistent "
                "transcriptions."
            )
        ),
    ] = 0.0
    max_new_tokens: Annotated[
        int,
        Field(
            description=(
                "Maximum number of tokens to generate per transcription segment. "
                "Limits output length to prevent runaway generation. Adjust "
                "based on expected transcript length."
            )
        ),
    ] = 256
    # Whisper models are typically trained on 30-second windows.
    max_time_chunk: Annotated[
        float,
        Field(
            description=(
                "Maximum duration in seconds for each audio chunk processed by "
                "the model. Audio longer than this is split into chunks. "
                "Whisper models are typically trained on 30-second segments."
            )
        ),
    ] = 30.0
    torch_dtype: Annotated[
        Optional[str],
        Field(
            description=(
                "PyTorch data type for model weights. Options: `float32`, "
                "`float16`, `bfloat16`. Lower precision (float16/bfloat16) "
                "reduces memory usage and increases speed. If None, uses model "
                "default."
            )
        ),
    ] = None
    # Defaults to all known accelerators; subclasses narrow this list.
    supported_devices: Annotated[
        list[AcceleratorDevice],
        Field(
            description=(
                "List of hardware accelerators supported by this ASR model "
                "configuration."
            )
        ),
    ] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
        AcceleratorDevice.MPS,
        AcceleratorDevice.XPU,
    ]

    @property
    def repo_cache_folder(self) -> str:
        """Repo id mapped to a filesystem-safe folder name ("/" -> "--")."""
        return self.repo_id.replace("/", "--")
class InlineAsrNativeWhisperOptions(InlineAsrOptions):
    """Configuration for native Whisper ASR implementation."""

    # Pinned to the native Whisper framework.
    inference_framework: Annotated[
        InferenceAsrFramework,
        Field(
            description=(
                "Inference framework for ASR. Uses native Whisper "
                "implementation for optimal performance."
            )
        ),
    ] = InferenceAsrFramework.WHISPER
    language: Annotated[
        str,
        Field(
            description=(
                "Language code for transcription. Specifying the correct "
                "language improves accuracy. Use ISO 639-1 codes (e.g., `en`, "
                "`es`, `fr`)."
            ),
            examples=["en", "es", "fr", "de"],
        ),
    ] = "en"
    # Narrows the inherited default (no MPS/XPU for native Whisper).
    supported_devices: Annotated[
        list[AcceleratorDevice],
        Field(
            description=(
                "Hardware accelerators supported by native Whisper. Supports "
                "CPU and CUDA only."
            )
        ),
    ] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
    ]
    word_timestamps: Annotated[
        bool,
        Field(
            description=(
                "Generate word-level timestamps in addition to segment "
                "timestamps. Provides fine-grained temporal alignment for each "
                "word in the transcription."
            )
        ),
    ] = True
class InlineAsrMlxWhisperOptions(InlineAsrOptions):
    """MLX Whisper options for Apple Silicon optimization.

    Uses mlx-whisper library for efficient inference on Apple Silicon devices.
    """

    # Pinned to the MLX framework.
    inference_framework: Annotated[
        InferenceAsrFramework,
        Field(
            description=(
                "Inference framework for ASR. Uses MLX for optimized "
                "performance on Apple Silicon (M1/M2/M3)."
            )
        ),
    ] = InferenceAsrFramework.MLX
    language: Annotated[
        str,
        Field(
            description=(
                "Language code for transcription. Specifying the correct "
                "language improves accuracy. Use ISO 639-1 codes (e.g., `en`, "
                "`es`, `fr`)."
            ),
            examples=["en", "es", "fr", "de"],
        ),
    ] = "en"
    task: Annotated[
        str,
        Field(
            description=(
                "ASR task type. `transcribe` converts speech to text in the "
                "same language. `translate` converts speech to English text "
                "regardless of input language."
            ),
            examples=["transcribe", "translate"],
        ),
    ] = "transcribe"
    # Narrows the inherited default: MLX runs on Apple Silicon (MPS) only.
    supported_devices: Annotated[
        list[AcceleratorDevice],
        Field(
            description=(
                "Hardware accelerators supported by MLX Whisper. Optimized for "
                "Apple Silicon (MPS) only."
            )
        ),
    ] = [AcceleratorDevice.MPS]
    word_timestamps: Annotated[
        bool,
        Field(
            description=(
                "Generate word-level timestamps in addition to segment "
                "timestamps. Provides fine-grained temporal alignment for each "
                "word in the transcription."
            )
        ),
    ] = True
    no_speech_threshold: Annotated[
        float,
        Field(
            description=(
                "Threshold for detecting speech vs. silence. Segments with "
                "no-speech probability above this threshold are considered "
                "silent. Range: 0.0-1.0. Higher values are more aggressive in "
                "filtering silence."
            )
        ),
    ] = 0.6
    logprob_threshold: Annotated[
        float,
        Field(
            description=(
                "Log probability threshold for filtering low-confidence "
                "transcriptions. Segments with average log probability below "
                "this threshold are filtered out. More negative values are more "
                "permissive."
            )
        ),
    ] = -1.0
    compression_ratio_threshold: Annotated[
        float,
        Field(
            description=(
                "Compression ratio threshold for detecting repetitive or "
                "low-quality transcriptions. Segments with compression ratio "
                "above this threshold are filtered. Higher values are more "
                "permissive."
            )
        ),
    ] = 2.4
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/pipeline_options_asr_model.py",
"license": "MIT License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:tests/test_asr_pipeline.py | import sys
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from docling.datamodel import asr_model_specs
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.document import ConversionResult, InputDocument
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline
# pytestmark = pytest.mark.skipif(
# sys.version_info >= (3, 14),
# reason="Python 3.14 is not yet supported by whisper dependencies.",
# )
@pytest.fixture
def test_audio_path():
    """Ten-second MP3 sample used as the ASR test input."""
    sample = Path("./tests/data/audio/sample_10s.mp3")
    return sample
def get_asr_converter():
    """Create a DocumentConverter configured for ASR with the WHISPER_TINY model."""
    options = AsrPipelineOptions()
    options.asr_options = asr_model_specs.WHISPER_TINY
    return DocumentConverter(
        format_options={
            InputFormat.AUDIO: AudioFormatOption(
                pipeline_cls=AsrPipeline,
                pipeline_options=options,
            )
        }
    )
def test_asr_pipeline_conversion(test_audio_path):
    """Transcribing sample_10s.mp3 succeeds and yields text content."""
    # The fixture file must exist before we attempt conversion.
    assert test_audio_path.exists(), f"Test audio file not found: {test_audio_path}"

    doc_result: ConversionResult = get_asr_converter().convert(test_audio_path)

    assert doc_result.status == ConversionStatus.SUCCESS, (
        f"Conversion failed with status: {doc_result.status}"
    )
    assert doc_result.document is not None, "No document was created"

    # The transcript must contain at least one text item.
    texts = doc_result.document.texts
    assert len(texts) > 0, "No text content found in transcribed audio"

    # Echo the transcription to simplify debugging in CI logs.
    print(f"Transcribed text from {test_audio_path.name}:")
    for i, text_item in enumerate(texts):
        print(f"  {i + 1}: {text_item.text}")
@pytest.fixture
def silent_audio_path():
    """Fixture to provide the path to a silent audio file."""
    silent_path = Path("./tests/data/audio/silent_1s.wav")
    if not silent_path.exists():
        pytest.skip("Silent audio file for testing not found at " + str(silent_path))
    return silent_path
def test_asr_pipeline_with_silent_audio(silent_audio_path):
    """
    Test that the ASR pipeline correctly handles silent audio files
    by returning a PARTIAL_SUCCESS status.
    """
    doc_result: ConversionResult = get_asr_converter().convert(silent_audio_path)

    # Accept PARTIAL_SUCCESS or SUCCESS depending on runtime behavior
    acceptable_statuses = (
        ConversionStatus.PARTIAL_SUCCESS,
        ConversionStatus.SUCCESS,
    )
    assert doc_result.status in acceptable_statuses
def test_has_text_and_determine_status_helpers():
    """Unit-test _has_text and _determine_status on a minimal ConversionResult."""
    pipeline_options = AsrPipelineOptions()
    # Avoid importing torch in decide_device by forcing the CPU-only native
    # path. (Fix: the original assigned WHISPER_TINY and immediately
    # overwrote it with WHISPER_TINY_NATIVE; the dead assignment is gone.)
    pipeline_options.asr_options = asr_model_specs.WHISPER_TINY_NATIVE
    pipeline = AsrPipeline(pipeline_options)

    # Create an empty ConversionResult with proper InputDocument
    doc_path = Path("./tests/data/audio/sample_10s.mp3")
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.base_models import InputFormat

    input_doc = InputDocument(
        path_or_stream=doc_path,
        format=InputFormat.AUDIO,
        backend=NoOpBackend,
    )
    conv_res = ConversionResult(input=input_doc)

    # Simulate run result with empty document/texts
    conv_res.status = ConversionStatus.SUCCESS
    assert pipeline._has_text(conv_res.document) is False
    assert pipeline._determine_status(conv_res) in (
        ConversionStatus.PARTIAL_SUCCESS,
        ConversionStatus.SUCCESS,
        ConversionStatus.FAILURE,
    )

    # Now make a document with whitespace-only text to exercise empty detection
    conv_res.document.texts = []
    conv_res.errors = []
    assert pipeline._has_text(conv_res.document) is False

    # Emulate non-empty content with minimal stand-ins exposing `.text`
    class _T:
        def __init__(self, t):
            self.text = t

    conv_res.document.texts = [_T("  "), _T("ok")]
    assert pipeline._has_text(conv_res.document) is True
def test_is_backend_supported_noop_backend():
    """AsrPipeline accepts NoOpBackend instances and rejects arbitrary objects."""
    from pathlib import Path

    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.base_models import InputFormat
    from docling.datamodel.document import InputDocument

    class _Dummy:
        pass

    # Build a genuine NoOpBackend around a real audio sample.
    audio_file = Path("./tests/data/audio/sample_10s.mp3")
    document = InputDocument(
        path_or_stream=audio_file, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    backend = NoOpBackend(document, audio_file)
    assert AsrPipeline.is_backend_supported(backend) is True
    assert AsrPipeline.is_backend_supported(_Dummy()) is False
def test_native_and_mlx_transcribe_language_handling(monkeypatch, tmp_path):
    """Cover language None/empty handling in model.transcribe wrappers.

    Both wrappers are built with ``language=""`` so the empty-language branch
    is exercised; the real model objects are replaced with mocks.
    NOTE(review): the ``monkeypatch`` fixture appears unused here — confirm
    whether it can be dropped from the signature.
    """
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrMlxWhisperOptions,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _MlxWhisperModel, _NativeWhisperModel
    # Native
    opts_n = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=False,
        word_timestamps=False,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="",  # empty string is the case under test
    )
    m = _NativeWhisperModel(
        True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts_n
    )
    # Swap in a mock model so no real whisper inference happens.
    m.model = Mock()
    m.verbose = False
    m.word_timestamps = False
    # ensure language mapping occurs and transcribe is called
    m.model.transcribe.return_value = {"segments": []}
    m.transcribe(tmp_path / "a.wav")
    m.model.transcribe.assert_called()
    # MLX
    opts_m = InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="",
    )
    # Stub the mlx_whisper module so the wrapper can be constructed off-device.
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        mm = _MlxWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts_m
        )
        mm.mlx_whisper = Mock()
        mm.mlx_whisper.transcribe.return_value = {"segments": []}
        mm.transcribe(tmp_path / "b.wav")
        mm.mlx_whisper.transcribe.assert_called()
def test_native_init_with_artifacts_path_and_device_logging(tmp_path):
    """Cover _NativeWhisperModel init path with artifacts_path passed."""
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _NativeWhisperModel

    whisper_opts = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=False,
        word_timestamps=False,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="en",
    )
    accel = AcceleratorOptions(device=AcceleratorDevice.CPU)
    # Passing tmp_path exercises the artifacts_path branch of __init__.
    native_model = _NativeWhisperModel(True, tmp_path, accel, whisper_opts)
    # Replace the loaded model with a mock so nothing heavy stays resident.
    native_model.model = Mock()
    assert native_model.enabled is True
def test_native_run_success_with_bytesio_builds_document(tmp_path):
    """Cover _NativeWhisperModel.run with BytesIO input and success path."""
    from io import BytesIO
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _NativeWhisperModel
    # Prepare InputDocument with BytesIO (in-memory stream instead of a file path)
    audio_bytes = BytesIO(b"RIFF....WAVE")
    input_doc = InputDocument(
        path_or_stream=audio_bytes,
        format=InputFormat.AUDIO,
        backend=NoOpBackend,
        filename="a.wav",
    )
    conv_res = ConversionResult(input=input_doc)
    # Model with mocked underlying whisper
    opts = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=False,
        word_timestamps=True,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="en",
    )
    model = _NativeWhisperModel(
        True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
    )
    model.model = Mock()
    model.verbose = False
    model.word_timestamps = True
    # One segment carrying word-level timestamps, matching word_timestamps=True above.
    model.model.transcribe.return_value = {
        "segments": [
            {
                "start": 0.0,
                "end": 1.0,
                "text": "hi",
                "words": [{"start": 0.0, "end": 0.5, "word": "hi"}],
            }
        ]
    }
    out = model.run(conv_res)
    # Status is determined later by pipeline; here we validate document content
    assert out.document is not None
    assert len(out.document.texts) >= 1
def test_native_run_failure_sets_status(tmp_path):
    """Cover _NativeWhisperModel.run failure path when transcribe raises."""
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _NativeWhisperModel

    # A real file on disk is required so the backend can initialize.
    wav_path = tmp_path / "a.wav"
    wav_path.write_bytes(b"RIFF....WAVE")
    conv_res = ConversionResult(
        input=InputDocument(
            path_or_stream=wav_path, format=InputFormat.AUDIO, backend=NoOpBackend
        )
    )
    whisper_opts = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=False,
        word_timestamps=False,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="en",
    )
    asr_model = _NativeWhisperModel(
        True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), whisper_opts
    )
    # An exception escaping transcribe must surface as a FAILURE status.
    asr_model.model = Mock()
    asr_model.model.transcribe.side_effect = RuntimeError("boom")
    assert asr_model.run(conv_res).status.name == "FAILURE"
def test_mlx_run_success_and_failure(tmp_path):
    """Cover _MlxWhisperModel.run success and failure paths."""
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrMlxWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _MlxWhisperModel
    # Success path
    # Create real files so backend initializes and hashes compute
    path_ok = tmp_path / "b.wav"
    path_ok.write_bytes(b"RIFF....WAVE")
    input_doc = InputDocument(
        path_or_stream=path_ok, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    conv_res = ConversionResult(input=input_doc)
    # Stub the mlx_whisper module so the wrapper can be built without MLX installed.
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        opts = InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-tiny-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
        )
        model = _MlxWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts
        )
        model.mlx_whisper = Mock()
        model.mlx_whisper.transcribe.return_value = {
            "segments": [{"start": 0.0, "end": 1.0, "text": "ok"}]
        }
        out = model.run(conv_res)
        assert out.status.name == "SUCCESS"
    # Failure path
    path_fail = tmp_path / "c.wav"
    path_fail.write_bytes(b"RIFF....WAVE")
    input_doc2 = InputDocument(
        path_or_stream=path_fail, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    conv_res2 = ConversionResult(input=input_doc2)
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        opts2 = InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-tiny-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
        )
        model2 = _MlxWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts2
        )
        model2.mlx_whisper = Mock()
        # A raising transcribe must be caught and reported as FAILURE.
        model2.mlx_whisper.transcribe.side_effect = RuntimeError("fail")
        out2 = model2.run(conv_res2)
        assert out2.status.name == "FAILURE"
def test_native_whisper_handles_zero_duration_timestamps(tmp_path):
    """Tests that _NativeWhisperModel correctly adjusts zero-duration segments."""
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _NativeWhisperModel
    # Create a real file so backend initializes
    audio_path = tmp_path / "test.wav"
    audio_path.write_bytes(b"RIFF....WAVE")
    input_doc = InputDocument(
        path_or_stream=audio_path, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    conv_res = ConversionResult(input=input_doc)
    opts = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=True,
        word_timestamps=False,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="en",
    )
    # Patch whisper import
    with patch.dict("sys.modules", {"whisper": Mock()}):
        model = _NativeWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
        )
        model.model = Mock()
        model.verbose = False
        model.word_timestamps = False
        # Mix of valid and zero-duration segments (start == end on the middle one)
        model.model.transcribe.return_value = {
            "segments": [
                {"start": 0.0, "end": 1.0, "text": "valid segment"},
                {"start": 2.0, "end": 2.0, "text": "zero-duration"},
                {"start": 3.0, "end": 4.0, "text": "another valid"},
            ]
        }
        out = model.run(conv_res)
    # All segments should be present with adjusted durations where needed
    assert out.document is not None
    assert len(out.document.texts) == 3
    assert out.document.texts[0].text == "valid segment"
    assert out.document.texts[1].text == "zero-duration"
    assert out.document.texts[2].text == "another valid"
def test_mlx_whisper_handles_zero_duration_timestamps(tmp_path):
    """Tests that _MlxWhisperModel correctly adjusts zero-duration segments."""
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrMlxWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _MlxWhisperModel
    # Create a real file so backend initializes
    audio_path = tmp_path / "test.wav"
    audio_path.write_bytes(b"RIFF....WAVE")
    input_doc = InputDocument(
        path_or_stream=audio_path, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    conv_res = ConversionResult(input=input_doc)
    # Stub the mlx_whisper module so the wrapper can be built without MLX installed.
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        opts = InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-tiny-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
        )
        model = _MlxWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), opts
        )
        model.mlx_whisper = Mock()
        # Mix of valid and zero-duration segments (start == end on the middle one)
        model.mlx_whisper.transcribe.return_value = {
            "segments": [
                {"start": 0.0, "end": 1.0, "text": "valid segment"},
                {"start": 2.0, "end": 2.0, "text": "zero-duration"},
                {"start": 3.0, "end": 4.0, "text": "another valid"},
            ]
        }
        out = model.run(conv_res)
    # All segments should be present with adjusted durations where needed
    assert out.document is not None
    assert len(out.document.texts) == 3
    assert out.document.texts[0].text == "valid segment"
    assert out.document.texts[1].text == "zero-duration"
    assert out.document.texts[2].text == "another valid"
def test_native_whisper_skips_empty_zero_duration(tmp_path):
    """Tests that _NativeWhisperModel skips empty zero-duration segments."""
    from unittest.mock import Mock, patch
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.base_models import InputFormat
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrNativeWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _NativeWhisperModel
    # A real file is needed so the backend initializes.
    audio_path = tmp_path / "test.wav"
    audio_path.write_bytes(b"RIFF....WAVE")
    input_doc = InputDocument(
        path_or_stream=audio_path, format=InputFormat.AUDIO, backend=NoOpBackend
    )
    conv_res = ConversionResult(input=input_doc)
    opts = InlineAsrNativeWhisperOptions(
        repo_id="tiny",
        inference_framework=InferenceAsrFramework.WHISPER,
        verbose=False,
        timestamps=True,
        word_timestamps=False,
        temperature=0.0,
        max_new_tokens=1,
        max_time_chunk=1.0,
        language="en",
    )
    # Stub the whisper module so construction does not load a real model.
    with patch.dict("sys.modules", {"whisper": Mock()}):
        model = _NativeWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.CPU), opts
        )
        model.model = Mock()
        model.verbose = False
        model.word_timestamps = False
        # Valid segment with empty zero-duration segments
        model.model.transcribe.return_value = {
            "segments": [
                {"start": 0.0, "end": 1.0, "text": "valid segment"},
                {"start": 2.0, "end": 2.0, "text": " "}, # Empty (whitespace only)
                {"start": 3.0, "end": 3.0, "text": ""}, # Empty
                {"start": 4.0, "end": 5.0, "text": "another valid"},
            ]
        }
        out = model.run(conv_res)
    # Should have two valid segments, empty zero-duration segments skipped
    assert out.document is not None
    assert len(out.document.texts) == 2
    assert out.document.texts[0].text == "valid segment"
    assert out.document.texts[1].text == "another valid"
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_asr_pipeline.py",
"license": "MIT License",
"lines": 500,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:docling/datamodel/accelerator_options.py | import logging
import os
import re
from enum import Enum
from typing import Annotated, Any, Union
from pydantic import Field, field_validator, model_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
_log = logging.getLogger(__name__)
class AcceleratorDevice(str, Enum):
    """Devices to run model inference"""

    AUTO = "auto"  # select the best available device automatically
    CPU = "cpu"
    CUDA = "cuda"  # NVIDIA GPU
    MPS = "mps"  # Apple Silicon
    XPU = "xpu"  # Intel GPU
class AcceleratorOptions(BaseSettings):
    """Hardware acceleration configuration for model inference.

    Can be configured via environment variables with DOCLING_ prefix.
    """

    model_config = SettingsConfigDict(
        env_prefix="DOCLING_", env_nested_delimiter="_", populate_by_name=True
    )

    num_threads: Annotated[
        int,
        Field(
            description=(
                "Number of CPU threads to use for model inference. Higher values "
                "can improve throughput on multi-core systems but may increase "
                "memory usage. Can be set via DOCLING_NUM_THREADS or "
                "OMP_NUM_THREADS environment variables. Recommended: number of "
                "physical CPU cores."
            )
        ),
    ] = 4
    device: Annotated[
        Union[str, AcceleratorDevice],
        Field(
            description=(
                "Hardware device for model inference. Options: `auto` "
                "(automatic detection), `cpu` (CPU only), `cuda` (NVIDIA GPU), "
                "`cuda:N` (specific GPU), `mps` (Apple Silicon), `xpu` (Intel "
                "GPU). Auto mode selects the best available device. Can be set "
                "via DOCLING_DEVICE environment variable."
            )
        ),
    ] = "auto"
    cuda_use_flash_attention2: Annotated[
        bool,
        Field(
            description=(
                "Enable Flash Attention 2 optimization for CUDA devices. "
                "Provides significant speedup and memory reduction for "
                "transformer models on compatible NVIDIA GPUs (Ampere or newer). "
                "Requires flash-attn package installation. Can be set via "
                "DOCLING_CUDA_USE_FLASH_ATTENTION2 environment variable."
            )
        ),
    ] = False

    @field_validator("device")
    @classmethod  # explicit decorator, consistent with check_alternative_envvars
    def validate_device(cls, value):
        """Accept the known device names plus indexed CUDA devices (`cuda:N`)."""
        # "auto", "cpu", "cuda", "mps", "xpu", or "cuda:N"
        if value in {d.value for d in AcceleratorDevice} or re.match(
            r"^cuda(:\d+)?$", value
        ):
            return value
        raise ValueError(
            "Invalid device option. Use `auto`, `cpu`, `mps`, `xpu`, `cuda`, "
            "or `cuda:N`."
        )

    @model_validator(mode="before")
    @classmethod
    def check_alternative_envvars(cls, data: Any) -> Any:
        r"""
        Set num_threads from the "alternative" envvar OMP_NUM_THREADS.
        The alternative envvar is used only if it is valid and the regular
        envvar is not set.

        Notice: The standard pydantic settings mechanism with parameter
        "aliases" does not provide the same functionality. In case the alias
        envvar is set and the user tries to override the parameter in settings
        initialization, Pydantic treats the parameter provided in __init__()
        as an extra input instead of simply overwriting the evvar value for
        that parameter.
        """
        if isinstance(data, dict):
            input_num_threads = data.get("num_threads")
            # Check if to set the num_threads from the alternative envvar
            if input_num_threads is None:
                docling_num_threads = os.getenv("DOCLING_NUM_THREADS")
                omp_num_threads = os.getenv("OMP_NUM_THREADS")
                # OMP_NUM_THREADS only applies when DOCLING_NUM_THREADS is unset.
                if docling_num_threads is None and omp_num_threads is not None:
                    try:
                        data["num_threads"] = int(omp_num_threads)
                    except ValueError:
                        _log.error(
                            "Ignoring misformatted envvar OMP_NUM_THREADS '%s'",
                            omp_num_threads,
                        )
        return data
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/accelerator_options.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/datamodel/pipeline_options_vlm_model.py | from enum import Enum
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Union
from docling_core.types.doc.page import SegmentedPage
from pydantic import AnyUrl, BaseModel, ConfigDict, Field
from transformers import StoppingCriteria
from typing_extensions import deprecated
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.models.utils.generation_utils import GenerationStopper
if TYPE_CHECKING:
from docling_core.types.doc.page import SegmentedPage
from docling.datamodel.base_models import Page
class BaseVlmOptions(BaseModel):
    """Base configuration for vision-language models.

    Field metadata (the ``Field(description=...)`` blocks) doubles as
    user-facing documentation for each option.
    """

    kind: Annotated[
        str,
        Field(
            description=(
                "Type identifier for the VLM options. Used for discriminating "
                "between different VLM configurations."
            ),
        ),
    ]
    prompt: Annotated[
        str,
        Field(
            description=(
                "Prompt template for the vision-language model. Guides the "
                "model's output format and content focus."
            ),
        ),
    ]
    scale: Annotated[
        float,
        Field(
            description=(
                "Scaling factor for image resolution before processing. Higher "
                "values provide more detail but increase processing time and "
                "memory usage. Range: 0.5-4.0 typical."
            )
        ),
    ] = 2.0
    max_size: Annotated[
        Optional[int],
        Field(
            description=(
                "Maximum image dimension (width or height) in pixels. Images "
                "larger than this are resized while maintaining aspect ratio. "
                "If None, no size limit is enforced."
            )
        ),
    ] = None
    temperature: Annotated[
        float,
        Field(
            description=(
                "Sampling temperature for text generation. 0.0 uses greedy "
                "decoding (deterministic), higher values (e.g., 0.7-1.0) "
                "increase randomness. Recommended: 0.0 for consistent outputs."
            )
        ),
    ] = 0.0

    def build_prompt(
        self,
        page: Optional["SegmentedPage"],
        *,
        _internal_page: Optional["Page"] = None,
    ) -> str:
        """Build the prompt for VLM inference.

        Args:
            page: The parsed/segmented page to process.
            _internal_page: Internal parameter for experimental layout-aware
                pipelines. Do not rely on this in user code - subject to change.

        Returns:
            The formatted prompt string.
        """
        # Base implementation ignores the page and returns the static template.
        return self.prompt

    def decode_response(self, text: str) -> str:
        """Post-process the raw model output; identity by default."""
        return text
class ResponseFormat(str, Enum):
    """Output formats a VLM can be asked to produce; guides response parsing."""

    DOCTAGS = "doctags"  # structured docling tags
    MARKDOWN = "markdown"
    DEEPSEEKOCR_MARKDOWN = "deepseekocr_markdown"
    HTML = "html"
    OTSL = "otsl"  # table-structure language
    PLAINTEXT = "plaintext"
class InferenceFramework(str, Enum):
    """Framework used to run an inline VLM."""

    MLX = "mlx"  # Apple Silicon
    TRANSFORMERS = "transformers"  # HuggingFace Transformers
    VLLM = "vllm"  # high-throughput serving
class TransformersModelType(str, Enum):
    """HuggingFace Transformers auto-model class used to load the checkpoint."""

    AUTOMODEL = "automodel"  # auto-detect
    AUTOMODEL_VISION2SEQ = "automodel-vision2seq"
    AUTOMODEL_CAUSALLM = "automodel-causallm"
    AUTOMODEL_IMAGETEXTTOTEXT = "automodel-imagetexttotext"
class TransformersPromptStyle(str, Enum):
    """Prompt formatting style for Transformers models."""

    CHAT = "chat"  # apply the model's chat template
    RAW = "raw"  # pass raw prompt text
    NONE = "none"  # no formatting
class InlineVlmOptions(BaseVlmOptions):
    """Configuration for inline vision-language models running locally."""

    # arbitrary_types_allowed: custom_stopping_criteria holds non-pydantic objects
    # (transformers.StoppingCriteria / GenerationStopper).
    model_config = ConfigDict(arbitrary_types_allowed=True)

    kind: Literal["inline_model_options"] = "inline_model_options"
    repo_id: Annotated[
        str,
        Field(
            description=(
                "HuggingFace model repository ID for the vision-language "
                "model. Must be a model capable of processing images and "
                "generating text."
            ),
            examples=["Qwen/Qwen2-VL-2B-Instruct", "ibm-granite/granite-vision-3.3-2b"],
        ),
    ]
    revision: Annotated[
        str,
        Field(
            description=(
                "Git revision (branch, tag, or commit hash) of the model "
                "repository. Allows pinning to specific model versions for "
                "reproducibility."
            ),
            examples=["main", "v1.0.0"],
        ),
    ] = "main"
    trust_remote_code: Annotated[
        bool,
        Field(
            description=(
                "Allow execution of custom code from the model repository. "
                "Required for some models with custom architectures. Enable "
                "only for trusted sources due to security implications."
            )
        ),
    ] = False
    load_in_8bit: Annotated[
        bool,
        Field(
            description=(
                "Load model weights in 8-bit precision using bitsandbytes "
                "quantization. Reduces memory usage by ~50% with minimal "
                "accuracy loss. Requires bitsandbytes library and CUDA."
            )
        ),
    ] = True
    llm_int8_threshold: Annotated[
        float,
        Field(
            description=(
                "Threshold for LLM.int8() quantization outlier detection. "
                "Values with magnitude above this threshold are kept in "
                "float16 for accuracy. Lower values increase quantization but "
                "may reduce quality."
            )
        ),
    ] = 6.0
    quantized: Annotated[
        bool,
        Field(
            description=(
                "Indicates if the model is pre-quantized (e.g., GGUF, AWQ). "
                "When True, skips runtime quantization. Use for models already "
                "quantized during training or conversion."
            )
        ),
    ] = False
    inference_framework: Annotated[
        InferenceFramework,
        Field(
            description=(
                "Inference framework for running the VLM. Options: "
                "`transformers` (HuggingFace), `mlx` (Apple Silicon), `vllm` "
                "(high-throughput serving)."
            ),
        ),
    ]
    transformers_model_type: Annotated[
        TransformersModelType,
        Field(
            description=(
                "HuggingFace Transformers model class to use. Options: "
                "`automodel` (auto-detect), `automodel-vision2seq` "
                "(vision-to-sequence), `automodel-causallm` (causal LM), "
                "`automodel-imagetexttotext` (image+text to text)."
            )
        ),
    ] = TransformersModelType.AUTOMODEL
    transformers_prompt_style: Annotated[
        TransformersPromptStyle,
        Field(
            description=(
                "Prompt formatting style for Transformers models. Options: "
                "`chat` (chat template), `raw` (raw text), `none` (no "
                "formatting). Use `chat` for instruction-tuned models."
            )
        ),
    ] = TransformersPromptStyle.CHAT
    response_format: Annotated[
        ResponseFormat,
        Field(
            description=(
                "Expected output format from the VLM. Options: `doctags` "
                "(structured tags), `markdown`, `html`, `otsl` (table "
                "structure), `plaintext`. Guides model output parsing."
            ),
        ),
    ]
    torch_dtype: Annotated[
        Optional[str],
        Field(
            description=(
                "PyTorch data type for model weights. Options: `float32`, "
                "`float16`, `bfloat16`. Lower precision reduces memory and "
                "increases speed. If None, uses model default."
            )
        ),
    ] = None
    supported_devices: Annotated[
        list[AcceleratorDevice],
        Field(
            description=(
                "List of hardware accelerators supported by this VLM configuration."
            )
        ),
    ] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
        AcceleratorDevice.MPS,
        AcceleratorDevice.XPU,
    ]
    stop_strings: Annotated[
        list[str],
        Field(
            description=(
                "List of strings that trigger generation stopping when "
                "encountered. Used to prevent the model from generating beyond "
                "desired output boundaries."
            )
        ),
    ] = []
    custom_stopping_criteria: Annotated[
        list[Union[StoppingCriteria, GenerationStopper]],
        Field(
            description=(
                "Custom stopping criteria objects for fine-grained control "
                "over generation termination. Allows implementing complex "
                "stopping logic beyond simple string matching."
            )
        ),
    ] = []
    extra_generation_config: Annotated[
        dict[str, Any],
        Field(
            description=(
                "Additional generation configuration parameters passed to the "
                "model. Overrides or extends default generation settings (e.g., "
                "top_p, top_k, repetition_penalty)."
            )
        ),
    ] = {}
    extra_processor_kwargs: Annotated[
        dict[str, Any],
        Field(
            description=(
                "Additional keyword arguments passed to the image processor. "
                "Used for model-specific preprocessing options not covered by "
                "standard parameters."
            )
        ),
    ] = {}
    use_kv_cache: Annotated[
        bool,
        Field(
            description=(
                "Enable key-value caching for transformer attention. "
                "Significantly speeds up generation by caching attention "
                "computations. Disable only for debugging or "
                "memory-constrained scenarios."
            )
        ),
    ] = True
    max_new_tokens: Annotated[
        int,
        Field(
            description=(
                "Maximum number of tokens to generate. Limits output length to "
                "prevent runaway generation. Adjust based on expected output "
                "size and memory constraints."
            )
        ),
    ] = 4096
    track_generated_tokens: Annotated[
        bool,
        Field(
            description=(
                "Track and store generated tokens during inference. Useful for "
                "debugging, analysis, or implementing custom post-processing. "
                "Increases memory usage."
            )
        ),
    ] = False
    track_input_prompt: Annotated[
        bool,
        Field(
            description=(
                "Track and store the input prompt sent to the model. Useful "
                "for debugging, logging, or auditing. May contain sensitive "
                "information."
            )
        ),
    ] = False

    @property
    def repo_cache_folder(self) -> str:
        """Filesystem-safe cache directory name for the repo (slashes -> `--`)."""
        return self.repo_id.replace("/", "--")
@deprecated("Use InlineVlmOptions instead.")
class HuggingFaceVlmOptions(InlineVlmOptions):
    """Deprecated alias of InlineVlmOptions, kept for backward compatibility."""

    pass
class ApiVlmOptions(BaseVlmOptions):
    """Configuration for API-based vision-language model services."""

    # arbitrary_types_allowed: custom_stopping_criteria holds GenerationStopper objects.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    kind: Literal["api_model_options"] = "api_model_options"
    url: Annotated[
        AnyUrl,
        Field(
            description=(
                "API endpoint URL for VLM service. Must be OpenAI-compatible "
                "chat completions endpoint. Default points to local Ollama "
                "server; update for cloud services or custom deployments."
            )
        ),
    ] = AnyUrl("http://localhost:11434/v1/chat/completions")
    headers: Annotated[
        dict[str, str],
        Field(
            description=(
                "HTTP headers to include in API requests. Use for "
                "authentication or custom headers required by your API service."
            ),
            examples=[{"Authorization": "Bearer TOKEN"}],
        ),
    ] = {}
    params: Annotated[
        dict[str, Any],
        Field(
            description=(
                "Additional query parameters to include in API requests. "
                "Service-specific parameters for customizing API behavior "
                "beyond standard options."
            )
        ),
    ] = {}
    timeout: Annotated[
        float,
        Field(
            description=(
                "Maximum time in seconds to wait for API response before "
                "timing out. Increase for slow networks or complex vision "
                "tasks. Recommended: 30-120 seconds."
            )
        ),
    ] = 60.0
    concurrency: Annotated[
        int,
        Field(
            description=(
                "Number of concurrent API requests allowed. Higher values "
                "improve throughput but may hit API rate limits. Adjust based "
                "on API service quotas and network capacity."
            )
        ),
    ] = 1
    response_format: Annotated[
        ResponseFormat,
        Field(
            description=(
                "Expected output format from the VLM API. Options: `doctags` "
                "(structured tags), `markdown`, `html`, `otsl` (table "
                "structure), `plaintext`. Guides response parsing."
            ),
        ),
    ]
    stop_strings: Annotated[
        list[str],
        Field(
            description=(
                "List of strings that trigger generation stopping when "
                "encountered. Sent to API to prevent the model from generating "
                "beyond desired output boundaries."
            )
        ),
    ] = []
    custom_stopping_criteria: Annotated[
        list[GenerationStopper],
        Field(
            description=(
                "Custom stopping criteria objects for client-side generation "
                "control. Applied after receiving API responses for additional "
                "filtering or termination logic."
            )
        ),
    ] = []
    track_input_prompt: Annotated[
        bool,
        Field(
            description=(
                "Track and store the input prompt sent to the API. Useful for "
                "debugging, logging, or auditing. May contain sensitive "
                "information."
            )
        ),
    ] = False
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/pipeline_options_vlm_model.py",
"license": "MIT License",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/datamodel/vlm_model_specs.py | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_vlm_model import (
ApiVlmOptions,
InferenceFramework,
InlineVlmOptions,
ResponseFormat,
TransformersModelType,
TransformersPromptStyle,
)
_log = logging.getLogger(__name__)
# Granite-Docling
GRANITEDOCLING_TRANSFORMERS = InlineVlmOptions(
repo_id="ibm-granite/granite-docling-258M",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
extra_generation_config=dict(skip_special_tokens=False),
scale=2.0,
temperature=0.0,
max_new_tokens=8192,
stop_strings=["</doctag>", "<|end_of_text|>"],
)
GRANITEDOCLING_VLLM = GRANITEDOCLING_TRANSFORMERS.model_copy(deep=True)
GRANITEDOCLING_VLLM.inference_framework = InferenceFramework.VLLM
GRANITEDOCLING_MLX = InlineVlmOptions(
repo_id="ibm-granite/granite-docling-258M-mlx",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
max_new_tokens=8192,
stop_strings=["</doctag>", "<|end_of_text|>"],
)
# Granite-Docling served via an OpenAI-compatible chat-completions endpoint.
# Consistency fix: wrap the URL in AnyUrl explicitly, matching
# GRANITE_VISION_OLLAMA below (pydantic coerces strings anyway, so behavior
# is unchanged; the field type is now stated rather than implied).
GRANITEDOCLING_VLLM_API = ApiVlmOptions(
    url=AnyUrl(
        "http://localhost:8000/v1/chat/completions"
    ),  # LM studio defaults to port 1234, VLLM to 8000
    params=dict(
        # Reuse the transformers preset's repo id so the two stay in sync.
        model=GRANITEDOCLING_TRANSFORMERS.repo_id,
        max_tokens=4096,
        skip_special_tokens=True,
    ),
    prompt=GRANITEDOCLING_TRANSFORMERS.prompt,
    timeout=90,
    scale=2.0,
    temperature=0.0,
    concurrency=4,
    stop_strings=["</doctag>", "<|end_of_text|>"],
    response_format=ResponseFormat.DOCTAGS,
)
GRANITEDOCLING_OLLAMA = GRANITEDOCLING_VLLM_API.model_copy(deep=True)
GRANITEDOCLING_OLLAMA.url = AnyUrl("http://localhost:11434/v1/chat/completions")
GRANITEDOCLING_OLLAMA.params["model"] = "ibm/granite-docling:258m"
# SmolDocling
SMOLDOCLING_MLX = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview-mlx-bf16",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
SMOLDOCLING_TRANSFORMERS = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
torch_dtype="bfloat16",
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
SMOLDOCLING_VLLM = InlineVlmOptions(
repo_id="docling-project/SmolDocling-256M-preview",
prompt="Convert this page to docling.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
stop_strings=["</doctag>", "<end_of_utterance>"],
)
# SmolVLM-256M-Instruct
SMOLVLM256_TRANSFORMERS = InlineVlmOptions(
repo_id="HuggingFaceTB/SmolVLM-256M-Instruct",
prompt="Transcribe this image to plain text.",
response_format=ResponseFormat.PLAINTEXT,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
# AcceleratorDevice.MPS,
AcceleratorDevice.XPU,
],
torch_dtype="bfloat16",
scale=2.0,
temperature=0.0,
)
# SmolVLM-256M-Instruct (MLX variant)
SMOLVLM256_MLX = InlineVlmOptions(
repo_id="moot20/SmolVLM-256M-Instruct-MLX",
prompt="Extract the text.",
response_format=ResponseFormat.DOCTAGS,
inference_framework=InferenceFramework.MLX,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.MPS,
],
scale=2.0,
temperature=0.0,
)
SMOLVLM256_VLLM = InlineVlmOptions(
repo_id="HuggingFaceTB/SmolVLM-256M-Instruct",
prompt="Transcribe this image to plain text.",
response_format=ResponseFormat.PLAINTEXT,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
# GraniteVision
GRANITE_VISION_TRANSFORMERS = InlineVlmOptions(
repo_id="ibm-granite/granite-vision-3.2-2b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_VISION2SEQ,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
GRANITE_VISION_VLLM = InlineVlmOptions(
repo_id="ibm-granite/granite-vision-3.2-2b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.VLLM,
supported_devices=[
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
GRANITE_VISION_OLLAMA = ApiVlmOptions(
url=AnyUrl("http://localhost:11434/v1/chat/completions"),
params={"model": "granite3.2-vision:2b"},
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
scale=1.0,
timeout=120,
response_format=ResponseFormat.MARKDOWN,
temperature=0.0,
)
# Pixtral
PIXTRAL_12B_TRANSFORMERS = InlineVlmOptions(
repo_id="mistral-community/pixtral-12b",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_VISION2SEQ,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
PIXTRAL_12B_MLX = InlineVlmOptions(
repo_id="mlx-community/pixtral-12b-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# Phi4
PHI4_TRANSFORMERS = InlineVlmOptions(
repo_id="microsoft/Phi-4-multimodal-instruct",
prompt="Convert this page to MarkDown. Do not miss any text and only output the bare markdown",
trust_remote_code=True,
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_CAUSALLM,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
extra_generation_config=dict(num_logits_to_keep=0),
)
# Qwen
QWEN25_VL_3B_MLX = InlineVlmOptions(
repo_id="mlx-community/Qwen2.5-VL-3B-Instruct-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# GoT 2.0
GOT2_TRANSFORMERS = InlineVlmOptions(
repo_id="stepfun-ai/GOT-OCR-2.0-hf",
prompt="",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_prompt_style=TransformersPromptStyle.NONE,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
# AcceleratorDevice.MPS,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
stop_strings=["<|im_end|>"],
extra_processor_kwargs={"format": True},
)
# Gemma-3
GEMMA3_12B_MLX = InlineVlmOptions(
repo_id="mlx-community/gemma-3-12b-it-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
GEMMA3_27B_MLX = InlineVlmOptions(
repo_id="mlx-community/gemma-3-27b-it-bf16",
prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.MLX,
supported_devices=[AcceleratorDevice.MPS],
scale=2.0,
temperature=0.0,
)
# Dolphin
DOLPHIN_TRANSFORMERS = InlineVlmOptions(
repo_id="ByteDance/Dolphin",
prompt="<s>Read text in the image. <Answer/>",
response_format=ResponseFormat.MARKDOWN,
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
transformers_prompt_style=TransformersPromptStyle.RAW,
supported_devices=[
AcceleratorDevice.CUDA,
AcceleratorDevice.CPU,
AcceleratorDevice.MPS,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
# DeepSeek-OCR
# DeepSeek-OCR served through Ollama's OpenAI-compatible endpoint.
# Consistency fix: wrap the URL in AnyUrl explicitly, matching
# GRANITE_VISION_OLLAMA above (pydantic coerces strings, so behavior is
# unchanged). The trailing space in the prompt is preserved byte-for-byte.
DEEPSEEKOCR_OLLAMA = ApiVlmOptions(
    url=AnyUrl("http://localhost:11434/v1/chat/completions"),
    params=dict(
        model="deepseek-ocr:3b",
        max_tokens=4096,
        skip_special_tokens=True,
    ),
    prompt="<|grounding|>Convert the document to markdown. ",
    timeout=90,
    scale=2.0,
    temperature=0.0,
    concurrency=4,
    response_format=ResponseFormat.DEEPSEEKOCR_MARKDOWN,
)
# NuExtract
NU_EXTRACT_2B_TRANSFORMERS = InlineVlmOptions(
repo_id="numind/NuExtract-2.0-2B",
revision="fe5b2f0b63b81150721435a3ca1129a75c59c74e", # 489efed leads to MPS issues
prompt="", # This won't be used, template is passed separately
torch_dtype="bfloat16",
inference_framework=InferenceFramework.TRANSFORMERS,
transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
response_format=ResponseFormat.PLAINTEXT,
supported_devices=[
AcceleratorDevice.CPU,
AcceleratorDevice.CUDA,
AcceleratorDevice.MPS,
AcceleratorDevice.XPU,
],
scale=2.0,
temperature=0.0,
)
class VlmModelType(str, Enum):
    """Closed set of string identifiers for the VLM presets in this module.

    Each value names one of the option constants declared above so a preset
    can be selected by plain string (the ``str`` base makes members compare
    equal to their values).
    """

    SMOLDOCLING = "smoldocling"
    SMOLDOCLING_VLLM = "smoldocling_vllm"
    GRANITE_VISION = "granite_vision"
    GRANITE_VISION_VLLM = "granite_vision_vllm"
    GRANITE_VISION_OLLAMA = "granite_vision_ollama"
    GOT_OCR_2 = "got_ocr_2"
    GRANITEDOCLING = "granite_docling"
    GRANITEDOCLING_VLLM = "granite_docling_vllm"
    DEEPSEEKOCR_OLLAMA = "deepseekocr_ollama"
| {
"repo_id": "docling-project/docling",
"file_path": "docling/datamodel/vlm_model_specs.py",
"license": "MIT License",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docling/models/utils/hf_model_download.py | import logging
from pathlib import Path
from typing import Optional
_log = logging.getLogger(__name__)
def download_hf_model(
    repo_id: str,
    local_dir: Optional[Path] = None,
    force: bool = False,
    progress: bool = False,
    revision: Optional[str] = None,
) -> Path:
    """Download a model snapshot from the Hugging Face Hub.

    Args:
        repo_id: Hub repository identifier (e.g. ``"org/model"``).
        local_dir: Target directory; Hub cache default when ``None``.
        force: Re-download even if files are already cached.
        progress: Show download progress bars when ``True``.
        revision: Optional branch/tag/commit to fetch.

    Returns:
        Path to the downloaded snapshot directory.
    """
    # Imported lazily so huggingface_hub is only required when downloading.
    from huggingface_hub import snapshot_download
    from huggingface_hub.utils import disable_progress_bars

    if not progress:
        # Keep console output quiet unless the caller opted in.
        disable_progress_bars()

    return Path(
        snapshot_download(
            repo_id=repo_id,
            force_download=force,
            local_dir=local_dir,
            revision=revision,
        )
    )
class HuggingFaceModelDownloadMixin:
    """Mixin giving model classes a uniform ``download_models`` entry point.

    It is a thin static wrapper around :func:`download_hf_model` so that
    every model implementation exposes the same download API.
    """

    @staticmethod
    def download_models(
        repo_id: str,
        local_dir: Optional[Path] = None,
        force: bool = False,
        progress: bool = False,
        revision: Optional[str] = None,
    ) -> Path:
        """Delegate to :func:`download_hf_model` and return the snapshot path."""
        return download_hf_model(
            repo_id,
            local_dir=local_dir,
            force=force,
            progress=progress,
            revision=revision,
        )
| {
"repo_id": "docling-project/docling",
"file_path": "docling/models/utils/hf_model_download.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:docs/examples/compare_vlm_models.py | # %% [markdown]
# Compare different VLM models by running the VLM pipeline and timing outputs.
#
# What this example does
# - Iterates through a list of VLM presets and converts the same file.
# - Prints per-page generation times and saves JSON/MD/HTML to `scratch/`.
# - Summarizes total inference time and pages processed in a table.
# - Demonstrates the NEW preset-based approach with runtime overrides.
#
# Requirements
# - Install `tabulate` for pretty printing (`pip install tabulate`).
#
# Prerequisites
# - Install Docling with VLM extras. Ensure models can be downloaded or are available.
#
# How to run
# - From the repo root: `python docs/examples/compare_vlm_models.py`.
# - Results are saved to `scratch/` with filenames including the model and runtime.
#
# Notes
# - MLX models are skipped automatically on non-macOS platforms.
# - On CUDA systems, you can enable flash_attention_2 (see commented lines).
# - Running multiple VLMs can be GPU/CPU intensive and time-consuming; ensure
# enough VRAM/system RAM and close other memory-heavy apps.
# %%
import json
import sys
import time
from pathlib import Path
from docling_core.types.doc import DocItemLabel, ImageRefMode
from docling_core.types.doc.document import DEFAULT_EXPORT_LABELS
from tabulate import tabulate
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
VlmConvertOptions,
VlmPipelineOptions,
)
from docling.datamodel.vlm_engine_options import (
ApiVlmEngineOptions,
MlxVlmEngineOptions,
TransformersVlmEngineOptions,
VlmEngineType,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
def convert(
    sources: list[Path],
    converter: DocumentConverter,
    preset_name: str,
    runtime_type: VlmEngineType,
):
    """Convert the first source with `converter`, save JSON/MD/HTML to
    `out_path`, and return a summary row
    ``[source, preset_name, runtime, num_pages, inference_time]``.

    NOTE(review): depends on the module-level ``out_path`` created in the
    ``__main__`` block; callable only after that global exists.
    """
    # Note: this helper assumes a single-item `sources` list. It returns after
    # processing the first source to keep runtime/output focused.
    for source in sources:
        print("================================================")
        print("Processing...")
        print(f"Source: {source}")
        print("---")
        print(f"Preset: {preset_name}")
        print(f"Runtime: {runtime_type}")
        print("================================================")
        print("")

        # Measure actual conversion time
        start_time = time.time()
        res = converter.convert(source)
        end_time = time.time()
        wall_clock_time = end_time - start_time

        print("")

        # Output basename encodes source stem, preset, and runtime.
        fname = f"{res.input.file.stem}-{preset_name}-{runtime_type.value}"

        # Try to get timing from VLM response, but use wall clock as fallback
        inference_time = 0.0
        for i, page in enumerate(res.pages):
            if page.predictions.vlm_response is not None:
                gen_time = getattr(
                    page.predictions.vlm_response, "generation_time", 0.0
                )
                # Skip negative times (indicates timing not available)
                if gen_time >= 0:
                    inference_time += gen_time
                    print("")
                    print(f" ---------- Predicted page {i} in {gen_time:.2f} [sec]:")
                else:
                    print("")
                    print(f" ---------- Predicted page {i} (timing not available):")
                print(page.predictions.vlm_response.text)
                print(" ---------- ")
            else:
                print(f" ---------- Page {i}: No VLM response available ---------- ")

        # Use wall clock time if VLM timing not available
        if inference_time == 0.0:
            inference_time = wall_clock_time

        print("===== Final output of the converted document =======")

        # Manual export for illustration. Below, `save_as_json()` writes the same
        # JSON again; kept intentionally to show both approaches.
        with (out_path / f"{fname}.json").open("w") as fp:
            fp.write(json.dumps(res.document.export_to_dict()))

        res.document.save_as_json(
            out_path / f"{fname}.json",
            image_mode=ImageRefMode.PLACEHOLDER,
        )
        print(f" => produced {out_path / fname}.json")

        res.document.save_as_markdown(
            out_path / f"{fname}.md",
            image_mode=ImageRefMode.PLACEHOLDER,
        )
        print(f" => produced {out_path / fname}.md")

        # HTML export embeds page images and adds footnotes to exported labels.
        res.document.save_as_html(
            out_path / f"{fname}.html",
            image_mode=ImageRefMode.EMBEDDED,
            labels=[*DEFAULT_EXPORT_LABELS, DocItemLabel.FOOTNOTE],
            split_page_view=True,
        )
        print(f" => produced {out_path / fname}.html")

        pg_num = res.document.num_pages()
        print("")
        print(
            f"Total document prediction time: {inference_time:.2f} seconds, pages: {pg_num}"
        )
        print("====================================================")

        # Intentional early return: only the first source is processed.
        return [
            source,
            preset_name,
            str(runtime_type.value),
            pg_num,
            inference_time,
        ]
# Script entry point: run every preset/runtime combination over the sample
# PDF and print a timing comparison table.
if __name__ == "__main__":
    sources = [
        "tests/data/pdf/2305.03393v1-pg9.pdf",
    ]

    out_path = Path("scratch")
    out_path.mkdir(parents=True, exist_ok=True)

    ## Use VlmPipeline with presets
    pipeline_options = VlmPipelineOptions()
    pipeline_options.generate_page_images = True

    ## On GPU systems, enable flash_attention_2 with CUDA:
    # pipeline_options.accelerator_options.device = AcceleratorDevice.CUDA
    # pipeline_options.accelerator_options.cuda_use_flash_attention2 = True

    # Define preset configurations to test
    # Each tuple is (preset_name, engine_options)
    preset_configs = [
        # SmolDocling
        ("smoldocling", MlxVlmEngineOptions()),
        # GraniteDocling with different runtimes
        ("granite_docling", MlxVlmEngineOptions()),
        ("granite_docling", TransformersVlmEngineOptions()),
        # Granite models
        ("granite_vision", TransformersVlmEngineOptions()),
        # Other presets with MLX (macOS only)
        ("pixtral", MlxVlmEngineOptions()),
        ("qwen", MlxVlmEngineOptions()),
        ("gemma_12b", MlxVlmEngineOptions()),
        # Other presets with Ollama
        ("deepseek_ocr", ApiVlmEngineOptions(runtime_type=VlmEngineType.API_OLLAMA)),
        # Other presets with LM Studio
        (
            "deepseek_ocr",
            ApiVlmEngineOptions(runtime_type=VlmEngineType.API_LMSTUDIO),
        ),
    ]

    # Remove MLX configs if not on Mac
    if sys.platform != "darwin":
        preset_configs = [
            (preset, runtime)
            for preset, runtime in preset_configs
            if runtime.runtime_type != VlmEngineType.MLX
        ]

    rows = []
    for preset_name, engine_options in preset_configs:
        # Create VLM options from preset with runtime override
        vlm_options = VlmConvertOptions.from_preset(
            preset_name,
            engine_options=engine_options,
        )
        # NOTE(review): pipeline_options is mutated and reused across
        # iterations; each converter below is built fresh from it.
        pipeline_options.vlm_options = vlm_options

        ## Set up pipeline for PDF or image inputs
        converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(
                    pipeline_cls=VlmPipeline,
                    pipeline_options=pipeline_options,
                ),
                InputFormat.IMAGE: PdfFormatOption(
                    pipeline_cls=VlmPipeline,
                    pipeline_options=pipeline_options,
                ),
            },
        )

        row = convert(
            sources=sources,
            converter=converter,
            preset_name=preset_name,
            runtime_type=engine_options.runtime_type,
        )
        rows.append(row)

    print(
        tabulate(rows, headers=["source", "preset", "runtime", "num_pages", "time"])
    )

    print("see if memory gets released ...")
    time.sleep(10)
| {
"repo_id": "docling-project/docling",
"file_path": "docs/examples/compare_vlm_models.py",
"license": "MIT License",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
docling-project/docling:docling/utils/orientation.py | from typing import Tuple
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import BoundingRectangle
CLIPPED_ORIENTATIONS = [0, 90, 180, 270]
def rotate_bounding_box(
    bbox: BoundingBox, angle: int, im_size: Tuple[int, int]
) -> BoundingRectangle:
    """Rotate an axis-aligned bounding box by a multiple of 90 degrees CCW.

    Args:
        bbox: Box to rotate; converted to TOPLEFT origin internally.
        angle: Rotation in degrees; normalized mod 360 and must then equal
            one of CLIPPED_ORIENTATIONS (0, 90, 180, 270).
        im_size: (width, height) of the image the box lives in.

    Returns:
        BoundingRectangle in TOPLEFT coordinates with r_0 at the rotated
        box's bottom-left and the other corners listed counterclockwise.

    Raises:
        ValueError: if the normalized angle is not 0/90/180/270.
    """
    # The box is left top width height in TOPLEFT coordinates
    # Bounding rectangle start with r_0 at the bottom left whatever the
    # coordinate system. Then other corners are found rotating counterclockwise
    bbox = bbox.to_top_left_origin(im_size[1])
    left, top, width, height = bbox.l, bbox.t, bbox.width, bbox.height
    im_w, im_h = im_size
    angle = angle % 360
    if angle == 0:
        # No rotation: the axis-aligned box maps directly to a rectangle.
        return BoundingRectangle.from_bounding_box(bbox)
    elif angle == 90:
        # 90° CCW maps a point (x, y) to (y, im_w - x); r_0 is the image of
        # the original bottom-left corner (left, top + height).
        r_x0 = top + height
        r_y0 = im_w - left
        r_x1 = r_x0
        r_y1 = r_y0 - width
        r_x2 = r_x1 - height
        r_y2 = r_y1
        r_x3 = r_x2
        r_y3 = r_y0
    elif angle == 180:
        # 180° maps (x, y) to (im_w - x, im_h - y).
        r_x0 = im_w - left
        r_y0 = im_h - (top + height)
        r_x1 = r_x0 - width
        r_y1 = r_y0
        r_x2 = r_x1
        r_y2 = r_y1 + height
        r_x3 = r_x0
        r_y3 = r_y2
    elif angle == 270:
        # 270° CCW maps (x, y) to (im_h - y, x).
        r_x0 = im_h - (top + height)
        r_y0 = left
        r_x1 = r_x0
        r_y1 = r_y0 + width
        r_x2 = r_x1 + height
        r_y2 = r_y1
        r_x3 = r_x2
        r_y3 = r_y0
    else:
        msg = (
            f"invalid orientation {angle}, expected values in:"
            f" {sorted(CLIPPED_ORIENTATIONS)}"
        )
        raise ValueError(msg)
    rectangle = BoundingRectangle(
        r_x0=r_x0,
        r_y0=r_y0,
        r_x1=r_x1,
        r_y1=r_y1,
        r_x2=r_x2,
        r_y2=r_y2,
        r_x3=r_x3,
        r_y3=r_y3,
        coord_origin=CoordOrigin.TOPLEFT,
    )
    return rectangle
| {
"repo_id": "docling-project/docling",
"file_path": "docling/utils/orientation.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
docling-project/docling:tests/test_settings_load.py | def _setup_env(monkeypatch):
monkeypatch.setenv("DOCLING_PERF_PAGE_BATCH_SIZE", "12")
monkeypatch.setenv("DOCLING_DEBUG_VISUALIZE_RAW_LAYOUT", "True")
monkeypatch.setenv("DOCLING_ARTIFACTS_PATH", "/path/to/artifacts")
monkeypatch.setenv("DOCLING_INFERENCE_COMPILE_TORCH_MODELS", "True")
def test_settings(monkeypatch):
    """Env vars must populate top-level and nested settings after a reload."""
    _setup_env(monkeypatch)

    import importlib

    import docling.datamodel.settings as m

    # Reinitialize settings module
    # (settings are read from the environment at import time, so the module
    # must be reloaded after _setup_env for the overrides to take effect).
    importlib.reload(m)

    # Check top level setting
    assert str(m.settings.artifacts_path) == "/path/to/artifacts"

    # Check nested set via environment variables
    assert m.settings.perf.page_batch_size == 12
    assert m.settings.debug.visualize_raw_layout is True
    assert m.settings.inference.compile_torch_models is True

    # Check nested defaults
    assert m.settings.perf.doc_batch_size == 1
    assert m.settings.debug.visualize_ocr is False
def test_compile_model_defaults_from_settings(monkeypatch):
    """DOCLING_INFERENCE_COMPILE_TORCH_MODELS must flow through to the
    `compile_model` default of every engine options class."""
    monkeypatch.setenv("DOCLING_INFERENCE_COMPILE_TORCH_MODELS", "True")

    import importlib

    import docling.datamodel.settings as settings_module
    from docling.datamodel.image_classification_engine_options import (
        TransformersImageClassificationEngineOptions,
    )
    from docling.datamodel.object_detection_engine_options import (
        TransformersObjectDetectionEngineOptions,
    )
    from docling.datamodel.vlm_engine_options import TransformersVlmEngineOptions

    # Reload so the settings singleton re-reads the patched environment.
    importlib.reload(settings_module)

    assert TransformersObjectDetectionEngineOptions().compile_model is True
    assert TransformersImageClassificationEngineOptions().compile_model is True
    assert TransformersVlmEngineOptions().compile_model is True
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_settings_load.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
docling-project/docling:tests/test_backend_webp.py | import sys
from pathlib import Path
from typing import List
from pydantic.type_adapter import R
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument
from docling.datamodel.pipeline_options import (
EasyOcrOptions,
OcrMacOptions,
OcrOptions,
RapidOcrOptions,
TesseractCliOcrOptions,
TesseractOcrOptions,
)
from docling.document_converter import DocumentConverter, ImageFormatOption
from tests.verify_utils import verify_conversion_result_v2
from .test_data_gen_flag import GEN_TEST_DATA
GENERATE = GEN_TEST_DATA
def get_webp_paths():
    """Return all .webp files under tests/data/webp (recursive), sorted for
    deterministic test ordering."""
    webp_root = Path("./tests/data/webp/")
    return sorted(webp_root.rglob("*.webp"))
def get_converter(ocr_options: OcrOptions):
    """Build a DocumentConverter restricted to images, wired with the given
    OCR engine options."""
    fmt_option = ImageFormatOption()
    fmt_option.pipeline_options.ocr_options = ocr_options
    return DocumentConverter(
        format_options={InputFormat.IMAGE: fmt_option},
        allowed_formats=[InputFormat.IMAGE],
    )
def test_e2e_webp_conversions():
    """End-to-end: convert every test WEBP with each available OCR engine and
    verify results against the golden data (fuzzy match)."""
    webp_paths = get_webp_paths()

    engines: List[OcrOptions] = [
        EasyOcrOptions(),
        TesseractOcrOptions(),
        TesseractCliOcrOptions(),
        EasyOcrOptions(force_full_page_ocr=True),
        TesseractOcrOptions(force_full_page_ocr=True),
        TesseractOcrOptions(force_full_page_ocr=True, lang=["auto"]),
        TesseractCliOcrOptions(force_full_page_ocr=True),
        TesseractCliOcrOptions(force_full_page_ocr=True, lang=["auto"]),
    ]

    # rapidocr is only available for Python >=3.6,<3.14
    if sys.version_info < (3, 14):
        engines.append(RapidOcrOptions())
        engines.append(RapidOcrOptions(force_full_page_ocr=True))

    # only works on mac
    if "darwin" == sys.platform:
        engines.append(OcrMacOptions())
        engines.append(OcrMacOptions(force_full_page_ocr=True))

    for ocr_options in engines:
        print(
            f"Converting with ocr_engine: {ocr_options.kind}, language: {ocr_options.lang}"
        )
        converter = get_converter(ocr_options=ocr_options)
        for webp_path in webp_paths:
            print(f"converting {webp_path}")

            # raises_on_error=True: any conversion failure fails the test.
            doc_result: ConversionResult = converter.convert(
                webp_path, raises_on_error=True
            )

            verify_conversion_result_v2(
                input_path=webp_path,
                doc_result=doc_result,
                generate=GENERATE,
                fuzzy=True,
            )
| {
"repo_id": "docling-project/docling",
"file_path": "tests/test_backend_webp.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_pipeline_prefill_callbacks.py | # type: ignore
"""Test that pipeline prefill callbacks and output exactly match stream_generate.
Spins up a single-device (non-pipeline) run and a distributed pipeline run,
then verifies that the prompt_progress_callback sequences are identical
and that generated text matches.
"""
import json
import multiprocessing as mp
import os
import tempfile
import traceback
from typing import Any, cast
import pytest
from exo.shared.constants import EXO_MODELS_DIR
from exo.shared.models.model_cards import ModelCard, ModelTask
from exo.shared.types.common import ModelId
from exo.shared.types.memory import Memory
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
MODEL_ID = "mlx-community/gpt-oss-20b-MXFP4-Q8"
MODEL_PATH = EXO_MODELS_DIR / "mlx-community--gpt-oss-20b-MXFP4-Q8"
TOTAL_LAYERS = 24
MAX_TOKENS = 10
SEED = 42
TEMPERATURE = 0.0
def _model_card() -> ModelCard:
    """Build the static model card describing the GPT-OSS 20B test model."""
    card = ModelCard(
        model_id=ModelId(MODEL_ID),
        storage_size=Memory.from_gb(12),
        n_layers=TOTAL_LAYERS,
        hidden_size=2880,
        supports_tensor=False,
        tasks=[ModelTask.TextGeneration],
    )
    return card
def _build_prompt(tokenizer: Any, prompt_tokens: int) -> tuple[str, Any]:
    """Build a prompt with the given number of user-content tokens, return (chat_prompt, task)."""
    from exo.worker.engines.mlx.utils_mlx import apply_chat_template

    seed_text = "The quick brown fox jumps over the lazy dog. "
    seed_token_count = len(tokenizer.encode(seed_text))
    # Over-provision the repeats, then trim back to exactly `prompt_tokens`.
    n_repeats = (prompt_tokens // seed_token_count) + 2
    trimmed_tokens = tokenizer.encode(seed_text * n_repeats)[:prompt_tokens]
    prompt_text = tokenizer.decode(trimmed_tokens)

    task = TextGenerationTaskParams(
        model=MODEL_ID,
        input=[InputMessage(role="user", content=prompt_text)],
        max_output_tokens=MAX_TOKENS,
        temperature=TEMPERATURE,
        seed=SEED,
    )
    return apply_chat_template(tokenizer, task), task
# ---------------------------------------------------------------------------
# Single-device process: uses stream_generate path (no pipeline layers)
# ---------------------------------------------------------------------------
def _run_single_device(
    prompt_tokens: int,
    result_queue: Any,
) -> None:
    """Load full model without pipeline sharding, run mlx_generate, record callbacks.

    Runs in a spawned subprocess; reports via `result_queue` as
    ``(True, payload_dict)`` on success or ``(False, error_string)`` on failure.
    """
    try:
        # Heavy imports stay inside the subprocess body.
        import mlx.core as mx
        from mlx_lm.utils import load_model

        from exo.shared.types.worker.shards import PipelineShardMetadata
        from exo.worker.engines.mlx.cache import encode_prompt
        from exo.worker.engines.mlx.generator.generate import mlx_generate
        from exo.worker.engines.mlx.utils_mlx import (
            build_model_path,
            get_tokenizer,
        )

        model_path = build_model_path(ModelId(MODEL_ID))
        model, _ = load_model(model_path, lazy=True, strict=False)
        # Force materialization of the lazily-loaded weights.
        mx.eval(model)

        # Use PipelineShardMetadata just for get_tokenizer (needs model_card), but
        # do NOT apply pipeline sharding — the model keeps all layers unwrapped.
        dummy_meta = PipelineShardMetadata(
            model_card=_model_card(),
            device_rank=0,
            world_size=1,
            start_layer=0,
            end_layer=TOTAL_LAYERS,
            n_layers=TOTAL_LAYERS,
        )
        tokenizer = get_tokenizer(model_path, dummy_meta)

        prompt, task = _build_prompt(tokenizer, prompt_tokens)

        # Every (processed, total) prefill progress call is recorded for
        # comparison against the pipeline run.
        callbacks: list[tuple[int, int]] = []

        def on_progress(processed: int, total: int) -> None:
            callbacks.append((processed, total))

        generated_text = ""
        for response in mlx_generate(
            model=model,
            tokenizer=tokenizer,
            task=task,
            prompt=prompt,
            kv_prefix_cache=None,
            group=None,  # no distributed group: single-device path
            on_prefill_progress=on_progress,
        ):
            generated_text += response.text
            if response.finish_reason is not None:
                break

        # Also record the token count that prefill() received (prompt_tokens[:-1])
        all_tokens = encode_prompt(tokenizer, prompt)
        prefill_token_count = len(all_tokens) - 1

        result_queue.put(
            (
                True,
                {
                    "callbacks": callbacks,
                    "text": generated_text,
                    "prefill_token_count": prefill_token_count,
                },
            )
        )
    except Exception as e:
        # Ship the traceback text across the process boundary for the assert.
        result_queue.put((False, f"{e}\n{traceback.format_exc()}"))
# ---------------------------------------------------------------------------
# Pipeline device process: uses _pipeline_prefill_cache path
# ---------------------------------------------------------------------------
def _run_pipeline_device(
    rank: int,
    world_size: int,
    hostfile_path: str,
    layer_splits: list[tuple[int, int]],
    prompt_tokens: int,
    result_queue: Any,
) -> None:
    """Load model with pipeline sharding, run mlx_generate, record callbacks.

    Runs in a spawned subprocess as one rank of a ring-distributed pipeline;
    reports via `result_queue` as ``(rank, True, payload)`` or
    ``(rank, False, error_string)``.
    """
    # NOTE(review): presumably read by mx.distributed.init below to form the
    # ring — set before any MLX distributed call; confirm against MLX docs.
    os.environ["MLX_HOSTFILE"] = hostfile_path
    os.environ["MLX_RANK"] = str(rank)
    try:
        import mlx.core as mx

        from exo.shared.types.worker.shards import PipelineShardMetadata
        from exo.worker.engines.mlx.cache import encode_prompt
        from exo.worker.engines.mlx.generator.generate import mlx_generate
        from exo.worker.engines.mlx.utils_mlx import shard_and_load

        group = mx.distributed.init(backend="ring", strict=True)

        # This rank owns layers [start_layer, end_layer) of the model.
        start_layer, end_layer = layer_splits[rank]
        shard_meta = PipelineShardMetadata(
            model_card=_model_card(),
            device_rank=rank,
            world_size=world_size,
            start_layer=start_layer,
            end_layer=end_layer,
            n_layers=TOTAL_LAYERS,
        )
        model, tokenizer = shard_and_load(
            shard_meta, group, on_timeout=None, on_layer_loaded=None
        )
        model = cast(Any, model)

        prompt, task = _build_prompt(tokenizer, prompt_tokens)

        # Every (processed, total) prefill progress call is recorded for
        # comparison against the single-device run.
        callbacks: list[tuple[int, int]] = []

        def on_progress(processed: int, total: int) -> None:
            callbacks.append((processed, total))

        def distributed_prompt_progress_callback(_group: Any = group) -> None:
            # Participate in the collective so other ranks don't block;
            # `False` signals "not aborting".
            from exo.worker.engines.mlx.utils_mlx import mx_any

            mx_any(False, _group)

        generated_text = ""
        for response in mlx_generate(
            model=model,
            tokenizer=tokenizer,
            task=task,
            prompt=prompt,
            kv_prefix_cache=None,
            group=group,
            on_prefill_progress=on_progress,
            distributed_prompt_progress_callback=distributed_prompt_progress_callback,
        ):
            generated_text += response.text
            if response.finish_reason is not None:
                break

        # Token count that prefill() received (prompt tokens minus the last).
        all_tokens = encode_prompt(tokenizer, prompt)
        prefill_token_count = len(all_tokens) - 1

        result_queue.put(
            (
                rank,
                True,
                {
                    "callbacks": callbacks,
                    "text": generated_text,
                    "prefill_token_count": prefill_token_count,
                },
            )
        )
    except Exception as e:
        # Ship the traceback text across the process boundary for the assert.
        result_queue.put((rank, False, f"{e}\n{traceback.format_exc()}"))
# ---------------------------------------------------------------------------
# Test helpers
# ---------------------------------------------------------------------------
def _create_hostfile(world_size: int, base_port: int) -> str:
hosts = [f"127.0.0.1:{base_port + i}" for i in range(world_size)]
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
json.dump(hosts, f)
return f.name
def _run_single_device_test(prompt_tokens: int, timeout: int = 120) -> dict[str, Any]:
    """Run single-device (stream_generate) prefill and return results."""
    ctx = mp.get_context("spawn")
    queue: Any = ctx.Queue()
    proc = ctx.Process(target=_run_single_device, args=(prompt_tokens, queue))
    proc.start()
    proc.join(timeout=timeout)

    if proc.is_alive():
        # Hung subprocess: kill it and fail loudly rather than block the suite.
        proc.terminate()
        proc.join(timeout=5)
        pytest.fail("Single-device process timed out")

    assert not queue.empty(), "Single-device process produced no result"
    success, payload = queue.get()
    assert success, f"Single-device process failed:\n{payload}"
    return payload
def _run_pipeline_test(
    layer_splits: list[tuple[int, int]],
    prompt_tokens: int,
    base_port: int,
    timeout: int = 120,
) -> dict[int, dict[str, Any]]:
    """Run pipeline prefill across ranks and return per-rank results.

    One spawned process per entry in ``layer_splits``; the ranks rendezvous via
    a temporary hostfile of loopback endpoints starting at ``base_port``.
    """
    world_size = len(layer_splits)
    hostfile_path = _create_hostfile(world_size, base_port)
    ctx = mp.get_context("spawn")
    result_queue: Any = ctx.Queue()
    try:
        processes: list[Any] = []
        for rank in range(world_size):
            p = ctx.Process(
                target=_run_pipeline_device,
                args=(
                    rank,
                    world_size,
                    hostfile_path,
                    layer_splits,
                    prompt_tokens,
                    result_queue,
                ),
            )
            p.start()
            processes.append(p)
        for p in processes:
            p.join(timeout=timeout)
        # Record the hang first, then reap every survivor, so a timeout does
        # not leak live children before the assertion fires.
        timed_out = any(p.is_alive() for p in processes)
        for p in processes:
            if p.is_alive():
                p.terminate()
                p.join(timeout=5)
        assert not timed_out, "Pipeline processes timed out"
        results: dict[int, dict[str, Any]] = {}
        while not result_queue.empty():
            rank, success, data = result_queue.get()
            assert success, f"Pipeline rank {rank} failed:\n{data}"
            results[rank] = data
        # A rank can exit without reporting (e.g. crash before the queue put);
        # make that a loud failure rather than a silent partial result.
        assert len(results) == world_size, (
            f"Expected {world_size} results, got {len(results)}: missing ranks {set(range(world_size)) - results.keys()}"
        )
        return results
    finally:
        os.unlink(hostfile_path)
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
pytestmark = [
pytest.mark.slow,
pytest.mark.skipif(
not MODEL_PATH.exists(),
reason=f"GPT-OSS model not found at {MODEL_PATH}",
),
]
LAYER_SPLITS_4WAY: list[tuple[int, int]] = [(0, 6), (6, 12), (12, 18), (18, 24)]
LAYER_SPLITS_2WAY: list[tuple[int, int]] = [(0, 12), (12, 24)]
class TestPipelineNoDeadlock:
    """Pipeline prefill must not deadlock at any rank count or prompt length."""

    # Prompt lengths span short prompts through multi-chunk prefill so both the
    # single-chunk and chunked code paths are exercised at 2 and 4 ranks.
    @pytest.mark.parametrize(
        "layer_splits,prompt_tokens",
        [
            (LAYER_SPLITS_2WAY, 128),
            (LAYER_SPLITS_2WAY, 4096),
            (LAYER_SPLITS_2WAY, 8192),
            (LAYER_SPLITS_2WAY, 16384),
            (LAYER_SPLITS_4WAY, 128),
            (LAYER_SPLITS_4WAY, 4096),
            (LAYER_SPLITS_4WAY, 8192),
            (LAYER_SPLITS_4WAY, 16384),
        ],
        ids=[
            "2rank_128tok",
            "2rank_4096tok",
            "2rank_8192tok",
            "2rank_16384tok",
            "4rank_128tok",
            "4rank_4096tok",
            "4rank_8192tok",
            "4rank_16384tok",
        ],
    )
    def test_no_deadlock(
        self,
        layer_splits: list[tuple[int, int]],
        prompt_tokens: int,
    ) -> None:
        """Pipeline must complete without deadlock at various prompt lengths."""
        # A deadlock surfaces as a process timeout inside _run_pipeline_test.
        pipeline_results = _run_pipeline_test(
            layer_splits=layer_splits,
            prompt_tokens=prompt_tokens,
            base_port=29650,
            timeout=60,
        )
        # If we get here, no deadlock. Verify all ranks produced output.
        for rank, pipe_data in sorted(pipeline_results.items()):
            assert pipe_data["text"], f"Rank {rank} produced no output text"
class TestPipelinePrefillCallbacks:
    """Verify that pipeline prefill callbacks exactly match stream_generate callbacks."""

    @pytest.mark.parametrize(
        "prompt_tokens",
        [50, 500, 5000],
        ids=["short_50", "medium_500", "long_5000"],
    )
    def test_callbacks_match(self, prompt_tokens: int) -> None:
        """All pipeline ranks must produce identical callback sequences."""
        # Run 4-rank pipeline
        pipeline_results = _run_pipeline_test(
            layer_splits=LAYER_SPLITS_4WAY,
            prompt_tokens=prompt_tokens,
            base_port=29700,
            timeout=180,
        )
        # All ranks must agree on prefill token count and callback sequence
        rank0_data = pipeline_results[0]
        rank0_callbacks = rank0_data["callbacks"]
        prefill_count = rank0_data["prefill_token_count"]
        for rank, pipe_data in sorted(pipeline_results.items()):
            pipe_callbacks = pipe_data["callbacks"]
            assert pipe_data["prefill_token_count"] == prefill_count, (
                f"Rank {rank} prefill token count mismatch: "
                f"{pipe_data['prefill_token_count']} vs {prefill_count}"
            )
            assert pipe_callbacks == rank0_callbacks, (
                f"Rank {rank} callback mismatch for {prompt_tokens} prompt tokens "
                f"(prefill M={prefill_count}):\n"
                f"  pipeline R0 ({len(rank0_callbacks)} callbacks): {rank0_callbacks}\n"
                f"  pipeline R{rank} ({len(pipe_callbacks)} callbacks): {pipe_callbacks}"
            )
        # Structural checks: starts with (0, M), ends with (M, M), monotonically increasing
        assert rank0_callbacks[0] == (0, prefill_count), (
            f"First callback should be (0, {prefill_count}), got {rank0_callbacks[0]}"
        )
        assert rank0_callbacks[-1] == (prefill_count, prefill_count), (
            f"Last callback should be ({prefill_count}, {prefill_count}), got {rank0_callbacks[-1]}"
        )
        for i in range(1, len(rank0_callbacks)):
            assert rank0_callbacks[i][0] >= rank0_callbacks[i - 1][0], (
                f"Callbacks not monotonically increasing at index {i}: {rank0_callbacks}"
            )

    @pytest.mark.parametrize(
        "prompt_tokens",
        [50, 500],
        ids=["short_50", "medium_500"],
    )
    def test_output_matches(self, prompt_tokens: int) -> None:
        """Pipeline-generated text must match single-device output."""
        single = _run_single_device_test(prompt_tokens, timeout=180)
        pipeline_results = _run_pipeline_test(
            layer_splits=LAYER_SPLITS_4WAY,
            prompt_tokens=prompt_tokens,
            base_port=29800,
            timeout=180,
        )
        single_text = single["text"]
        # The last rank produces the final logits, so its output should match.
        # Due to SDPA tiling non-determinism, allow minor differences in text.
        last_rank = max(pipeline_results.keys())
        pipe_text = pipeline_results[last_rank]["text"]
        # For deterministic sampling (temp=0.0), outputs should match exactly
        # or be very close. Log both for debugging even if they match.
        if single_text != pipe_text:
            # Find first divergence point
            min_len = min(len(single_text), len(pipe_text))
            diverge_idx = next(
                (i for i in range(min_len) if single_text[i] != pipe_text[i]),
                min_len,
            )
            pytest.fail(
                f"Output text diverged at character {diverge_idx} for {prompt_tokens} prompt tokens:\n"
                f"  single-device: {single_text!r}\n"
                f"  pipeline R{last_rank}: {pipe_text!r}"
            )
class TestPipelineCallbacksStructure:
    """Verify structural properties of callbacks independent of model output."""

    def test_callback_structure_matches_generate_step(self) -> None:
        """Verify callbacks follow generate_step's pattern: (0,M), chunks up to M-1, (M,M)."""
        prompt_tokens = 200
        pipeline_results = _run_pipeline_test(
            layer_splits=LAYER_SPLITS_4WAY,
            prompt_tokens=prompt_tokens,
            base_port=29900,
            timeout=180,
        )
        for rank, pipe_data in sorted(pipeline_results.items()):
            callbacks = pipe_data["callbacks"]
            m = pipe_data["prefill_token_count"]
            assert m > 0, f"Rank {rank}: prefill token count is 0"
            assert callbacks[0] == (0, m), (
                f"Rank {rank}: first callback should be (0, {m}), got {callbacks[0]}"
            )
            assert callbacks[-1] == (m, m), (
                f"Rank {rank}: last callback should be ({m}, {m}), got {callbacks[-1]}"
            )
            # With more than two callbacks, the one before the final (M, M)
            # must still be a partial-progress report (< M).
            if len(callbacks) > 2:
                second_to_last = callbacks[-2]
                assert second_to_last[0] < m, (
                    f"Rank {rank}: second-to-last callback should report < {m}, "
                    f"got {second_to_last}"
                )
            # All callbacks must have total == M
            for i, (_, total) in enumerate(callbacks):
                assert total == m, (
                    f"Rank {rank}: callback {i} has total={total}, expected {m}"
                )
            # processed values must be non-decreasing
            processed_vals = [p for p, _ in callbacks]
            for i in range(1, len(processed_vals)):
                assert processed_vals[i] >= processed_vals[i - 1], (
                    f"Rank {rank}: callbacks not non-decreasing at index {i}: "
                    f"{processed_vals}"
                )
            # No duplicate consecutive callbacks (pipeline dummies must not emit callbacks)
            for i in range(1, len(callbacks)):
                assert callbacks[i] != callbacks[i - 1], (
                    f"Rank {rank}: duplicate consecutive callback at index {i}: "
                    f"{callbacks[i]} (this suggests dummy iterations are emitting callbacks)"
                )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_pipeline_prefill_callbacks.py",
"license": "Apache License 2.0",
"lines": 427,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/routing/event_router.py | from dataclasses import dataclass, field
from random import random
import anyio
from anyio import BrokenResourceError, ClosedResourceError
from anyio.abc import CancelScope
from loguru import logger
from exo.shared.types.commands import ForwarderCommand, RequestEventLog
from exo.shared.types.common import SessionId, SystemId
from exo.shared.types.events import (
Event,
EventId,
GlobalForwarderEvent,
IndexedEvent,
LocalForwarderEvent,
)
from exo.utils.channels import Receiver, Sender, channel
from exo.utils.event_buffer import OrderedBuffer
from exo.utils.task_group import TaskGroup
@dataclass
class EventRouter:
    """Bridges locally produced events and the global forwarder network.

    Outbound: events submitted via ``sender()`` are wrapped with an origin
    sequence number, shipped on ``external_outbound``, and tracked in
    ``out_for_delivery`` until the master echoes them back.

    Inbound: events arriving on ``external_inbound`` are filtered to this
    session's master, re-ordered by ``origin_idx``, and fanned out to every
    channel handed out by ``receiver()``. A gap in the sequence triggers a
    nack (RequestEventLog) with capped exponential backoff.
    """

    session_id: SessionId
    command_sender: Sender[ForwarderCommand]
    external_inbound: Receiver[GlobalForwarderEvent]
    external_outbound: Sender[LocalForwarderEvent]
    _system_id: SystemId = field(init=False, default_factory=SystemId)
    # Fan-out channels created by receiver(); broken ones are pruned lazily.
    internal_outbound: list[Sender[IndexedEvent]] = field(
        init=False, default_factory=list
    )
    event_buffer: OrderedBuffer[Event] = field(
        init=False, default_factory=OrderedBuffer
    )
    # event_id -> (last send time, wrapped event); entries await the master's
    # echo, which acts as the delivery acknowledgement.
    out_for_delivery: dict[EventId, tuple[float, LocalForwarderEvent]] = field(
        init=False, default_factory=dict
    )
    _tg: TaskGroup = field(init=False, default_factory=TaskGroup)
    _nack_cancel_scope: CancelScope | None = field(init=False, default=None)
    _nack_attempts: int = field(init=False, default=0)
    _nack_base_seconds: float = field(init=False, default=0.5)
    _nack_cap_seconds: float = field(init=False, default=10.0)

    async def run(self):
        """Run the router until cancelled, then close every outbound channel."""
        try:
            async with self._tg as tg:
                tg.start_soon(self._run_ext_in)
                tg.start_soon(self._simple_retry)
        finally:
            self.external_outbound.close()
            for send in self.internal_outbound:
                send.close()

    # can make this better in future
    async def _simple_retry(self):
        """Re-send events not acknowledged (echoed back) within ~5 seconds."""
        while True:
            # Jittered cadence avoids synchronized retry bursts across nodes.
            await anyio.sleep(1 + random())
            # list here is a shallow clone for shared mutation
            for e_id, (time, event) in list(self.out_for_delivery.items()):
                if anyio.current_time() > time + 5:
                    # Refresh the timestamp so the next retry waits another window.
                    self.out_for_delivery[e_id] = (anyio.current_time(), event)
                    await self.external_outbound.send(event)

    def sender(self) -> Sender[Event]:
        """Create a channel for publishing local events into the router.

        Each sender gets its own ingest task and its own origin id / sequence.
        """
        send, recv = channel[Event]()
        # NOTE(review): a fresh SystemId() is used per sender rather than
        # self._system_id — looks intentional (per-sender sequencing), confirm.
        if self._tg.is_running():
            self._tg.start_soon(self._ingest, SystemId(), recv)
        else:
            # Router not started yet: defer until the task group is entered.
            self._tg.queue(self._ingest, SystemId(), recv)
        return send

    def receiver(self) -> Receiver[IndexedEvent]:
        """Create a channel that receives the ordered, indexed event stream."""
        send, recv = channel[IndexedEvent]()
        self.internal_outbound.append(send)
        return recv

    def shutdown(self) -> None:
        # Cancels _run_ext_in, _simple_retry, and all ingest tasks.
        self._tg.cancel_tasks()

    async def _ingest(self, system_id: SystemId, recv: Receiver[Event]):
        """Wrap events from one local sender with sequence numbers and forward them."""
        idx = 0
        with recv as events:
            async for event in events:
                f_ev = LocalForwarderEvent(
                    origin_idx=idx,
                    origin=system_id,
                    session=self.session_id,
                    event=event,
                )
                idx += 1
                await self.external_outbound.send(f_ev)
                # Track for retry until the master echoes it back.
                self.out_for_delivery[event.event_id] = (anyio.current_time(), f_ev)

    async def _run_ext_in(self):
        """Consume the global stream, restore ordering, and fan out locally."""
        buf = OrderedBuffer[Event]()
        with self.external_inbound as events:
            async for event in events:
                # Only accept events for our session, and only from the master.
                if event.session != self.session_id:
                    continue
                if event.origin != self.session_id.master_node_id:
                    continue
                buf.ingest(event.origin_idx, event.event)
                event_id = event.event.event_id
                # Our own event echoed back == delivery acknowledged.
                if event_id in self.out_for_delivery:
                    self.out_for_delivery.pop(event_id)
                drained = buf.drain_indexed()
                if drained:
                    # Progress was made: reset backoff and cancel a pending nack.
                    self._nack_attempts = 0
                    if self._nack_cancel_scope:
                        self._nack_cancel_scope.cancel()
                if not drained and (
                    self._nack_cancel_scope is None
                    or self._nack_cancel_scope.cancel_called
                ):
                    # Request the next index.
                    self._tg.start_soon(self._nack_request, buf.next_idx_to_release)
                    continue
                for idx, event in drained:
                    to_clear = set[int]()
                    for i, sender in enumerate(self.internal_outbound):
                        try:
                            await sender.send(IndexedEvent(idx=idx, event=event))
                        except (ClosedResourceError, BrokenResourceError):
                            to_clear.add(i)
                    # Prune dead receivers; reverse order keeps indices valid.
                    for i in sorted(to_clear, reverse=True):
                        self.internal_outbound.pop(i)

    async def _nack_request(self, since_idx: int) -> None:
        # We request all events after (and including) the missing index.
        # This function is started whenever we receive an event that is out of sequence.
        # It is cancelled as soon as we receiver an event that is in sequence.
        if since_idx < 0:
            logger.warning(f"Negative value encountered for nack request {since_idx=}")
            since_idx = 0
        with CancelScope() as scope:
            self._nack_cancel_scope = scope
            # Capped exponential backoff on repeated nacks.
            delay: float = self._nack_base_seconds * (2.0**self._nack_attempts)
            delay = min(self._nack_cap_seconds, delay)
            self._nack_attempts += 1
            try:
                await anyio.sleep(delay)
                logger.info(
                    f"Nack attempt {self._nack_attempts}: Requesting Event Log from {since_idx}"
                )
                await self.command_sender.send(
                    ForwarderCommand(
                        origin=self._system_id,
                        command=RequestEventLog(since_idx=since_idx),
                    )
                )
            finally:
                # Only clear the scope if a newer nack hasn't replaced it.
                if self._nack_cancel_scope is scope:
                    self._nack_cancel_scope = None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/routing/event_router.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/utils/task_group.py | from collections.abc import Awaitable, Callable
from dataclasses import dataclass, field
from types import TracebackType
from typing import Any, Unpack
from anyio import create_task_group
from anyio.abc import TaskGroup as TaskGroupABC
@dataclass
class TaskGroup:
_tg: TaskGroupABC | None = field(default=None, init=False)
_queued: list[tuple[Any, Any, Any]] | None = field(default_factory=list, init=False)
def is_running(self) -> bool:
return self._tg is not None
def cancel_tasks(self):
assert self._tg
self._tg.cancel_scope.cancel()
def cancel_called(self) -> bool:
assert self._tg
return self._tg.cancel_scope.cancel_called
def start_soon[*T](
self,
func: Callable[[Unpack[T]], Awaitable[Any]],
*args: Unpack[T],
name: object = None,
) -> None:
assert self._tg is not None
assert self._queued is None
self._tg.start_soon(func, *args, name=name)
def queue[*T](
self,
func: Callable[[Unpack[T]], Awaitable[Any]],
*args: Unpack[T],
name: object = None,
) -> None:
assert self._tg is None
assert self._queued is not None
self._queued.append((func, args, name))
async def __aenter__(self) -> TaskGroupABC:
assert self._tg is None
assert self._queued is not None
self._tg = create_task_group()
r = await self._tg.__aenter__()
for func, args, name in self._queued: # pyright: ignore[reportAny]
self._tg.start_soon(func, *args, name=name) # pyright: ignore[reportAny]
self._queued = None
return r
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
"""Exit the task group context waiting for all tasks to finish."""
assert self._tg is not None, "aenter sets self.lazy, so it exists when we aexit"
assert self._queued is None
return await self._tg.__aexit__(exc_type, exc_val, exc_tb)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/task_group.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:packaging/dmg/generate-background.py | #!/usr/bin/env python3
"""Generate the DMG background image with a centered drag-to-Applications arrow.
The output is a 1600×740 retina PNG (2× for 800×400 logical window).
Icons are positioned at (200, 190) and (600, 190) in logical coordinates;
the arrow is drawn centered between them.
Usage:
python3 generate-background.py [output.png]
If no output path is given, overwrites the bundled background.png in-place.
"""
from __future__ import annotations
import math
import sys
from pathlib import Path
from PIL import Image, ImageDraw
# Retina dimensions (2× logical 800×400)
WIDTH = 1600
HEIGHT = 740
# Icon positions in logical coords → retina coords
# App icon at (200, 190), Applications at (600, 190)
APP_X = 200 * 2 # 400
APPS_X = 600 * 2 # 1200
ICON_Y = 190 * 2 # 380
# Arrow drawn between icons, slightly above icon center
ARROW_START_X = APP_X + 160 # past the icon
ARROW_END_X = APPS_X - 160 # before the Applications icon
ARROW_Y = ICON_Y # same height as icons
ARROW_RISE = 120 # upward arc height
def draw_arrow(draw: ImageDraw.ImageDraw) -> None:
    """Draw a hand-drawn-style curved arrow from app icon toward Applications."""
    stroke = (30, 30, 30)
    stroke_width = 8
    # Control point of the quadratic bezier, lifted above the icons for an arc.
    ctrl_x = (ARROW_START_X + ARROW_END_X) / 2
    ctrl_y = ARROW_Y - ARROW_RISE
    # Sample the bezier (start -> control -> end) densely enough to look smooth.
    segments = 80
    curve: list[tuple[float, float]] = []
    for step in range(segments + 1):
        t = step / segments
        px = (1 - t) ** 2 * ARROW_START_X + 2 * (1 - t) * t * ctrl_x + t**2 * ARROW_END_X
        py = (1 - t) ** 2 * ARROW_Y + 2 * (1 - t) * t * ctrl_y + t**2 * ARROW_Y
        curve.append((px, py))
    # Render the curve as short connected line segments.
    for seg_start, seg_end in zip(curve, curve[1:]):
        draw.line([seg_start, seg_end], fill=stroke, width=stroke_width)
    # Arrowhead: orient along the chord from a slightly earlier sample
    # (curve[-3] -> curve[-1]) so the head follows the curve's end direction.
    tip_x, tip_y = curve[-1]
    back_x, back_y = curve[-3]
    heading = math.atan2(tip_y - back_y, tip_x - back_x)
    head_len = 36
    spread = math.radians(25)
    left_wing = (
        tip_x - head_len * math.cos(heading - spread),
        tip_y - head_len * math.sin(heading - spread),
    )
    right_wing = (
        tip_x - head_len * math.cos(heading + spread),
        tip_y - head_len * math.sin(heading + spread),
    )
    draw.polygon(
        [(tip_x, tip_y), left_wing, right_wing],
        fill=stroke,
    )
def generate_background(output_path: str) -> None:
    """Generate a white DMG background with a centered arrow."""
    canvas = Image.new("RGBA", (WIDTH, HEIGHT), (255, 255, 255, 255))
    draw_arrow(ImageDraw.Draw(canvas))
    canvas.save(output_path, "PNG")
if __name__ == "__main__":
    # With no argument, overwrite the bundled background.png next to this script.
    default_output = str(Path(__file__).parent / "background.png")
    out = sys.argv[1] if len(sys.argv) >= 2 else default_output
    generate_background(out)
    print(f"Background image written to {out}")
| {
"repo_id": "exo-explore/exo",
"file_path": "packaging/dmg/generate-background.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/runner/image_models/runner.py | import base64
import resource
import time
from typing import TYPE_CHECKING, Literal
import mlx.core as mx
from exo.shared.constants import EXO_MAX_CHUNK_SIZE, EXO_TRACING_ENABLED
from exo.shared.models.model_cards import ModelTask
from exo.shared.tracing import clear_trace_buffer, get_trace_buffer
from exo.shared.types.api import ImageGenerationStats
from exo.shared.types.chunks import ErrorChunk, ImageChunk
from exo.shared.types.common import CommandId, ModelId
from exo.shared.types.events import (
ChunkGenerated,
Event,
RunnerStatusUpdated,
TaskAcknowledged,
TaskStatusUpdated,
TraceEventData,
TracesCollected,
)
from exo.shared.types.tasks import (
ConnectToGroup,
ImageEdits,
ImageGeneration,
LoadModel,
Shutdown,
StartWarmup,
Task,
TaskId,
TaskStatus,
)
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runner_response import (
ImageGenerationResponse,
PartialImageResponse,
)
from exo.shared.types.worker.runners import (
RunnerConnected,
RunnerConnecting,
RunnerFailed,
RunnerIdle,
RunnerLoaded,
RunnerLoading,
RunnerReady,
RunnerRunning,
RunnerShutdown,
RunnerShuttingDown,
RunnerStatus,
RunnerWarmingUp,
)
from exo.shared.types.worker.shards import (
CfgShardMetadata,
PipelineShardMetadata,
ShardMetadata,
)
from exo.utils.channels import MpReceiver, MpSender
from exo.worker.engines.image import (
DistributedImageModel,
generate_image,
initialize_image_model,
warmup_image_generator,
)
from exo.worker.engines.mlx.utils_mlx import (
initialize_mlx,
)
from exo.worker.runner.bootstrap import logger
def _is_primary_output_node(shard_metadata: ShardMetadata) -> bool:
"""Check if this node is the primary output node for image generation.
For CFG models: the last pipeline stage in CFG group 0 (positive prompt).
For non-CFG models: the last pipeline stage.
"""
if isinstance(shard_metadata, CfgShardMetadata):
is_pipeline_last = (
shard_metadata.pipeline_rank == shard_metadata.pipeline_world_size - 1
)
return is_pipeline_last and shard_metadata.cfg_rank == 0
elif isinstance(shard_metadata, PipelineShardMetadata):
return shard_metadata.device_rank == shard_metadata.world_size - 1
return False
def _process_image_response(
    response: ImageGenerationResponse | PartialImageResponse,
    command_id: CommandId,
    shard_metadata: ShardMetadata,
    event_sender: MpSender[Event],
    image_index: int,
) -> None:
    """Process a single image response and send chunks.

    NOTE(review): the ``image_index`` parameter is currently unused — the
    chunk carries ``response.image_index`` instead.
    """
    partial = isinstance(response, PartialImageResponse)
    # Stats exist only on the final ImageGenerationResponse.
    final_stats = response.stats if isinstance(response, ImageGenerationResponse) else None
    payload = base64.b64encode(response.image_data).decode("utf-8")
    _send_image_chunk(
        encoded_data=payload,
        command_id=command_id,
        model_id=shard_metadata.model_card.model_id,
        event_sender=event_sender,
        image_index=response.image_index,
        is_partial=partial,
        partial_index=response.partial_index if partial else None,
        total_partials=response.total_partials if partial else None,
        stats=final_stats,
        image_format=response.format,
    )
def _send_traces_if_enabled(
    event_sender: MpSender[Event],
    task_id: TaskId,
    rank: int,
) -> None:
    """Forward any buffered trace events for this task, then clear the buffer.

    No-op when tracing is disabled or the buffer is empty.
    """
    if not EXO_TRACING_ENABLED:
        return
    buffered = get_trace_buffer()
    if not buffered:
        return
    payload = [
        TraceEventData(
            name=entry.name,
            start_us=entry.start_us,
            duration_us=entry.duration_us,
            rank=entry.rank,
            category=entry.category,
        )
        for entry in buffered
    ]
    event_sender.send(
        TracesCollected(
            task_id=task_id,
            rank=rank,
            traces=payload,
        )
    )
    clear_trace_buffer()
def _send_image_chunk(
    encoded_data: str,
    command_id: CommandId,
    model_id: ModelId,
    event_sender: MpSender[Event],
    image_index: int,
    is_partial: bool,
    partial_index: int | None = None,
    total_partials: int | None = None,
    stats: ImageGenerationStats | None = None,
    image_format: Literal["png", "jpeg", "webp"] | None = None,
) -> None:
    """Send base64-encoded image data as chunks via events."""
    # Split the payload into transport-sized slices.
    pieces = [
        encoded_data[offset : offset + EXO_MAX_CHUNK_SIZE]
        for offset in range(0, len(encoded_data), EXO_MAX_CHUNK_SIZE)
    ]
    total = len(pieces)
    for position, piece in enumerate(pieces):
        # Stats ride only on the very last chunk of a final (non-partial) image.
        attached_stats = (
            stats if position == total - 1 and not is_partial else None
        )
        event_sender.send(
            ChunkGenerated(
                command_id=command_id,
                chunk=ImageChunk(
                    model=model_id,
                    data=piece,
                    chunk_index=position,
                    total_chunks=total,
                    image_index=image_index,
                    is_partial=is_partial,
                    partial_index=partial_index,
                    total_partials=total_partials,
                    stats=attached_stats,
                    format=image_format,
                ),
            )
        )
def main(
    bound_instance: BoundInstance,
    event_sender: MpSender[Event],
    task_receiver: MpReceiver[Task],
    cancel_receiver: MpReceiver[TaskId],
):
    """Image-model runner entry point: a task-driven state machine.

    Lifecycle: Idle -> (ConnectToGroup) Connected -> (LoadModel) Loaded ->
    (StartWarmup) Ready -> (ImageGeneration / ImageEdits) Running -> Ready,
    and finally (Shutdown) Shutdown. Status transitions, acknowledgements,
    and chunked image results are reported back through ``event_sender``.

    NOTE(review): ``cancel_receiver`` is accepted but never read here;
    cancellation appears to flow through ``cancelled_tasks`` only — confirm.
    """
    # Raise the open-file soft limit (up to the hard cap) — model loading and
    # group networking can need many descriptors.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (min(max(soft, 2048), hard), hard))
    instance, runner_id, shard_metadata = (
        bound_instance.instance,
        bound_instance.bound_runner_id,
        bound_instance.bound_shard,
    )
    device_rank = shard_metadata.device_rank
    logger.info("hello from the runner")
    # Test hooks: shard metadata can request a simulated crash or startup hang.
    if getattr(shard_metadata, "immediate_exception", False):
        raise Exception("Fake exception - runner failed to spin up.")
    if timeout := getattr(shard_metadata, "should_timeout", 0):
        time.sleep(timeout)
    setup_start_time = time.time()
    cancelled_tasks = set[TaskId]()
    image_model: DistributedImageModel | None = None
    group = None
    current_status: RunnerStatus = RunnerIdle()
    logger.info("runner created")
    event_sender.send(
        RunnerStatusUpdated(runner_id=runner_id, runner_status=current_status)
    )
    # Track task ids to flag (but not reject) duplicate deliveries.
    seen = set[TaskId]()
    with task_receiver as tasks:
        for task in tasks:
            if task.task_id in seen:
                logger.warning("repeat task - potential error")
            seen.add(task.task_id)
            cancelled_tasks.discard(TaskId("CANCEL_CURRENT_TASK"))
            event_sender.send(
                TaskStatusUpdated(task_id=task.task_id, task_status=TaskStatus.Running)
            )
            # Each case guards on the current status so out-of-order tasks
            # fall through to the ValueError in the default case.
            match task:
                case ConnectToGroup() if isinstance(
                    current_status, (RunnerIdle, RunnerFailed)
                ):
                    logger.info("runner connecting")
                    current_status = RunnerConnecting()
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    group = initialize_mlx(bound_instance)
                    logger.info("runner connected")
                    current_status = RunnerConnected()
                # we load the model if it's connected with a group, or idle without a group. we should never tell a model to connect if it doesn't need to
                case LoadModel() if (
                    isinstance(current_status, RunnerConnected) and group is not None
                ) or (isinstance(current_status, RunnerIdle) and group is None):
                    current_status = RunnerLoading()
                    logger.info("runner loading")
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    assert (
                        ModelTask.TextToImage in shard_metadata.model_card.tasks
                        or ModelTask.ImageToImage in shard_metadata.model_card.tasks
                    ), f"Incorrect model task(s): {shard_metadata.model_card.tasks}"
                    image_model = initialize_image_model(bound_instance)
                    current_status = RunnerLoaded()
                    logger.info("runner loaded")
                case StartWarmup() if isinstance(current_status, RunnerLoaded):
                    current_status = RunnerWarmingUp()
                    logger.info("runner warming up")
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    logger.info(f"warming up inference for instance: {instance}")
                    assert image_model
                    image = warmup_image_generator(model=image_model)
                    if image is not None:
                        logger.info(f"warmed up by generating {image.size} image")
                    else:
                        logger.info("warmup completed (non-primary node)")
                    logger.info(
                        f"runner initialized in {time.time() - setup_start_time} seconds"
                    )
                    current_status = RunnerReady()
                    logger.info("runner ready")
                case ImageGeneration(
                    task_params=task_params, command_id=command_id
                ) if isinstance(current_status, RunnerReady):
                    assert image_model
                    logger.info(f"received image generation request: {str(task)[:500]}")
                    current_status = RunnerRunning()
                    logger.info("runner running")
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    try:
                        image_index = 0
                        for response in generate_image(
                            model=image_model, task=task_params
                        ):
                            # Only the primary output node emits chunks; other
                            # shards just drive the distributed computation.
                            is_primary_output = _is_primary_output_node(shard_metadata)
                            if is_primary_output:
                                match response:
                                    case PartialImageResponse():
                                        logger.info(
                                            f"sending partial ImageChunk {response.partial_index}/{response.total_partials}"
                                        )
                                        _process_image_response(
                                            response,
                                            command_id,
                                            shard_metadata,
                                            event_sender,
                                            image_index,
                                        )
                                    case ImageGenerationResponse():
                                        logger.info("sending final ImageChunk")
                                        _process_image_response(
                                            response,
                                            command_id,
                                            shard_metadata,
                                            event_sender,
                                            image_index,
                                        )
                                        image_index += 1
                    # can we make this more explicit?
                    except Exception as e:
                        # Surface the failure to the client as an ErrorChunk
                        # (primary node only), then let it propagate.
                        if _is_primary_output_node(shard_metadata):
                            event_sender.send(
                                ChunkGenerated(
                                    command_id=command_id,
                                    chunk=ErrorChunk(
                                        model=shard_metadata.model_card.model_id,
                                        finish_reason="error",
                                        error_message=str(e),
                                    ),
                                )
                            )
                        raise
                    finally:
                        _send_traces_if_enabled(event_sender, task.task_id, device_rank)
                    current_status = RunnerReady()
                    logger.info("runner ready")
                case ImageEdits(task_params=task_params, command_id=command_id) if (
                    isinstance(current_status, RunnerReady)
                ):
                    assert image_model
                    logger.info(f"received image edits request: {str(task)[:500]}")
                    current_status = RunnerRunning()
                    logger.info("runner running")
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    try:
                        image_index = 0
                        for response in generate_image(
                            model=image_model, task=task_params
                        ):
                            if _is_primary_output_node(shard_metadata):
                                match response:
                                    case PartialImageResponse():
                                        logger.info(
                                            f"sending partial ImageChunk {response.partial_index}/{response.total_partials}"
                                        )
                                        _process_image_response(
                                            response,
                                            command_id,
                                            shard_metadata,
                                            event_sender,
                                            image_index,
                                        )
                                    case ImageGenerationResponse():
                                        logger.info("sending final ImageChunk")
                                        _process_image_response(
                                            response,
                                            command_id,
                                            shard_metadata,
                                            event_sender,
                                            image_index,
                                        )
                                        image_index += 1
                    except Exception as e:
                        if _is_primary_output_node(shard_metadata):
                            event_sender.send(
                                ChunkGenerated(
                                    command_id=command_id,
                                    chunk=ErrorChunk(
                                        model=shard_metadata.model_card.model_id,
                                        finish_reason="error",
                                        error_message=str(e),
                                    ),
                                )
                            )
                        raise
                    finally:
                        _send_traces_if_enabled(event_sender, task.task_id, device_rank)
                    current_status = RunnerReady()
                    logger.info("runner ready")
                case Shutdown():
                    current_status = RunnerShuttingDown()
                    logger.info("runner shutting down")
                    # Drop model/group references and flush caches so memory is
                    # returned before the process exits.
                    if not TYPE_CHECKING:
                        del image_model, group
                        mx.clear_cache()
                        import gc

                        gc.collect()
                    event_sender.send(
                        RunnerStatusUpdated(
                            runner_id=runner_id, runner_status=current_status
                        )
                    )
                    event_sender.send(TaskAcknowledged(task_id=task.task_id))
                    current_status = RunnerShutdown()
                case _:
                    raise ValueError(
                        f"Received {task.__class__.__name__} outside of state machine in {current_status=}"
                    )
            was_cancelled = (task.task_id in cancelled_tasks) or (
                TaskId("CANCEL_CURRENT_TASK") in cancelled_tasks
            )
            if not was_cancelled:
                event_sender.send(
                    TaskStatusUpdated(
                        task_id=task.task_id, task_status=TaskStatus.Complete
                    )
                )
            event_sender.send(
                RunnerStatusUpdated(runner_id=runner_id, runner_status=current_status)
            )
            if isinstance(current_status, RunnerShutdown):
                break
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/runner/image_models/runner.py",
"license": "Apache License 2.0",
"lines": 414,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/adapters/ollama.py | from __future__ import annotations
import json
from collections.abc import AsyncGenerator
from typing import Any
from exo.shared.types.chunks import (
ErrorChunk,
PrefillProgressChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.common import CommandId
from exo.shared.types.ollama_api import (
OllamaChatRequest,
OllamaChatResponse,
OllamaDoneReason,
OllamaGenerateRequest,
OllamaGenerateResponse,
OllamaMessage,
OllamaToolCall,
OllamaToolFunction,
)
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
def _map_done_reason(
finish_reason: str | None,
) -> OllamaDoneReason | None:
if finish_reason is None:
return None
if finish_reason == "stop":
return "stop"
if finish_reason == "length":
return "length"
if finish_reason in ("tool_calls", "function_call"):
return "tool_call"
if finish_reason == "error":
return "error"
return "stop"
def _try_parse_json(value: str) -> dict[str, Any] | str:
try:
return json.loads(value) # type: ignore
except json.JSONDecodeError:
return value
def _build_tool_calls(chunk: ToolCallChunk) -> list[OllamaToolCall]:
    """Convert a ToolCallChunk into Ollama-format tool-call objects."""
    calls: list[OllamaToolCall] = []
    for position, call in enumerate(chunk.tool_calls):
        # call.arguments is always str; Ollama prefers a JSON object when the
        # string parses, so attempt that first.
        parsed_args: dict[str, Any] | str = _try_parse_json(call.arguments)
        calls.append(
            OllamaToolCall(
                id=call.id,
                type="function",
                function=OllamaToolFunction(
                    name=call.name, arguments=parsed_args, index=position
                ),
            )
        )
    return calls
def _get_usage(
chunk: TokenChunk | ToolCallChunk,
) -> tuple[int | None, int | None]:
"""Extract (prompt_eval_count, eval_count) from a chunk."""
if chunk.usage is not None:
return (chunk.usage.prompt_tokens, chunk.usage.completion_tokens)
if chunk.stats is not None:
return (chunk.stats.prompt_tokens, chunk.stats.generation_tokens)
return (None, None)
def ollama_request_to_text_generation(
    request: OllamaChatRequest,
) -> TextGenerationTaskParams:
    """Convert Ollama chat request to exo's internal text generation format.

    System messages are folded (newline-joined) into ``instructions``; user
    and assistant messages become ``input_messages``.  In parallel, every
    message is re-serialized into ``chat_template_messages`` (plain dicts,
    including thinking text, tool calls, and tool results) so the worker can
    re-render the exact conversation through the model's chat template.

    Args:
        request: The incoming Ollama ``/api/chat`` request.

    Returns:
        TextGenerationTaskParams for the internal generation pipeline.
    """
    instructions: str | None = None
    input_messages: list[InputMessage] = []
    chat_template_messages: list[dict[str, Any]] = []
    tool_message_index = 0
    for msg in request.messages:
        content = msg.content or ""
        if msg.role == "system":
            # Multiple system prompts accumulate into one instruction string.
            if instructions is None:
                instructions = content
            else:
                instructions = f"{instructions}\n{content}"
            chat_template_messages.append({"role": "system", "content": content})
            continue
        if msg.role in ("user", "assistant") and (
            msg.content is not None or msg.thinking is not None or msg.tool_calls
        ):
            input_messages.append(InputMessage(role=msg.role, content=content))
        dumped: dict[str, Any] = {"role": msg.role, "content": content}
        if msg.thinking is not None:
            dumped["thinking"] = msg.thinking
        if msg.tool_calls is not None:
            tool_calls_list: list[dict[str, Any]] = []
            for tc in msg.tool_calls:
                function: dict[str, Any] = {
                    "name": tc.function.name,
                    # Chat templates expect tool arguments as a JSON string,
                    # so dict arguments are re-serialized here.
                    "arguments": (
                        json.dumps(tc.function.arguments)
                        if isinstance(tc.function.arguments, dict)
                        else tc.function.arguments
                    ),
                }
                if tc.function.index is not None:
                    function["index"] = tc.function.index
                tool_call: dict[str, Any] = {"function": function}
                if tc.id is not None:
                    tool_call["id"] = tc.id
                if tc.type is not None:
                    tool_call["type"] = tc.type
                tool_calls_list.append(tool_call)
            dumped["tool_calls"] = tool_calls_list
        if msg.name is not None:
            dumped["name"] = msg.name
        if msg.role == "tool":
            # Ollama tool messages carry no call id; synthesize a stable
            # fallback from tool_name/name or a running counter.
            tool_message_index += 1
            tool_call_id = msg.tool_name or msg.name or f"tool_{tool_message_index}"
            dumped["tool_call_id"] = tool_call_id
            if msg.tool_name is not None:
                dumped["tool_name"] = msg.tool_name
        chat_template_messages.append(dumped)
    options = request.options
    return TextGenerationTaskParams(
        model=request.model,
        # The pipeline requires at least one input message.
        input=input_messages
        if input_messages
        else [InputMessage(role="user", content="")],
        instructions=instructions,
        max_output_tokens=options.num_predict if options else None,
        temperature=options.temperature if options else None,
        top_p=options.top_p if options else None,
        top_k=options.top_k if options else None,
        stop=options.stop if options else None,
        seed=options.seed if options else None,
        stream=request.stream,
        tools=request.tools,
        enable_thinking=request.think,
        chat_template_messages=chat_template_messages
        if chat_template_messages
        else None,
    )
async def generate_ollama_chat_stream(
    _command_id: CommandId,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str, None]:
    """Generate streaming responses in Ollama format (newline-delimited JSON).

    Emits one serialized OllamaChatResponse per line.  The stream terminates
    after an error, a tool call, or the first token carrying a finish reason.
    """
    # Thinking text is accumulated so a terminating tool call can echo the
    # full thinking transcript in its final message.
    thinking_parts: list[str] = []
    async for chunk in chunk_stream:
        match chunk:
            case PrefillProgressChunk():
                # Internal progress signal; not part of the Ollama wire format.
                continue
            case ErrorChunk():
                error_response = OllamaChatResponse(
                    model=str(chunk.model),
                    message=OllamaMessage(
                        role="assistant", content=chunk.error_message
                    ),
                    done=True,
                    done_reason="error",
                )
                yield f"{error_response.model_dump_json(exclude_none=True)}\n"
                return
            case ToolCallChunk():
                prompt_eval, eval_count = _get_usage(chunk)
                response = OllamaChatResponse(
                    model=str(chunk.model),
                    message=OllamaMessage(
                        role="assistant",
                        content="",
                        tool_calls=_build_tool_calls(chunk),
                        thinking="".join(thinking_parts) if thinking_parts else None,
                    ),
                    done=True,
                    done_reason="tool_call",
                    prompt_eval_count=prompt_eval,
                    eval_count=eval_count,
                )
                yield f"{response.model_dump_json(exclude_none=True)}\n"
                return
            case TokenChunk():
                done = chunk.finish_reason is not None
                if chunk.is_thinking:
                    thinking_parts.append(chunk.text)
                    # Thinking tokens stream through the `thinking` field with
                    # empty visible content.
                    response = OllamaChatResponse(
                        model=str(chunk.model),
                        message=OllamaMessage(
                            role="assistant", content="", thinking=chunk.text
                        ),
                        done=False,
                    )
                    yield f"{response.model_dump_json(exclude_none=True)}\n"
                elif done:
                    # Final visible token: attach done_reason and usage counts.
                    prompt_eval, eval_count = _get_usage(chunk)
                    response = OllamaChatResponse(
                        model=str(chunk.model),
                        message=OllamaMessage(
                            role="assistant",
                            content=chunk.text,
                        ),
                        done=True,
                        done_reason=_map_done_reason(chunk.finish_reason),
                        prompt_eval_count=prompt_eval,
                        eval_count=eval_count,
                    )
                    yield f"{response.model_dump_json(exclude_none=True)}\n"
                else:
                    response = OllamaChatResponse(
                        model=str(chunk.model),
                        message=OllamaMessage(role="assistant", content=chunk.text),
                        done=False,
                    )
                    yield f"{response.model_dump_json(exclude_none=True)}\n"
                if done:
                    return
async def collect_ollama_chat_response(
    _command_id: CommandId,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str]:
    """Collect streaming chunks into a single non-streaming Ollama response.

    Returns an AsyncGenerator[str] (single yield) for consistency with FastAPI
    StreamingResponse cancellation handling.

    Raises:
        ValueError: if an ErrorChunk arrives on the stream.
    """
    text_parts: list[str] = []
    thinking_parts: list[str] = []
    tool_calls: list[OllamaToolCall] = []
    model: str | None = None
    finish_reason: str | None = None
    prompt_eval_count: int | None = None
    eval_count: int | None = None
    async for chunk in chunk_stream:
        match chunk:
            case PrefillProgressChunk():
                # Internal progress signal; not part of the Ollama response.
                continue
            case ErrorChunk():
                raise ValueError(chunk.error_message or "Internal server error")
            case TokenChunk():
                if model is None:
                    model = str(chunk.model)
                # Thinking and visible text accumulate into separate buffers.
                if chunk.is_thinking:
                    thinking_parts.append(chunk.text)
                else:
                    text_parts.append(chunk.text)
                if chunk.finish_reason is not None:
                    finish_reason = chunk.finish_reason
                    prompt_eval_count, eval_count = _get_usage(chunk)
            case ToolCallChunk():
                if model is None:
                    model = str(chunk.model)
                tool_calls.extend(_build_tool_calls(chunk))
                finish_reason = chunk.finish_reason
                prompt_eval_count, eval_count = _get_usage(chunk)
    combined_text = "".join(text_parts)
    combined_thinking = "".join(thinking_parts) if thinking_parts else None
    # At least one token or tool-call chunk must have arrived to set `model`.
    assert model is not None
    yield OllamaChatResponse(
        model=model,
        message=OllamaMessage(
            role="assistant",
            content=combined_text,
            thinking=combined_thinking,
            tool_calls=tool_calls if tool_calls else None,
        ),
        done=True,
        done_reason=_map_done_reason(finish_reason),
        prompt_eval_count=prompt_eval_count,
        eval_count=eval_count,
    ).model_dump_json(exclude_none=True)
    return
# ── /api/generate ──
def ollama_generate_request_to_text_generation(
    request: OllamaGenerateRequest,
) -> TextGenerationTaskParams:
    """Convert Ollama generate request to exo's internal text generation format."""
    # Re-serialize the system prompt (if any) and the user prompt into plain
    # dicts for chat-template rendering on the worker side.
    template_messages: list[dict[str, Any]] = []
    if request.system:
        template_messages.append({"role": "system", "content": request.system})
    template_messages.append({"role": "user", "content": request.prompt})
    opts = request.options
    return TextGenerationTaskParams(
        model=request.model,
        input=[InputMessage(role="user", content=request.prompt)],
        instructions=request.system,
        max_output_tokens=opts.num_predict if opts else None,
        temperature=opts.temperature if opts else None,
        top_p=opts.top_p if opts else None,
        top_k=opts.top_k if opts else None,
        stop=opts.stop if opts else None,
        seed=opts.seed if opts else None,
        stream=request.stream,
        enable_thinking=request.think,
        chat_template_messages=template_messages if template_messages else None,
    )
async def generate_ollama_generate_stream(
_command_id: CommandId,
chunk_stream: AsyncGenerator[
ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
],
) -> AsyncGenerator[str, None]:
"""Generate streaming responses for /api/generate in Ollama NDJSON format."""
thinking_parts: list[str] = []
async for chunk in chunk_stream:
match chunk:
case PrefillProgressChunk():
continue
case ErrorChunk():
resp = OllamaGenerateResponse(
model=str(chunk.model),
response="",
done=True,
done_reason="error",
)
yield f"{resp.model_dump_json(exclude_none=True)}\n"
return
case ToolCallChunk():
# generate endpoint doesn't support tools; emit as done
prompt_eval, eval_count = _get_usage(chunk)
resp = OllamaGenerateResponse(
model=str(chunk.model),
response="",
done=True,
done_reason="stop",
prompt_eval_count=prompt_eval,
eval_count=eval_count,
)
yield f"{resp.model_dump_json(exclude_none=True)}\n"
return
case TokenChunk():
done = chunk.finish_reason is not None
if chunk.is_thinking:
thinking_parts.append(chunk.text)
resp = OllamaGenerateResponse(
model=str(chunk.model),
response="",
thinking=chunk.text,
done=False,
)
yield f"{resp.model_dump_json(exclude_none=True)}\n"
elif done:
prompt_eval, eval_count = _get_usage(chunk)
resp = OllamaGenerateResponse(
model=str(chunk.model),
response=chunk.text,
done=True,
done_reason=_map_done_reason(chunk.finish_reason),
prompt_eval_count=prompt_eval,
eval_count=eval_count,
)
yield f"{resp.model_dump_json(exclude_none=True)}\n"
else:
resp = OllamaGenerateResponse(
model=str(chunk.model),
response=chunk.text,
done=False,
)
yield f"{resp.model_dump_json(exclude_none=True)}\n"
if done:
return
async def collect_ollama_generate_response(
    _command_id: CommandId,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str]:
    """Collect chunks into a single non-streaming /api/generate response.

    Single-yield AsyncGenerator[str] for consistency with the streaming
    handlers.

    Raises:
        ValueError: if an ErrorChunk arrives on the stream.
    """
    text_parts: list[str] = []
    thinking_parts: list[str] = []
    model: str | None = None
    finish_reason: str | None = None
    prompt_eval_count: int | None = None
    eval_count: int | None = None
    async for chunk in chunk_stream:
        match chunk:
            case PrefillProgressChunk():
                # Internal progress signal; not part of the Ollama response.
                continue
            case ErrorChunk():
                raise ValueError(chunk.error_message or "Internal server error")
            case TokenChunk():
                if model is None:
                    model = str(chunk.model)
                # Thinking and visible text accumulate into separate buffers.
                if chunk.is_thinking:
                    thinking_parts.append(chunk.text)
                else:
                    text_parts.append(chunk.text)
                if chunk.finish_reason is not None:
                    finish_reason = chunk.finish_reason
                    prompt_eval_count, eval_count = _get_usage(chunk)
            case ToolCallChunk():
                # /api/generate has no tool-call output; only metadata is kept.
                if model is None:
                    model = str(chunk.model)
                finish_reason = chunk.finish_reason
                prompt_eval_count, eval_count = _get_usage(chunk)
    # At least one token or tool-call chunk must have arrived to set `model`.
    assert model is not None
    yield OllamaGenerateResponse(
        model=model,
        response="".join(text_parts),
        thinking="".join(thinking_parts) if thinking_parts else None,
        done=True,
        done_reason=_map_done_reason(finish_reason),
        prompt_eval_count=prompt_eval_count,
        eval_count=eval_count,
    ).model_dump_json(exclude_none=True)
    return
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/adapters/ollama.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/types/ollama_api.py | from __future__ import annotations
import time
from typing import Any, Literal
from pydantic import BaseModel, Field
from exo.shared.models.model_cards import ModelId
# https://github.com/ollama/ollama/blob/main/docs/api.md
# Vocabulary types mirrored from Ollama's chat API: message roles and the
# reasons a generation can terminate.
OllamaRole = Literal["system", "user", "assistant", "tool"]
OllamaDoneReason = Literal["stop", "length", "tool_call", "error"]
class OllamaToolFunction(BaseModel, frozen=True):
    """Name/arguments payload of an Ollama tool call."""

    name: str
    # Arguments may arrive as a parsed JSON object or as a raw string.
    arguments: dict[str, Any] | str
    index: int | None = None
class OllamaToolCall(BaseModel, frozen=True):
    """A single tool invocation attached to an assistant message."""

    id: str | None = None
    type: Literal["function"] | None = None
    function: OllamaToolFunction
class OllamaMessage(BaseModel, frozen=True):
    """One chat message in an Ollama conversation (request or response)."""

    role: OllamaRole
    content: str | None = None
    thinking: str | None = None
    tool_calls: list[OllamaToolCall] | None = None
    name: str | None = None
    tool_name: str | None = None
    # Base64-encoded images for multimodal models.
    images: list[str] | None = None
class OllamaOptions(BaseModel, frozen=True):
    """Sampling/generation options shared by chat and generate requests."""

    num_predict: int | None = None
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    stop: str | list[str] | None = None
    seed: int | None = None
class OllamaChatRequest(BaseModel, frozen=True):
    """Request body for Ollama's /api/chat endpoint."""

    model: ModelId
    messages: list[OllamaMessage]
    # Ollama streams by default, matching the upstream API.
    stream: bool = True
    options: OllamaOptions | None = None
    tools: list[dict[str, Any]] | None = None
    format: Literal["json"] | dict[str, Any] | None = None
    keep_alive: str | int | None = None
    think: bool | None = None
class OllamaGenerateRequest(BaseModel, frozen=True):
    """Request body for Ollama's /api/generate endpoint."""

    model: ModelId
    prompt: str = ""
    system: str | None = None
    stream: bool = True
    options: OllamaOptions | None = None
    format: Literal["json"] | dict[str, Any] | None = None
    keep_alive: str | int | None = None
    think: bool | None = None
    # raw=True bypasses the chat template in upstream Ollama.
    raw: bool = False
class OllamaGenerateResponse(BaseModel, frozen=True, strict=True):
    """One NDJSON line of an /api/generate response (streaming or final)."""

    model: str
    # ISO-8601 UTC timestamp, generated at model construction time.
    created_at: str = Field(
        default_factory=lambda: time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    )
    response: str
    thinking: str | None = None
    done: bool
    done_reason: OllamaDoneReason | None = None
    total_duration: int | None = None
    load_duration: int | None = None
    prompt_eval_count: int | None = None
    prompt_eval_duration: int | None = None
    eval_count: int | None = None
    eval_duration: int | None = None
class OllamaShowRequest(BaseModel, frozen=True):
    """Request body for /api/show; accepts either `name` or `model`."""

    name: str | None = None
    model: str | None = None
    verbose: bool | None = None
class OllamaChatResponse(BaseModel, frozen=True, strict=True):
    """One NDJSON line of an /api/chat response (streaming or final)."""

    model: str
    # ISO-8601 UTC timestamp, generated at model construction time.
    created_at: str = Field(
        default_factory=lambda: time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    )
    message: OllamaMessage
    done: bool
    done_reason: OllamaDoneReason | None = None
    total_duration: int | None = None
    load_duration: int | None = None
    prompt_eval_count: int | None = None
    prompt_eval_duration: int | None = None
    eval_count: int | None = None
    eval_duration: int | None = None
class OllamaModelDetails(BaseModel, frozen=True, strict=True):
    """Descriptive metadata for a model, embedded in tags/show/ps replies."""

    format: str | None = None
    family: str | None = None
    parameter_size: str | None = None
    quantization_level: str | None = None
class OllamaModelTag(BaseModel, frozen=True, strict=True):
    """One entry in the /api/tags model listing."""

    name: str
    model: str | None = None
    modified_at: str | None = None
    size: int | None = None
    digest: str | None = None
    details: OllamaModelDetails | None = None
class OllamaTagsResponse(BaseModel, frozen=True, strict=True):
    """Response body for /api/tags."""

    models: list[OllamaModelTag]
class OllamaShowResponse(BaseModel, frozen=True, strict=True):
    """Response body for /api/show."""

    modelfile: str | None = None
    parameters: str | None = None
    template: str | None = None
    details: OllamaModelDetails | None = None
    model_info: dict[str, Any] | None = None
class OllamaPsModel(BaseModel, frozen=True, strict=True):
    """One loaded model in the /api/ps listing."""

    name: str
    model: str
    size: int
    digest: str | None = None
    details: OllamaModelDetails | None = None
    expires_at: str | None = None
    size_vram: int | None = None
class OllamaPsResponse(BaseModel, frozen=True, strict=True):
    """Response body for /api/ps."""

    models: list[OllamaPsModel]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/ollama_api.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/mlx/dsml_encoding.py | import json
import re
from typing import Any
from mlx_lm.chat_templates import deepseek_v32
from exo.shared.types.api import ToolCallItem
# Special tokens re-exported from mlx_lm's DeepSeek V3.2 chat template.
BOS_TOKEN: str = deepseek_v32.bos_token
EOS_TOKEN: str = deepseek_v32.eos_token
DSML_TOKEN: str = deepseek_v32.dsml_token
THINKING_START: str = deepseek_v32.thinking_start_token
THINKING_END: str = deepseek_v32.thinking_end_token
# Role markers use fullwidth pipes (U+FF5C) so they never collide with
# ordinary ASCII text.
USER_TOKEN = "<\uff5cUser\uff5c>"
ASSISTANT_TOKEN = "<\uff5cAssistant\uff5c>"
# Delimiters of a DSML tool-call block in model output.
TOOL_CALLS_START = f"<{DSML_TOKEN}function_calls>"
TOOL_CALLS_END = f"</{DSML_TOKEN}function_calls>"
encode_messages = deepseek_v32.encode_messages
# Matches one <invoke name="..."> ... </invoke> element; group(1) is the
# function name, group(2) the element body.  DOTALL lets values span lines.
_INVOKE_PATTERN = re.compile(
    rf"<{re.escape(DSML_TOKEN)}invoke\s+name=\"([^\"]+)\">"
    rf"(.*?)"
    rf"</{re.escape(DSML_TOKEN)}invoke>",
    re.DOTALL,
)
# Matches one <parameter name="..." string="true|false"> ... </parameter>;
# group(2) tells whether the value is a literal string or JSON-encoded.
_PARAM_PATTERN = re.compile(
    rf"<{re.escape(DSML_TOKEN)}parameter\s+name=\"([^\"]+)\"\s+string=\"(true|false)\">"
    rf"(.*?)"
    rf"</{re.escape(DSML_TOKEN)}parameter>",
    re.DOTALL,
)
def parse_dsml_output(text: str) -> list[ToolCallItem] | None:
    """Parse DSML function_calls block from model output text.

    Args:
        text: The text containing the DSML function_calls block
            (including the start/end markers).

    Returns:
        List of ToolCallItem, or None if parsing fails.
    """
    calls: list[ToolCallItem] = []
    for invocation in _INVOKE_PATTERN.finditer(text):
        name = invocation.group(1)
        body = invocation.group(2)
        arguments: dict[str, Any] = {}
        for param in _PARAM_PATTERN.finditer(body):
            key = param.group(1)
            raw_value = param.group(3)
            if param.group(2) == "true":
                # string="true": the value is taken verbatim.
                arguments[key] = raw_value
                continue
            # string="false": the value should be JSON; fall back to the
            # raw text when it does not decode.
            try:
                arguments[key] = json.loads(raw_value)
            except (json.JSONDecodeError, ValueError):
                arguments[key] = raw_value
        calls.append(ToolCallItem(name=name, arguments=json.dumps(arguments)))
    return calls if calls else None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/dsml_encoding.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_dsml_e2e.py | import json
from collections.abc import Generator
from typing import Any
from exo.shared.types.worker.runner_response import (
GenerationResponse,
ToolCallResponse,
)
from exo.worker.engines.mlx.dsml_encoding import (
ASSISTANT_TOKEN,
BOS_TOKEN,
DSML_TOKEN,
EOS_TOKEN,
THINKING_END,
THINKING_START,
TOOL_CALLS_END,
TOOL_CALLS_START,
USER_TOKEN,
encode_messages,
parse_dsml_output,
)
from exo.worker.runner.llm_inference.runner import parse_deepseek_v32
# ── Shared fixtures ──────────────────────────────────────────────
# OpenAI-style tool schemas shared by every e2e test in this module:
# a two-parameter weather lookup and a single-parameter time lookup.
_WEATHER_TOOLS: list[dict[str, Any]] = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given city",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {"type": "string", "description": "The city name"},
                    "units": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "Temperature units",
                    },
                },
                "required": ["city"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_time",
            "description": "Get the current time in a timezone",
            "parameters": {
                "type": "object",
                "properties": {
                    "timezone": {"type": "string"},
                },
                "required": ["timezone"],
            },
        },
    },
]
def _simulate_tokens(
    texts: list[str],
    finish_on_last: bool = True,
) -> Generator[GenerationResponse]:
    """Simulate a model producing tokens from a list of text strings."""
    last_index = len(texts) - 1
    for token_id, text in enumerate(texts):
        # Only the final token may carry the finish reason.
        finish = "stop" if (finish_on_last and token_id == last_index) else None
        yield GenerationResponse(
            text=text,
            token=token_id,
            finish_reason=finish,
            usage=None,
        )
# ── Test: Standard text response (no tool calls) ────────────────
class TestE2EStandardResponse:
    """Model generates a plain text response — no tool calling involved."""

    def test_plain_text_passthrough(self):
        """Simulate model producing: 'The weather in NYC is 72°F and sunny.'"""
        # Step 1: Encode the prompt (with tools available)
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What's the weather in NYC?"},
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        # Verify prompt structure
        assert BOS_TOKEN in prompt
        assert "## Tools" in prompt
        assert "get_weather" in prompt
        assert f"{USER_TOKEN}What's the weather in NYC?{ASSISTANT_TOKEN}" in prompt
        # Step 2: Simulate model response — plain text tokens (no DSML)
        model_tokens = [
            "The weather",
            " in NYC",
            " is 72",
            "°F",
            " and sunny",
            ".",
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        # Step 3: Verify all tokens pass through as GenerationResponse
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        assert len(tool_results) == 0
        assert len(gen_results) == 6
        full_text = "".join(r.text for r in gen_results)
        assert full_text == "The weather in NYC is 72°F and sunny."
        # The parser must propagate the finish reason on the final token.
        assert gen_results[-1].finish_reason == "stop"
# ── Test: Tool call response ─────────────────────────────────────
class TestE2EToolCallResponse:
    """Model generates a DSML tool call — realistic token boundaries."""

    def test_realistic_tool_call_tokens(self):
        """Simulate model generating a get_weather tool call with realistic token splits.

        Real models split DSML markers across tokens unpredictably.
        This simulates how DeepSeek V3.2 actually tokenizes DSML output.
        """
        # Step 1: Encode prompt
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What's the weather in San Francisco?"},
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        assert "get_weather" in prompt
        # Step 2: Simulate realistic token-by-token model output
        # The model first produces some text, then a DSML tool call block
        model_tokens = [
            "I'll check the weather for you.",
            "\n\n",
            f"<{DSML_TOKEN}",  # marker split across tokens
            "function_calls>\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">',
            "San Francisco",
            f"</{DSML_TOKEN}parameter>\n",
            f'<{DSML_TOKEN}parameter name="units" string="false">',
            '"celsius"',
            f"</{DSML_TOKEN}parameter>\n",
            f"</{DSML_TOKEN}invoke>\n",
            f"</{DSML_TOKEN}function_calls>",
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        # Step 3: Verify
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        # Should have text tokens before tool call + one ToolCallResponse
        assert len(tool_results) == 1
        assert len(tool_results[0].tool_calls) == 1
        tc = tool_results[0].tool_calls[0]
        assert tc.name == "get_weather"
        args = json.loads(tc.arguments)  # pyright: ignore[reportAny]
        assert args["city"] == "San Francisco"
        # string="false" parameters are JSON-decoded, so the quotes are gone.
        assert args["units"] == "celsius"
        # The text before the tool call should still be yielded
        text_before = "".join(r.text for r in gen_results if not r.is_thinking)
        assert "check the weather" in text_before

    def test_multiple_tool_calls_in_one_block(self):
        """Model generates two tool calls in a single function_calls block."""
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "Weather in NYC and time in EST?"},
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        assert "get_weather" in prompt
        assert "get_time" in prompt
        # Simulate model output with two invocations
        model_tokens = [
            "Let me check both.\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">NYC</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            f'<{DSML_TOKEN}invoke name="get_time">\n',
            f'<{DSML_TOKEN}parameter name="timezone" string="true">EST</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        # Both invocations must arrive in a single ToolCallResponse.
        assert len(tool_results) == 1
        assert len(tool_results[0].tool_calls) == 2
        assert tool_results[0].tool_calls[0].name == "get_weather"
        assert tool_results[0].tool_calls[1].name == "get_time"
        args0 = json.loads(tool_results[0].tool_calls[0].arguments)  # pyright: ignore[reportAny]
        args1 = json.loads(tool_results[0].tool_calls[1].arguments)  # pyright: ignore[reportAny]
        assert args0 == {"city": "NYC"}
        assert args1 == {"timezone": "EST"}
# ── Test: Multi-turn tool use flow ───────────────────────────────
class TestE2EMultiTurnToolUse:
    """Full multi-turn: user asks → model calls tool → tool result → model answers."""

    def test_encode_multi_turn_with_tool_results(self):
        """Verify the prompt for turn 2 (after tool results) is correctly encoded."""
        # Turn 1: user asks, model calls tool
        # Turn 2: tool result provided, model answers
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are a weather assistant."},
            {"role": "user", "content": "What's the weather in NYC?"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "NYC"}',
                        },
                    }
                ],
            },
            {"role": "tool", "content": '{"temperature": 72, "condition": "sunny"}'},
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        # Verify multi-turn structure
        assert BOS_TOKEN in prompt
        assert "You are a weather assistant." in prompt
        assert "## Tools" in prompt
        # The assistant's tool call should be encoded as DSML
        assert TOOL_CALLS_START in prompt
        assert f'<{DSML_TOKEN}invoke name="get_weather">' in prompt
        assert EOS_TOKEN in prompt
        # The tool result should be wrapped in function_results
        assert "<function_results>" in prompt
        assert "<result>" in prompt
        assert "72" in prompt
        assert "</function_results>" in prompt
        # Now simulate model answering after seeing the tool result
        model_tokens = [
            "The current",
            " weather in NYC",
            " is 72°F",
            " and sunny.",
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        # A plain-text answer must not be mistaken for another tool call.
        assert len(tool_results) == 0
        full_text = "".join(r.text for r in gen_results)
        assert full_text == "The current weather in NYC is 72°F and sunny."

    def test_multi_tool_results_encoding(self):
        """Verify encoding when model called two tools and both return results."""
        messages: list[dict[str, Any]] = [
            {"role": "user", "content": "Weather and time?"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "LA"}',
                        },
                    },
                    {
                        "type": "function",
                        "function": {
                            "name": "get_time",
                            "arguments": '{"timezone": "PST"}',
                        },
                    },
                ],
            },
            {"role": "tool", "content": "85F, clear skies"},
            {"role": "tool", "content": "3:42 PM PST"},
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        # Should have one function_results block with two results
        assert prompt.count("<function_results>") == 1
        assert prompt.count("</function_results>") == 1
        assert "<result>85F, clear skies</result>" in prompt
        assert "<result>3:42 PM PST</result>" in prompt
# ── Test: Thinking + tool call ───────────────────────────────────
class TestE2EThinkingAndToolCall:
    """Model uses thinking mode, reasons, then makes a tool call."""

    def test_thinking_then_tool_call(self):
        """Model thinks first, then produces a DSML tool call block."""
        messages: list[dict[str, Any]] = [
            {"role": "user", "content": "What's the weather?"},
        ]
        prompt = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        # Thinking mode: prompt should end with <think>
        assert prompt.endswith(THINKING_START)
        # Simulate: model outputs <think>, thinks, closes thinking, then tool call.
        # In the full pipeline, parse_thinking_models handles the case where
        # <think> is in the prompt. Here we test parse_deepseek_v32 directly,
        # which detects <think>/<think> markers in the stream.
        model_tokens = [
            THINKING_START,
            "The user wants weather",
            " information. I should use",
            " the get_weather tool.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">',
            "San Francisco",
            f"</{DSML_TOKEN}parameter>\n",
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        # Should have thinking tokens + tool call
        thinking_results = [r for r in gen_results if r.is_thinking]
        assert len(thinking_results) >= 1
        thinking_text = "".join(r.text for r in thinking_results)
        assert "get_weather tool" in thinking_text
        assert len(tool_results) == 1
        assert tool_results[0].tool_calls[0].name == "get_weather"
        args = json.loads(tool_results[0].tool_calls[0].arguments)  # pyright: ignore[reportAny]
        assert args["city"] == "San Francisco"

    def test_thinking_prompt_encoding(self):
        """Verify thinking mode affects prompt encoding correctly."""
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "Be thorough."},
            {"role": "user", "content": "What's the weather?"},
        ]
        # With thinking enabled
        prompt_think = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        assert prompt_think.endswith(THINKING_START)
        # With thinking disabled
        prompt_no_think = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="chat"
        )
        # chat mode pre-closes the thinking span so the model skips reasoning.
        assert prompt_no_think.endswith(THINKING_END)
        # Both should have the same tool definitions
        assert "get_weather" in prompt_think
        assert "get_weather" in prompt_no_think
# ── Test: Round-trip encode → parse ──────────────────────────────
class TestE2ERoundTrip:
    """Verify that DSML we encode can be parsed back correctly."""

    def test_encoded_tool_call_is_parseable(self):
        """Encode an assistant tool call message, then parse the DSML output."""
        messages: list[dict[str, Any]] = [
            {"role": "user", "content": "Weather?"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Tokyo", "units": "celsius"}',
                        },
                    }
                ],
            },
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        # Extract the DSML function_calls block from the prompt
        start = prompt.index(TOOL_CALLS_START)
        end = prompt.index(TOOL_CALLS_END) + len(TOOL_CALLS_END)
        dsml_block = prompt[start:end]
        # Parse it back
        parsed = parse_dsml_output(dsml_block)
        assert parsed is not None
        assert len(parsed) == 1
        assert parsed[0].name == "get_weather"
        args = json.loads(parsed[0].arguments)  # pyright: ignore[reportAny]
        assert args["city"] == "Tokyo"
        assert args["units"] == "celsius"

    def test_encoded_multi_tool_call_round_trips(self):
        """Encode multiple tool calls, verify they parse back correctly."""
        messages: list[dict[str, Any]] = [
            {"role": "user", "content": "Both please"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Paris"}',
                        },
                    },
                    {
                        "type": "function",
                        "function": {
                            "name": "get_time",
                            "arguments": '{"timezone": "CET"}',
                        },
                    },
                ],
            },
        ]
        prompt = encode_messages(messages, thinking_mode="chat", tools=_WEATHER_TOOLS)
        start = prompt.index(TOOL_CALLS_START)
        end = prompt.index(TOOL_CALLS_END) + len(TOOL_CALLS_END)
        dsml_block = prompt[start:end]
        parsed = parse_dsml_output(dsml_block)
        assert parsed is not None
        # Order of the original tool_calls must be preserved.
        assert len(parsed) == 2
        assert parsed[0].name == "get_weather"
        assert parsed[1].name == "get_time"
        assert json.loads(parsed[0].arguments) == {"city": "Paris"}
        assert json.loads(parsed[1].arguments) == {"timezone": "CET"}
# ── Test: Edge cases with realistic token boundaries ─────────────
class TestE2EEdgeCases:
    """Edge cases that occur in real model inference."""

    def test_dsml_marker_split_at_fullwidth_pipe(self):
        """The fullwidth pipe character | might be its own token."""
        # This is a realistic tokenization: the DSML marker is split at the | chars
        model_tokens = [
            "Let me help.\n\n",
            "<\uff5c",  # start of |DSML|
            "DSML\uff5c",  # rest of DSML token
            "function_calls>\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">NYC</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        assert len(tool_results) == 1
        assert tool_results[0].tool_calls[0].name == "get_weather"

    def test_tool_call_with_nested_json_object(self):
        """Model passes a complex JSON object as a non-string parameter."""
        dsml_block = (
            f"{TOOL_CALLS_START}\n"
            f'<{DSML_TOKEN}invoke name="create_event">\n'
            f'<{DSML_TOKEN}parameter name="title" string="true">Team Standup</{DSML_TOKEN}parameter>\n'
            f'<{DSML_TOKEN}parameter name="config" string="false">'
            f'{{"recurring": true, "days": ["mon", "wed", "fri"], "time": "09:00"}}'
            f"</{DSML_TOKEN}parameter>\n"
            f"</{DSML_TOKEN}invoke>\n"
            f"{TOOL_CALLS_END}"
        )
        # Feed as single token (model might produce it all at once after prefill)
        results = list(parse_deepseek_v32(_simulate_tokens([dsml_block])))
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        assert len(tool_results) == 1
        tc = tool_results[0].tool_calls[0]
        assert tc.name == "create_event"
        args = json.loads(tc.arguments)  # pyright: ignore[reportAny]
        assert args["title"] == "Team Standup"
        # Nested structures survive the string="false" JSON decode intact.
        assert args["config"]["recurring"] is True
        assert args["config"]["days"] == ["mon", "wed", "fri"]

    def test_text_with_angle_brackets_not_mistaken_for_dsml(self):
        """Angle brackets in normal text should not trigger DSML buffering."""
        model_tokens = [
            "The formula is ",
            "<x, y>",
            " where x > 0",
            " and y < 100.",
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        assert len(tool_results) == 0
        full_text = "".join(r.text for r in gen_results)
        assert "formula" in full_text
        assert "<x, y>" in full_text

    def test_empty_model_response(self):
        """Model produces only EOS (empty response)."""
        model_tokens = [""]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        gen_results = [r for r in results if isinstance(r, GenerationResponse)]
        # Even an empty generation must surface one finishing response.
        assert len(gen_results) == 1
        assert gen_results[0].text == ""
        assert gen_results[0].finish_reason == "stop"
# ── Test: Full EPDP spec round-trip ──────────────────────────────
class TestE2EFullRoundTrip:
    """Full round-trip matching the vLLM EPDP spec.

    Simulates the complete multi-turn flow:
    Turn 1: user asks → think → tool call → tool result → think → answer
    Turn 2: user asks again → old reasoning stripped → think → answer
    """

    def test_single_tool_full_flow_with_thinking(self):
        """Complete flow: user → think → tool call → tool result → think → answer.

        This is the core EPDP flow from the vLLM spec.
        """
        # ── Turn 1.1: User asks, encode prompt ──
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are a weather assistant."},
            {"role": "user", "content": "How's the weather in Hangzhou?"},
        ]
        prompt_1 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        assert prompt_1.endswith(THINKING_START)
        assert "## Tools" in prompt_1
        assert "get_weather" in prompt_1
        # ── Turn 1.1: Model thinks, then calls tool ──
        model_tokens_1 = [
            THINKING_START,
            "The user wants to know the weather in Hangzhou.",
            " I need to use the get_weather tool.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">Hangzhou</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results_1 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_1)))
        # Verify: thinking tokens + tool call
        gen_1 = [r for r in results_1 if isinstance(r, GenerationResponse)]
        tool_1 = [r for r in results_1 if isinstance(r, ToolCallResponse)]
        thinking_1 = [r for r in gen_1 if r.is_thinking]
        assert len(thinking_1) >= 1
        assert "get_weather tool" in "".join(r.text for r in thinking_1)
        assert len(tool_1) == 1
        assert tool_1[0].tool_calls[0].name == "get_weather"
        tc_args = json.loads(tool_1[0].tool_calls[0].arguments)  # pyright: ignore[reportAny]
        assert tc_args == {"city": "Hangzhou"}
        # ── Turn 1.2: Add assistant response + tool result to messages ──
        messages.append(
            {
                "role": "assistant",
                "content": "",
                "reasoning_content": "The user wants to know the weather in Hangzhou. I need to use the get_weather tool.",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Hangzhou"}',
                        },
                    }
                ],
            }
        )
        messages.append(
            {
                "role": "tool",
                "content": '{"temperature": "7~13°C", "condition": "Cloudy"}',
            }
        )
        # Encode prompt for turn 1.2
        prompt_2 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        # Verify: prompt has the full conversation structure
        assert TOOL_CALLS_START in prompt_2  # assistant's encoded tool call
        assert EOS_TOKEN in prompt_2  # assistant turn ends with EOS
        assert "<function_results>" in prompt_2
        assert "<result>" in prompt_2
        assert "Cloudy" in prompt_2
        assert "</function_results>" in prompt_2
        # After tool results with thinking enabled → <think> appended
        assert prompt_2.endswith(THINKING_START)
        # The assistant's reasoning_content should appear (it's after last_user_idx)
        assert "get_weather tool" in prompt_2
        # ── Turn 1.2: Model thinks about results, then answers ──
        model_tokens_2 = [
            THINKING_START,
            "The weather in Hangzhou is Cloudy, 7~13°C.",
            " I'll tell the user.",
            THINKING_END,
            "The weather in Hangzhou is currently cloudy with temperatures between 7°C and 13°C.",
        ]
        results_2 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_2)))
        gen_2 = [r for r in results_2 if isinstance(r, GenerationResponse)]
        tool_2 = [r for r in results_2 if isinstance(r, ToolCallResponse)]
        thinking_2 = [r for r in gen_2 if r.is_thinking]
        non_thinking_2 = [r for r in gen_2 if not r.is_thinking]
        assert len(tool_2) == 0  # No more tool calls
        assert len(thinking_2) >= 1
        assert "Cloudy" in "".join(r.text for r in thinking_2)
        assert len(non_thinking_2) >= 1
        final_text = "".join(r.text for r in non_thinking_2)
        assert "7°C" in final_text
        assert "13°C" in final_text

    def test_multi_tool_full_flow(self):
        """Flow with two tools: user → think → 2 tool calls → 2 results → think → answer."""
        # ── Initial prompt ──
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You help with weather and time."},
            {"role": "user", "content": "Weather in NYC and time in EST?"},
        ]
        prompt_1 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        assert prompt_1.endswith(THINKING_START)
        # ── Model thinks, calls both tools ──
        model_tokens_1 = [
            THINKING_START,
            "Two requests: weather and time. I'll call both.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">NYC</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            f'<{DSML_TOKEN}invoke name="get_time">\n',
            f'<{DSML_TOKEN}parameter name="timezone" string="true">EST</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results_1 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_1)))
        tool_1 = [r for r in results_1 if isinstance(r, ToolCallResponse)]
        # Both invokes must arrive in ONE ToolCallResponse, in order.
        assert len(tool_1) == 1
        assert len(tool_1[0].tool_calls) == 2
        assert tool_1[0].tool_calls[0].name == "get_weather"
        assert tool_1[0].tool_calls[1].name == "get_time"
        # ── Add assistant + both tool results ──
        messages.append(
            {
                "role": "assistant",
                "content": "",
                "reasoning_content": "Two requests: weather and time. I'll call both.",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "NYC"}',
                        },
                    },
                    {
                        "type": "function",
                        "function": {
                            "name": "get_time",
                            "arguments": '{"timezone": "EST"}',
                        },
                    },
                ],
            }
        )
        messages.append({"role": "tool", "content": "72°F, sunny"})
        messages.append({"role": "tool", "content": "2:30 PM EST"})
        prompt_2 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        # Verify multi-tool result encoding
        # Count is 2: 1 in _TOOLS_SYSTEM_TEMPLATE example + 1 in conversation
        assert prompt_2.count("<function_results>") == 2
        assert prompt_2.count("</function_results>") == 2
        assert "<result>72°F, sunny</result>" in prompt_2
        assert "<result>2:30 PM EST</result>" in prompt_2
        assert prompt_2.endswith(THINKING_START)
        # ── Model thinks about results, answers ──
        model_tokens_2 = [
            THINKING_START,
            "Got both results. Weather is 72°F sunny, time is 2:30 PM.",
            THINKING_END,
            "In NYC it's currently 72°F and sunny. The time in EST is 2:30 PM.",
        ]
        results_2 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_2)))
        tool_2 = [r for r in results_2 if isinstance(r, ToolCallResponse)]
        gen_2 = [r for r in results_2 if isinstance(r, GenerationResponse)]
        non_thinking_2 = [r for r in gen_2 if not r.is_thinking]
        assert len(tool_2) == 0
        final_text = "".join(r.text for r in non_thinking_2)
        assert "72°F" in final_text
        assert "2:30 PM" in final_text

    def test_two_user_turns_reasoning_stripped(self):
        """Turn 2: old reasoning_content is stripped from history.

        Per the vLLM spec, clear_reasoning_content is called between user turns
        to save bandwidth. Our _drop_old_thinking handles this.
        """
        # Full turn 1 conversation (already completed)
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "Weather in Hangzhou?"},
            {
                "role": "assistant",
                "content": "",
                "reasoning_content": "I need to call get_weather for Hangzhou.",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Hangzhou"}',
                        },
                    }
                ],
            },
            {"role": "tool", "content": "Cloudy 7~13°C"},
            {
                "role": "assistant",
                "content": "The weather in Hangzhou is cloudy, 7-13°C.",
                "reasoning_content": "The tool returned cloudy weather. I'll summarize.",
            },
            # Turn 2: user asks again
            {"role": "user", "content": "What about Beijing?"},
        ]
        prompt = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        # Old reasoning_content from turn 1 assistants should be STRIPPED
        # (they're before the last user message at index 5)
        assert "I need to call get_weather" not in prompt
        assert "tool returned cloudy" not in prompt
        # But the assistant's content and tool calls should still be there
        assert "cloudy, 7-13°C" in prompt
        assert TOOL_CALLS_START in prompt
        # Prompt ends with <think> for the new turn
        assert prompt.endswith(THINKING_START)
        # ── Turn 2: Model thinks, calls tool for Beijing ──
        model_tokens = [
            THINKING_START,
            "Now the user wants Beijing weather.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">Beijing</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
        tool_results = [r for r in results if isinstance(r, ToolCallResponse)]
        assert len(tool_results) == 1
        assert tool_results[0].tool_calls[0].name == "get_weather"
        args = json.loads(tool_results[0].tool_calls[0].arguments)  # pyright: ignore[reportAny]
        assert args == {"city": "Beijing"}

    def test_chained_tool_calls_loop(self):
        """Model calls tool, gets result, calls another tool, gets result, answers.

        This simulates the inner while loop from the vLLM spec where the model
        may need multiple sub-turns of tool calling before it has enough info.
        """
        # ── Sub-turn 1: user asks, model calls get_time ──
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "What's the weather in Hangzhou tomorrow?"},
        ]
        prompt_1 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        assert prompt_1.endswith(THINKING_START)
        # Model first calls get_time to figure out the date
        model_tokens_1 = [
            THINKING_START,
            "I need the current date first to calculate tomorrow.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_time">\n',
            f'<{DSML_TOKEN}parameter name="timezone" string="true">Asia/Shanghai</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results_1 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_1)))
        tool_1 = [r for r in results_1 if isinstance(r, ToolCallResponse)]
        assert len(tool_1) == 1
        assert tool_1[0].tool_calls[0].name == "get_time"
        # ── Sub-turn 2: add tool result, model calls get_weather ──
        messages.append(
            {
                "role": "assistant",
                "content": "",
                "reasoning_content": "I need the current date first to calculate tomorrow.",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_time",
                            "arguments": '{"timezone": "Asia/Shanghai"}',
                        },
                    }
                ],
            }
        )
        messages.append({"role": "tool", "content": "2025-12-01 14:30 CST"})
        prompt_2 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        assert "<result>2025-12-01 14:30 CST</result>" in prompt_2
        assert prompt_2.endswith(THINKING_START)
        # Model now knows the date, calls get_weather
        model_tokens_2 = [
            THINKING_START,
            "Today is 2025-12-01, so tomorrow is 2025-12-02.",
            " Now I can check weather for Hangzhou.",
            THINKING_END,
            "\n\n",
            TOOL_CALLS_START,
            "\n",
            f'<{DSML_TOKEN}invoke name="get_weather">\n',
            f'<{DSML_TOKEN}parameter name="city" string="true">Hangzhou</{DSML_TOKEN}parameter>\n',
            f"</{DSML_TOKEN}invoke>\n",
            TOOL_CALLS_END,
        ]
        results_2 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_2)))
        tool_2 = [r for r in results_2 if isinstance(r, ToolCallResponse)]
        assert len(tool_2) == 1
        assert tool_2[0].tool_calls[0].name == "get_weather"
        # ── Sub-turn 3: add weather result, model answers ──
        messages.append(
            {
                "role": "assistant",
                "content": "",
                "reasoning_content": "Today is 2025-12-01, so tomorrow is 2025-12-02. Now I can check weather for Hangzhou.",
                "tool_calls": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Hangzhou"}',
                        },
                    }
                ],
            }
        )
        messages.append({"role": "tool", "content": "Sunny, 5~12°C"})
        prompt_3 = encode_messages(
            messages, tools=_WEATHER_TOOLS, thinking_mode="thinking"
        )
        # Should have both function_results blocks (one per tool round)
        # Count is 3: 1 in _TOOLS_SYSTEM_TEMPLATE example + 2 in conversation
        assert prompt_3.count("<function_results>") == 3
        assert prompt_3.count("</function_results>") == 3
        assert "<result>2025-12-01 14:30 CST</result>" in prompt_3
        assert "<result>Sunny, 5~12°C</result>" in prompt_3
        assert prompt_3.endswith(THINKING_START)
        # Model finally answers
        model_tokens_3 = [
            THINKING_START,
            "I have the weather for tomorrow in Hangzhou.",
            THINKING_END,
            "Tomorrow in Hangzhou will be sunny with temperatures between 5°C and 12°C.",
        ]
        results_3 = list(parse_deepseek_v32(_simulate_tokens(model_tokens_3)))
        tool_3 = [r for r in results_3 if isinstance(r, ToolCallResponse)]
        gen_3 = [r for r in results_3 if isinstance(r, GenerationResponse)]
        non_thinking_3 = [r for r in gen_3 if not r.is_thinking]
        assert len(tool_3) == 0  # No more tool calls — loop ends
        final_text = "".join(r.text for r in non_thinking_3)
        assert "sunny" in final_text.lower()
        assert "5°C" in final_text
        assert "12°C" in final_text
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_runner/test_dsml_e2e.py",
"license": "Apache License 2.0",
"lines": 835,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:bench/eval_tool_calls.py | # pyright: reportAny=false, reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownArgumentType=false
from __future__ import annotations
import argparse
import contextlib
import json
import os
import sys
import time
import tomllib
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Literal
import httpx
from harness import (
ExoClient,
ExoHttpError,
add_common_instance_args,
instance_id_from_instance,
nodes_used_in_instance,
resolve_model_short_id,
run_planning_phase,
settle_and_fetch_placements,
wait_for_instance_gone,
wait_for_instance_ready,
)
SCENARIOS_PATH = Path(__file__).parent / "scenarios.toml"
@dataclass
class Scenario:
    """One tool-calling eval case loaded from scenarios.toml."""

    name: str
    description: str
    # Conversation in OpenAI chat format.
    messages: list[dict[str, Any]]
    # Tools available to the model (OpenAI function format).
    tools: list[dict[str, Any]]
    # Whether the model is expected to emit a tool call for this prompt.
    expect_tool_call: bool
    expected_function: str | None = None
    # Keys that must be present in the tool call's JSON arguments.
    required_arg_keys: list[str] | None = None
    # JSON-encoded canned tool output; when set, a follow-up turn is run.
    tool_result: str | None = None
    # Optional nested validation: args[nested_array_key] must be a non-empty
    # list of objects each carrying required_item_keys.
    nested_array_key: str | None = None
    required_item_keys: list[str] | None = None
def load_scenarios(path: Path) -> list[Scenario]:
    """Read scenario definitions from a TOML file.

    The [tools] table defines tool schemas (OpenAI function format); each
    [[scenarios]] entry references tools by name and carries the conversation
    plus the validation expectations.
    """
    with open(path, "rb") as fh:
        raw = tomllib.load(fh)

    # Build OpenAI-format tool definitions, keeping a name -> tool lookup.
    tool_by_name: dict[str, dict[str, Any]] = {}
    for tool_name, spec in raw.get("tools", {}).items():
        tool_by_name[tool_name] = {
            "type": "function",
            "function": {
                "name": tool_name,
                "description": spec.get("description", ""),
                "parameters": {
                    "type": "object",
                    "properties": spec.get("properties", {}),
                    "required": spec.get("required", []),
                },
            },
        }
    all_tools = list(tool_by_name.values())

    loaded: list[Scenario] = []
    for entry in raw.get("scenarios", []):
        # A scenario may restrict itself to a named subset of the tools.
        selected = (
            [tool_by_name[t] for t in entry["tools"]]
            if "tools" in entry
            else list(all_tools)
        )

        convo: list[dict[str, Any]] = []
        for msg in entry.get("messages", []):
            item: dict[str, Any] = {"role": msg["role"]}
            if "content" in msg:
                item["content"] = msg["content"]
            if "tool_calls" in msg:
                item["tool_calls"] = [
                    {
                        "id": tc["id"],
                        "type": "function",
                        "function": {
                            "name": tc["name"],
                            "arguments": json.dumps(tc["arguments"]),
                        },
                    }
                    for tc in msg["tool_calls"]
                ]
            if "tool_call_id" in msg:
                item["tool_call_id"] = msg["tool_call_id"]
            convo.append(item)

        loaded.append(
            Scenario(
                name=entry["name"],
                description=entry["description"],
                messages=convo,
                tools=selected,
                expect_tool_call=entry["expect_tool_call"],
                expected_function=entry.get("expected_function"),
                required_arg_keys=entry.get("required_arg_keys"),
                tool_result=(
                    json.dumps(entry["tool_result"]) if "tool_result" in entry else None
                ),
                nested_array_key=entry.get("nested_array_key"),
                required_item_keys=entry.get("required_item_keys"),
            )
        )
    return loaded
ApiName = Literal["openai", "claude", "responses"]
@dataclass
class ParsedResponse:
    """API-agnostic view of a single model response."""

    finish_reason: str  # "tool_calls" | "stop" | ...
    has_tool_call: bool
    tool_call: dict[str, str] | None  # {"id": ..., "name": ..., "arguments": ...}
    content: str | None  # concatenated text content, if any
@dataclass
class ScenarioResult:
    """Outcome of one scenario phase against one API adapter."""

    name: str
    api: str
    phase: str  # "tool_call" or "follow_up"
    passed: bool
    checks: dict[str, bool] = field(default_factory=dict)
    error: str | None = None
    latency_ms: float = 0.0
def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool, str | None]:
"""Parse JSON arguments and check required keys exist."""
try:
args = json.loads(args_str)
except (json.JSONDecodeError, TypeError) as exc:
return False, f"Invalid JSON: {exc}"
if not isinstance(args, dict):
return False, f"Expected dict, got {type(args).__name__}"
missing = [k for k in required_keys if k not in args]
if missing:
return False, f"Missing keys: {missing}"
return True, None
def validate_nested_args(
args_str: str,
array_key: str,
required_item_keys: list[str],
) -> tuple[bool, str | None]:
"""Check that args[array_key] is a list of objects with required keys."""
try:
args = json.loads(args_str)
except (json.JSONDecodeError, TypeError) as exc:
return False, f"Invalid JSON: {exc}"
if not isinstance(args, dict):
return False, f"Expected dict, got {type(args).__name__}"
arr = args.get(array_key)
if not isinstance(arr, list):
return False, f"'{array_key}' is not an array (got {type(arr).__name__})"
if len(arr) == 0:
return False, f"'{array_key}' is empty"
for i, item in enumerate(arr):
if not isinstance(item, dict):
return (
False,
f"'{array_key}[{i}]' is not an object (got {type(item).__name__})",
)
missing = [k for k in required_item_keys if k not in item]
if missing:
return False, f"'{array_key}[{i}]' missing keys: {missing}"
return True, None
def call_api(
    client: httpx.Client,
    host: str,
    port: int,
    path: str,
    body: dict[str, Any],
    timeout: float,
) -> tuple[dict[str, Any], float]:
    """POST body to http://{host}:{port}{path}; return (response JSON, latency in ms).

    Raises httpx.HTTPStatusError on non-2xx responses.
    """
    target = f"http://{host}:{port}{path}"
    started = time.monotonic()
    response = client.post(target, json=body, timeout=timeout)
    elapsed_ms = (time.monotonic() - started) * 1000
    response.raise_for_status()
    return response.json(), elapsed_ms
def _openai_build_request(
model: str,
messages: list[dict[str, Any]],
tools: list[dict[str, Any]],
) -> tuple[str, dict[str, Any]]:
"""Build request for /v1/chat/completions."""
body: dict[str, Any] = {
"model": model,
"messages": messages,
"tools": tools,
"max_tokens": 16384,
"temperature": 0.0,
}
return "/v1/chat/completions", body
def _openai_parse_response(data: dict[str, Any]) -> ParsedResponse:
    """Parse an OpenAI Chat Completions response into the common format.

    Only the first tool call (if any) is surfaced.
    """
    choice = data["choices"][0]
    message = choice.get("message", {})
    calls = message.get("tool_calls")
    found_call = isinstance(calls, list) and len(calls) > 0
    first_call: dict[str, str] | None = None
    if found_call:
        fn = calls[0].get("function", {})
        first_call = {
            "id": calls[0].get("id", "call_0"),
            "name": fn.get("name", ""),
            "arguments": fn.get("arguments", "{}"),
        }
    return ParsedResponse(
        finish_reason=choice.get("finish_reason", ""),
        has_tool_call=found_call,
        tool_call=first_call,
        content=message.get("content"),
    )
def _openai_build_followup(
messages: list[dict[str, Any]],
tools: list[dict[str, Any]],
model: str,
parsed: ParsedResponse,
tool_result: str,
) -> tuple[str, dict[str, Any]]:
"""Build multi-turn follow-up for OpenAI Chat Completions."""
assert parsed.tool_call is not None
tc = parsed.tool_call
followup_messages: list[dict[str, Any]] = list(messages) + [
{
"role": "assistant",
"tool_calls": [
{
"id": tc["id"],
"type": "function",
"function": {
"name": tc["name"],
"arguments": tc["arguments"],
},
}
],
},
{
"role": "tool",
"tool_call_id": tc["id"],
"content": tool_result,
},
]
body: dict[str, Any] = {
"model": model,
"messages": followup_messages,
"tools": tools,
"max_tokens": 16384,
"temperature": 0.0,
}
return "/v1/chat/completions", body
def _claude_translate_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Translate OpenAI-format tools to Claude format."""
claude_tools: list[dict[str, Any]] = []
for tool in tools:
fn = tool["function"]
claude_tools.append(
{
"name": fn["name"],
"description": fn.get("description", ""),
"input_schema": fn.get("parameters", {}),
}
)
return claude_tools
def _claude_translate_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Translate OpenAI-format messages to Claude Messages format."""
claude_messages: list[dict[str, Any]] = []
for msg in messages:
role = msg["role"]
if role == "user":
claude_messages.append(
{
"role": "user",
"content": msg["content"],
}
)
elif role == "assistant":
content_blocks: list[dict[str, Any]] = []
text_content = msg.get("content")
if text_content and isinstance(text_content, str) and text_content.strip():
content_blocks.append({"type": "text", "text": text_content})
tool_calls = msg.get("tool_calls")
if tool_calls:
for tc in tool_calls:
fn = tc.get("function", {})
args_str = fn.get("arguments", "{}")
try:
args_dict = json.loads(args_str)
except (json.JSONDecodeError, TypeError):
args_dict = {}
content_blocks.append(
{
"type": "tool_use",
"id": tc.get("id", "call_0"),
"name": fn.get("name", ""),
"input": args_dict,
}
)
if not content_blocks:
content_blocks.append({"type": "text", "text": ""})
claude_messages.append(
{
"role": "assistant",
"content": content_blocks,
}
)
elif role == "tool":
claude_messages.append(
{
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": msg.get("tool_call_id", "call_0"),
"content": msg.get("content", ""),
}
],
}
)
elif role == "system":
pass
return claude_messages
def _claude_build_request(
    model: str,
    messages: list[dict[str, Any]],
    tools: list[dict[str, Any]],
) -> tuple[str, dict[str, Any]]:
    """Build a deterministic request for /v1/messages."""
    # Claude takes the system prompt as a top-level field, not a message.
    system_prompt = next(
        (m["content"] for m in messages if m["role"] == "system"), None
    )
    payload: dict[str, Any] = {
        "model": model,
        "messages": _claude_translate_messages(messages),
        "tools": _claude_translate_tools(tools),
        "max_tokens": 16384,
        "temperature": 0.0,
    }
    if system_prompt is not None:
        payload["system"] = system_prompt
    return "/v1/messages", payload
def _claude_parse_response(data: dict[str, Any]) -> ParsedResponse:
    """Parse a Claude Messages response into the common format.

    Maps Claude stop reasons onto OpenAI-style finish reasons and surfaces
    only the first tool_use block.
    """
    stop_reason = data.get("stop_reason", "")
    finish_reason = {"tool_use": "tool_calls", "end_turn": "stop"}.get(
        stop_reason, stop_reason
    )
    first_call: dict[str, str] | None = None
    texts: list[str] = []
    saw_tool = False
    for block in data.get("content", []):
        kind = block.get("type")
        if kind == "tool_use":
            saw_tool = True
            if first_call is None:
                raw_input = block.get("input", {})
                first_call = {
                    "id": block.get("id", "call_0"),
                    "name": block.get("name", ""),
                    "arguments": json.dumps(raw_input)
                    if isinstance(raw_input, dict)
                    else str(raw_input),
                }
        elif kind == "text":
            chunk = block.get("text", "")
            if chunk.strip():
                texts.append(chunk)
    return ParsedResponse(
        finish_reason=finish_reason,
        has_tool_call=saw_tool,
        tool_call=first_call,
        content="\n".join(texts) if texts else None,
    )
def _claude_build_followup(
    messages: list[dict[str, Any]],
    tools: list[dict[str, Any]],
    model: str,
    parsed: ParsedResponse,
    tool_result: str,
) -> tuple[str, dict[str, Any]]:
    """Build the multi-turn follow-up for Claude Messages.

    Replays the assistant's tool_use block, then hands the canned result back
    as a user-role tool_result block.
    """
    assert parsed.tool_call is not None
    call = parsed.tool_call
    try:
        call_input = json.loads(call["arguments"])
    except (json.JSONDecodeError, TypeError):
        call_input = {}
    history = _claude_translate_messages(messages)
    history.append(
        {
            "role": "assistant",
            "content": [
                {
                    "type": "tool_use",
                    "id": call["id"],
                    "name": call["name"],
                    "input": call_input,
                }
            ],
        }
    )
    history.append(
        {
            "role": "user",
            "content": [
                {
                    "type": "tool_result",
                    "tool_use_id": call["id"],
                    "content": tool_result,
                }
            ],
        }
    )
    # Claude takes the system prompt as a top-level field, not a message.
    system_prompt = next(
        (m["content"] for m in messages if m["role"] == "system"), None
    )
    body: dict[str, Any] = {
        "model": model,
        "messages": history,
        "tools": _claude_translate_tools(tools),
        "max_tokens": 16384,
        "temperature": 0.0,
    }
    if system_prompt is not None:
        body["system"] = system_prompt
    return "/v1/messages", body
def _responses_translate_input(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Translate OpenAI chat messages to Responses API input items."""
items: list[dict[str, Any]] = []
for msg in messages:
role = msg["role"]
if role in ("user", "system"):
items.append(
{
"type": "message",
"role": role,
"content": msg["content"],
}
)
elif role == "assistant":
text_content = msg.get("content")
if text_content and isinstance(text_content, str) and text_content.strip():
items.append(
{
"type": "message",
"role": "assistant",
"content": text_content,
}
)
tool_calls = msg.get("tool_calls")
if tool_calls:
for tc in tool_calls:
fn = tc.get("function", {})
items.append(
{
"type": "function_call",
"call_id": tc.get("id", "call_0"),
"name": fn.get("name", ""),
"arguments": fn.get("arguments", "{}"),
}
)
elif role == "tool":
items.append(
{
"type": "function_call_output",
"call_id": msg.get("tool_call_id", "call_0"),
"output": msg.get("content", ""),
}
)
return items
def _responses_build_request(
    model: str,
    messages: list[dict[str, Any]],
    tools: list[dict[str, Any]],
) -> tuple[str, dict[str, Any]]:
    """Build a deterministic request for /v1/responses."""
    payload: dict[str, Any] = {
        "model": model,
        "input": _responses_translate_input(messages),
        "tools": tools,
        "temperature": 0.0,
        "max_output_tokens": 4096,
    }
    return "/v1/responses", payload
def _responses_parse_response(data: dict[str, Any]) -> ParsedResponse:
    """Parse an OpenAI Responses API response into the common format.

    Surfaces only the first function_call item; text is gathered from all
    message items (list-of-blocks or plain-string content).
    """
    first_call: dict[str, str] | None = None
    texts: list[str] = []
    saw_call = False
    for item in data.get("output", []):
        kind = item.get("type")
        if kind == "function_call":
            saw_call = True
            if first_call is None:
                first_call = {
                    "id": item.get("call_id", "call_0"),
                    "name": item.get("name", ""),
                    "arguments": item.get("arguments", "{}"),
                }
        elif kind == "message":
            payload = item.get("content", [])
            if isinstance(payload, list):
                for block in payload:
                    if isinstance(block, dict):
                        text = block.get("text", "")
                        if text and text.strip():
                            texts.append(text)
            elif isinstance(payload, str) and payload.strip():
                texts.append(payload)
    if saw_call:
        finish = "tool_calls"
    else:
        status = data.get("status", "completed")
        finish = "stop" if status == "completed" else status
    return ParsedResponse(
        finish_reason=finish,
        has_tool_call=saw_call,
        tool_call=first_call,
        content="\n".join(texts) if texts else None,
    )
def _responses_build_followup(
    messages: list[dict[str, Any]],
    tools: list[dict[str, Any]],
    model: str,
    parsed: ParsedResponse,
    tool_result: str,
) -> tuple[str, dict[str, Any]]:
    """Build the multi-turn follow-up for the Responses API.

    Appends the replayed function_call plus its function_call_output to the
    translated conversation.
    """
    assert parsed.tool_call is not None
    call = parsed.tool_call
    items = _responses_translate_input(messages)
    items.extend(
        [
            {
                "type": "function_call",
                "call_id": call["id"],
                "name": call["name"],
                "arguments": call["arguments"],
            },
            {
                "type": "function_call_output",
                "call_id": call["id"],
                "output": tool_result,
            },
        ]
    )
    body: dict[str, Any] = {
        "model": model,
        "input": items,
        "tools": tools,
        "temperature": 0.0,
        "max_output_tokens": 4096,
    }
    return "/v1/responses", body
# Dispatch table keyed by API name. Each adapter exposes three callables with
# shared signatures: build_request(model, messages, tools) -> (path, body),
# parse_response(data) -> ParsedResponse, and
# build_followup(messages, tools, model, parsed, tool_result) -> (path, body).
ADAPTERS: dict[ApiName, dict[str, Any]] = {
    "openai": {
        "build_request": _openai_build_request,
        "parse_response": _openai_parse_response,
        "build_followup": _openai_build_followup,
    },
    "claude": {
        "build_request": _claude_build_request,
        "parse_response": _claude_parse_response,
        "build_followup": _claude_build_followup,
    },
    "responses": {
        "build_request": _responses_build_request,
        "parse_response": _responses_parse_response,
        "build_followup": _responses_build_followup,
    },
}
def run_scenario(
    client: httpx.Client,
    host: str,
    port: int,
    model: str,
    scenario: Scenario,
    api_name: ApiName,
    timeout: float,
    verbose: bool,
) -> list[ScenarioResult]:
    """Run a single scenario against one API adapter. Returns 1-2 results.

    Phase 1 sends the scenario conversation and validates the response
    (tool call expected or plain text expected). Phase 2 runs only when the
    scenario defines a tool_result AND phase 1 actually produced a tool call:
    it replays the call with the canned result and validates the final answer.
    An API error in either phase produces a failed result and stops early.
    """
    adapter = ADAPTERS[api_name]
    build_request = adapter["build_request"]
    parse_response = adapter["parse_response"]
    build_followup = adapter["build_followup"]
    results: list[ScenarioResult] = []
    # --- Phase 1: initial request ---
    path, body = build_request(model, scenario.messages, scenario.tools)
    if verbose:
        print(
            f" [{api_name}] request: {path} {json.dumps(body, indent=2)}",
            file=sys.stderr,
        )
    try:
        data, latency = call_api(client, host, port, path, body, timeout)
    except Exception as exc:
        # Transport/HTTP failure: record and bail; phase 2 is meaningless.
        results.append(
            ScenarioResult(
                name=scenario.name,
                api=api_name,
                phase="tool_call",
                passed=False,
                error=f"API error: {exc}",
            )
        )
        return results
    if verbose:
        print(
            f" [{api_name}] response: {json.dumps(data, indent=2)}", file=sys.stderr
        )
    parsed = parse_response(data)
    checks: dict[str, bool] = {}
    if scenario.expect_tool_call:
        checks["finish_reason_tool_calls"] = parsed.finish_reason == "tool_calls"
        checks["has_tool_call"] = parsed.has_tool_call
        args_err: str | None = None
        if parsed.has_tool_call and parsed.tool_call is not None:
            # Function-name check is vacuously true when no expectation is set.
            checks["correct_function"] = (
                scenario.expected_function is None
                or parsed.tool_call["name"] == scenario.expected_function
            )
            if scenario.required_arg_keys:
                ok, args_err = validate_args(
                    parsed.tool_call["arguments"], scenario.required_arg_keys
                )
                checks["valid_arguments"] = ok
            else:
                checks["valid_arguments"] = True
            if scenario.nested_array_key and scenario.required_item_keys:
                ok, nested_err = validate_nested_args(
                    parsed.tool_call["arguments"],
                    scenario.nested_array_key,
                    scenario.required_item_keys,
                )
                checks["valid_nested_structure"] = ok
                if not ok:
                    args_err = nested_err
        else:
            checks["correct_function"] = False
            checks["valid_arguments"] = False
            args_err = "No tool call returned"
        passed = all(checks.values())
        error = args_err if not passed else None
    else:
        # Scenario expects a plain-text answer, no tool call.
        checks["finish_reason_stop"] = parsed.finish_reason == "stop"
        checks["no_tool_call"] = not parsed.has_tool_call
        checks["has_content"] = (
            parsed.content is not None and len(parsed.content.strip()) > 0
        )
        passed = all(checks.values())
        error = (
            None
            if passed
            else (
                f"finish_reason={parsed.finish_reason}, "
                f"tool_call={'yes' if parsed.has_tool_call else 'no'}, "
                f"content={'yes' if parsed.content else 'no'}"
            )
        )
    results.append(
        ScenarioResult(
            name=scenario.name,
            api=api_name,
            phase="tool_call",
            passed=passed,
            checks=checks,
            error=error,
            latency_ms=latency,
        )
    )
    # --- Phase 2: multi-turn follow-up ---
    if (
        scenario.tool_result is not None
        and parsed.has_tool_call
        and parsed.tool_call is not None
    ):
        followup_path, followup_body = build_followup(
            scenario.messages,
            scenario.tools,
            model,
            parsed,
            scenario.tool_result,
        )
        if verbose:
            print(
                f" [{api_name}] follow_up request: {followup_path} {json.dumps(followup_body, indent=2)}",
                file=sys.stderr,
            )
        try:
            data2, latency2 = call_api(
                client, host, port, followup_path, followup_body, timeout
            )
        except Exception as exc:
            results.append(
                ScenarioResult(
                    name=scenario.name,
                    api=api_name,
                    phase="follow_up",
                    passed=False,
                    error=f"API error: {exc}",
                )
            )
            return results
        if verbose:
            print(
                f" [{api_name}] follow_up response: {json.dumps(data2, indent=2)}",
                file=sys.stderr,
            )
        parsed2 = parse_response(data2)
        # After receiving the tool result the model should answer in plain text.
        checks2: dict[str, bool] = {}
        checks2["finish_reason_stop"] = parsed2.finish_reason == "stop"
        checks2["no_tool_call"] = not parsed2.has_tool_call
        checks2["has_content"] = (
            parsed2.content is not None and len(parsed2.content.strip()) > 0
        )
        passed2 = all(checks2.values())
        error2: str | None = None
        if not passed2:
            error2 = (
                f"finish_reason={parsed2.finish_reason}, "
                f"tool_call={'yes' if parsed2.has_tool_call else 'no'}, "
                f"content={'yes' if parsed2.content else 'no'}"
            )
        results.append(
            ScenarioResult(
                name=scenario.name,
                api=api_name,
                phase="follow_up",
                passed=passed2,
                checks=checks2,
                error=error2,
                latency_ms=latency2,
            )
        )
    return results
def result_to_dict(result: ScenarioResult) -> dict[str, Any]:
    """Serialize a ScenarioResult into a JSON-friendly dict (latency rounded to 0.1ms)."""
    payload: dict[str, Any] = {
        field: getattr(result, field)
        for field in ("name", "api", "phase", "passed", "checks", "error")
    }
    payload["latency_ms"] = round(result.latency_ms, 1)
    return payload
# Sort priority for multi-node placements keyed by (sharding, transport);
# lower sorts first. Single-node placements slot in at priority 1, between
# tensor+jaccl (best) and the pipeline/ring variants.
_MULTI_NODE_PRIORITY: dict[tuple[str, str], int] = {
    ("tensor", "jaccl"): 0,
    ("pipeline", "jaccl"): 2,
    ("pipeline", "ring"): 3,
    ("tensor", "ring"): 4,
}
# Priority assigned to any single-node placement regardless of kind.
_SINGLE_NODE_PRIORITY = 1
def _placement_sort_key(p: dict[str, Any]) -> tuple[int, int]:
    """Sort key for placement previews: preferred (sharding, transport) first,
    then larger node counts first within the same priority."""
    is_tensor = "tensor" in p.get("sharding", "").lower()
    is_jaccl = "jaccl" in p.get("instance_meta", "").lower()
    kind = (
        "tensor" if is_tensor else "pipeline",
        "jaccl" if is_jaccl else "ring",
    )
    n_nodes = nodes_used_in_instance(p["instance"])
    if n_nodes == 1:
        return (_SINGLE_NODE_PRIORITY, -n_nodes)
    return (_MULTI_NODE_PRIORITY.get(kind, 99), -n_nodes)
def main() -> None:
    """CLI entry point: run every tool-calling scenario against each API adapter.

    Resolves the model, picks the best placement, ensures the model is
    downloaded, spins up an instance, runs all (scenario, api) combinations
    ``--repeat`` times, prints a summary, writes JSON results, and exits with
    status 1 when any run failed.

    Fix: the summary percentage previously divided by ``total`` unguarded and
    raised ZeroDivisionError when no results were collected; it now falls back
    to 0% (matching the existing guard on ``avg_latency``).
    """
    parser = argparse.ArgumentParser(
        description="Multi-API tool-calling eval for exo",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\
Examples:
  %(prog)s --model mlx-community/Qwen3-30B-A3B-4bit
  %(prog)s --model my-model --api openai --repeat 3
  %(prog)s --model my-model --api all --scenarios weather_simple calculator_multi_turn
  %(prog)s --model my-model --stdout
""",
    )
    add_common_instance_args(parser)
    parser.add_argument(
        "--api",
        choices=["openai", "claude", "responses", "all"],
        default="all",
        help="Which API adapter(s) to test (default: all)",
    )
    parser.add_argument(
        "--repeat",
        type=int,
        default=1,
        help="Repeat each scenario N times (default: 1)",
    )
    parser.add_argument(
        "--scenarios",
        nargs="*",
        help="Run only these scenarios (by name)",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Print full API responses to stderr",
    )
    parser.add_argument(
        "--json-out",
        default="bench/eval_results.json",
        help="Write JSON results to file (default: bench/eval_results.json)",
    )
    parser.add_argument(
        "--stdout",
        action="store_true",
        help="Write JSON results to stdout instead of file",
    )
    args = parser.parse_args()
    all_scenarios = load_scenarios(SCENARIOS_PATH)
    if args.scenarios:
        scenarios = [s for s in all_scenarios if s.name in args.scenarios]
        if not scenarios:
            print(
                f"No matching scenarios. Available: {[s.name for s in all_scenarios]}",
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        scenarios = all_scenarios
    api_names: list[ApiName] = (
        ["openai", "claude", "responses"] if args.api == "all" else [args.api]
    )
    # When JSON results go to stdout, route the human-readable log to stderr.
    log = sys.stderr if args.stdout else sys.stdout
    exo = ExoClient(args.host, args.port, timeout_s=args.timeout)
    _short_id, full_model_id = resolve_model_short_id(exo, args.model)
    selected = settle_and_fetch_placements(
        exo, full_model_id, args, settle_timeout=args.settle_timeout
    )
    if not selected:
        print("No valid placements matched your filters.", file=sys.stderr)
        sys.exit(1)
    # Best placement first (tensor+jaccl preferred, then most nodes).
    selected.sort(key=_placement_sort_key)
    preview = selected[0]
    settle_deadline = (
        time.monotonic() + args.settle_timeout if args.settle_timeout > 0 else None
    )
    print("Planning phase: checking downloads...", file=log)
    run_planning_phase(
        exo,
        full_model_id,
        preview,
        args.danger_delete_downloads,
        args.timeout,
        settle_deadline,
    )
    instance = preview["instance"]
    instance_id = instance_id_from_instance(instance)
    sharding = str(preview["sharding"])
    instance_meta = str(preview["instance_meta"])
    n_nodes = nodes_used_in_instance(instance)
    print(f"Model: {full_model_id}", file=log)
    print(f"Placement: {sharding} / {instance_meta} / {n_nodes} nodes", file=log)
    print(f"Endpoint: http://{args.host}:{args.port}", file=log)
    print(f"APIs: {', '.join(api_names)}", file=log)
    total_runs = len(scenarios) * args.repeat * len(api_names)
    print(
        f"Scenarios: {len(scenarios)} x {args.repeat} repeats x {len(api_names)} APIs = {total_runs} runs",
        file=log,
    )
    print("=" * 72, file=log)
    exo.request_json("POST", "/instance", body={"instance": instance})
    try:
        wait_for_instance_ready(exo, instance_id)
    except (RuntimeError, TimeoutError) as e:
        print(f"Failed to initialize placement: {e}", file=sys.stderr)
        # Best-effort teardown; 404 etc. is fine at this point.
        with contextlib.suppress(ExoHttpError):
            exo.request_json("DELETE", f"/instance/{instance_id}")
        sys.exit(1)
    time.sleep(1)
    all_results: list[ScenarioResult] = []
    try:
        with httpx.Client() as http_client:
            for run_idx in range(args.repeat):
                if args.repeat > 1:
                    print(f"\n--- Run {run_idx + 1}/{args.repeat} ---", file=log)
                for scenario in scenarios:
                    for api_name in api_names:
                        print(
                            f"\n [{api_name:>9}] {scenario.name}: {scenario.description}",
                            file=log,
                        )
                        scenario_results = run_scenario(
                            http_client,
                            args.host,
                            args.port,
                            full_model_id,
                            scenario,
                            api_name,
                            args.timeout,
                            args.verbose,
                        )
                        all_results.extend(scenario_results)
                        for r in scenario_results:
                            status = "PASS" if r.passed else "FAIL"
                            print(
                                f" [{r.phase:>10}] {status} ({r.latency_ms:.0f}ms)",
                                file=log,
                            )
                            for check_name, check_ok in r.checks.items():
                                mark = "+" if check_ok else "-"
                                print(f" {mark} {check_name}", file=log)
                            if r.error:
                                print(f" ! {r.error}", file=log)
    finally:
        # Always tear the instance down, even on failure or interrupt.
        try:
            exo.request_json("DELETE", f"/instance/{instance_id}")
        except ExoHttpError as e:
            if e.status != 404:
                raise
        wait_for_instance_gone(exo, instance_id)
    # --- Summary ---
    print(f"\n{'=' * 72}", file=log)
    total = len(all_results)
    passed = sum(1 for r in all_results if r.passed)
    tool_call_results = [r for r in all_results if r.phase == "tool_call"]
    follow_up_results = [r for r in all_results if r.phase == "follow_up"]
    tc_passed = sum(1 for r in tool_call_results if r.passed)
    fu_passed = sum(1 for r in follow_up_results if r.passed)
    avg_latency = sum(r.latency_ms for r in all_results) / total if total else 0
    # Guard the percentage the same way avg_latency is guarded (total may be 0).
    pass_pct = 100 * passed / total if total else 0.0
    print(f"Total: {passed}/{total} passed ({pass_pct:.0f}%)", file=log)
    print(f"Tool call: {tc_passed}/{len(tool_call_results)} passed", file=log)
    if follow_up_results:
        print(f"Follow-up: {fu_passed}/{len(follow_up_results)} passed", file=log)
    print(f"Avg latency: {avg_latency:.0f}ms", file=log)
    for api_name in api_names:
        api_results = [r for r in all_results if r.api == api_name]
        api_passed = sum(1 for r in api_results if r.passed)
        print(f" {api_name:>9}: {api_passed}/{len(api_results)} passed", file=log)
    if passed < total:
        print("\nFailed:", file=log)
        for r in all_results:
            if not r.passed:
                print(f" - {r.name} [{r.api}/{r.phase}]: {r.error}", file=log)
    json_results = [result_to_dict(r) for r in all_results]
    if args.stdout:
        print(json.dumps(json_results, indent=2))
    else:
        json_path = args.json_out
        parent = os.path.dirname(json_path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(json_path, "w") as f:
            json.dump(json_results, f, indent=2)
            f.write("\n")
        print(f"\nJSON results written to {json_path}", file=log)
    sys.exit(0 if passed == total else 1)
# Script entry point: main() exits non-zero when any scenario run failed.
if __name__ == "__main__":
    main()
| {
"repo_id": "exo-explore/exo",
"file_path": "bench/eval_tool_calls.py",
"license": "Apache License 2.0",
"lines": 966,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:bench/harness.py | # type: ignore
from __future__ import annotations
import argparse
import http.client
import json
import os
import time
from typing import Any
from urllib.parse import urlencode
from loguru import logger
# Exponential-backoff parameters used while waiting for the cluster to settle
# (placement discovery, disk-info availability).
_SETTLE_INITIAL_BACKOFF_S = 1.0
_SETTLE_MAX_BACKOFF_S = 60.0
_SETTLE_BACKOFF_MULTIPLIER = 2.0
class ExoHttpError(RuntimeError):
    """HTTP error (status >= 400) from the exo API, keeping the status code."""

    def __init__(self, status: int, reason: str, body_preview: str):
        message = f"HTTP {status} {reason}: {body_preview}"
        super().__init__(message)
        self.status = status
class ExoClient:
    """Minimal JSON-over-HTTP client for the exo control API."""

    def __init__(self, host: str, port: int, timeout_s: float = 7200.0):
        self.host = host
        self.port = port
        self.timeout_s = timeout_s

    def request_json(
        self,
        method: str,
        path: str,
        params: dict[str, Any] | None = None,
        body: dict[str, Any] | None = None,
        headers: dict[str, str] | None = None,
    ) -> Any:
        """Send one HTTP request and decode the JSON response body.

        Returns None for an empty response body. Raises ExoHttpError (with a
        300-char body preview) for any status >= 400.
        """
        if not path.startswith("/"):
            path = "/" + path
        if params:
            path = path + "?" + urlencode(params)
        # One fresh connection per request keeps the client stateless.
        conn = http.client.HTTPConnection(self.host, self.port, timeout=self.timeout_s)
        try:
            payload: bytes | None = None
            hdrs: dict[str, str] = {"Accept": "application/json"}
            if body is not None:
                payload = json.dumps(body).encode("utf-8")
                hdrs["Content-Type"] = "application/json"
            if headers:
                hdrs.update(headers)
            conn.request(method.upper(), path, body=payload, headers=hdrs)
            resp = conn.getresponse()
            raw = resp.read()
            text = raw.decode("utf-8", errors="replace") if raw else ""
            if resp.status >= 400:
                raise ExoHttpError(resp.status, resp.reason, text[:300])
            if not text:
                return None
            return json.loads(text)
        finally:
            conn.close()

    def post_bench_chat_completions(self, payload: dict[str, Any]) -> dict[str, Any]:
        """POST a chat-completions request to the bench endpoint."""
        return self.request_json("POST", "/bench/chat/completions", body=payload)
def unwrap_instance(instance: dict[str, Any]) -> dict[str, Any]:
    """Unwrap a single-key tagged-union dict and return its dict payload.

    Raises KeyError when there is not exactly one key, TypeError when the
    payload is not a dict.
    """
    keys = list(instance)
    if len(keys) != 1:
        raise KeyError(f"Expected 1 key, got keys={keys}")
    tag = keys[0]
    payload = instance[tag]
    if not isinstance(payload, dict):
        raise TypeError(f"payload for {tag} must be dict, got {type(payload)}")
    return payload
def instance_id_from_instance(instance: dict[str, Any]) -> str:
    """Return the instance id stored in the wrapped instance payload."""
    return str(unwrap_instance(instance)["instanceId"])
def nodes_used_in_instance(instance: dict[str, Any]) -> int:
    """Count the distinct nodes participating in this instance."""
    assignments = unwrap_instance(instance)["shardAssignments"]
    return len(assignments["nodeToRunner"])
def runner_ids_from_instance(instance: dict[str, Any]) -> list[str]:
    """List the runner ids that hold shards of this instance."""
    shard_map = unwrap_instance(instance)["shardAssignments"]["runnerToShard"]
    return [*shard_map]
def runner_ready(runner: dict[str, Any]) -> bool:
    """True when the runner state dict carries the RunnerReady tag."""
    return "RunnerReady" in runner
def runner_failed(runner: dict[str, Any]) -> bool:
    """True when the runner state dict carries the RunnerFailed tag."""
    return "RunnerFailed" in runner
def get_runner_failed_message(runner: dict[str, Any]) -> str | None:
if "RunnerFailed" in runner:
return runner["RunnerFailed"].get("errorMessage")
return None
def wait_for_instance_ready(
    client: ExoClient, instance_id: str, timeout: float = 24000.0
) -> None:
    """Poll /state until every runner of the instance reports RunnerReady.

    Raises RuntimeError when a runner fails or the instance disappears after
    having been observed, and TimeoutError when readiness is not reached
    within *timeout* seconds.
    """
    start_time = time.time()
    instance_existed = False
    while time.time() - start_time < timeout:
        state = client.request_json("GET", "/state")
        instances = state.get("instances", {})
        if instance_id not in instances:
            if instance_existed:
                # Instance was deleted after being created - likely due to runner failure
                raise RuntimeError(
                    f"Instance {instance_id} was deleted (runner may have failed)"
                )
            # Not created yet: keep polling.
            time.sleep(0.1)
            continue
        instance_existed = True
        instance = instances[instance_id]
        runner_ids = runner_ids_from_instance(instance)
        runners = state.get("runners", {})
        # Check for failed runners first
        for rid in runner_ids:
            runner = runners.get(rid, {})
            if runner_failed(runner):
                error_msg = get_runner_failed_message(runner) or "Unknown error"
                raise RuntimeError(f"Runner {rid} failed: {error_msg}")
        if all(runner_ready(runners.get(rid, {})) for rid in runner_ids):
            return
        time.sleep(0.1)
    raise TimeoutError(f"Instance {instance_id} did not become ready within {timeout=}")
def wait_for_instance_gone(
    client: ExoClient, instance_id: str, timeout: float = 3.0
) -> None:
    """Poll until GET /instance/{id} returns 404, i.e. deletion completed.

    Raises TimeoutError when the instance is still present after *timeout*
    seconds; any non-404 HTTP error is re-raised immediately.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            client.request_json("GET", f"/instance/{instance_id}")
            time.sleep(0.4)
        except ExoHttpError as e:
            if e.status == 404:
                # 404 means the instance is fully gone.
                return
            raise
    raise TimeoutError(f"Instance {instance_id} did not get deleted within {timeout=}")
def resolve_model_short_id(client: ExoClient, model_arg: str) -> tuple[str, str]:
    """Resolve a user-supplied model name to (short_id, full_hf_id).

    Matches the short name case-insensitively first, then the exact
    hugging-face id. Raises ValueError when nothing matches.
    """
    entries = (client.request_json("GET", "/models") or {}).get("data") or []
    wanted = model_arg.lower()
    for entry in entries:
        if (entry.get("name") or "").lower() == wanted:
            name = str(entry["name"])
            return name, str(entry.get("hugging_face_id") or entry["name"])
    for entry in entries:
        if entry.get("hugging_face_id") == model_arg:
            return str(entry["name"]), str(entry["hugging_face_id"])
    raise ValueError(f"Model not found in /models: {model_arg}")
def placement_filter(instance_meta: str, wanted: str) -> bool:
    """True when the placement's transport description matches the filter.

    "both" accepts either ring or jaccl; otherwise the wanted substring must
    appear in the (lowercased) metadata.
    """
    meta = (instance_meta or "").lower()
    if wanted == "both":
        return "ring" in meta or "jaccl" in meta
    return wanted in meta
def sharding_filter(sharding: str, wanted: str) -> bool:
    """True when the placement's sharding description matches the filter.

    "both" accepts pipeline or tensor; otherwise the wanted substring must
    appear in the (lowercased) sharding string.
    """
    desc = (sharding or "").lower()
    if wanted == "both":
        return "pipeline" in desc or "tensor" in desc
    return wanted in desc
def fetch_and_filter_placements(
    client: ExoClient, full_model_id: str, args: argparse.Namespace
) -> list[dict[str, Any]]:
    """Fetch placement previews for a model and keep those matching CLI filters.

    Applies --instance-meta / --sharding / --min-nodes / --max-nodes filters,
    the skip flags, and drops known-pointless single-node combinations.
    """
    previews_resp = client.request_json(
        "GET", "/instance/previews", params={"model_id": full_model_id}
    )
    previews = previews_resp.get("previews") or []
    selected: list[dict[str, Any]] = []
    for p in previews:
        # Previews carrying an error cannot be instantiated.
        if p.get("error") is not None:
            continue
        if not placement_filter(str(p.get("instance_meta", "")), args.instance_meta):
            continue
        if not sharding_filter(str(p.get("sharding", "")), args.sharding):
            continue
        instance = p.get("instance")
        if not isinstance(instance, dict):
            continue
        n = nodes_used_in_instance(instance)
        # Skip tensor ring single node as it is pointless when pipeline ring
        # (only when the corresponding axis is unconstrained, i.e. "both").
        if n == 1 and (
            (args.sharding == "both" and "tensor" in p.get("sharding", "").lower())
            or (
                args.instance_meta == "both"
                and "jaccl" in p.get("instance_meta", "").lower()
            )
        ):
            continue
        # --skip-pipeline-jaccl: drop pipeline+jaccl combos when both axes are "both".
        if (
            args.skip_pipeline_jaccl
            and (
                args.instance_meta == "both"
                and "jaccl" in p.get("instance_meta", "").lower()
            )
            and (
                args.sharding == "both" and "pipeline" in p.get("sharding", "").lower()
            )
        ):
            continue
        # --skip-tensor-ring: drop tensor+ring combos when both axes are "both".
        if (
            args.skip_tensor_ring
            and (
                args.instance_meta == "both"
                and "ring" in p.get("instance_meta", "").lower()
            )
            and (args.sharding == "both" and "tensor" in p.get("sharding", "").lower())
        ):
            continue
        if args.min_nodes <= n <= args.max_nodes:
            selected.append(p)
    return selected
def settle_and_fetch_placements(
    client: ExoClient,
    full_model_id: str,
    args: argparse.Namespace,
    settle_timeout: float = 0,
) -> list[dict[str, Any]]:
    """Fetch placements, retrying with exponential backoff while the cluster settles.

    With settle_timeout == 0 a single fetch is attempted; otherwise the fetch
    is retried until placements appear or the deadline passes.
    """
    placements = fetch_and_filter_placements(client, full_model_id, args)
    if placements or settle_timeout <= 0:
        return placements
    deadline = time.monotonic() + settle_timeout
    delay = _SETTLE_INITIAL_BACKOFF_S
    while not placements:
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            break
        logger.warning(
            f"No valid placements yet (cluster may still be settling). "
            f"Retrying in {delay:.1f}s ({remaining:.0f}s remaining)..."
        )
        time.sleep(min(delay, remaining))
        delay = min(delay * _SETTLE_BACKOFF_MULTIPLIER, _SETTLE_MAX_BACKOFF_S)
        placements = fetch_and_filter_placements(client, full_model_id, args)
    return placements
def run_planning_phase(
    client: ExoClient,
    full_model_id: str,
    preview: dict[str, Any],
    danger_delete: bool,
    timeout: float,
    settle_deadline: float | None,
) -> float | None:
    """Check disk space and ensure model is downloaded before benchmarking.

    Returns the wall-clock download duration in seconds if a fresh download
    was needed, or None if the model was already cached on all nodes.

    Raises RuntimeError when a node lacks space (and *danger_delete* is off or
    freeing failed) or a download fails, and TimeoutError when downloads do
    not finish within *timeout* seconds.
    """
    # Get model size from /models
    models = client.request_json("GET", "/models") or {}
    model_bytes = 0
    for m in models.get("data", []):
        if m.get("hugging_face_id") == full_model_id:
            model_bytes = m.get("storage_size_megabytes", 0) * 1024 * 1024
            break
    if not model_bytes:
        logger.warning(
            f"Could not determine size for {full_model_id}, skipping disk check"
        )
        return None
    # Get nodes from preview
    inner = unwrap_instance(preview["instance"])
    node_ids = list(inner["shardAssignments"]["nodeToRunner"].keys())
    runner_to_shard = inner["shardAssignments"]["runnerToShard"]
    state = client.request_json("GET", "/state")
    downloads = state.get("downloads", {})
    node_disk = state.get("nodeDisk", {})
    needs_download = False
    for node_id in node_ids:
        node_downloads = downloads.get(node_id, [])
        # Check if model already downloaded on this node
        already_downloaded = any(
            "DownloadCompleted" in p
            and unwrap_instance(p["DownloadCompleted"]["shardMetadata"])["modelCard"][
                "modelId"
            ]
            == full_model_id
            for p in node_downloads
        )
        if already_downloaded:
            continue
        needs_download = True
        # Wait for disk info if settle_deadline is set
        disk_info = node_disk.get(node_id, {})
        backoff = _SETTLE_INITIAL_BACKOFF_S
        while not disk_info and settle_deadline and time.monotonic() < settle_deadline:
            remaining = settle_deadline - time.monotonic()
            logger.info(
                f"Waiting for disk info on {node_id} ({remaining:.0f}s remaining)..."
            )
            time.sleep(min(backoff, remaining))
            backoff = min(backoff * _SETTLE_BACKOFF_MULTIPLIER, _SETTLE_MAX_BACKOFF_S)
            state = client.request_json("GET", "/state")
            node_disk = state.get("nodeDisk", {})
            disk_info = node_disk.get(node_id, {})
        if not disk_info:
            logger.warning(f"No disk info for {node_id}, skipping space check")
            continue
        avail = disk_info.get("available", {}).get("inBytes", 0)
        if avail >= model_bytes:
            continue
        if not danger_delete:
            raise RuntimeError(
                f"Insufficient disk on {node_id}: need {model_bytes // (1024**3)}GB, "
                f"have {avail // (1024**3)}GB. Use --danger-delete-downloads to free space."
            )
        # Delete from smallest to largest (skip read-only models from EXO_MODELS_PATH)
        completed = [
            (
                unwrap_instance(p["DownloadCompleted"]["shardMetadata"])["modelCard"][
                    "modelId"
                ],
                p["DownloadCompleted"]["total"]["inBytes"],
            )
            for p in node_downloads
            if "DownloadCompleted" in p
            and not p["DownloadCompleted"].get("readOnly", False)
        ]
        for del_model, size in sorted(completed, key=lambda x: x[1]):
            logger.info(f"Deleting {del_model} from {node_id} ({size // (1024**2)}MB)")
            client.request_json("DELETE", f"/download/{node_id}/{del_model}")
            avail += size
            # Stop deleting as soon as there is room for the benchmark model.
            if avail >= model_bytes:
                break
        if avail < model_bytes:
            raise RuntimeError(f"Could not free enough space on {node_id}")
    # Start downloads (idempotent)
    download_t0 = time.perf_counter() if needs_download else None
    for node_id in node_ids:
        runner_id = inner["shardAssignments"]["nodeToRunner"][node_id]
        shard = runner_to_shard[runner_id]
        client.request_json(
            "POST",
            "/download/start",
            body={
                "targetNodeId": node_id,
                "shardMetadata": shard,
            },
        )
        logger.info(f"Started download on {node_id}")
    # Wait for downloads
    start = time.time()
    while time.time() - start < timeout:
        state = client.request_json("GET", "/state")
        downloads = state.get("downloads", {})
        all_done = True
        for node_id in node_ids:
            done = any(
                "DownloadCompleted" in p
                and unwrap_instance(p["DownloadCompleted"]["shardMetadata"])[
                    "modelCard"
                ]["modelId"]
                == full_model_id
                for p in downloads.get(node_id, [])
            )
            failed = [
                p["DownloadFailed"]["errorMessage"]
                for p in downloads.get(node_id, [])
                if "DownloadFailed" in p
                and unwrap_instance(p["DownloadFailed"]["shardMetadata"])["modelCard"][
                    "modelId"
                ]
                == full_model_id
            ]
            if failed:
                raise RuntimeError(f"Download failed on {node_id}: {failed[0]}")
            if not done:
                all_done = False
        if all_done:
            # Only report a duration when a download was actually kicked off.
            if download_t0 is not None:
                return time.perf_counter() - download_t0
            return None
        time.sleep(1)
    raise TimeoutError("Downloads did not complete in time")
def add_common_instance_args(ap: argparse.ArgumentParser) -> None:
    """Register the CLI flags shared by all bench/eval entry points."""
    add = ap.add_argument
    add("--host", default=os.environ.get("EXO_HOST", "localhost"))
    add("--port", type=int, default=int(os.environ.get("EXO_PORT", "52415")))
    add("--model", required=True, help="Model short id or huggingface id")
    add(
        "--max-nodes",
        type=int,
        default=4,
        help="Only consider placements using <= this many nodes.",
    )
    add(
        "--min-nodes",
        type=int,
        default=1,
        help="Only consider placements using >= this many nodes.",
    )
    add("--instance-meta", choices=["ring", "jaccl", "both"], default="both")
    add("--sharding", choices=["pipeline", "tensor", "both"], default="both")
    add(
        "--skip-pipeline-jaccl",
        action="store_true",
        help="Skip pipeline+jaccl placements, as it's often pointless.",
    )
    add(
        "--skip-tensor-ring",
        action="store_true",
        help="Skip tensor+ring placements, as it's so slow.",
    )
    add("--timeout", type=float, default=7200.0, help="HTTP timeout (seconds).")
    add(
        "--settle-timeout",
        type=float,
        default=0,
        help="Max seconds to wait for the cluster to produce valid placements (0 = try once).",
    )
    add(
        "--danger-delete-downloads",
        action="store_true",
        help="Delete existing models from smallest to largest to make room for benchmark model.",
    )
| {
"repo_id": "exo-explore/exo",
"file_path": "bench/harness.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_prefix_cache_architectures.py | import copy
import gc
import importlib
import json
import shutil
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import Any, cast
import mlx.core as mx
import mlx.nn as nn
import pytest
from mlx.utils import tree_flatten, tree_unflatten
from mlx_lm.tokenizer_utils import TokenizerWrapper
from exo.shared.types.common import ModelId
from exo.shared.types.mlx import Model
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.worker.engines.mlx.cache import KVPrefixCache
from exo.worker.engines.mlx.generator.generate import mlx_generate
from exo.worker.engines.mlx.utils_mlx import (
apply_chat_template,
load_tokenizer_for_model_id,
)
HF_CACHE = Path.home() / ".cache" / "huggingface" / "hub"
# ── Config reduction ──────────────────────────────────────────────────────── #
_REDUCE = {
"num_hidden_layers": 4,
"hidden_size": 256,
"num_attention_heads": 4,
"num_key_value_heads": 4,
"intermediate_size": 512,
"moe_intermediate_size": 128,
"num_experts": 4,
"num_experts_per_tok": 2,
"n_routed_experts": 4,
"num_local_experts": 4,
"num_nextn_predict_layers": 0,
"first_k_dense_replace": 0,
"linear_num_key_heads": 2,
"linear_num_value_heads": 2,
"num_attention_groups": 4,
}
def _reduce_dict(cfg: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *cfg* with every known size knob shrunk to its tiny test value."""
    shrunk = dict(cfg)
    shrunk.update({key: val for key, val in _REDUCE.items() if key in shrunk})
    return shrunk
def _reduce_config(cfg: dict[str, Any]) -> dict[str, Any]:
    """Shrink a full model config to a tiny, self-consistent test variant.

    Reduces top-level and text_config size knobs, truncates per-layer lists
    to the reduced layer count, and fixes up nested attention/MoE settings so
    they agree with the reduced head/layer counts.
    """
    result = _reduce_dict(cfg)
    n_layers = cast(int, result.get("num_hidden_layers", 4))
    if "text_config" in result and isinstance(result["text_config"], dict):
        result["text_config"] = _reduce_dict(
            cast(dict[str, Any], result["text_config"])
        )
        tc: dict[str, Any] = result["text_config"]
        # Next-token-prediction layers are disabled for the reduced model.
        if "num_nextn_predict_layers" in tc:
            tc["num_nextn_predict_layers"] = 0
    if "layer_types" in result and isinstance(result["layer_types"], list):
        # Keep only as many per-layer type entries as the reduced layer count.
        result["layer_types"] = result["layer_types"][:n_layers]
    if "attention_other_setting" in result and isinstance(
        result["attention_other_setting"], dict
    ):
        aos: dict[str, Any] = dict(
            cast(dict[str, Any], result["attention_other_setting"])
        )
        # Mirror the reduced head/group counts into the nested attention settings.
        if "num_attention_heads" in aos:
            aos["num_attention_heads"] = result.get("num_attention_heads", 4)
        if "num_attention_groups" in aos:
            aos["num_attention_groups"] = result.get(
                "num_attention_groups", cast(int, aos["num_attention_groups"])
            )
        result["attention_other_setting"] = aos
    if "moe_layers_enum" in result and isinstance(result["moe_layers_enum"], str):
        # Drop MoE layer indices that no longer exist after layer truncation.
        indices = [int(x) for x in result["moe_layers_enum"].split(",") if x.strip()]
        valid = [i for i in indices if i < n_layers]
        result["moe_layers_enum"] = ",".join(str(i) for i in valid) if valid else ""
    return result
# ── Helpers ───────────────────────────────────────────────────────────────── #
def _find_snapshot(hub_name: str) -> Path | None:
    """Locate the first snapshot directory for an mlx-community model in the HF cache."""
    snaps = HF_CACHE / f"models--mlx-community--{hub_name}" / "snapshots"
    if not snaps.exists():
        return None
    entries = sorted(snaps.iterdir())
    return entries[0] if entries else None
def _copy_tokenizer(src: Path, dst: Path) -> None:
for f in src.iterdir():
name = f.name
if (
"tokeniz" in name.lower()
or "tiktoken" in name.lower()
or name.startswith("vocab")
or name.endswith(".jinja")
or "tool_declaration" in name
) and f.is_file():
shutil.copy2(f, dst / name)
def _build_model(module_name: str, cfg: dict[str, Any]) -> Model:
    """Instantiate an mlx_lm architecture from *cfg* with random fp16 weights.

    No checkpoint is loaded: weights are sampled from a normal distribution,
    so tests check numeric consistency rather than output quality.
    """
    mod = importlib.import_module(f"mlx_lm.models.{module_name}")
    args = mod.ModelArgs.from_dict(cfg)  # pyright: ignore[reportAny]
    model: nn.Module = mod.Model(args)  # pyright: ignore[reportAny]
    flat = cast(list[tuple[str, mx.array]], tree_flatten(model.parameters()))
    random_weights = [
        (k, mx.random.normal(shape=v.shape, dtype=mx.float16)) for k, v in flat
    ]
    model.update(cast(dict[str, Any], tree_unflatten(random_weights)))
    # Force the lazy arrays to materialize before returning.
    mx.eval(model.parameters())
    return cast(Model, model)
def _collect_tokens(
    model: Model,
    tokenizer: TokenizerWrapper,
    task: TextGenerationTaskParams,
    prompt: str,
    kv_prefix_cache: KVPrefixCache | None,
) -> list[int]:
    """Run mlx_generate and return the generated token ids.

    Stops collecting at the first response carrying a finish_reason.
    """
    tokens: list[int] = []
    for resp in mlx_generate(
        model=model,
        tokenizer=tokenizer,
        task=task,
        prompt=prompt,
        kv_prefix_cache=kv_prefix_cache,
        group=None,
    ):
        tokens.append(resp.token)
        if resp.finish_reason is not None:
            break
    return tokens
# ── Architecture definitions ──────────────────────────────────────────────── #
@dataclass(frozen=True)
class ArchSpec:
    """One model architecture to exercise in the prefix-cache tests."""

    name: str  # pytest id for the parametrized case
    hub_name: str  # repo name under the mlx-community org in the HF cache
    module: str  # mlx_lm.models submodule implementing the architecture
    tokenizer_hub: str | None = None  # fallback for models without bundled tokenizer
ARCHITECTURES: list[ArchSpec] = [
ArchSpec("llama", "Llama-3.2-1B-Instruct-4bit", "llama"),
ArchSpec("glm_moe_dsa", "GLM-5-MXFP4-Q8", "glm_moe_dsa"),
ArchSpec(
"glm4_moe", "GLM-4.5-Air-8bit", "glm4_moe", tokenizer_hub="GLM-4.7-8bit-gs32"
),
ArchSpec(
"glm4_moe_lite",
"GLM-4.7-Flash-8bit",
"glm4_moe_lite",
tokenizer_hub="GLM-4.7-8bit-gs32",
),
ArchSpec("glm4_moe_47", "GLM-4.7-8bit-gs32", "glm4_moe"),
ArchSpec("qwen3", "Qwen3-4B-Instruct-2507-4bit", "qwen3"),
ArchSpec("qwen3_moe", "Qwen3-30B-A3B-4bit", "qwen3_moe"),
ArchSpec("qwen3_next", "Qwen3-Next-80B-A3B-Thinking-4bit", "qwen3_next"),
ArchSpec("minimax", "MiniMax-M2.1-3bit", "minimax"),
ArchSpec("gpt_oss", "gpt-oss-20b-MXFP4-Q8", "gpt_oss"),
ArchSpec("step3p5", "Step-3.5-Flash-4bit", "step3p5"),
ArchSpec("kimi_k25", "Kimi-K2.5", "kimi_k25"),
]
def _arch_available(spec: ArchSpec) -> bool:
    """True when the model snapshot (and any fallback tokenizer snapshot) is cached locally."""
    if _find_snapshot(spec.hub_name) is None:
        return False
    if spec.tokenizer_hub is None:
        return True
    return _find_snapshot(spec.tokenizer_hub) is not None
def _make_task() -> TextGenerationTaskParams:
    """Build a deterministic (temperature 0) tool-calling task shared by every test."""
    return TextGenerationTaskParams(
        model=ModelId("test"),
        input=[
            InputMessage(
                role="user",
                content="Use the calculator to compute 1847 * 263 + 5921",
            )
        ],
        max_output_tokens=20,
        temperature=0.0,
        # A single calculator tool so the prompt exercises the tool-call template.
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "calculate",
                    "description": "Evaluate a mathematical expression",
                    "parameters": {
                        "type": "object",
                        "properties": {"expression": {"type": "string"}},
                        "required": ["expression"],
                    },
                },
            }
        ],
    )
# ── Test class ────────────────────────────────────────────────────────────── #
@pytest.mark.slow
class TestPrefixCacheArchitectures:
    """Verify prefix cache produces identical output to fresh generation for every architecture."""

    @pytest.fixture(autouse=True)
    def _cleanup(self):
        """Clear the MLX buffer cache and collect garbage after each test."""
        yield
        mx.clear_cache()
        gc.collect()

    @pytest.mark.parametrize(
        "spec",
        ARCHITECTURES,
        ids=[a.name for a in ARCHITECTURES],
    )
    def test_prefix_cache_exact_hit(self, spec: ArchSpec) -> None:
        """Token stream must match across no-cache, cache-populate, and cache-hit runs."""
        if not _arch_available(spec):
            pytest.skip(f"Model {spec.hub_name} not cached locally")
        snapshot = _find_snapshot(spec.hub_name)
        assert snapshot is not None
        tmpdir = Path(tempfile.mkdtemp(prefix=f"exo_test_{spec.name}_"))
        try:
            # Build reduced config
            with open(snapshot / "config.json") as f:
                cfg = cast(dict[str, Any], json.load(f))
            reduced = _reduce_config(copy.deepcopy(cfg))
            (tmpdir / "config.json").write_text(json.dumps(reduced))
            # Copy tokenizer
            tok_src = snapshot
            if spec.tokenizer_hub is not None:
                alt = _find_snapshot(spec.tokenizer_hub)
                if alt is not None:
                    tok_src = alt
            _copy_tokenizer(tok_src, tmpdir)
            # Load tokenizer and model
            model_id = ModelId(f"mlx-community/{spec.hub_name}")
            tokenizer = load_tokenizer_for_model_id(model_id, tmpdir)
            mx.random.seed(0)
            model = _build_model(spec.module, reduced)
            task = _make_task()
            prompt = apply_chat_template(tokenizer=tokenizer, task_params=task)
            # Run 1: fresh (no prefix cache at all)
            mx.random.seed(42)
            fresh = _collect_tokens(model, tokenizer, task, prompt, None)
            assert len(fresh) > 0, "Fresh generation produced no tokens"
            # Run 2: populate cache (first use of an empty KVPrefixCache)
            kv = KVPrefixCache(None)
            mx.random.seed(42)
            populate = _collect_tokens(model, tokenizer, task, prompt, kv)
            # Run 3: exact cache hit (same prompt against the warm cache)
            mx.random.seed(42)
            cached = _collect_tokens(model, tokenizer, task, prompt, kv)
            assert fresh == populate, (
                f"Fresh vs populate mismatch: {fresh[:5]} vs {populate[:5]}"
            )
            assert fresh == cached, (
                f"Fresh vs cached mismatch: {fresh[:5]} vs {cached[:5]}"
            )
        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_prefix_cache_architectures.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_parse_gpt_oss.py | from collections.abc import Generator
from exo.shared.types.worker.runner_response import (
GenerationResponse,
ToolCallResponse,
)
from exo.worker.runner.llm_inference.runner import parse_gpt_oss
# Token IDs from mlx-community/gpt-oss-20b-MXFP4-Q8 tokenizer.
# These are stable since they come from the model's vocabulary.
_CHANNEL = 200005 # <|channel|>
_START = 200006 # <|start|>
_MESSAGE = 200008 # <|message|>
_CALL = 200012 # <|call|>
_END = 200007 # <|end|>
_ASSISTANT = 173781 # "assistant"
# fmt: off
# " to=functions.get_current_weather<|channel|>commentary json<|message|>{\"location\": \"Tokyo\"}<|call|>"
FORMAT_A_TOKENS: list[tuple[int, str]] = [
(316, " to"),
(28, "="),
(44580, "functions"),
(775, ".get"),
(23981, "_current"),
(170154, "_weather"),
(_CHANNEL, "<|channel|>"),
(12606, "comment"),
(815, "ary"),
(5701, " json"),
(_MESSAGE, "<|message|>"),
(10848, '{"'),
(7693, "location"),
(1243, '":'),
(392, ' "'),
(173844, "Tokyo"),
(18583, '"}'),
(_CALL, "<|call|>"),
]
# "<|channel|>commentary to=functions.get_current_weather json<|message|>{\"location\": \"Tokyo\"}<|call|>"
FORMAT_B_TOKENS: list[tuple[int, str]] = [
(_CHANNEL, "<|channel|>"),
(12606, "comment"),
(815, "ary"),
(316, " to"),
(28, "="),
(44580, "functions"),
(775, ".get"),
(23981, "_current"),
(170154, "_weather"),
(5701, " json"),
(_MESSAGE, "<|message|>"),
(10848, '{"'),
(7693, "location"),
(1243, '":'),
(392, ' "'),
(173844, "Tokyo"),
(18583, '"}'),
(_CALL, "<|call|>"),
]
# "<|channel|>analysis<|message|>Let me think...<|end|><|start|>assistant<|channel|>commentary to=functions.X ..."
# Full analysis-then-tool-call as the model actually generates it.
THINKING_THEN_TOOL_TOKENS: list[tuple[int, str]] = [
(_CHANNEL, "<|channel|>"),
(35644, "analysis"),
(_MESSAGE, "<|message|>"),
(12845, "Let"),
(668, " me"),
(2411, " think"),
(1078, " about"),
(495, " this"),
(13, "."),
(_END, "<|end|>"),
# Model generates a new message header for the tool call:
(_START, "<|start|>"),
(_ASSISTANT, "assistant"),
*FORMAT_B_TOKENS,
]
# fmt: on
def _make_gen_responses(
    tokens: list[tuple[int, str]],
) -> list[GenerationResponse]:
    """Build GenerationResponse list from (token_id, text) pairs; only the last gets finish_reason."""
    last = len(tokens) - 1
    return [
        GenerationResponse(
            text=text,
            token=tid,
            finish_reason="stop" if i == last else None,
            usage=None,
        )
        for i, (tid, text) in enumerate(tokens)
    ]
def _collect(
    tokens: list[tuple[int, str]],
) -> list[GenerationResponse | ToolCallResponse]:
    """Feed tokens through parse_gpt_oss and collect every yielded response."""
    stream = (resp for resp in _make_gen_responses(tokens))
    return list(parse_gpt_oss(stream))
def _get_tool_call(
    results: list[GenerationResponse | ToolCallResponse],
) -> ToolCallResponse:
    """Extract the single ToolCallResponse from results, failing if 0 or >1 are present."""
    matches = [item for item in results if isinstance(item, ToolCallResponse)]
    assert len(matches) == 1, f"Expected 1 ToolCallResponse, got {len(matches)}"
    return matches[0]
class TestParseGptOssRecipientPlacement:
    """Both Harmony recipient placements must produce identical tool calls."""

    def test_format_a_yields_tool_call(self):
        """Recipient before <|channel|> (format A) parses into a tool call."""
        results = _collect(FORMAT_A_TOKENS)
        tc = _get_tool_call(results)
        assert tc.tool_calls[0].name == "get_current_weather"
        assert '"location"' in tc.tool_calls[0].arguments
        assert "Tokyo" in tc.tool_calls[0].arguments

    def test_format_b_yields_tool_call(self):
        """Recipient inside the commentary channel header (format B) parses too."""
        results = _collect(FORMAT_B_TOKENS)
        tc = _get_tool_call(results)
        assert tc.tool_calls[0].name == "get_current_weather"
        assert '"location"' in tc.tool_calls[0].arguments
        assert "Tokyo" in tc.tool_calls[0].arguments

    def test_both_formats_produce_identical_tool_calls(self):
        """Formats A and B must agree on both tool name and arguments."""
        tc_a = _get_tool_call(_collect(FORMAT_A_TOKENS))
        tc_b = _get_tool_call(_collect(FORMAT_B_TOKENS))
        assert tc_a.tool_calls[0].name == tc_b.tool_calls[0].name
        assert tc_a.tool_calls[0].arguments == tc_b.tool_calls[0].arguments
class TestParseGptOssThinkingThenToolCall:
    """Analysis (thinking) followed by a tool call must yield both."""

    def test_thinking_then_tool_call(self):
        results = _collect(THINKING_THEN_TOOL_TOKENS)
        generations = [r for r in results if isinstance(r, GenerationResponse)]

        # Thinking tokens carry is_thinking=True and no <think> markers.
        thinking_text = "".join(g.text for g in generations if g.is_thinking)
        assert "Let me think about this." in thinking_text
        assert "<think>" not in thinking_text
        assert "</think>" not in thinking_text

        # Non-thinking output must not contain <think> markers either.
        plain_text = "".join(g.text for g in generations if not g.is_thinking)
        assert "<think>" not in plain_text

        # The tool call itself must still be produced.
        tc = _get_tool_call(results)
        assert tc.tool_calls[0].name == "get_current_weather"
        assert "Tokyo" in tc.tool_calls[0].arguments
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_runner/test_parse_gpt_oss.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/download/tests/test_offline_mode.py | """Tests for offline/air-gapped mode."""
from collections.abc import AsyncIterator
from pathlib import Path
from unittest.mock import AsyncMock, patch
import aiofiles
import aiofiles.os as aios
import pytest
from exo.download.download_utils import (
_download_file, # pyright: ignore[reportPrivateUsage]
download_file_with_retry,
fetch_file_list_with_cache,
)
from exo.shared.types.common import ModelId
from exo.shared.types.worker.downloads import FileListEntry
@pytest.fixture
def model_id() -> ModelId:
    """Fixed model identifier shared by every test in this module."""
    return ModelId("test-org/test-model")
@pytest.fixture
async def temp_models_dir(tmp_path: Path) -> AsyncIterator[Path]:
    """Create a throwaway models dir and patch EXO_MODELS_DIR to point at it."""
    models_dir = tmp_path / "models"
    await aios.makedirs(models_dir, exist_ok=True)
    with patch("exo.download.download_utils.EXO_MODELS_DIR", models_dir):
        yield models_dir
class TestDownloadFileOffline:
    """Tests for _download_file with skip_internet=True."""

    @staticmethod
    async def _write_file(path: Path, payload: bytes) -> None:
        # Small helper: write payload to path asynchronously.
        async with aiofiles.open(path, "wb") as handle:
            await handle.write(payload)

    async def test_returns_local_file_without_http_verification(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """When skip_internet=True and file exists locally, return it immediately
        without making any HTTP calls (no file_meta verification)."""
        downloads = tmp_path / "downloads"
        await aios.makedirs(downloads, exist_ok=True)
        expected = downloads / "model.safetensors"
        await self._write_file(expected, b"model weights data")
        with patch(
            "exo.download.download_utils.file_meta",
            new_callable=AsyncMock,
        ) as meta_mock:
            returned = await _download_file(
                model_id,
                "main",
                "model.safetensors",
                downloads,
                skip_internet=True,
            )
            assert returned == expected
            meta_mock.assert_not_called()

    async def test_raises_file_not_found_for_missing_file(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """When skip_internet=True and file does NOT exist locally,
        raise FileNotFoundError instead of attempting download."""
        downloads = tmp_path / "downloads"
        await aios.makedirs(downloads, exist_ok=True)
        with pytest.raises(FileNotFoundError, match="offline mode"):
            await _download_file(
                model_id,
                "main",
                "missing_model.safetensors",
                downloads,
                skip_internet=True,
            )

    async def test_returns_local_file_in_subdirectory(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """When skip_internet=True and file exists in a subdirectory,
        return it without HTTP calls."""
        downloads = tmp_path / "downloads"
        nested = downloads / "transformer"
        await aios.makedirs(nested, exist_ok=True)
        expected = nested / "diffusion_pytorch_model.safetensors"
        await self._write_file(expected, b"weights")
        with patch(
            "exo.download.download_utils.file_meta",
            new_callable=AsyncMock,
        ) as meta_mock:
            returned = await _download_file(
                model_id,
                "main",
                "transformer/diffusion_pytorch_model.safetensors",
                downloads,
                skip_internet=True,
            )
            assert returned == expected
            meta_mock.assert_not_called()
class TestDownloadFileWithRetryOffline:
    """Tests for download_file_with_retry with skip_internet=True."""

    async def test_propagates_skip_internet_to_download_file(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Verify skip_internet is passed through to _download_file."""
        downloads = tmp_path / "downloads"
        await aios.makedirs(downloads, exist_ok=True)
        config_path = downloads / "config.json"
        async with aiofiles.open(config_path, "wb") as handle:
            await handle.write(b'{"model_type": "qwen2"}')
        with patch(
            "exo.download.download_utils.file_meta",
            new_callable=AsyncMock,
        ) as meta_mock:
            fetched = await download_file_with_retry(
                model_id,
                "main",
                "config.json",
                downloads,
                skip_internet=True,
            )
            assert fetched == config_path
            meta_mock.assert_not_called()

    async def test_file_not_found_does_not_retry(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """FileNotFoundError from offline mode should not trigger retries."""
        downloads = tmp_path / "downloads"
        await aios.makedirs(downloads, exist_ok=True)
        with pytest.raises(FileNotFoundError):
            await download_file_with_retry(
                model_id,
                "main",
                "nonexistent.safetensors",
                downloads,
                skip_internet=True,
            )
class TestFetchFileListOffline:
    """Tests for fetch_file_list_with_cache with skip_internet=True."""

    async def test_uses_cached_file_list(
        self, model_id: ModelId, temp_models_dir: Path
    ) -> None:
        """When skip_internet=True and cache file exists, use it without network."""
        from pydantic import TypeAdapter

        cache_dir = temp_models_dir / "caches" / model_id.normalize()
        await aios.makedirs(cache_dir, exist_ok=True)
        cached_list = [
            FileListEntry(type="file", path="model.safetensors", size=1000),
            FileListEntry(type="file", path="config.json", size=200),
        ]
        # NOTE(review): cache filename presumably mirrors the convention in
        # download_utils (<model>--<revision>--file_list.json) — the test
        # relies on that layout; confirm against the implementation.
        cache_file = cache_dir / f"{model_id.normalize()}--main--file_list.json"
        async with aiofiles.open(cache_file, "w") as f:
            await f.write(
                TypeAdapter(list[FileListEntry]).dump_json(cached_list).decode()
            )
        with patch(
            "exo.download.download_utils.fetch_file_list_with_retry",
            new_callable=AsyncMock,
        ) as mock_fetch:
            result = await fetch_file_list_with_cache(
                model_id, "main", skip_internet=True
            )
            assert result == cached_list
            mock_fetch.assert_not_called()

    async def test_falls_back_to_local_directory_scan(
        self, model_id: ModelId, temp_models_dir: Path
    ) -> None:
        """When skip_internet=True and no cache but local files exist,
        build file list from local directory."""
        import json

        model_dir = temp_models_dir / model_id.normalize()
        await aios.makedirs(model_dir, exist_ok=True)
        async with aiofiles.open(model_dir / "config.json", "w") as f:
            await f.write('{"model_type": "qwen2"}')
        # Weight-map index plus the shard it references, so the directory
        # scan has realistic model files to discover.
        index_data = {
            "metadata": {},
            "weight_map": {"model.layers.0.weight": "model.safetensors"},
        }
        async with aiofiles.open(model_dir / "model.safetensors.index.json", "w") as f:
            await f.write(json.dumps(index_data))
        async with aiofiles.open(model_dir / "model.safetensors", "wb") as f:
            await f.write(b"x" * 500)
        with patch(
            "exo.download.download_utils.fetch_file_list_with_retry",
            new_callable=AsyncMock,
        ) as mock_fetch:
            result = await fetch_file_list_with_cache(
                model_id, "main", skip_internet=True
            )
            mock_fetch.assert_not_called()
            paths = {entry.path for entry in result}
            assert "config.json" in paths
            assert "model.safetensors" in paths

    async def test_raises_when_no_cache_and_no_local_files(
        self, model_id: ModelId, temp_models_dir: Path
    ) -> None:
        """When skip_internet=True and neither cache nor local files exist,
        raise FileNotFoundError."""
        with pytest.raises(FileNotFoundError, match="No internet"):
            await fetch_file_list_with_cache(model_id, "main", skip_internet=True)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/download/tests/test_offline_mode.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/event_log.py | import contextlib
import json
from collections import OrderedDict
from collections.abc import Iterator
from datetime import datetime, timezone
from io import BufferedRandom, BufferedReader
from pathlib import Path
import msgspec
import zstandard
from loguru import logger
from pydantic import TypeAdapter
from exo.shared.types.events import Event
# Shared adapter so the Event schema is compiled once, not per call.
_EVENT_ADAPTER: TypeAdapter[Event] = TypeAdapter(Event)
_HEADER_SIZE = 4  # uint32 big-endian
_OFFSET_CACHE_SIZE = 128  # max entries in the index -> byte-offset LRU cache
_MAX_ARCHIVES = 5  # compressed rotations kept on disk
def _serialize_event(event: Event) -> bytes:
    """Encode an Event as a msgpack payload via its JSON-mode dump."""
    payload = event.model_dump(mode="json")
    return msgspec.msgpack.encode(payload)
def _deserialize_event(raw: bytes) -> Event:
    """Decode a msgpack payload back into an Event.

    The payload is decoded into a plain dict and re-encoded as JSON before
    validation: Pydantic's validate_json() applies JSON-mode coercion
    (e.g. string -> enum) even under strict=True, while validate_python()
    does not. Round-tripping through JSON is the only way to get correct
    deserialization without disabling strict mode or adding casts everywhere.
    """
    decoded = msgspec.msgpack.decode(raw, type=dict)
    return _EVENT_ADAPTER.validate_json(json.dumps(decoded))
def _unpack_header(header: bytes) -> int:
return int.from_bytes(header, byteorder="big")
def _skip_record(f: BufferedReader) -> bool:
    """Skip one length-prefixed record. Returns False on EOF."""
    header = f.read(_HEADER_SIZE)
    if len(header) == _HEADER_SIZE:
        # Relative seek past the payload without reading it.
        f.seek(_unpack_header(header), 1)
        return True
    return False
def _read_record(f: BufferedReader) -> Event | None:
    """Read one length-prefixed record. Returns None on EOF or truncation."""
    header = f.read(_HEADER_SIZE)
    if len(header) < _HEADER_SIZE:
        return None
    expected = _unpack_header(header)
    body = f.read(expected)
    if len(body) < expected:
        # Truncated payload (e.g. partial final write) — treat as EOF.
        return None
    return _deserialize_event(body)
class DiskEventLog:
    """Append-only event log backed by a file on disk.

    On-disk format: sequence of length-prefixed msgpack records.
    Each record is [4-byte big-endian uint32 length][msgpack payload].

    Uses a bounded LRU cache of event index → byte offset for efficient
    random access without storing an offset per event.
    """

    def __init__(self, directory: Path) -> None:
        """Create the log directory if needed and open a fresh active file.

        A leftover ``events.bin`` from a previous session (e.g. a crash)
        is compressed into an archive before the new file is opened.
        """
        self._directory = directory
        self._directory.mkdir(parents=True, exist_ok=True)
        self._active_path = directory / "events.bin"
        # LRU cache: event index -> byte offset of its record
        self._offset_cache: OrderedDict[int, int] = OrderedDict()
        # Number of events appended in this session
        self._count: int = 0
        # Rotate stale active file from a previous session/crash
        if self._active_path.exists():
            self._rotate(self._active_path, self._directory)
        self._file: BufferedRandom = open(self._active_path, "w+b")  # noqa: SIM115

    def _cache_offset(self, idx: int, offset: int) -> None:
        """Remember the byte offset of event *idx*, evicting the LRU entry."""
        self._offset_cache[idx] = offset
        self._offset_cache.move_to_end(idx)
        if len(self._offset_cache) > _OFFSET_CACHE_SIZE:
            self._offset_cache.popitem(last=False)

    def _seek_to(self, f: BufferedReader, target_idx: int) -> None:
        """Seek f to the byte offset of event target_idx, using cache or scanning forward."""
        if target_idx in self._offset_cache:
            self._offset_cache.move_to_end(target_idx)
            f.seek(self._offset_cache[target_idx])
            return
        # Find the highest cached index before target_idx
        # NOTE(review): iteration is in LRU order, so this actually keeps the
        # most recently *iterated* index < target, not necessarily the highest.
        # Any index < target is still a correct (if suboptimal) scan start.
        scan_from_idx = 0
        scan_from_offset = 0
        for cached_idx in self._offset_cache:
            if cached_idx < target_idx:
                scan_from_idx = cached_idx
                scan_from_offset = self._offset_cache[cached_idx]
        # Scan forward, skipping records
        f.seek(scan_from_offset)
        for _ in range(scan_from_idx, target_idx):
            _skip_record(f)
        self._cache_offset(target_idx, f.tell())

    def append(self, event: Event) -> None:
        """Serialize *event* and append one length-prefixed record.

        Writes are buffered; the read paths flush before reading.
        """
        packed = _serialize_event(event)
        self._file.write(len(packed).to_bytes(_HEADER_SIZE, byteorder="big"))
        self._file.write(packed)
        self._count += 1

    def read_range(self, start: int, end: int) -> Iterator[Event]:
        """Yield events from index start (inclusive) to end (exclusive)."""
        end = min(end, self._count)
        if start < 0 or end < 0 or start >= end:
            return
        # Make buffered appends visible to the separate read handle
        self._file.flush()
        with open(self._active_path, "rb") as f:
            self._seek_to(f, start)
            for _ in range(end - start):
                event = _read_record(f)
                if event is None:
                    break
                yield event
            # Cache where we ended up so the next sequential read is a hit
            if end < self._count:
                self._cache_offset(end, f.tell())

    def read_all(self) -> Iterator[Event]:
        """Yield all events from the log one at a time."""
        if self._count == 0:
            return
        self._file.flush()
        with open(self._active_path, "rb") as f:
            for _ in range(self._count):
                event = _read_record(f)
                if event is None:
                    break
                yield event

    def __len__(self) -> int:
        """Number of events appended in this session."""
        return self._count

    def close(self) -> None:
        """Close the file and rotate active file to compressed archive."""
        if self._file.closed:
            return
        self._file.close()
        if self._active_path.exists() and self._count > 0:
            self._rotate(self._active_path, self._directory)
        elif self._active_path.exists():
            # Nothing was written; don't keep an empty archive around
            self._active_path.unlink()

    @staticmethod
    def _rotate(source: Path, directory: Path) -> None:
        """Compress source into a timestamped archive.

        Keeps at most ``_MAX_ARCHIVES`` compressed copies. Oldest beyond
        the limit are deleted.
        """
        try:
            # Zero-padded stamp sorts lexicographically == chronologically,
            # so sorted(glob) below yields oldest-first.
            stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S_%f")
            dest = directory / f"events.{stamp}.bin.zst"
            compressor = zstandard.ZstdCompressor()
            with open(source, "rb") as f_in, open(dest, "wb") as f_out:
                compressor.copy_stream(f_in, f_out)
            source.unlink()
            logger.info(f"Rotated event log: {source} -> {dest}")
            # Prune oldest archives beyond the limit
            archives = sorted(directory.glob("events.*.bin.zst"))
            for old in archives[:-_MAX_ARCHIVES]:
                old.unlink()
        except Exception as e:
            logger.opt(exception=e).warning(f"Failed to rotate event log {source}")
            # Clean up the source even if compression fails
            with contextlib.suppress(OSError):
                source.unlink()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/event_log.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/tests/test_event_log.py | from pathlib import Path
import pytest
from exo.master.event_log import DiskEventLog
from exo.shared.types.events import TestEvent
@pytest.fixture
def log_dir(tmp_path: Path) -> Path:
    """Directory (not yet created) for a DiskEventLog under pytest's tmp_path."""
    return tmp_path / "event_log"
def test_append_and_read_back(log_dir: Path):
    log = DiskEventLog(log_dir)
    appended = [TestEvent() for _ in range(5)]
    for event in appended:
        log.append(event)

    assert len(log) == 5
    restored = list(log.read_all())
    assert len(restored) == 5
    for before, after in zip(appended, restored, strict=True):
        assert before.event_id == after.event_id
    log.close()
def test_read_range(log_dir: Path):
    log = DiskEventLog(log_dir)
    appended = [TestEvent() for _ in range(10)]
    for event in appended:
        log.append(event)

    window = list(log.read_range(3, 7))
    assert len(window) == 4
    for offset, restored in enumerate(window):
        assert appended[3 + offset].event_id == restored.event_id
    log.close()
def test_read_range_bounds(log_dir: Path):
    log = DiskEventLog(log_dir)
    for event in (TestEvent() for _ in range(3)):
        log.append(event)

    # Start beyond count
    assert list(log.read_range(5, 10)) == []
    # Negative start
    assert list(log.read_range(-1, 2)) == []
    # End beyond count is clamped
    assert len(list(log.read_range(1, 100))) == 2
    log.close()
def test_empty_log(log_dir: Path):
    log = DiskEventLog(log_dir)
    assert len(log) == 0
    assert list(log.read_all()) == []
    assert list(log.read_range(0, 10)) == []
    log.close()
def _archives(log_dir: Path) -> list[Path]:
return sorted(log_dir.glob("events.*.bin.zst"))
def test_rotation_on_close(log_dir: Path):
    log = DiskEventLog(log_dir)
    log.append(TestEvent())
    log.close()

    # Active file is gone; exactly one non-empty archive remains.
    assert not (log_dir / "events.bin").exists()
    archives = _archives(log_dir)
    assert len(archives) == 1
    assert archives[0].stat().st_size > 0
def test_rotation_on_construction_with_stale_file(log_dir: Path):
    log_dir.mkdir(parents=True, exist_ok=True)
    (log_dir / "events.bin").write_bytes(b"stale data")

    log = DiskEventLog(log_dir)

    # The stale file was archived and the new log starts empty.
    archives = _archives(log_dir)
    assert len(archives) == 1
    assert archives[0].exists()
    assert len(log) == 0
    log.close()
def test_empty_log_no_archive(log_dir: Path):
    """Closing an empty log should not leave an archive."""
    DiskEventLog(log_dir).close()

    assert not (log_dir / "events.bin").exists()
    assert _archives(log_dir) == []
def test_close_is_idempotent(log_dir: Path):
    log = DiskEventLog(log_dir)
    log.append(TestEvent())
    log.close()
    before = _archives(log_dir)
    log.close()  # should not raise
    assert _archives(log_dir) == before
def test_successive_sessions(log_dir: Path):
    """Simulate two master sessions: both archives should be kept."""
    session_one = DiskEventLog(log_dir)
    session_one.append(TestEvent())
    session_one.close()
    archive_one = _archives(log_dir)[-1]

    session_two = DiskEventLog(log_dir)
    session_two.append(TestEvent())
    session_two.append(TestEvent())
    session_two.close()

    # Session 1 archive shifted to slot 2, session 2 in slot 1
    archives = _archives(log_dir)
    archive_two = archives[-1]
    assert archive_one.exists()
    assert archive_two.exists()
    assert archive_one != archive_two
    assert archives[-2] == archive_one
def test_rotation_keeps_at_most_5_archives(log_dir: Path):
    """After 7 sessions, only the 5 most recent archives should remain."""
    created: list[Path] = []
    for _ in range(7):
        session = DiskEventLog(log_dir)
        session.append(TestEvent())
        session.close()
        created.append(_archives(log_dir)[-1])

    for stale in created[:2]:
        assert not stale.exists()
    for kept in created[2:]:
        assert kept.exists()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_event_log.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/engines/image/models/flux/kontext_adapter.py | import math
from pathlib import Path
from typing import Any, final
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.latent_creator.flux_latent_creator import FluxLatentCreator
from mflux.models.flux.model.flux_text_encoder.prompt_encoder import PromptEncoder
from mflux.models.flux.model.flux_transformer.transformer import Transformer
from mflux.models.flux.variants.kontext.flux_kontext import Flux1Kontext
from mflux.models.flux.variants.kontext.kontext_util import KontextUtil
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models.base import (
ModelAdapter,
PromptData,
RotaryEmbeddings,
)
from exo.worker.engines.image.models.flux.wrappers import (
FluxJointBlockWrapper,
FluxSingleBlockWrapper,
)
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
@final
class FluxKontextPromptData(PromptData):
    """Prompt data for FLUX.1-Kontext image editing.

    Bundles text-encoder outputs with the VAE-encoded conditioning latents
    and position IDs of the input image.
    """

    def __init__(
        self,
        prompt_embeds: mx.array,
        pooled_prompt_embeds: mx.array,
        conditioning_latents: mx.array,
        kontext_image_ids: mx.array,
    ):
        self._text_embeds = prompt_embeds
        self._pooled_text_embeds = pooled_prompt_embeds
        self._cond_latents = conditioning_latents
        self._image_ids = kontext_image_ids

    @property
    def prompt_embeds(self) -> mx.array:
        return self._text_embeds

    @property
    def pooled_prompt_embeds(self) -> mx.array:
        return self._pooled_text_embeds

    @property
    def negative_prompt_embeds(self) -> mx.array | None:
        # No negative branch: Kontext doesn't use CFG.
        return None

    @property
    def negative_pooled_prompt_embeds(self) -> mx.array | None:
        return None

    def get_encoder_hidden_states_mask(self, positive: bool = True) -> mx.array | None:
        return None

    @property
    def cond_image_grid(
        self,
    ) -> tuple[int, int, int] | list[tuple[int, int, int]] | None:
        return None

    @property
    def conditioning_latents(self) -> mx.array | None:
        """VAE-encoded input image latents for Kontext conditioning."""
        return self._cond_latents

    @property
    def kontext_image_ids(self) -> mx.array | None:
        """Position IDs for Kontext conditioning (first_coord=1)."""
        return self._image_ids

    def get_cfg_branch_data(
        self, positive: bool
    ) -> tuple[mx.array, mx.array | None, mx.array | None, mx.array | None]:
        """Kontext doesn't use CFG, but we return positive data for compatibility."""
        return (
            self._text_embeds,
            None,
            self._pooled_text_embeds,
            self._cond_latents,
        )

    def get_batched_cfg_data(
        self,
    ) -> tuple[mx.array, mx.array, mx.array | None, mx.array | None] | None:
        # Kontext doesn't use CFG
        return None
@final
class FluxKontextModelAdapter(ModelAdapter[Flux1Kontext, Transformer]):
    """Adapter for FLUX.1-Kontext image editing model.

    Key differences from standard FluxModelAdapter:
    - Takes an input image and computes output dimensions from it
    - Creates conditioning latents from the input image via VAE
    - Creates special position IDs (kontext_image_ids) for conditioning tokens
    - Creates pure noise latents (not img2img blending)
    """

    def __init__(
        self,
        config: ImageModelConfig,
        model_id: str,
        local_path: Path,
        quantize: int | None = None,
    ):
        """Load Flux1Kontext weights from *local_path*.

        Args:
            config: exo image-model configuration for this adapter.
            model_id: Model name used to resolve the mflux ModelConfig.
            local_path: Directory holding the downloaded weights.
            quantize: Optional quantization bit width (None = full precision).
        """
        self._config = config
        self._model = Flux1Kontext(
            model_config=ModelConfig.from_name(model_name=model_id, base_model=None),
            model_path=str(local_path),
            quantize=quantize,
        )
        self._transformer = self._model.transformer
        # Stores image path and computed dimensions after set_image_dimensions
        self._image_path: str | None = None
        self._output_height: int | None = None
        self._output_width: int | None = None

    @property
    def hidden_dim(self) -> int:
        """Transformer hidden size, read off the x_embedder weight shape."""
        return self._transformer.x_embedder.weight.shape[0]  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType]

    @property
    def needs_cfg(self) -> bool:
        """Kontext does not use classifier-free guidance."""
        return False

    def _get_latent_creator(self) -> type:
        """Latent-factory class used by the shared pipeline."""
        return FluxLatentCreator

    def get_joint_block_wrappers(
        self,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ) -> list[JointBlockWrapper[Any]]:
        """Create wrapped joint blocks for Flux Kontext."""
        # encoder_hidden_states_mask is unused here.
        return [
            FluxJointBlockWrapper(block, text_seq_len)
            for block in self._transformer.transformer_blocks
        ]

    def get_single_block_wrappers(
        self,
        text_seq_len: int,
    ) -> list[SingleBlockWrapper[Any]]:
        """Create wrapped single blocks for Flux Kontext."""
        return [
            FluxSingleBlockWrapper(block, text_seq_len)
            for block in self._transformer.single_transformer_blocks
        ]

    def slice_transformer_blocks(
        self,
        start_layer: int,
        end_layer: int,
    ):
        """Keep only blocks [start_layer, end_layer), counting joint blocks
        first and single blocks after them; the slice may fall entirely in
        one group or span both.
        """
        all_joint = list(self._transformer.transformer_blocks)
        all_single = list(self._transformer.single_transformer_blocks)
        total_joint_blocks = len(all_joint)
        if end_layer <= total_joint_blocks:
            # All assigned are joint blocks
            joint_start, joint_end = start_layer, end_layer
            single_start, single_end = 0, 0
        elif start_layer >= total_joint_blocks:
            # All assigned are single blocks
            joint_start, joint_end = 0, 0
            single_start = start_layer - total_joint_blocks
            single_end = end_layer - total_joint_blocks
        else:
            # Spans both joint and single
            joint_start, joint_end = start_layer, total_joint_blocks
            single_start = 0
            single_end = end_layer - total_joint_blocks
        self._transformer.transformer_blocks = all_joint[joint_start:joint_end]
        self._transformer.single_transformer_blocks = all_single[
            single_start:single_end
        ]

    def set_image_dimensions(self, image_path: Path) -> tuple[int, int]:
        """Compute and store dimensions from input image.

        Also stores image_path for use in encode_prompt().

        Args:
            image_path: Path to the input image

        Returns:
            (output_width, output_height) for runtime config
        """
        from mflux.utils.image_util import ImageUtil

        pil_image = ImageUtil.load_image(str(image_path)).convert("RGB")
        image_size = pil_image.size
        # Compute output dimensions from input image aspect ratio
        # Target area of 1024x1024 = ~1M pixels
        target_area = 1024 * 1024
        ratio = image_size[0] / image_size[1]
        output_width = math.sqrt(target_area * ratio)
        output_height = output_width / ratio
        output_width = round(output_width / 32) * 32
        output_height = round(output_height / 32) * 32
        # Ensure multiple of 16 for VAE
        vae_scale_factor = 8
        multiple_of = vae_scale_factor * 2
        output_width = output_width // multiple_of * multiple_of
        output_height = output_height // multiple_of * multiple_of
        self._image_path = str(image_path)
        self._output_width = int(output_width)
        self._output_height = int(output_height)
        return self._output_width, self._output_height

    def create_latents(self, seed: int, runtime_config: Config) -> mx.array:
        """Create initial noise latents for Kontext.

        Unlike standard img2img which blends noise with encoded input,
        Kontext uses pure noise latents. The input image is provided
        separately as conditioning.
        """
        return FluxLatentCreator.create_noise(
            seed=seed,
            height=runtime_config.height,
            width=runtime_config.width,
        )

    def encode_prompt(
        self, prompt: str, negative_prompt: str | None = None
    ) -> FluxKontextPromptData:
        """Encode prompt and create conditioning from stored input image.

        Must call set_image_dimensions() before this method.

        Args:
            prompt: Text prompt for editing
            negative_prompt: Ignored (Kontext doesn't use CFG)

        Returns:
            FluxKontextPromptData with text embeddings and image conditioning

        Raises:
            RuntimeError: If set_image_dimensions() has not been called yet.
        """
        del negative_prompt  # Kontext doesn't support negative prompts or CFG
        if (
            self._image_path is None
            or self._output_height is None
            or self._output_width is None
        ):
            raise RuntimeError(
                "set_image_dimensions() must be called before encode_prompt() "
                "for FluxKontextModelAdapter"
            )
        assert isinstance(self.model.prompt_cache, dict)
        assert isinstance(self.model.tokenizers, dict)
        # Encode text prompt
        prompt_embeds, pooled_prompt_embeds = PromptEncoder.encode_prompt(
            prompt=prompt,
            prompt_cache=self.model.prompt_cache,
            t5_tokenizer=self.model.tokenizers["t5"],  # pyright: ignore[reportAny]
            clip_tokenizer=self.model.tokenizers["clip"],  # pyright: ignore[reportAny]
            t5_text_encoder=self.model.t5_text_encoder,
            clip_text_encoder=self.model.clip_text_encoder,
        )
        # Create conditioning latents from input image
        conditioning_latents, kontext_image_ids = (
            KontextUtil.create_image_conditioning_latents(
                vae=self.model.vae,
                height=self._output_height,
                width=self._output_width,
                image_path=self._image_path,
            )
        )
        return FluxKontextPromptData(
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            conditioning_latents=conditioning_latents,
            kontext_image_ids=kontext_image_ids,
        )

    def compute_embeddings(
        self,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Project latents (x_embedder) and text (context_embedder) into the
        transformer's hidden space."""
        embedded_hidden = self._transformer.x_embedder(hidden_states)
        embedded_encoder = self._transformer.context_embedder(prompt_embeds)
        return embedded_hidden, embedded_encoder

    def compute_text_embeddings(
        self,
        t: int,
        runtime_config: Config,
        pooled_prompt_embeds: mx.array | None = None,
        hidden_states: mx.array | None = None,
    ) -> mx.array:
        """Compute timestep/pooled-text conditioning via the transformer's
        time_text_embed module.

        Raises:
            ValueError: If pooled_prompt_embeds is None (required here).
        """
        # hidden_states is unused for Flux Kontext.
        if pooled_prompt_embeds is None:
            raise ValueError(
                "pooled_prompt_embeds is required for Flux Kontext text embeddings"
            )
        return Transformer.compute_text_embeddings(
            t, pooled_prompt_embeds, self._transformer.time_text_embed, runtime_config
        )

    def compute_rotary_embeddings(
        self,
        prompt_embeds: mx.array,
        runtime_config: Config,
        encoder_hidden_states_mask: mx.array | None = None,
        cond_image_grid: tuple[int, int, int]
        | list[tuple[int, int, int]]
        | None = None,
        kontext_image_ids: mx.array | None = None,
    ) -> RotaryEmbeddings:
        """Compute rotary position embeddings; only kontext_image_ids is
        forwarded — the mask and cond_image_grid parameters are unused here."""
        return Transformer.compute_rotary_embeddings(
            prompt_embeds,
            self._transformer.pos_embed,
            runtime_config,
            kontext_image_ids,
        )

    def apply_guidance(
        self,
        noise_positive: mx.array,
        noise_negative: mx.array,
        guidance_scale: float,
    ) -> mx.array:
        """Never valid for Kontext; see needs_cfg."""
        raise NotImplementedError("Flux Kontext does not use classifier-free guidance")
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/flux/kontext_adapter.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:tmp/quantize_and_upload.py | #!/usr/bin/env python3
"""
Download an mflux model, quantize it, and upload to HuggingFace.
Usage (run from mflux project directory):
cd /path/to/mflux
uv run python /path/to/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev
uv run python /path/to/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev --skip-base --skip-8bit
uv run python /path/to/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev --dry-run
Requires:
- Must be run from mflux project directory using `uv run`
- huggingface_hub installed (add to mflux deps or install separately)
- HuggingFace authentication: run `huggingface-cli login` or set HF_TOKEN
"""
from __future__ import annotations
import argparse
import re
import shutil
import sys
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mflux.models.flux.variants.txt2img.flux import Flux1
HF_ORG = "exolabs"  # HuggingFace organization that hosts the uploaded variants
def get_model_class(model_name: str) -> type:
    """Get the appropriate model class based on model name.

    Matching is case-insensitive substring search; the first match in
    declaration order wins, with Flux1 as the fallback.
    """
    from mflux.models.fibo.variants.txt2img.fibo import FIBO
    from mflux.models.flux.variants.txt2img.flux import Flux1
    from mflux.models.flux2.variants.txt2img.flux2_klein import Flux2Klein
    from mflux.models.qwen.variants.txt2img.qwen_image import QwenImage
    from mflux.models.z_image.variants.turbo.z_image_turbo import ZImageTurbo

    name = model_name.lower()
    dispatch: list[tuple[tuple[str, ...], type]] = [
        (("qwen",), QwenImage),
        (("fibo",), FIBO),
        (("z-image", "zimage"), ZImageTurbo),
        (("flux2", "flux.2"), Flux2Klein),
    ]
    for needles, model_class in dispatch:
        if any(needle in name for needle in needles):
            return model_class
    return Flux1
def get_repo_name(model_name: str, bits: int | None) -> str:
    """Get the HuggingFace repo name for a model variant.

    Args:
        model_name: Source model name, optionally org-qualified
            (e.g. "black-forest-labs/FLUX.1-Kontext-dev").
        bits: Quantization width, or None/0 for the unquantized base model.

    Returns:
        "<HF_ORG>/<base-name>[-<bits>bit]".
    """
    # split("/")[-1] already yields the whole string when there is no "/",
    # so the previous `if "/" in model_name` guard was redundant.
    base_name = model_name.split("/")[-1]
    suffix = f"-{bits}bit" if bits else ""
    return f"{HF_ORG}/{base_name}{suffix}"
def get_local_path(output_dir: Path, model_name: str, bits: int | None) -> Path:
"""Get the local save path for a model variant."""
# Extract repo name from HF path (e.g., "black-forest-labs/FLUX.1-Kontext-dev" -> "FLUX.1-Kontext-dev")
base_name = model_name.split("/")[-1] if "/" in model_name else model_name
suffix = f"-{bits}bit" if bits else ""
return output_dir / f"{base_name}{suffix}"
def copy_source_repo(
    source_repo: str,
    local_path: Path,
    dry_run: bool = False,
) -> None:
    """Copy all files from source repo (replicating original HF structure).

    Downloads the full snapshot of ``source_repo`` into ``local_path`` and
    removes redundant root-level ``*.safetensors`` files (they duplicate
    the per-component directories).

    Args:
        source_repo: HuggingFace repo id to mirror.
        local_path: Local directory to download into.
        dry_run: If True, only print what would happen and return early.
    """
    print(f"\n{'=' * 60}")
    print(f"Copying full repo from source: {source_repo}")
    print(f"Output path: {local_path}")
    print(f"{'=' * 60}")

    if dry_run:
        print("[DRY RUN] Would download all files from source repo")
        return

    from huggingface_hub import snapshot_download

    # Download all files to our local path
    snapshot_download(
        repo_id=source_repo,
        local_dir=local_path,
    )

    # Remove root-level safetensors files (flux.1-dev.safetensors, etc.)
    # These are redundant with the component directories
    for f in local_path.glob("*.safetensors"):
        print(f"Removing root-level safetensors: {f.name}")
        # Fix: the previous `if not dry_run:` guard here was dead code —
        # the dry_run path always returns early above.
        f.unlink()

    print(f"Source repo copied to {local_path}")
def load_and_save_quantized_model(
    model_name: str,
    bits: int,
    output_path: Path,
    dry_run: bool = False,
) -> None:
    """Load a model with quantization and save it in mflux format."""
    print(f"\n{'=' * 60}")
    print(f"Loading {model_name} with {bits}-bit quantization...")
    print(f"Output path: {output_path}")
    print(f"{'=' * 60}")

    if dry_run:
        print("[DRY RUN] Would load and save quantized model")
        return

    from mflux.models.common.config.model_config import ModelConfig

    # Instantiate the right model class with on-load quantization.
    model_class = get_model_class(model_name)
    model: Flux1 = model_class(
        quantize=bits,
        model_config=ModelConfig.from_name(model_name=model_name, base_model=None),
    )

    print(f"Saving model to {output_path}...")
    model.save_model(str(output_path))
    print(f"Model saved successfully to {output_path}")
def copy_source_metadata(
    source_repo: str,
    local_path: Path,
    dry_run: bool = False,
) -> None:
    """Copy metadata files (LICENSE, README, etc.) from source repo, excluding safetensors."""
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"Copying metadata from source repo: {source_repo}")
    print(banner)
    if dry_run:
        print("[DRY RUN] Would download metadata files (excluding *.safetensors)")
        return
    from huggingface_hub import snapshot_download

    # Pull everything except the (large) weight shards into local_path.
    snapshot_download(
        repo_id=source_repo,
        local_dir=local_path,
        ignore_patterns=["*.safetensors"],
    )
    print(f"Metadata files copied to {local_path}")
def upload_to_huggingface(
    local_path: Path,
    repo_id: str,
    dry_run: bool = False,
    clean_remote: bool = False,
) -> None:
    """Upload a saved model to HuggingFace."""
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"Uploading to HuggingFace: {repo_id}")
    print(f"Local path: {local_path}")
    print(f"Clean remote first: {clean_remote}")
    print(banner)
    if dry_run:
        print("[DRY RUN] Would upload to HuggingFace")
        return
    from huggingface_hub import HfApi

    api = HfApi()
    # Create the repo if it doesn't exist
    print(f"Creating/verifying repo: {repo_id}")
    api.create_repo(repo_id=repo_id, repo_type="model", exist_ok=True)
    if clean_remote:
        # Best-effort: delete stale mflux-format artifacts before uploading.
        print("Cleaning old mflux-format files from remote...")
        try:
            # mflux writes numbered shards: <dir>/<number>.safetensors
            is_numbered_shard = re.compile(r".*/\d+\.safetensors$").match
            for remote_file in api.list_repo_files(repo_id=repo_id, repo_type="model"):
                if is_numbered_shard(remote_file) or remote_file.endswith(
                    "/model.safetensors.index.json"
                ):
                    print(f"  Deleting: {remote_file}")
                    api.delete_file(
                        path_in_repo=remote_file, repo_id=repo_id, repo_type="model"
                    )
        except Exception as e:
            print(f"Warning: Could not clean remote files: {e}")
    print("Uploading folder contents...")
    api.upload_folder(
        folder_path=str(local_path),
        repo_id=repo_id,
        repo_type="model",
    )
    print(f"Upload complete: https://huggingface.co/{repo_id}")
def clean_local_files(local_path: Path, dry_run: bool = False) -> None:
    """Remove local model files after upload (no-op if the path is absent)."""
    print(f"\nCleaning up: {local_path}")
    if dry_run:
        print("[DRY RUN] Would remove local files")
        return
    if not local_path.exists():
        return
    shutil.rmtree(local_path)
    print(f"Removed {local_path}")
def main() -> int:
    """CLI entry point.

    For each requested variant (base / 4-bit / 8-bit): download or quantize,
    copy source-repo metadata, optionally upload to HuggingFace, and
    optionally delete the local copy. Returns a process exit code.
    """
    parser = argparse.ArgumentParser(
        description="Download an mflux model, quantize it, and upload to HuggingFace.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Process all variants (base, 4-bit, 8-bit) for FLUX.1-Kontext-dev
python tmp/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev
# Only process 4-bit variant
python tmp/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev --skip-base --skip-8bit
# Save locally without uploading
python tmp/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev --skip-upload
# Preview what would happen
python tmp/quantize_and_upload.py --model black-forest-labs/FLUX.1-Kontext-dev --dry-run
""",
    )
    parser.add_argument(
        "--model",
        "-m",
        required=True,
        help="HuggingFace model path (e.g., black-forest-labs/FLUX.1-Kontext-dev)",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path("./tmp/models"),
        help="Local directory to save models (default: ./tmp/models)",
    )
    parser.add_argument(
        "--skip-base",
        action="store_true",
        help="Skip base model (no quantization)",
    )
    parser.add_argument(
        "--skip-4bit",
        action="store_true",
        help="Skip 4-bit quantized model",
    )
    parser.add_argument(
        "--skip-8bit",
        action="store_true",
        help="Skip 8-bit quantized model",
    )
    parser.add_argument(
        "--skip-download",
        action="store_true",
        help="Skip downloading/processing, only do upload/clean operations",
    )
    parser.add_argument(
        "--skip-upload",
        action="store_true",
        help="Only save locally, don't upload to HuggingFace",
    )
    parser.add_argument(
        "--clean",
        action="store_true",
        help="Remove local files after upload",
    )
    parser.add_argument(
        "--clean-remote",
        action="store_true",
        help="Delete old mflux-format files from remote repo before uploading",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print actions without executing",
    )
    args = parser.parse_args()
    # Determine which variants to process; None encodes "no quantization".
    variants: list[int | None] = []
    if not args.skip_base:
        variants.append(None)  # Base model (no quantization)
    if not args.skip_4bit:
        variants.append(4)
    if not args.skip_8bit:
        variants.append(8)
    if not variants:
        print("Error: All variants skipped. Nothing to do.")
        return 1
    # Create output directory
    args.output_dir.mkdir(parents=True, exist_ok=True)
    # Summarize the run configuration before doing any work.
    print(f"Model: {args.model}")
    print(f"Output directory: {args.output_dir}")
    print(
        f"Variants to process: {['base' if v is None else f'{v}-bit' for v in variants]}"
    )
    print(f"Upload to HuggingFace: {not args.skip_upload}")
    print(f"Clean after upload: {args.clean}")
    if args.dry_run:
        print("\n*** DRY RUN MODE - No actual changes will be made ***")
    # Process each variant
    for bits in variants:
        local_path = get_local_path(args.output_dir, args.model, bits)
        repo_id = get_repo_name(args.model, bits)
        if not args.skip_download:
            if bits is None:
                # Base model: copy original HF repo structure (no mflux conversion)
                copy_source_repo(
                    source_repo=args.model,
                    local_path=local_path,
                    dry_run=args.dry_run,
                )
            else:
                # Quantized model: load, quantize, and save with mflux
                load_and_save_quantized_model(
                    model_name=args.model,
                    bits=bits,
                    output_path=local_path,
                    dry_run=args.dry_run,
                )
                # Copy metadata from source repo (LICENSE, README, etc.)
                copy_source_metadata(
                    source_repo=args.model,
                    local_path=local_path,
                    dry_run=args.dry_run,
                )
        # Upload
        if not args.skip_upload:
            upload_to_huggingface(
                local_path=local_path,
                repo_id=repo_id,
                dry_run=args.dry_run,
                clean_remote=args.clean_remote,
            )
        # Clean up if requested
        if args.clean:
            clean_local_files(local_path, dry_run=args.dry_run)
    print("\n" + "=" * 60)
    print("All done!")
    print("=" * 60)
    return 0
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "exo-explore/exo",
"file_path": "tmp/quantize_and_upload.py",
"license": "Apache License 2.0",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:tests/get_all_models_on_cluster.py | #!/usr/bin/env python3
# pyright: reportAny=false
import json
import subprocess
import sys
from typing import Any, cast
from urllib.request import urlopen
# First CLI arg is the tailscale hostname of the node to query.
h = sys.argv[1] if len(sys.argv) > 1 else sys.exit(f"USAGE: {sys.argv[0]} host")
# Resolve the host name to its tailscale IP by parsing `tailscale status`
# (each line: "<ip> <hostname> ...").
ts = subprocess.run(
    ["tailscale", "status"], check=True, text=True, capture_output=True
).stdout.splitlines()
ip = next(
    (sl[0] for line in ts if len(sl := line.split()) >= 2 if sl[1] == h), None
) or sys.exit(f"{h} not found in tailscale")
# Fetch the cluster download state from the node's exo HTTP endpoint.
with urlopen(f"http://{ip}:52415/state", timeout=5) as r:
    data = json.loads(r.read()).get("downloads", {})
def mid(x: dict[str, Any]) -> str | None:
for k in (
"DownloadCompleted",
"shardMetadata",
"PipelineShardMetadata",
"modelCard",
"modelId",
):
x = x.get(k, {})
return cast(str | None, x if x != {} else None)
# Models present on every node = intersection of the per-node model-id sets.
common = set[str].intersection(
    *[{m for d in nid if (m := mid(d))} for nid in data.values()]
)
for c in common:
    print(c)
| {
"repo_id": "exo-explore/exo",
"file_path": "tests/get_all_models_on_cluster.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:tests/start_distributed_test.py | #!/usr/bin/env python3
import itertools
import json
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, cast
from urllib.request import Request, urlopen
# Parse CLI: optional test kind ("jaccl" or "ring"), then hostnames.
if not (args := sys.argv[1:]):
    sys.exit(
        f"USAGE: {sys.argv[0]} <kind> [host1] [host2] ...\nkind is optional, and should be jaccl or ring"
    )
kind = args[0] if args[0] in ("jaccl", "ring") else "both"
# When no kind is given, every CLI arg is a hostname.
hosts = args[1:] if kind != "both" else args
# Map tailscale hostnames to IPs from `tailscale status` ("<ip> <host> ...").
ts = subprocess.run(
    ["tailscale", "status"], check=True, text=True, capture_output=True
).stdout.splitlines()
ip = {sl[1]: sl[0] for line in ts if len(sl := line.split()) >= 2}
ips = [ip[h] for h in hosts]
devs = [[h, ip[h]] for h in hosts]
n = len(hosts)
def get_tb(a: str) -> list[dict[str, Any]]:
    """Fetch Thunderbolt detection info from the node at IP *a*."""
    with urlopen(f"http://{a}:52414/tb_detection", timeout=5) as r:  # pyright: ignore[reportAny]
        return json.loads(r.read())  # pyright: ignore[reportAny]
def get_models(a: str) -> set[str]:
    """Fetch the set of model ids available on the node at IP *a*."""
    with urlopen(f"http://{a}:52414/models", timeout=5) as r:  # pyright: ignore[reportAny]
        return set(json.loads(r.read()))  # pyright: ignore[reportAny]
def run(h: str, a: str, body: bytes) -> None:
    """POST the JSON test payload to one node and print its output lines.

    Blocks until the remote test finishes (up to the 300 s timeout); each
    output line is prefixed with "host@ip:" for interleaved readability.
    """
    with urlopen(
        Request(
            f"http://{a}:52414/run_test",
            data=body,
            method="POST",
            headers={"Content-Type": "application/json"},
        ),
        timeout=300,
    ) as r:  # pyright: ignore[reportAny]
        for line in r.read().decode(errors="replace").splitlines():  # pyright: ignore[reportAny]
            print(f"\n{h}@{a}: {line}", flush=True)
# Fan out over all hosts in parallel with one worker per host.
with ThreadPoolExecutor(n) as exctr:
    if kind in ("jaccl", "both"):
        payloads = list(exctr.map(get_tb, ips))
        # Map each Thunderbolt domain UUID -> (host index, RDMA interface).
        u2e = {
            ident["domainUuid"]: (i, ident["rdmaInterface"])
            for i, p in enumerate(payloads)
            for d in p
            for ident in cast(
                list[dict[str, str]],
                d.get("MacThunderboltIdentifiers", {}).get("idents", []),  # pyright: ignore[reportAny]
            )
        }
        # Resolve reported connections into directed (src host, dst host) edges,
        # keeping only links whose endpoints we identified above.
        edges = {
            (u2e[s][0], u2e[t][0]): u2e[t][1]
            for p in payloads
            for d in p
            for c in d.get("MacThunderboltConnections", {}).get("conns", [])  # pyright: ignore[reportAny]
            if (s := c["sourceUuid"]) in u2e and (t := c["sinkUuid"]) in u2e  # pyright: ignore[reportAny]
        }
        # n x n matrix: ibv_devs[i][j] is the RDMA interface from host i to j
        # (None where no direct Thunderbolt link exists).
        ibv_devs = [[edges.get((i, j)) for j in range(n)] for i in range(n)]
    else:
        ibv_devs = None
    # Only test models every host actually has.
    models = set[str].intersection(*exctr.map(get_models, ips))
    print("\n")
    print("=" * 70)
    print(f"Starting test with {models}")
    print("=" * 70)
    print("\n")
    for model in models:
        body = json.dumps(
            {"devs": devs, "model_id": model, "ibv_devs": ibv_devs, "kind": kind}
        ).encode()
        list(exctr.map(run, hosts, ips, itertools.repeat(body)))
| {
"repo_id": "exo-explore/exo",
"file_path": "tests/start_distributed_test.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/tracing.py | from __future__ import annotations
import json
import time
from collections import defaultdict
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass, field
from pathlib import Path
from typing import cast, final
from exo.shared.constants import EXO_TRACING_ENABLED
from exo.worker.runner.bootstrap import logger
# Context variable to track the current trace category for hierarchical
# nesting; trace() sets it on entry and resets it via the token on exit.
_current_category: ContextVar[str | None] = ContextVar("current_category", default=None)
@final
@dataclass(frozen=True)
class TraceEvent:
    """One completed traced span (immutable)."""

    name: str  # operation label, e.g. "recv 0" or "joint_blocks"
    start_us: int  # wall-clock start, microseconds since the epoch
    duration_us: int  # elapsed time, microseconds
    rank: int  # rank that recorded the span
    category: str  # possibly hierarchical, e.g. "sync/compute"
@final
@dataclass
class CategoryStats:
    """Running min/max/total/count aggregate of span durations (microseconds)."""

    total_us: int = 0
    count: int = 0
    min_us: int = 0
    max_us: int = 0

    def add(self, duration_us: int) -> None:
        """Fold one span duration into the aggregate."""
        if self.count:
            self.min_us = min(self.min_us, duration_us)
            self.max_us = max(self.max_us, duration_us)
        else:
            # The first sample defines both extremes.
            self.min_us = self.max_us = duration_us
        self.total_us += duration_us
        self.count += 1

    @property
    def avg_us(self) -> float:
        """Mean duration in microseconds; 0.0 when no samples were added."""
        return self.total_us / self.count if self.count else 0.0
@final
@dataclass
class TraceStats:
    """Aggregated statistics computed over a set of trace events."""

    # Earliest start to latest end across all events, in microseconds.
    total_wall_time_us: int = 0
    # Aggregates keyed by (possibly hierarchical) category name.
    by_category: dict[str, CategoryStats] = field(default_factory=dict)
    # Per-rank breakdown of the same per-category aggregates.
    by_rank: dict[int, dict[str, CategoryStats]] = field(default_factory=dict)
# Global trace buffer - each rank accumulates traces here
# (read via get_trace_buffer(), emptied via clear_trace_buffer()).
_trace_buffer: list[TraceEvent] = []
def _record_span(
    name: str, start_us: int, duration_us: int, rank: int, category: str
) -> None:
    """Append one completed span to the module-level trace buffer."""
    event = TraceEvent(
        name=name,
        start_us=start_us,
        duration_us=duration_us,
        rank=rank,
        category=category,
    )
    _trace_buffer.append(event)
@contextmanager
def trace(
    name: str,
    rank: int,
    category: str = "compute",
) -> Generator[None, None, None]:
    """Context manager to trace any operation.

    Nested traces automatically inherit the parent category, creating hierarchical
    categories like "sync/compute" or "async/comms".

    Args:
        name: Name of the operation (e.g., "recv 0", "send 1", "joint_blocks")
        rank: This rank's ID
        category: Category for grouping in trace viewer ("comm", "compute", "step")

    Example:
        with trace(f"sync {t}", rank, "sync"):
            with trace("joint_blocks", rank, "compute"):
                # Recorded with category "sync/compute"
                hidden_states = some_computation(...)

    NOTE: if the traced body raises, no span is recorded for it (the span is
    appended only after the body completes normally).
    """
    # Zero overhead beyond this check when tracing is disabled.
    if not EXO_TRACING_ENABLED:
        yield
        return
    # Combine with parent category if nested
    parent = _current_category.get()
    full_category = f"{parent}/{category}" if parent else category
    # Set as current for nested traces
    token = _current_category.set(full_category)
    try:
        # Wall-clock anchor for the trace timeline; perf_counter for the
        # duration measurement (monotonic, higher resolution).
        start_us = int(time.time() * 1_000_000)
        start_perf = time.perf_counter()
        yield
        duration_us = int((time.perf_counter() - start_perf) * 1_000_000)
        _record_span(name, start_us, duration_us, rank, full_category)
    finally:
        # Restore the parent category even if the traced body raised.
        _current_category.reset(token)
def get_trace_buffer() -> list[TraceEvent]:
    """Return a shallow copy of all spans recorded so far."""
    return _trace_buffer.copy()
def clear_trace_buffer() -> None:
    """Drop every buffered trace event."""
    del _trace_buffer[:]
def export_trace(traces: list[TraceEvent], output_path: Path) -> None:
    """Write *traces* to *output_path* in Chrome trace-event JSON format."""
    # "X" marks a complete event (start timestamp + duration).
    events: list[dict[str, object]] = [
        {
            "name": ev.name,
            "cat": ev.category,
            "ph": "X",
            "ts": ev.start_us,
            "dur": ev.duration_us,
            "pid": 0,
            "tid": ev.rank,
            "args": {"rank": ev.rank},
        }
        for ev in traces
    ]
    # One metadata ("M") event per rank so the viewer labels each row.
    events.extend(
        {
            "name": "thread_name",
            "ph": "M",  # Metadata event
            "pid": 0,
            "tid": rank,
            "args": {"name": f"Rank {rank}"},
        }
        for rank in {t.rank for t in traces}
    )
    try:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, "w") as f:
            json.dump({"traceEvents": events}, f, indent=2)
    except OSError as e:
        logger.warning("Failed to export trace to %s: %s", output_path, e)
def load_trace_file(path: Path) -> list[TraceEvent]:
    """Parse a Chrome trace-event JSON file back into TraceEvent objects.

    Tolerates missing or oddly-typed fields by falling back to 0/"" rather
    than raising; metadata ("M") events are skipped.
    """
    with open(path) as f:
        data = cast(dict[str, list[dict[str, object]]], json.load(f))
    events = data.get("traceEvents", [])
    traces: list[TraceEvent] = []
    for event in events:
        # Skip metadata events
        if event.get("ph") == "M":
            continue
        name = str(event.get("name", ""))
        category = str(event.get("cat", ""))
        ts_value = event.get("ts", 0)
        dur_value = event.get("dur", 0)
        tid_value = event.get("tid", 0)
        # Defensive narrowing: only convert values that int() accepts.
        start_us = int(ts_value) if isinstance(ts_value, (int, float, str)) else 0
        duration_us = int(dur_value) if isinstance(dur_value, (int, float, str)) else 0
        # Get rank from tid or args; args["rank"] wins when present.
        rank = int(tid_value) if isinstance(tid_value, (int, float, str)) else 0
        args = event.get("args")
        if isinstance(args, dict):
            args_dict = cast(dict[str, object], args)
            rank_from_args = args_dict.get("rank")
            if isinstance(rank_from_args, (int, float, str)):
                rank = int(rank_from_args)
        traces.append(
            TraceEvent(
                name=name,
                start_us=start_us,
                duration_us=duration_us,
                rank=rank,
                category=category,
            )
        )
    return traces
def compute_stats(traces: list[TraceEvent]) -> TraceStats:
    """Aggregate span durations by category and by (rank, category)."""
    result = TraceStats()
    if not traces:
        return result
    # Wall time spans from the earliest start to the latest end.
    result.total_wall_time_us = max(
        t.start_us + t.duration_us for t in traces
    ) - min(t.start_us for t in traces)
    per_category: dict[str, CategoryStats] = defaultdict(CategoryStats)
    per_rank: dict[int, dict[str, CategoryStats]] = defaultdict(
        lambda: defaultdict(CategoryStats)
    )
    for ev in traces:
        per_category[ev.category].add(ev.duration_us)
        per_rank[ev.rank][ev.category].add(ev.duration_us)
    # Convert the defaultdicts to plain dicts before exposing them.
    result.by_category = dict(per_category)
    result.by_rank = {rank: dict(cats) for rank, cats in per_rank.items()}
    return result
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tracing.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/adapters/chat_completions.py | """OpenAI Chat Completions API adapter for converting requests/responses."""
import time
from collections.abc import AsyncGenerator
from typing import Any
from exo.shared.types.api import (
ChatCompletionChoice,
ChatCompletionMessage,
ChatCompletionMessageText,
ChatCompletionRequest,
ChatCompletionResponse,
ErrorInfo,
ErrorResponse,
FinishReason,
Logprobs,
LogprobsContentItem,
StreamingChoiceResponse,
ToolCall,
Usage,
)
from exo.shared.types.chunks import (
ErrorChunk,
PrefillProgressChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.common import CommandId
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
def chat_request_to_text_generation(
    request: ChatCompletionRequest,
) -> TextGenerationTaskParams:
    """Translate an OpenAI ChatCompletionRequest into TextGenerationTaskParams.

    System messages are concatenated into ``instructions``; every message is
    also recorded (with content normalized to a plain string) in
    ``chat_template_messages`` so the worker can apply the model's chat
    template with tool calls preserved.
    """
    instructions: str | None = None
    input_messages: list[InputMessage] = []
    chat_template_messages: list[dict[str, Any]] = []
    for msg in request.messages:
        # Normalize content to string
        content: str
        if msg.content is None:
            content = ""
        elif isinstance(msg.content, str):
            content = msg.content
        elif isinstance(msg.content, ChatCompletionMessageText):
            content = msg.content.text
        else:
            # List of ChatCompletionMessageText
            content = "\n".join(item.text for item in msg.content)
        # Extract system message as instructions
        if msg.role == "system":
            if instructions is None:
                instructions = content
            else:
                # Append additional system messages
                instructions = f"{instructions}\n{content}"
            chat_template_messages.append({"role": "system", "content": content})
        else:
            # Skip messages with no meaningful content
            if (
                msg.content is None
                and msg.reasoning_content is None
                and msg.tool_calls is None
            ):
                continue
            if msg.role in ("user", "assistant", "developer"):
                input_messages.append(InputMessage(role=msg.role, content=content))
            # Build full message dict for chat template (preserves tool_calls etc.)
            # Normalize content for model_dump
            msg_copy = msg.model_copy(update={"content": content})
            dumped: dict[str, Any] = msg_copy.model_dump(exclude_none=True)
            chat_template_messages.append(dumped)
    return TextGenerationTaskParams(
        model=request.model,
        # Fall back to a single empty user message so input is never empty.
        input=input_messages
        if input_messages
        else [InputMessage(role="user", content="")],
        instructions=instructions,
        max_output_tokens=request.max_tokens,
        temperature=request.temperature,
        top_p=request.top_p,
        top_k=request.top_k,
        stop=request.stop,
        seed=request.seed,
        stream=request.stream,
        tools=request.tools,
        enable_thinking=request.enable_thinking,
        chat_template_messages=chat_template_messages
        if chat_template_messages
        else None,
        logprobs=request.logprobs or False,
        top_logprobs=request.top_logprobs,
    )
def chunk_to_response(
    chunk: TokenChunk, command_id: CommandId
) -> ChatCompletionResponse:
    """Convert a TokenChunk to a streaming ChatCompletionResponse."""
    # Attach token-level logprob info only when the engine produced it.
    logprobs = (
        Logprobs(
            content=[
                LogprobsContentItem(
                    token=chunk.text,
                    logprob=chunk.logprob,
                    top_logprobs=chunk.top_logprobs or [],
                )
            ]
        )
        if chunk.logprob is not None
        else None
    )
    # Thinking tokens stream via reasoning_content; normal tokens via content.
    delta_field = "reasoning_content" if chunk.is_thinking else "content"
    delta = ChatCompletionMessage(role="assistant", **{delta_field: chunk.text})
    choice = StreamingChoiceResponse(
        index=0,
        delta=delta,
        logprobs=logprobs,
        finish_reason=chunk.finish_reason,
    )
    return ChatCompletionResponse(
        id=command_id,
        created=int(time.time()),
        model=chunk.model,
        choices=[choice],
    )
async def generate_chat_stream(
    command_id: CommandId,
    chunk_stream: AsyncGenerator[
        PrefillProgressChunk | ErrorChunk | ToolCallChunk | TokenChunk, None
    ],
) -> AsyncGenerator[str, None]:
    """Generate Chat Completions API streaming events from chunks.

    Yields SSE "data: ..." lines. Errors and tool calls terminate the
    stream immediately with a trailing "[DONE]" sentinel.
    """
    # Usage arrives attached to chunks; remember the most recent for the
    # final message.
    last_usage: Usage | None = None
    async for chunk in chunk_stream:
        match chunk:
            case PrefillProgressChunk():
                # Use SSE comment so third-party clients ignore it
                yield f": prefill_progress {chunk.model_dump_json()}\n\n"
            case ErrorChunk():
                error_response = ErrorResponse(
                    error=ErrorInfo(
                        message=chunk.error_message or "Internal server error",
                        type="InternalServerError",
                        code=500,
                    )
                )
                yield f"data: {error_response.model_dump_json()}\n\n"
                yield "data: [DONE]\n\n"
                return
            case ToolCallChunk():
                last_usage = chunk.usage or last_usage
                tool_call_deltas = [
                    ToolCall(
                        id=tool.id,
                        index=i,
                        function=tool,
                    )
                    for i, tool in enumerate(chunk.tool_calls)
                ]
                tool_response = ChatCompletionResponse(
                    id=command_id,
                    created=int(time.time()),
                    model=chunk.model,
                    choices=[
                        StreamingChoiceResponse(
                            index=0,
                            delta=ChatCompletionMessage(
                                role="assistant",
                                tool_calls=tool_call_deltas,
                            ),
                            finish_reason="tool_calls",
                        )
                    ],
                    usage=last_usage,
                )
                yield f"data: {tool_response.model_dump_json()}\n\n"
                yield "data: [DONE]\n\n"
                return
            case TokenChunk():
                last_usage = chunk.usage or last_usage
                chunk_response = chunk_to_response(chunk, command_id)
                # Only the final chunk carries the usage totals.
                if chunk.finish_reason is not None:
                    chunk_response = chunk_response.model_copy(
                        update={"usage": last_usage}
                    )
                yield f"data: {chunk_response.model_dump_json()}\n\n"
                if chunk.finish_reason is not None:
                    yield "data: [DONE]\n\n"
async def collect_chat_response(
    command_id: CommandId,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str]:
    # This is an AsyncGenerator[str] rather than returning a ChatCompletionReponse because
    # FastAPI handles the cancellation better but wouldn't auto-serialize for some reason
    """Collect all token chunks and return a single ChatCompletionResponse.

    Yields exactly one JSON-serialized response; raises ValueError if the
    stream produced an ErrorChunk.
    """
    text_parts: list[str] = []
    thinking_parts: list[str] = []
    tool_calls: list[ToolCall] = []
    logprobs_content: list[LogprobsContentItem] = []
    model: str | None = None
    finish_reason: FinishReason | None = None
    error_message: str | None = None
    last_usage: Usage | None = None
    async for chunk in chunk_stream:
        match chunk:
            case PrefillProgressChunk():
                continue
            case ErrorChunk():
                error_message = chunk.error_message or "Internal server error"
                break
            case TokenChunk():
                if model is None:
                    model = chunk.model
                last_usage = chunk.usage or last_usage
                # Thinking and visible text accumulate into separate fields.
                if chunk.is_thinking:
                    thinking_parts.append(chunk.text)
                else:
                    text_parts.append(chunk.text)
                if chunk.logprob is not None:
                    logprobs_content.append(
                        LogprobsContentItem(
                            token=chunk.text,
                            logprob=chunk.logprob,
                            top_logprobs=chunk.top_logprobs or [],
                        )
                    )
                if chunk.finish_reason is not None:
                    finish_reason = chunk.finish_reason
            case ToolCallChunk():
                if model is None:
                    model = chunk.model
                last_usage = chunk.usage or last_usage
                tool_calls.extend(
                    ToolCall(
                        id=tool.id,
                        index=i,
                        function=tool,
                    )
                    for i, tool in enumerate(chunk.tool_calls)
                )
                finish_reason = chunk.finish_reason
    if error_message is not None:
        raise ValueError(error_message)
    combined_text = "".join(text_parts)
    combined_thinking = "".join(thinking_parts) if thinking_parts else None
    # At least one Token/ToolCall chunk must have arrived to set the model.
    assert model is not None
    yield ChatCompletionResponse(
        id=command_id,
        created=int(time.time()),
        model=model,
        choices=[
            ChatCompletionChoice(
                index=0,
                message=ChatCompletionMessage(
                    role="assistant",
                    content=combined_text,
                    reasoning_content=combined_thinking,
                    tool_calls=tool_calls if tool_calls else None,
                ),
                logprobs=Logprobs(content=logprobs_content)
                if logprobs_content
                else None,
                finish_reason=finish_reason,
            )
        ],
        usage=last_usage,
    ).model_dump_json()
    return
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/adapters/chat_completions.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/adapters/claude.py | """Claude Messages API adapter for converting requests/responses."""
import json
import re
from collections.abc import AsyncGenerator
from typing import Any
from exo.shared.types.api import FinishReason, Usage
from exo.shared.types.chunks import (
ErrorChunk,
PrefillProgressChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.claude_api import (
ClaudeContentBlock,
ClaudeContentBlockDeltaEvent,
ClaudeContentBlockStartEvent,
ClaudeContentBlockStopEvent,
ClaudeInputJsonDelta,
ClaudeMessageDelta,
ClaudeMessageDeltaEvent,
ClaudeMessageDeltaUsage,
ClaudeMessagesRequest,
ClaudeMessagesResponse,
ClaudeMessageStart,
ClaudeMessageStartEvent,
ClaudeMessageStopEvent,
ClaudeStopReason,
ClaudeTextBlock,
ClaudeTextDelta,
ClaudeThinkingBlock,
ClaudeThinkingDelta,
ClaudeToolResultBlock,
ClaudeToolUseBlock,
ClaudeUsage,
)
from exo.shared.types.common import CommandId
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
def finish_reason_to_claude_stop_reason(
finish_reason: FinishReason | None,
) -> ClaudeStopReason | None:
"""Map OpenAI finish_reason to Claude stop_reason."""
if finish_reason is None:
return None
mapping: dict[FinishReason, ClaudeStopReason] = {
"stop": "end_turn",
"length": "max_tokens",
"tool_calls": "tool_use",
"content_filter": "end_turn",
"function_call": "tool_use",
}
return mapping.get(finish_reason, "end_turn")
def _extract_tool_result_text(block: ClaudeToolResultBlock) -> str:
"""Extract plain text from a tool_result content field."""
if block.content is None:
return ""
if isinstance(block.content, str):
return block.content
return "".join(sub_block.text for sub_block in block.content)
# Matches "x-anthropic-billing-header: ...;" (with optional trailing newline)
# or similar telemetry headers that change every request and break KV prefix caching.
_VOLATILE_HEADER_RE = re.compile(r"^x-anthropic-[^\n]*;\n?", re.MULTILINE)
def _strip_volatile_headers(text: str) -> str:
    """Remove Anthropic billing/telemetry headers from system prompt text.

    Claude Code prepends headers like 'x-anthropic-billing-header: cc_version=...;
    cc_entrypoint=...; cch=...;' that contain per-request content hashes. These
    change every request and break KV prefix caching (the prefix diverges at ~20
    tokens instead of matching thousands of conversation tokens).
    """
    cleaned = _VOLATILE_HEADER_RE.sub("", text)
    return cleaned
def claude_request_to_text_generation(
    request: ClaudeMessagesRequest,
) -> TextGenerationTaskParams:
    """Translate a Claude Messages API request into TextGenerationTaskParams.

    System prompts become ``instructions`` (with volatile telemetry headers
    stripped); content blocks are flattened into text/thinking/tool parts;
    Claude tool definitions are converted to OpenAI-style function tools.
    """
    # Handle system message
    instructions: str | None = None
    chat_template_messages: list[dict[str, Any]] = []
    if request.system:
        if isinstance(request.system, str):
            instructions = request.system
        else:
            instructions = "".join(block.text for block in request.system)
        # Strip per-request billing headers so KV prefix caching still hits.
        instructions = _strip_volatile_headers(instructions)
        chat_template_messages.append({"role": "system", "content": instructions})
    # Convert messages to input
    input_messages: list[InputMessage] = []
    for msg in request.messages:
        if isinstance(msg.content, str):
            input_messages.append(InputMessage(role=msg.role, content=msg.content))
            chat_template_messages.append({"role": msg.role, "content": msg.content})
            continue
        # Process structured content blocks
        text_parts: list[str] = []
        thinking_parts: list[str] = []
        tool_calls: list[dict[str, Any]] = []
        tool_results: list[ClaudeToolResultBlock] = []
        for block in msg.content:
            if isinstance(block, ClaudeTextBlock):
                text_parts.append(block.text)
            elif isinstance(block, ClaudeThinkingBlock):
                thinking_parts.append(block.thinking)
            elif isinstance(block, ClaudeToolUseBlock):
                # Re-encode as an OpenAI-style tool call.
                tool_calls.append(
                    {
                        "id": block.id,
                        "type": "function",
                        "function": {
                            "name": block.name,
                            "arguments": json.dumps(block.input),
                        },
                    }
                )
            elif isinstance(block, ClaudeToolResultBlock):
                tool_results.append(block)
        content = "".join(text_parts)
        reasoning_content = "".join(thinking_parts) if thinking_parts else None
        # Build InputMessage from text content
        if msg.role in ("user", "assistant"):
            input_messages.append(InputMessage(role=msg.role, content=content))
        # Build chat_template_messages preserving tool structure
        if tool_calls:
            chat_msg: dict[str, Any] = {
                "role": "assistant",
                "content": content,
                "tool_calls": tool_calls,
            }
            if reasoning_content:
                chat_msg["reasoning_content"] = reasoning_content
            chat_template_messages.append(chat_msg)
        elif tool_results:
            # Each tool result becomes its own "tool" role message.
            for tr in tool_results:
                chat_template_messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tr.tool_use_id,
                        "content": _extract_tool_result_text(tr),
                    }
                )
        else:
            chat_msg = {"role": msg.role, "content": content}
            if reasoning_content:
                chat_msg["reasoning_content"] = reasoning_content
            chat_template_messages.append(chat_msg)
    # Convert Claude tool definitions to OpenAI-style function tools
    tools: list[dict[str, Any]] | None = None
    if request.tools:
        tools = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description or "",
                    "parameters": tool.input_schema,
                },
            }
            for tool in request.tools
        ]
    enable_thinking: bool | None = None
    if request.thinking is not None:
        enable_thinking = request.thinking.type in ("enabled", "adaptive")
    return TextGenerationTaskParams(
        model=request.model,
        # Fall back to a single empty user message so input is never empty.
        input=input_messages
        if input_messages
        else [InputMessage(role="user", content="")],
        instructions=instructions,
        max_output_tokens=request.max_tokens,
        temperature=request.temperature,
        top_p=request.top_p,
        top_k=request.top_k,
        stop=request.stop_sequences,
        stream=request.stream,
        tools=tools,
        enable_thinking=enable_thinking,
        chat_template_messages=chat_template_messages
        if chat_template_messages
        else None,
    )
async def collect_claude_response(
    command_id: CommandId,
    model: str,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str]:
    # This is an AsyncGenerator[str] rather than returning a ChatCompletionReponse because
    # FastAPI handles the cancellation better but wouldn't auto-serialize for some reason
    """Collect all token chunks and return a single ClaudeMessagesResponse.

    Yields exactly one JSON-serialized response; raises ValueError if the
    stream produced an ErrorChunk.
    """
    text_parts: list[str] = []
    thinking_parts: list[str] = []
    tool_use_blocks: list[ClaudeToolUseBlock] = []
    stop_reason: ClaudeStopReason | None = None
    last_usage: Usage | None = None
    error_message: str | None = None
    async for chunk in chunk_stream:
        if isinstance(chunk, PrefillProgressChunk):
            continue
        if isinstance(chunk, ErrorChunk):
            error_message = chunk.error_message or "Internal server error"
            break
        last_usage = chunk.usage or last_usage
        if isinstance(chunk, ToolCallChunk):
            for tool in chunk.tool_calls:
                tool_use_blocks.append(
                    ClaudeToolUseBlock(
                        # Claude-style tool-use ids are prefixed with "toolu_".
                        id=f"toolu_{tool.id}",
                        name=tool.name,
                        input=json.loads(tool.arguments),  # pyright: ignore[reportAny]
                    )
                )
            stop_reason = "tool_use"
            continue
        # Remaining case: TokenChunk.
        if chunk.is_thinking:
            thinking_parts.append(chunk.text)
        else:
            text_parts.append(chunk.text)
        if chunk.finish_reason is not None:
            stop_reason = finish_reason_to_claude_stop_reason(chunk.finish_reason)
    if error_message is not None:
        raise ValueError(error_message)
    combined_text = "".join(text_parts)
    combined_thinking = "".join(thinking_parts)
    # Build content blocks: thinking first, then text, then tool uses.
    content: list[ClaudeContentBlock] = []
    if combined_thinking:
        content.append(ClaudeThinkingBlock(thinking=combined_thinking))
    if combined_text:
        content.append(ClaudeTextBlock(text=combined_text))
    content.extend(tool_use_blocks)
    # If no content at all, include empty text block
    if not content:
        content.append(ClaudeTextBlock(text=""))
    # Use actual usage data if available
    input_tokens = last_usage.prompt_tokens if last_usage else 0
    output_tokens = last_usage.completion_tokens if last_usage else 0
    yield ClaudeMessagesResponse(
        id=f"msg_{command_id}",
        model=model,
        content=content,
        stop_reason=stop_reason,
        usage=ClaudeUsage(
            input_tokens=input_tokens,
            output_tokens=output_tokens,
        ),
    ).model_dump_json()
    return
async def generate_claude_stream(
    command_id: CommandId,
    model: str,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str, None]:
    """Generate Claude Messages API streaming events from TokenChunks.

    Translates the internal chunk stream into Anthropic-style SSE frames:
    an initial ``message_start``, then ``content_block_start`` /
    ``content_block_delta`` / ``content_block_stop`` triples for thinking,
    text and tool_use blocks (opened lazily, with increasing indices), and
    finally ``message_delta`` plus ``message_stop``.

    Args:
        command_id: Internal command id; surfaced to clients as ``msg_<id>``.
        model: Model name echoed in the initial message snapshot.
        chunk_stream: Async stream of token / tool-call / error / prefill chunks.

    Yields:
        SSE-formatted strings of the form ``event: <type>\\ndata: <json>\\n\\n``.
    """
    # Initial message_start event
    initial_message = ClaudeMessageStart(
        id=f"msg_{command_id}",
        model=model,
        content=[],
        stop_reason=None,
        usage=ClaudeUsage(input_tokens=0, output_tokens=0),
    )
    start_event = ClaudeMessageStartEvent(message=initial_message)
    yield f"event: message_start\ndata: {start_event.model_dump_json()}\n\n"
    output_tokens = 0
    stop_reason: ClaudeStopReason | None = None
    last_usage: Usage | None = None
    # Content blocks receive monotonically increasing indices as they open.
    next_block_index = 0
    # Track whether we've started thinking/text blocks
    thinking_block_started = False
    thinking_block_index = -1
    text_block_started = False
    text_block_index = -1
    async for chunk in chunk_stream:
        if isinstance(chunk, PrefillProgressChunk):
            # Prefill progress is internal-only; Claude clients never see it.
            continue
        if isinstance(chunk, ErrorChunk):
            # Close text block and bail (open blocks are stopped after the loop;
            # no explicit error event is emitted to the client).
            break
        last_usage = chunk.usage or last_usage
        if isinstance(chunk, ToolCallChunk):
            stop_reason = "tool_use"
            # Emit tool_use content blocks: each tool call becomes a complete
            # start / input_json_delta / stop triple at its own index.
            for tool in chunk.tool_calls:
                tool_id = f"toolu_{tool.id}"
                tool_input_json = tool.arguments
                # content_block_start for tool_use (input starts empty; the
                # arguments arrive via the input_json_delta below)
                tool_block_start = ClaudeContentBlockStartEvent(
                    index=next_block_index,
                    content_block=ClaudeToolUseBlock(
                        id=tool_id, name=tool.name, input={}
                    ),
                )
                yield f"event: content_block_start\ndata: {tool_block_start.model_dump_json()}\n\n"
                # content_block_delta with input_json_delta
                tool_delta_event = ClaudeContentBlockDeltaEvent(
                    index=next_block_index,
                    delta=ClaudeInputJsonDelta(partial_json=tool_input_json),
                )
                yield f"event: content_block_delta\ndata: {tool_delta_event.model_dump_json()}\n\n"
                # content_block_stop
                tool_block_stop = ClaudeContentBlockStopEvent(index=next_block_index)
                yield f"event: content_block_stop\ndata: {tool_block_stop.model_dump_json()}\n\n"
                next_block_index += 1
            continue
        output_tokens += 1  # Count each chunk as one token (approximate fallback)
        if chunk.is_thinking:
            # Start thinking block on first thinking token
            if not thinking_block_started:
                thinking_block_started = True
                thinking_block_index = next_block_index
                next_block_index += 1
                block_start = ClaudeContentBlockStartEvent(
                    index=thinking_block_index,
                    content_block=ClaudeThinkingBlock(thinking=""),
                )
                yield f"event: content_block_start\ndata: {block_start.model_dump_json()}\n\n"
            delta_event = ClaudeContentBlockDeltaEvent(
                index=thinking_block_index,
                delta=ClaudeThinkingDelta(thinking=chunk.text),
            )
            yield f"event: content_block_delta\ndata: {delta_event.model_dump_json()}\n\n"
        else:
            # Close thinking block when transitioning to text
            # (text_block_index == -1 means no text block has opened yet)
            if thinking_block_started and text_block_index == -1:
                block_stop = ClaudeContentBlockStopEvent(index=thinking_block_index)
                yield f"event: content_block_stop\ndata: {block_stop.model_dump_json()}\n\n"
            # Start text block on first text token
            if not text_block_started:
                text_block_started = True
                text_block_index = next_block_index
                next_block_index += 1
                block_start = ClaudeContentBlockStartEvent(
                    index=text_block_index,
                    content_block=ClaudeTextBlock(text=""),
                )
                yield f"event: content_block_start\ndata: {block_start.model_dump_json()}\n\n"
            delta_event = ClaudeContentBlockDeltaEvent(
                index=text_block_index,
                delta=ClaudeTextDelta(text=chunk.text),
            )
            yield f"event: content_block_delta\ndata: {delta_event.model_dump_json()}\n\n"
        if chunk.finish_reason is not None:
            stop_reason = finish_reason_to_claude_stop_reason(chunk.finish_reason)
    # Use actual token count from usage if available (overrides the
    # per-chunk approximation accumulated above)
    if last_usage is not None:
        output_tokens = last_usage.completion_tokens
    # Close any open blocks
    if thinking_block_started and text_block_index == -1:
        block_stop = ClaudeContentBlockStopEvent(index=thinking_block_index)
        yield f"event: content_block_stop\ndata: {block_stop.model_dump_json()}\n\n"
    if text_block_started:
        block_stop = ClaudeContentBlockStopEvent(index=text_block_index)
        yield f"event: content_block_stop\ndata: {block_stop.model_dump_json()}\n\n"
    # Tool-only / empty streams still need one (empty) text block so the
    # message has content.
    if not thinking_block_started and not text_block_started:
        empty_start = ClaudeContentBlockStartEvent(
            index=0, content_block=ClaudeTextBlock(text="")
        )
        yield f"event: content_block_start\ndata: {empty_start.model_dump_json()}\n\n"
        empty_stop = ClaudeContentBlockStopEvent(index=0)
        yield f"event: content_block_stop\ndata: {empty_stop.model_dump_json()}\n\n"
    # message_delta
    message_delta = ClaudeMessageDeltaEvent(
        delta=ClaudeMessageDelta(stop_reason=stop_reason),
        usage=ClaudeMessageDeltaUsage(output_tokens=output_tokens),
    )
    yield f"event: message_delta\ndata: {message_delta.model_dump_json()}\n\n"
    # message_stop
    message_stop = ClaudeMessageStopEvent()
    yield f"event: message_stop\ndata: {message_stop.model_dump_json()}\n\n"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/adapters/claude.py",
"license": "Apache License 2.0",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/adapters/responses.py | """OpenAI Responses API adapter for converting requests/responses."""
from collections.abc import AsyncGenerator
from itertools import count
from typing import Any
from exo.shared.types.api import Usage
from exo.shared.types.chunks import (
ErrorChunk,
PrefillProgressChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.common import CommandId
from exo.shared.types.openai_responses import (
FunctionCallInputItem,
ResponseCompletedEvent,
ResponseContentPart,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseFunctionCallArgumentsDeltaEvent,
ResponseFunctionCallArgumentsDoneEvent,
ResponseFunctionCallItem,
ResponseInProgressEvent,
ResponseInputMessage,
ResponseItem,
ResponseMessageItem,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseOutputText,
ResponseReasoningItem,
ResponseReasoningSummaryPartAddedEvent,
ResponseReasoningSummaryPartDoneEvent,
ResponseReasoningSummaryText,
ResponseReasoningSummaryTextDeltaEvent,
ResponseReasoningSummaryTextDoneEvent,
ResponsesRequest,
ResponsesResponse,
ResponsesStreamEvent,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
ResponseUsage,
)
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
def _format_sse(event: ResponsesStreamEvent) -> str:
"""Format a streaming event as an SSE message."""
return f"event: {event.type}\ndata: {event.model_dump_json()}\n\n"
def _extract_content(content: str | list[ResponseContentPart]) -> str:
"""Extract plain text from a content field that may be a string or list of parts."""
if isinstance(content, str):
return content
return "".join(part.text for part in content)
def responses_request_to_text_generation(
    request: ResponsesRequest,
) -> TextGenerationTaskParams:
    """Convert an OpenAI Responses API request into internal task params.

    A plain-string ``input`` becomes a single user message. For structured
    input, two parallel views are built: ``input_messages`` keeps only
    user/assistant/developer messages, while ``chat_template_messages``
    preserves every item verbatim — system messages, assistant tool calls,
    and tool outputs — for chat templates that need the full conversation.

    Args:
        request: Parsed Responses API request body.

    Returns:
        TextGenerationTaskParams with the converted input and the sampling
        parameters copied through unchanged.
    """
    input_value: list[InputMessage]
    built_chat_template: list[dict[str, Any]] | None = None
    if isinstance(request.input, str):
        input_value = [InputMessage(role="user", content=request.input)]
    else:
        input_messages: list[InputMessage] = []
        chat_template_messages: list[dict[str, Any]] = []
        if request.instructions is not None:
            chat_template_messages.append(
                {"role": "system", "content": request.instructions}
            )
        for item in request.input:
            if isinstance(item, ResponseInputMessage):
                content = _extract_content(item.content)
                if item.role in ("user", "assistant", "developer"):
                    input_messages.append(InputMessage(role=item.role, content=content))
                # Every message role is passed through to the template verbatim.
                # (A previous `if item.role == "system"` branch built the exact
                # same dict as its `else` arm, so the split was dead code.)
                chat_template_messages.append(
                    {"role": item.role, "content": content}
                )
            elif isinstance(item, FunctionCallInputItem):
                # Replay a prior assistant tool call in OpenAI chat format.
                chat_template_messages.append(
                    {
                        "role": "assistant",
                        "content": "",
                        "tool_calls": [
                            {
                                "id": item.call_id,
                                "type": "function",
                                "function": {
                                    "name": item.name,
                                    "arguments": item.arguments,
                                },
                            }
                        ],
                    }
                )
            else:
                # Assumed to be the function-call *output* item (exposes
                # .call_id/.output) — verify against the input-item union.
                chat_template_messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": item.call_id,
                        "content": item.output,
                    }
                )
        input_value = (
            input_messages
            if input_messages
            else [InputMessage(role="user", content="")]
        )
        built_chat_template = chat_template_messages if chat_template_messages else None
    return TextGenerationTaskParams(
        model=request.model,
        input=input_value,
        instructions=request.instructions,
        max_output_tokens=request.max_output_tokens,
        temperature=request.temperature,
        top_p=request.top_p,
        stream=request.stream,
        tools=request.tools,
        top_k=request.top_k,
        stop=request.stop,
        seed=request.seed,
        chat_template_messages=built_chat_template or request.chat_template_messages,
    )
async def collect_responses_response(
    command_id: CommandId,
    model: str,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str]:
    # Yielded as a single JSON string rather than returning a response model:
    # FastAPI handles cancellation better for generators and would not
    # auto-serialize the model reliably.
    """Drain the chunk stream and yield one complete ResponsesResponse as JSON."""
    text_segments: list[str] = []
    reasoning_segments: list[str] = []
    tool_items: list[ResponseFunctionCallItem] = []
    usage_seen: Usage | None = None
    failure: str | None = None
    async for chunk in chunk_stream:
        if isinstance(chunk, PrefillProgressChunk):
            continue
        if isinstance(chunk, ErrorChunk):
            failure = chunk.error_message or "Internal server error"
            break
        usage_seen = chunk.usage or usage_seen
        if isinstance(chunk, ToolCallChunk):
            tool_items.extend(
                ResponseFunctionCallItem(
                    id=tool.id,
                    call_id=tool.id,
                    name=tool.name,
                    arguments=tool.arguments,
                )
                for tool in chunk.tool_calls
            )
        elif chunk.is_thinking:
            reasoning_segments.append(chunk.text)
        else:
            text_segments.append(chunk.text)
    if failure is not None:
        raise ValueError(failure)
    # Translate internal usage counters into API usage, when reported.
    usage = None
    if usage_seen is not None:
        usage = ResponseUsage(
            input_tokens=usage_seen.prompt_tokens,
            output_tokens=usage_seen.completion_tokens,
            total_tokens=usage_seen.total_tokens,
        )
    final_text = "".join(text_segments)
    # Output order: optional reasoning item, then the message, then tool calls.
    output: list[ResponseItem] = []
    if reasoning_segments:
        output.append(
            ResponseReasoningItem(
                id=f"rs_{command_id}",
                summary=[
                    ResponseReasoningSummaryText(text="".join(reasoning_segments))
                ],
            )
        )
    output.append(
        ResponseMessageItem(
            id=f"item_{command_id}",
            content=[ResponseOutputText(text=final_text)],
            status="completed",
        )
    )
    output.extend(tool_items)
    yield ResponsesResponse(
        id=f"resp_{command_id}",
        model=model,
        status="completed",
        output=output,
        output_text=final_text,
        usage=usage,
    ).model_dump_json()
async def generate_responses_stream(
    command_id: CommandId,
    model: str,
    chunk_stream: AsyncGenerator[
        ErrorChunk | ToolCallChunk | TokenChunk | PrefillProgressChunk, None
    ],
) -> AsyncGenerator[str, None]:
    """Generate OpenAI Responses API streaming events from TokenChunks.

    Emits the full Responses SSE sequence: ``response.created`` and
    ``response.in_progress`` first, then reasoning / message / function-call
    output items as chunks arrive (each opened lazily at the next output
    index), and finally ``response.completed`` with the aggregated output
    and usage.

    Args:
        command_id: Internal command id; used to derive the resp_/item_/rs_ ids.
        model: Model name echoed in every ResponsesResponse snapshot.
        chunk_stream: Async stream of token / tool-call / error / prefill chunks.

    Yields:
        SSE-formatted strings, one per Responses streaming event.
    """
    response_id = f"resp_{command_id}"
    item_id = f"item_{command_id}"
    reasoning_id = f"rs_{command_id}"
    # Every emitted event carries a monotonically increasing sequence number.
    seq = count(1)
    # response.created
    initial_response = ResponsesResponse(
        id=response_id,
        model=model,
        status="in_progress",
        output=[],
        output_text="",
    )
    created_event = ResponseCreatedEvent(
        sequence_number=next(seq), response=initial_response
    )
    yield _format_sse(created_event)
    # response.in_progress
    in_progress_event = ResponseInProgressEvent(
        sequence_number=next(seq), response=initial_response
    )
    yield _format_sse(in_progress_event)
    accumulated_text = ""
    accumulated_thinking = ""
    function_call_items: list[ResponseFunctionCallItem] = []
    last_usage: Usage | None = None
    next_output_index = 0
    # Track dynamic block creation
    reasoning_started = False
    reasoning_output_index = -1
    message_started = False
    message_output_index = -1
    async for chunk in chunk_stream:
        if isinstance(chunk, PrefillProgressChunk):
            # Internal progress marker; not part of the Responses API.
            continue
        if isinstance(chunk, ErrorChunk):
            # NOTE(review): an error ends the stream silently — open blocks are
            # still closed below, but no error event is emitted to the client.
            break
        last_usage = chunk.usage or last_usage
        if isinstance(chunk, ToolCallChunk):
            # Each tool call becomes a complete added/delta/done item triple.
            for tool in chunk.tool_calls:
                fc_id = f"fc_{tool.id}"
                call_id = f"call_{tool.id}"
                # response.output_item.added for function_call
                fc_item = ResponseFunctionCallItem(
                    id=fc_id,
                    call_id=call_id,
                    name=tool.name,
                    arguments="",
                    status="in_progress",
                )
                fc_added = ResponseOutputItemAddedEvent(
                    sequence_number=next(seq),
                    output_index=next_output_index,
                    item=fc_item,
                )
                yield _format_sse(fc_added)
                # response.function_call_arguments.delta
                args_delta = ResponseFunctionCallArgumentsDeltaEvent(
                    sequence_number=next(seq),
                    item_id=fc_id,
                    output_index=next_output_index,
                    delta=tool.arguments,
                )
                yield _format_sse(args_delta)
                # response.function_call_arguments.done
                args_done = ResponseFunctionCallArgumentsDoneEvent(
                    sequence_number=next(seq),
                    item_id=fc_id,
                    output_index=next_output_index,
                    name=tool.name,
                    arguments=tool.arguments,
                )
                yield _format_sse(args_done)
                # response.output_item.done
                fc_done_item = ResponseFunctionCallItem(
                    id=fc_id,
                    call_id=call_id,
                    name=tool.name,
                    arguments=tool.arguments,
                    status="completed",
                )
                fc_item_done = ResponseOutputItemDoneEvent(
                    sequence_number=next(seq),
                    output_index=next_output_index,
                    item=fc_done_item,
                )
                yield _format_sse(fc_item_done)
                function_call_items.append(fc_done_item)
                next_output_index += 1
            continue
        if chunk.is_thinking:
            # Start reasoning block on first thinking token
            if not reasoning_started:
                reasoning_started = True
                reasoning_output_index = next_output_index
                next_output_index += 1
                # response.output_item.added for reasoning
                reasoning_item = ResponseReasoningItem(
                    id=reasoning_id,
                    summary=[],
                    status="in_progress",
                )
                rs_added = ResponseOutputItemAddedEvent(
                    sequence_number=next(seq),
                    output_index=reasoning_output_index,
                    item=reasoning_item,
                )
                yield _format_sse(rs_added)
                # response.reasoning_summary_part.added
                part_added = ResponseReasoningSummaryPartAddedEvent(
                    sequence_number=next(seq),
                    item_id=reasoning_id,
                    output_index=reasoning_output_index,
                    summary_index=0,
                    part=ResponseReasoningSummaryText(text=""),
                )
                yield _format_sse(part_added)
            accumulated_thinking += chunk.text
            # response.reasoning_summary_text.delta
            rs_delta = ResponseReasoningSummaryTextDeltaEvent(
                sequence_number=next(seq),
                item_id=reasoning_id,
                output_index=reasoning_output_index,
                summary_index=0,
                delta=chunk.text,
            )
            yield _format_sse(rs_delta)
            continue
        # Close reasoning block when transitioning to text
        if reasoning_started and not message_started:
            # response.reasoning_summary_text.done
            rs_text_done = ResponseReasoningSummaryTextDoneEvent(
                sequence_number=next(seq),
                item_id=reasoning_id,
                output_index=reasoning_output_index,
                summary_index=0,
                text=accumulated_thinking,
            )
            yield _format_sse(rs_text_done)
            # response.reasoning_summary_part.done
            rs_part_done = ResponseReasoningSummaryPartDoneEvent(
                sequence_number=next(seq),
                item_id=reasoning_id,
                output_index=reasoning_output_index,
                summary_index=0,
                part=ResponseReasoningSummaryText(text=accumulated_thinking),
            )
            yield _format_sse(rs_part_done)
            # response.output_item.done for reasoning
            rs_item_done = ResponseOutputItemDoneEvent(
                sequence_number=next(seq),
                output_index=reasoning_output_index,
                item=ResponseReasoningItem(
                    id=reasoning_id,
                    summary=[ResponseReasoningSummaryText(text=accumulated_thinking)],
                ),
            )
            yield _format_sse(rs_item_done)
        # Start message block on first text token
        if not message_started:
            message_started = True
            message_output_index = next_output_index
            next_output_index += 1
            initial_item = ResponseMessageItem(
                id=item_id,
                content=[ResponseOutputText(text="")],
                status="in_progress",
            )
            item_added = ResponseOutputItemAddedEvent(
                sequence_number=next(seq),
                output_index=message_output_index,
                item=initial_item,
            )
            yield _format_sse(item_added)
            initial_part = ResponseOutputText(text="")
            part_added = ResponseContentPartAddedEvent(
                sequence_number=next(seq),
                item_id=item_id,
                output_index=message_output_index,
                content_index=0,
                part=initial_part,
            )
            yield _format_sse(part_added)
        accumulated_text += chunk.text
        # response.output_text.delta
        delta_event = ResponseTextDeltaEvent(
            sequence_number=next(seq),
            item_id=item_id,
            output_index=message_output_index,
            content_index=0,
            delta=chunk.text,
        )
        yield _format_sse(delta_event)
    # Close reasoning block if it was never followed by text
    if reasoning_started and not message_started:
        rs_text_done = ResponseReasoningSummaryTextDoneEvent(
            sequence_number=next(seq),
            item_id=reasoning_id,
            output_index=reasoning_output_index,
            summary_index=0,
            text=accumulated_thinking,
        )
        yield _format_sse(rs_text_done)
        rs_part_done = ResponseReasoningSummaryPartDoneEvent(
            sequence_number=next(seq),
            item_id=reasoning_id,
            output_index=reasoning_output_index,
            summary_index=0,
            part=ResponseReasoningSummaryText(text=accumulated_thinking),
        )
        yield _format_sse(rs_part_done)
        rs_item_done = ResponseOutputItemDoneEvent(
            sequence_number=next(seq),
            output_index=reasoning_output_index,
            item=ResponseReasoningItem(
                id=reasoning_id,
                summary=[ResponseReasoningSummaryText(text=accumulated_thinking)],
            ),
        )
        yield _format_sse(rs_item_done)
    # If no message block was started, create one now (empty text) so the
    # response always carries a message item.
    if not message_started:
        message_output_index = next_output_index
        next_output_index += 1
        initial_item = ResponseMessageItem(
            id=item_id,
            content=[ResponseOutputText(text="")],
            status="in_progress",
        )
        item_added = ResponseOutputItemAddedEvent(
            sequence_number=next(seq),
            output_index=message_output_index,
            item=initial_item,
        )
        yield _format_sse(item_added)
        initial_part = ResponseOutputText(text="")
        part_added_evt = ResponseContentPartAddedEvent(
            sequence_number=next(seq),
            item_id=item_id,
            output_index=message_output_index,
            content_index=0,
            part=initial_part,
        )
        yield _format_sse(part_added_evt)
    # response.output_text.done
    text_done = ResponseTextDoneEvent(
        sequence_number=next(seq),
        item_id=item_id,
        output_index=message_output_index,
        content_index=0,
        text=accumulated_text,
    )
    yield _format_sse(text_done)
    # response.content_part.done
    final_part = ResponseOutputText(text=accumulated_text)
    part_done = ResponseContentPartDoneEvent(
        sequence_number=next(seq),
        item_id=item_id,
        output_index=message_output_index,
        content_index=0,
        part=final_part,
    )
    yield _format_sse(part_done)
    # response.output_item.done
    final_message_item = ResponseMessageItem(
        id=item_id,
        content=[ResponseOutputText(text=accumulated_text)],
        status="completed",
    )
    item_done = ResponseOutputItemDoneEvent(
        sequence_number=next(seq),
        output_index=message_output_index,
        item=final_message_item,
    )
    yield _format_sse(item_done)
    # Create usage from usage data if available
    usage = None
    if last_usage is not None:
        usage = ResponseUsage(
            input_tokens=last_usage.prompt_tokens,
            output_tokens=last_usage.completion_tokens,
            total_tokens=last_usage.total_tokens,
        )
    # response.completed — final snapshot: reasoning first, then the message,
    # then any completed function-call items.
    output: list[ResponseItem] = []
    if reasoning_started:
        output.append(
            ResponseReasoningItem(
                id=reasoning_id,
                summary=[ResponseReasoningSummaryText(text=accumulated_thinking)],
            )
        )
    output.append(final_message_item)
    output.extend(function_call_items)
    final_response = ResponsesResponse(
        id=response_id,
        model=model,
        status="completed",
        output=output,
        output_text=accumulated_text,
        usage=usage,
    )
    completed_event = ResponseCompletedEvent(
        sequence_number=next(seq), response=final_response
    )
    yield _format_sse(completed_event)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/adapters/responses.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/tests/test_claude_api.py | """Tests for Claude Messages API conversion functions and types."""
import pydantic
import pytest
from exo.master.adapters.claude import (
claude_request_to_text_generation,
finish_reason_to_claude_stop_reason,
)
from exo.shared.types.claude_api import (
ClaudeMessage,
ClaudeMessagesRequest,
ClaudeTextBlock,
)
from exo.shared.types.common import ModelId
class TestFinishReasonToClaudeStopReason:
    """Tests for finish_reason to Claude stop_reason mapping.

    Covers every finish_reason value the adapter maps, including the
    ``None`` pass-through.
    """

    def test_stop_maps_to_end_turn(self):
        assert finish_reason_to_claude_stop_reason("stop") == "end_turn"

    def test_length_maps_to_max_tokens(self):
        assert finish_reason_to_claude_stop_reason("length") == "max_tokens"

    # Both OpenAI-style tool finish reasons collapse to Claude's "tool_use".
    def test_tool_calls_maps_to_tool_use(self):
        assert finish_reason_to_claude_stop_reason("tool_calls") == "tool_use"

    def test_function_call_maps_to_tool_use(self):
        assert finish_reason_to_claude_stop_reason("function_call") == "tool_use"

    # Claude has no dedicated content-filter stop reason; falls back to end_turn.
    def test_content_filter_maps_to_end_turn(self):
        assert finish_reason_to_claude_stop_reason("content_filter") == "end_turn"

    def test_none_returns_none(self):
        assert finish_reason_to_claude_stop_reason(None) is None
class TestClaudeRequestToInternal:
    """Tests for converting Claude Messages API requests to TextGenerationTaskParams."""

    def test_basic_request_conversion(self):
        # Minimal request: model + max_tokens + a single user message.
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            messages=[
                ClaudeMessage(role="user", content="Hello"),
            ],
        )
        params = claude_request_to_text_generation(request)
        assert params.model == "claude-3-opus"
        assert params.max_output_tokens == 100
        assert isinstance(params.input, list)
        assert len(params.input) == 1
        assert params.input[0].role == "user"
        assert params.input[0].content == "Hello"
        assert params.instructions is None

    def test_request_with_system_string(self):
        # A string `system` prompt maps directly onto `instructions`.
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            system="You are a helpful assistant.",
            messages=[
                ClaudeMessage(role="user", content="Hello"),
            ],
        )
        params = claude_request_to_text_generation(request)
        assert params.instructions == "You are a helpful assistant."
        assert isinstance(params.input, list)
        assert len(params.input) == 1
        assert params.input[0].role == "user"
        assert params.input[0].content == "Hello"

    def test_request_with_system_text_blocks(self):
        # A list of system text blocks is concatenated into one instructions
        # string, preserving block order.
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            system=[
                ClaudeTextBlock(text="You are helpful. "),
                ClaudeTextBlock(text="Be concise."),
            ],
            messages=[
                ClaudeMessage(role="user", content="Hello"),
            ],
        )
        params = claude_request_to_text_generation(request)
        assert params.instructions == "You are helpful. Be concise."
        assert isinstance(params.input, list)
        assert len(params.input) == 1

    def test_request_with_content_blocks(self):
        # Multiple text blocks in one message are flattened into one string.
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            messages=[
                ClaudeMessage(
                    role="user",
                    content=[
                        ClaudeTextBlock(text="First part. "),
                        ClaudeTextBlock(text="Second part."),
                    ],
                ),
            ],
        )
        params = claude_request_to_text_generation(request)
        assert isinstance(params.input, list)
        assert len(params.input) == 1
        assert params.input[0].content == "First part. Second part."

    def test_request_with_multi_turn_conversation(self):
        # Roles and ordering of a multi-turn conversation are preserved 1:1.
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            messages=[
                ClaudeMessage(role="user", content="Hello"),
                ClaudeMessage(role="assistant", content="Hi there!"),
                ClaudeMessage(role="user", content="How are you?"),
            ],
        )
        params = claude_request_to_text_generation(request)
        assert isinstance(params.input, list)
        assert len(params.input) == 3
        assert params.input[0].role == "user"
        assert params.input[1].role == "assistant"
        assert params.input[2].role == "user"

    def test_request_with_optional_parameters(self):
        # Sampling/stream knobs are copied through unchanged
        # (Claude's `stop_sequences` becomes the internal `stop`).
        request = ClaudeMessagesRequest(
            model=ModelId("claude-3-opus"),
            max_tokens=100,
            messages=[ClaudeMessage(role="user", content="Hello")],
            temperature=0.7,
            top_p=0.9,
            top_k=40,
            stop_sequences=["STOP", "END"],
            stream=True,
        )
        params = claude_request_to_text_generation(request)
        assert params.temperature == 0.7
        assert params.top_p == 0.9
        assert params.top_k == 40
        assert params.stop == ["STOP", "END"]
        assert params.stream is True
class TestClaudeMessagesRequestValidation:
    """Tests for Claude Messages API request validation.

    Each test omits exactly one required field and expects pydantic to reject
    the payload.
    """

    def test_request_requires_model(self):
        with pytest.raises(pydantic.ValidationError):
            ClaudeMessagesRequest.model_validate(
                {
                    "max_tokens": 100,
                    "messages": [{"role": "user", "content": "Hello"}],
                }
            )

    def test_request_requires_max_tokens(self):
        # max_tokens is mandatory in the Claude Messages API (unlike OpenAI's).
        with pytest.raises(pydantic.ValidationError):
            ClaudeMessagesRequest.model_validate(
                {
                    "model": "claude-3-opus",
                    "messages": [{"role": "user", "content": "Hello"}],
                }
            )

    def test_request_requires_messages(self):
        with pytest.raises(pydantic.ValidationError):
            ClaudeMessagesRequest.model_validate(
                {
                    "model": "claude-3-opus",
                    "max_tokens": 100,
                }
            )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_claude_api.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_claude_tool_use.py | """Tests for Claude Messages API tool_use support in the adapter."""
import json
from collections.abc import AsyncGenerator
from typing import Any, cast
from exo.master.adapters.claude import (
ClaudeMessagesResponse,
collect_claude_response,
generate_claude_stream,
)
from exo.shared.types.api import ToolCallItem
from exo.shared.types.chunks import ErrorChunk, TokenChunk, ToolCallChunk
from exo.shared.types.common import CommandId, ModelId
async def _chunks_to_stream(
chunks: list[ErrorChunk | ToolCallChunk | TokenChunk],
) -> AsyncGenerator[ErrorChunk | ToolCallChunk | TokenChunk, None]:
for chunk in chunks:
yield chunk
async def _collect_response(
    command_id: CommandId,
    model: str,
    chunk_stream: AsyncGenerator[ErrorChunk | ToolCallChunk | TokenChunk, None],
) -> ClaudeMessagesResponse:
    """Drain collect_claude_response and deserialize the concatenated JSON."""
    payload = ""
    async for fragment in collect_claude_response(command_id, model, chunk_stream):
        payload += fragment
    return ClaudeMessagesResponse.model_validate_json(payload)
# Shared fixtures: a stand-in model id and command id reused by every test below.
MODEL = ModelId("test-model")
COMMAND_ID = CommandId("cmd_test123")
def _parse_sse_events(events: list[str]) -> list[dict[str, Any]]:
"""Parse SSE event strings into JSON dicts."""
parsed: list[dict[str, Any]] = []
for event_str in events:
for line in event_str.strip().split("\n"):
if line.startswith("data: "):
parsed.append(cast(dict[str, Any], json.loads(line[6:])))
return parsed
class TestCollectClaudeResponseToolUse:
    """Tests for non-streaming tool_use response collection."""

    async def test_tool_call_chunk_produces_tool_use_blocks(self):
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(
                        name="get_weather",
                        arguments='{"location": "San Francisco"}',
                    )
                ],
            ),
        ]
        response = await _collect_response(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        )
        assert response.stop_reason == "tool_use"
        tool_blocks = [b for b in response.content if b.type == "tool_use"]
        assert len(tool_blocks) == 1
        block = tool_blocks[0]
        assert block.type == "tool_use"
        assert block.name == "get_weather"
        # JSON argument string is parsed into a dict for the tool_use block.
        assert block.input == {"location": "San Francisco"}
        # Adapter prefixes tool ids with Claude's conventional "toolu_".
        assert block.id.startswith("toolu_")

    async def test_multiple_tool_calls(self):
        # Several tool calls in one chunk each get their own tool_use block,
        # in order.
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(
                        name="get_weather",
                        arguments='{"location": "SF"}',
                    ),
                    ToolCallItem(
                        name="get_time",
                        arguments='{"timezone": "PST"}',
                    ),
                ],
            ),
        ]
        response = await _collect_response(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        )
        assert response.stop_reason == "tool_use"
        tool_blocks = [b for b in response.content if b.type == "tool_use"]
        assert len(tool_blocks) == 2
        assert tool_blocks[0].name == "get_weather"
        assert tool_blocks[1].name == "get_time"

    async def test_mixed_text_and_tool_use(self):
        # Text tokens preceding a tool call are merged into one text block
        # alongside the tool_use block; stop_reason is still tool_use.
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            TokenChunk(model=MODEL, text="Let me check ", token_id=1, usage=None),
            TokenChunk(model=MODEL, text="the weather.", token_id=2, usage=None),
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(
                        name="get_weather",
                        arguments='{"location": "NYC"}',
                    )
                ],
            ),
        ]
        response = await _collect_response(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        )
        assert response.stop_reason == "tool_use"
        text_blocks = [b for b in response.content if b.type == "text"]
        tool_blocks = [b for b in response.content if b.type == "tool_use"]
        assert len(text_blocks) == 1
        assert text_blocks[0].text == "Let me check the weather."
        assert len(tool_blocks) == 1
        assert tool_blocks[0].name == "get_weather"

    async def test_no_content_produces_empty_text_block(self):
        # An empty stream must still yield a single empty text block so the
        # Claude response always has content.
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = []
        response = await _collect_response(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        )
        assert len(response.content) == 1
        assert response.content[0].type == "text"
class TestGenerateClaudeStreamToolUse:
    """Tests for streaming tool_use event generation."""

    async def test_tool_call_emits_tool_use_events(self):
        # A lone tool call should produce a full content_block_start /
        # input_json_delta / content_block_stop triple plus a tool_use
        # stop reason in message_delta.
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(
                        name="get_weather",
                        arguments='{"location": "SF"}',
                    )
                ],
            ),
        ]
        events: list[str] = []
        async for event in generate_claude_stream(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        ):
            events.append(event)
        parsed = _parse_sse_events(events)
        # Find tool_use content_block_start
        tool_starts = [
            e
            for e in parsed
            if e.get("type") == "content_block_start"
            and cast(dict[str, Any], e.get("content_block", {})).get("type")
            == "tool_use"
        ]
        assert len(tool_starts) == 1
        content_block = cast(dict[str, Any], tool_starts[0]["content_block"])
        assert content_block["name"] == "get_weather"
        # Streaming starts the block with empty input; arguments arrive via delta.
        assert content_block["input"] == {}
        assert cast(str, content_block["id"]).startswith("toolu_")
        # Find input_json_delta
        json_deltas = [
            e
            for e in parsed
            if e.get("type") == "content_block_delta"
            and cast(dict[str, Any], e.get("delta", {})).get("type")
            == "input_json_delta"
        ]
        assert len(json_deltas) == 1
        delta = cast(dict[str, Any], json_deltas[0]["delta"])
        assert json.loads(cast(str, delta["partial_json"])) == {"location": "SF"}
        # Find message_delta with tool_use stop reason
        msg_deltas = [e for e in parsed if e.get("type") == "message_delta"]
        assert len(msg_deltas) == 1
        assert cast(dict[str, Any], msg_deltas[0]["delta"])["stop_reason"] == "tool_use"

    async def test_streaming_mixed_text_and_tool_use(self):
        # Text opens block index 0; the subsequent tool call takes index 1.
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            TokenChunk(model=MODEL, text="Hello ", token_id=1, usage=None),
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(
                        name="search",
                        arguments='{"query": "test"}',
                    )
                ],
            ),
        ]
        events: list[str] = []
        async for event in generate_claude_stream(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        ):
            events.append(event)
        parsed = _parse_sse_events(events)
        # Should have text delta at index 0
        text_deltas = [
            e
            for e in parsed
            if e.get("type") == "content_block_delta"
            and cast(dict[str, Any], e.get("delta", {})).get("type") == "text_delta"
        ]
        assert len(text_deltas) == 1
        assert text_deltas[0]["index"] == 0
        assert cast(dict[str, Any], text_deltas[0]["delta"])["text"] == "Hello "
        # Tool block at index 1
        tool_starts = [
            e
            for e in parsed
            if e.get("type") == "content_block_start"
            and cast(dict[str, Any], e.get("content_block", {})).get("type")
            == "tool_use"
        ]
        assert len(tool_starts) == 1
        assert tool_starts[0]["index"] == 1
        # Stop reason should be tool_use
        msg_deltas = [e for e in parsed if e.get("type") == "message_delta"]
        assert cast(dict[str, Any], msg_deltas[0]["delta"])["stop_reason"] == "tool_use"

    async def test_streaming_tool_block_stop_events(self):
        chunks: list[ErrorChunk | ToolCallChunk | TokenChunk] = [
            ToolCallChunk(
                model=MODEL,
                usage=None,
                tool_calls=[
                    ToolCallItem(name="fn1", arguments="{}"),
                    ToolCallItem(name="fn2", arguments='{"a": 1}'),
                ],
            ),
        ]
        events: list[str] = []
        async for event in generate_claude_stream(
            COMMAND_ID, "test-model", _chunks_to_stream(chunks)
        ):
            events.append(event)
        parsed = _parse_sse_events(events)
        # Two tool block starts (at indices 0 and 1 — no text block when only tools)
        tool_starts = [
            e
            for e in parsed
            if e.get("type") == "content_block_start"
            and cast(dict[str, Any], e.get("content_block", {})).get("type")
            == "tool_use"
        ]
        assert len(tool_starts) == 2
        assert tool_starts[0]["index"] == 0
        assert tool_starts[1]["index"] == 1
        # Two tool block stops (at indices 0 and 1)
        block_stops = [e for e in parsed if e.get("type") == "content_block_stop"]
        stop_indices = [e["index"] for e in block_stops]
        assert 0 in stop_indices
        assert 1 in stop_indices
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_claude_tool_use.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_openai_responses_api.py | """Tests for OpenAI Responses API wire types.
ResponsesRequest is the API wire type for the Responses endpoint.
The responses adapter converts it to TextGenerationTaskParams for the pipeline.
"""
import pydantic
import pytest
from exo.shared.types.common import ModelId
from exo.shared.types.openai_responses import (
ResponseInputMessage,
ResponsesRequest,
)
class TestResponsesRequestValidation:
    """Tests for OpenAI Responses API request validation."""

    def test_request_requires_model(self):
        # Omitting "model" must be rejected by pydantic validation.
        payload = {"input": "Hello"}
        with pytest.raises(pydantic.ValidationError):
            ResponsesRequest.model_validate(payload)

    def test_request_requires_input(self):
        # Omitting "input" must be rejected by pydantic validation.
        payload = {"model": "gpt-4o"}
        with pytest.raises(pydantic.ValidationError):
            ResponsesRequest.model_validate(payload)

    def test_request_accepts_string_input(self):
        # A bare prompt string is a valid input form.
        request = ResponsesRequest(model=ModelId("gpt-4o"), input="Hello")
        assert request.input == "Hello"

    def test_request_accepts_message_array_input(self):
        # A list of structured messages is the other valid input form.
        messages = [ResponseInputMessage(role="user", content="Hello")]
        request = ResponsesRequest(model=ModelId("gpt-4o"), input=messages)
        assert len(request.input) == 1
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_openai_responses_api.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/types/claude_api.py | """Claude Messages API types for request/response conversion."""
from typing import Any, Literal
from pydantic import BaseModel, Field
from exo.shared.types.common import ModelId
# Tool definition types
# JSON-Schema-style dict describing a tool's expected input.
ClaudeToolInputSchema = dict[str, Any]
class ClaudeToolDefinition(BaseModel, frozen=True):
    """Tool definition in Claude Messages API request."""
    name: str  # identifier the model echoes back in tool_use blocks
    description: str | None = None
    input_schema: ClaudeToolInputSchema
# Type aliases
# Roles allowed on request messages; the system prompt travels on the
# request's `system` field rather than as a message role.
ClaudeRole = Literal["user", "assistant"]
# Why generation stopped; "tool_use" signals the model requested a tool call.
ClaudeStopReason = Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]
# Content block types
class ClaudeTextBlock(BaseModel, frozen=True):
    """Text content block in Claude Messages API."""
    type: Literal["text"] = "text"
    text: str
class ClaudeImageSource(BaseModel, frozen=True):
    """Image source for Claude image blocks."""
    type: Literal["base64", "url"]
    # NOTE(review): presumably media_type/data apply when type == "base64"
    # and url when type == "url" — confirm against the adapter that reads this.
    media_type: str | None = None
    data: str | None = None
    url: str | None = None
class ClaudeImageBlock(BaseModel, frozen=True):
    """Image content block in Claude Messages API."""
    type: Literal["image"] = "image"
    source: ClaudeImageSource
class ClaudeThinkingBlock(BaseModel, frozen=True):
    """Thinking content block in Claude Messages API."""
    type: Literal["thinking"] = "thinking"
    thinking: str
    signature: str | None = None
class ClaudeToolUseBlock(BaseModel, frozen=True):
    """Tool use content block in Claude Messages API."""
    type: Literal["tool_use"] = "tool_use"
    id: str
    name: str  # matches a ClaudeToolDefinition.name from the request
    input: dict[str, Any]
class ClaudeToolResultBlock(BaseModel, frozen=True):
    """Tool result content block in Claude Messages API request."""
    type: Literal["tool_result"] = "tool_result"
    tool_use_id: str  # id of the tool_use block this result answers
    content: str | list[ClaudeTextBlock] | None = None
    is_error: bool | None = None
    cache_control: dict[str, str] | None = None
# Blocks that may appear in assistant output (tool_result is input-only).
ClaudeContentBlock = (
    ClaudeTextBlock | ClaudeImageBlock | ClaudeThinkingBlock | ClaudeToolUseBlock
)
# Input content blocks can also include tool_result (sent by user after tool_use)
ClaudeInputContentBlock = (
    ClaudeTextBlock
    | ClaudeImageBlock
    | ClaudeThinkingBlock
    | ClaudeToolUseBlock
    | ClaudeToolResultBlock
)
# Request types
class ClaudeMessage(BaseModel, frozen=True):
    """Message in Claude Messages API request."""
    role: ClaudeRole
    # Either a plain string or a list of structured content blocks.
    content: str | list[ClaudeInputContentBlock]
class ClaudeThinkingConfig(BaseModel, frozen=True):
    """Extended-thinking configuration carried on a Messages request."""
    type: Literal["enabled", "disabled", "adaptive"]
    budget_tokens: int | None = None  # presumably a thinking-token budget — TODO confirm
class ClaudeMessagesRequest(BaseModel):
    """Request body for Claude Messages API.

    NOTE(review): this is the only model in this module without frozen=True —
    confirm whether callers rely on mutating it, or freeze for consistency.
    """
    model: ModelId
    max_tokens: int
    messages: list[ClaudeMessage]
    # System prompt travels here rather than as a message role.
    system: str | list[ClaudeTextBlock] | None = None
    stop_sequences: list[str] | None = None
    stream: bool = False
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    tools: list[ClaudeToolDefinition] | None = None
    metadata: dict[str, str] | None = None
    thinking: ClaudeThinkingConfig | None = None
# Response types
class ClaudeUsage(BaseModel, frozen=True):
    """Token usage in Claude Messages API response."""
    input_tokens: int
    output_tokens: int
class ClaudeMessagesResponse(BaseModel, frozen=True):
    """Response body for Claude Messages API."""
    id: str
    type: Literal["message"] = "message"
    role: Literal["assistant"] = "assistant"
    content: list[ClaudeContentBlock]
    model: str
    stop_reason: ClaudeStopReason | None = None
    # Presumably the matched sequence when stop_reason == "stop_sequence" — TODO confirm.
    stop_sequence: str | None = None
    usage: ClaudeUsage
# Streaming event types
class ClaudeMessageStart(BaseModel, frozen=True):
    """Partial message in message_start event."""
    id: str
    type: Literal["message"] = "message"
    role: Literal["assistant"] = "assistant"
    # Starts empty; content arrives via subsequent content_block_* events.
    content: list[ClaudeTextBlock] = Field(default_factory=list)
    model: str
    stop_reason: ClaudeStopReason | None = None
    stop_sequence: str | None = None
    usage: ClaudeUsage
class ClaudeMessageStartEvent(BaseModel, frozen=True):
    """Event sent at start of message stream."""
    type: Literal["message_start"] = "message_start"
    message: ClaudeMessageStart
class ClaudeContentBlockStartEvent(BaseModel, frozen=True):
    """Event sent at start of a content block."""
    type: Literal["content_block_start"] = "content_block_start"
    # Position of this block within the message's content array.
    index: int
    content_block: ClaudeTextBlock | ClaudeThinkingBlock | ClaudeToolUseBlock
class ClaudeTextDelta(BaseModel, frozen=True):
    """Delta for text content block."""
    type: Literal["text_delta"] = "text_delta"
    text: str
class ClaudeThinkingDelta(BaseModel, frozen=True):
    """Delta for thinking content block."""
    type: Literal["thinking_delta"] = "thinking_delta"
    thinking: str
class ClaudeInputJsonDelta(BaseModel, frozen=True):
    """Delta for tool use input JSON content block."""
    type: Literal["input_json_delta"] = "input_json_delta"
    # Fragment of the tool-call arguments JSON string.
    partial_json: str
class ClaudeContentBlockDeltaEvent(BaseModel, frozen=True):
    """Event sent for content block delta."""
    type: Literal["content_block_delta"] = "content_block_delta"
    index: int
    delta: ClaudeTextDelta | ClaudeThinkingDelta | ClaudeInputJsonDelta
class ClaudeContentBlockStopEvent(BaseModel, frozen=True):
    """Event sent at end of a content block."""
    type: Literal["content_block_stop"] = "content_block_stop"
    index: int
class ClaudeMessageDeltaUsage(BaseModel, frozen=True):
    """Usage in message_delta event."""
    output_tokens: int
class ClaudeMessageDelta(BaseModel, frozen=True):
    """Delta in message_delta event."""
    stop_reason: ClaudeStopReason | None = None
    stop_sequence: str | None = None
class ClaudeMessageDeltaEvent(BaseModel, frozen=True):
    """Event sent with final message delta."""
    type: Literal["message_delta"] = "message_delta"
    delta: ClaudeMessageDelta
    usage: ClaudeMessageDeltaUsage
class ClaudeMessageStopEvent(BaseModel, frozen=True):
    """Event sent at end of message stream."""
    type: Literal["message_stop"] = "message_stop"
# Union of every streaming event shape; each member carries a distinct
# literal `type`, so the union can be discriminated on that field.
ClaudeStreamEvent = (
    ClaudeMessageStartEvent
    | ClaudeContentBlockStartEvent
    | ClaudeContentBlockDeltaEvent
    | ClaudeContentBlockStopEvent
    | ClaudeMessageDeltaEvent
    | ClaudeMessageStopEvent
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/claude_api.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/openai_responses.py | """OpenAI Responses API wire types.
These types model the OpenAI Responses API request/response format.
ResponsesRequest is the API-level wire type; for the canonical internal
task params type used by the inference pipeline, see
``exo.shared.types.text_generation.TextGenerationTaskParams``.
"""
import time
from typing import Any, Literal
from pydantic import BaseModel, Field
from exo.shared.types.common import ModelId
# Type aliases
ResponseStatus = Literal["completed", "failed", "in_progress", "incomplete"]
ResponseRole = Literal["user", "assistant", "system", "developer"]
# Request input content part types
class ResponseInputTextPart(BaseModel, frozen=True):
    """Text content part in a Responses API input message."""
    type: Literal["input_text"] = "input_text"
    text: str
class ResponseOutputTextPart(BaseModel, frozen=True):
    """Output text content part (used when replaying assistant messages in input)."""
    type: Literal["output_text"] = "output_text"
    text: str
ResponseContentPart = ResponseInputTextPart | ResponseOutputTextPart
# Request input item types
class ResponseInputMessage(BaseModel, frozen=True):
    """Input message for Responses API."""
    role: ResponseRole
    # Either a plain string or a list of typed content parts.
    content: str | list[ResponseContentPart]
    type: Literal["message"] = "message"
class FunctionCallInputItem(BaseModel, frozen=True):
    """Function call item replayed in input (from a previous assistant response)."""
    type: Literal["function_call"] = "function_call"
    id: str | None = None
    call_id: str
    name: str
    arguments: str
    status: ResponseStatus | None = None
class FunctionCallOutputInputItem(BaseModel, frozen=True):
    """Function call output item in input (user providing tool results)."""
    type: Literal["function_call_output"] = "function_call_output"
    call_id: str  # ties the output back to the FunctionCallInputItem.call_id
    output: str
    id: str | None = None
    status: ResponseStatus | None = None
ResponseInputItem = (
    ResponseInputMessage | FunctionCallInputItem | FunctionCallOutputInputItem
)
class ResponsesRequest(BaseModel, frozen=True):
    """Request body for OpenAI Responses API.

    This is the API wire type for the Responses endpoint. The canonical
    internal task params type is ``TextGenerationTaskParams``; see the
    ``responses_request_to_text_generation`` adapter for conversion.
    """
    # --- OpenAI Responses API standard fields ---
    model: ModelId
    input: str | list[ResponseInputItem]
    instructions: str | None = None
    max_output_tokens: int | None = None
    temperature: float | None = None
    top_p: float | None = None
    stream: bool = False
    # Tool definitions are carried as raw dicts rather than typed models.
    tools: list[dict[str, Any]] | None = None
    metadata: dict[str, str] | None = None
    # --- exo extensions (not in OpenAI Responses API spec) ---
    top_k: int | None = Field(
        default=None,
        description="[exo extension] Top-k sampling parameter. Not part of the OpenAI Responses API.",
        json_schema_extra={"x-exo-extension": True},
    )
    stop: str | list[str] | None = Field(
        default=None,
        description="[exo extension] Stop sequence(s). Not part of the OpenAI Responses API.",
        json_schema_extra={"x-exo-extension": True},
    )
    seed: int | None = Field(
        default=None,
        description="[exo extension] Seed for deterministic sampling. Not part of the OpenAI Responses API.",
        json_schema_extra={"x-exo-extension": True},
    )
    # --- Internal fields (preserved during serialization, hidden from OpenAPI schema) ---
    chat_template_messages: list[dict[str, Any]] | None = Field(
        default=None,
        description="Internal: pre-formatted messages for tokenizer chat template. Not part of the OpenAI Responses API.",
        json_schema_extra={"x-exo-internal": True},
    )
# Response types
class ResponseOutputText(BaseModel, frozen=True):
    """Text content in response output."""
    type: Literal["output_text"] = "output_text"
    text: str
    annotations: list[dict[str, str]] = Field(default_factory=list)
class ResponseMessageItem(BaseModel, frozen=True):
    """Message item in response output array."""
    type: Literal["message"] = "message"
    id: str
    role: Literal["assistant"] = "assistant"
    content: list[ResponseOutputText]
    status: ResponseStatus = "completed"
class ResponseFunctionCallItem(BaseModel, frozen=True):
    """Function call item in response output array."""
    type: Literal["function_call"] = "function_call"
    id: str
    call_id: str
    name: str
    arguments: str
    status: ResponseStatus = "completed"
class ResponseReasoningSummaryText(BaseModel, frozen=True):
    """Summary text part in a reasoning output item."""
    type: Literal["summary_text"] = "summary_text"
    text: str
class ResponseReasoningItem(BaseModel, frozen=True):
    """Reasoning output item in response output array."""
    type: Literal["reasoning"] = "reasoning"
    id: str
    summary: list[ResponseReasoningSummaryText] = Field(default_factory=list)
    status: ResponseStatus = "completed"
# All item shapes that may appear in ResponsesResponse.output.
ResponseItem = ResponseMessageItem | ResponseFunctionCallItem | ResponseReasoningItem
class ResponseUsage(BaseModel, frozen=True):
    """Token usage in Responses API response."""
    input_tokens: int
    output_tokens: int
    total_tokens: int
class ResponsesResponse(BaseModel, frozen=True):
    """Response body for OpenAI Responses API."""
    id: str
    object: Literal["response"] = "response"
    created_at: int = Field(default_factory=lambda: int(time.time()))
    status: ResponseStatus = "completed"
    model: str
    output: list[ResponseItem]
    # Convenience field carrying the concatenated text output alongside
    # the structured `output` items.
    output_text: str
    usage: ResponseUsage | None = None
# Streaming event types
class ResponseCreatedEvent(BaseModel, frozen=True):
    """Event sent when response is created."""
    type: Literal["response.created"] = "response.created"
    # Monotonic ordering counter shared by every event in one stream.
    sequence_number: int
    response: ResponsesResponse
class ResponseInProgressEvent(BaseModel, frozen=True):
    """Event sent when response starts processing."""
    type: Literal["response.in_progress"] = "response.in_progress"
    sequence_number: int
    response: ResponsesResponse
class ResponseOutputItemAddedEvent(BaseModel, frozen=True):
    """Event sent when an output item is added."""
    type: Literal["response.output_item.added"] = "response.output_item.added"
    sequence_number: int
    output_index: int
    item: ResponseItem
class ResponseContentPartAddedEvent(BaseModel, frozen=True):
    """Event sent when a content part is added."""
    type: Literal["response.content_part.added"] = "response.content_part.added"
    sequence_number: int
    item_id: str
    output_index: int
    content_index: int
    part: ResponseOutputText
class ResponseTextDeltaEvent(BaseModel, frozen=True):
    """Event sent for text delta during streaming."""
    type: Literal["response.output_text.delta"] = "response.output_text.delta"
    sequence_number: int
    item_id: str
    output_index: int
    content_index: int
    delta: str
class ResponseTextDoneEvent(BaseModel, frozen=True):
    """Event sent when text content is done."""
    type: Literal["response.output_text.done"] = "response.output_text.done"
    sequence_number: int
    item_id: str
    output_index: int
    content_index: int
    text: str
class ResponseContentPartDoneEvent(BaseModel, frozen=True):
    """Event sent when a content part is done."""
    type: Literal["response.content_part.done"] = "response.content_part.done"
    sequence_number: int
    item_id: str
    output_index: int
    content_index: int
    part: ResponseOutputText
class ResponseOutputItemDoneEvent(BaseModel, frozen=True):
    """Event sent when an output item is done."""
    type: Literal["response.output_item.done"] = "response.output_item.done"
    sequence_number: int
    output_index: int
    item: ResponseItem
class ResponseFunctionCallArgumentsDeltaEvent(BaseModel, frozen=True):
    """Event sent for function call arguments delta during streaming."""
    type: Literal["response.function_call_arguments.delta"] = (
        "response.function_call_arguments.delta"
    )
    sequence_number: int
    item_id: str
    output_index: int
    delta: str
class ResponseFunctionCallArgumentsDoneEvent(BaseModel, frozen=True):
    """Event sent when function call arguments are complete."""
    type: Literal["response.function_call_arguments.done"] = (
        "response.function_call_arguments.done"
    )
    sequence_number: int
    item_id: str
    output_index: int
    name: str
    arguments: str
class ResponseReasoningSummaryPartAddedEvent(BaseModel, frozen=True):
    """Event sent when a reasoning summary part is added."""
    type: Literal["response.reasoning_summary_part.added"] = (
        "response.reasoning_summary_part.added"
    )
    sequence_number: int
    item_id: str
    output_index: int
    summary_index: int
    part: ResponseReasoningSummaryText
class ResponseReasoningSummaryTextDeltaEvent(BaseModel, frozen=True):
    """Event sent for reasoning summary text delta during streaming."""
    type: Literal["response.reasoning_summary_text.delta"] = (
        "response.reasoning_summary_text.delta"
    )
    sequence_number: int
    item_id: str
    output_index: int
    summary_index: int
    delta: str
class ResponseReasoningSummaryTextDoneEvent(BaseModel, frozen=True):
    """Event sent when reasoning summary text is done."""
    type: Literal["response.reasoning_summary_text.done"] = (
        "response.reasoning_summary_text.done"
    )
    sequence_number: int
    item_id: str
    output_index: int
    summary_index: int
    text: str
class ResponseReasoningSummaryPartDoneEvent(BaseModel, frozen=True):
    """Event sent when a reasoning summary part is done."""
    type: Literal["response.reasoning_summary_part.done"] = (
        "response.reasoning_summary_part.done"
    )
    sequence_number: int
    item_id: str
    output_index: int
    summary_index: int
    part: ResponseReasoningSummaryText
class ResponseCompletedEvent(BaseModel, frozen=True):
    """Event sent when response is completed."""
    type: Literal["response.completed"] = "response.completed"
    sequence_number: int
    response: ResponsesResponse
# Union of every streaming event shape; each member carries a distinct
# literal `type`, so the union can be discriminated on that field.
ResponsesStreamEvent = (
    ResponseCreatedEvent
    | ResponseInProgressEvent
    | ResponseOutputItemAddedEvent
    | ResponseContentPartAddedEvent
    | ResponseTextDeltaEvent
    | ResponseTextDoneEvent
    | ResponseContentPartDoneEvent
    | ResponseOutputItemDoneEvent
    | ResponseFunctionCallArgumentsDeltaEvent
    | ResponseFunctionCallArgumentsDoneEvent
    | ResponseReasoningSummaryPartAddedEvent
    | ResponseReasoningSummaryTextDeltaEvent
    | ResponseReasoningSummaryTextDoneEvent
    | ResponseReasoningSummaryPartDoneEvent
    | ResponseCompletedEvent
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/openai_responses.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/text_generation.py | """Canonical internal type for text generation task parameters.
All external API formats (Chat Completions, Claude Messages, OpenAI Responses)
are converted to TextGenerationTaskParams at the API boundary via adapters.
"""
from typing import Any, Literal
from pydantic import BaseModel
from exo.shared.types.common import ModelId
MessageRole = Literal["user", "assistant", "system", "developer"]
class InputMessage(BaseModel, frozen=True):
    """Internal message for text generation pipelines."""
    role: MessageRole
    content: str
class TextGenerationTaskParams(BaseModel, frozen=True):
    """Canonical internal task params for text generation.

    Every API adapter converts its wire type into this before handing
    off to the master/worker pipeline.
    """
    model: ModelId
    input: list[InputMessage]
    instructions: str | None = None
    max_output_tokens: int | None = None
    temperature: float | None = None
    top_p: float | None = None
    stream: bool = False
    tools: list[dict[str, Any]] | None = None
    bench: bool = False  # NOTE(review): presumably enables benchmark mode — confirm
    top_k: int | None = None
    stop: str | list[str] | None = None
    seed: int | None = None
    # Pre-formatted messages for the tokenizer chat template, when an
    # adapter supplies them instead of `input`.
    chat_template_messages: list[dict[str, Any]] | None = None
    enable_thinking: bool | None = None
    logprobs: bool = False
    top_logprobs: int | None = None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/text_generation.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_glm_tool_parsing.py | """Tests for GLM tool call argument parsing regex."""
import regex as re
# Replicate the regex patterns from runner.py to test them in isolation
_func_name_regex = re.compile(r"^(.*?)<arg_key>", re.DOTALL)
_func_arg_regex = re.compile(
r"<arg_key>(.*?)</arg_key>(?:\n|\s)*<arg_value>(.*?)(?:</arg_value>|(?=<arg_key>)|$)",
re.DOTALL,
)
def _parse_args(text: str) -> list[tuple[str, str]]:
"""Extract (key, value) pairs from GLM tool call text."""
pairs = _func_arg_regex.findall(text)
return [(k.strip(), v.strip()) for k, v in pairs] # pyright: ignore[reportAny]
def _parse_func_name(text: str) -> str:
"""Extract function name from GLM tool call text."""
match = _func_name_regex.search(text)
if match is None:
raise ValueError(f"Could not parse function name: {text!r}")
return match.group(1).strip()
class TestGlmToolParsingWithClosingTags:
    """Tests for normal format with closing tags present."""

    def test_single_argument(self):
        text = (
            "get_weather<arg_key>location</arg_key><arg_value>San Francisco</arg_value>"
        )
        assert _parse_func_name(text) == "get_weather"
        assert _parse_args(text) == [("location", "San Francisco")]

    def test_multiple_arguments(self):
        text = (
            "search<arg_key>query</arg_key><arg_value>python</arg_value>"
            "<arg_key>limit</arg_key><arg_value>10</arg_value>"
        )
        assert _parse_func_name(text) == "search"
        assert _parse_args(text) == [("query", "python"), ("limit", "10")]

    def test_arguments_with_whitespace_between(self):
        # Whitespace/newlines between </arg_key> and <arg_value> are tolerated.
        text = (
            "fn<arg_key>a</arg_key>\n<arg_value>1</arg_value>\n"
            "<arg_key>b</arg_key> <arg_value>2</arg_value>"
        )
        assert _parse_args(text) == [("a", "1"), ("b", "2")]
class TestGlmToolParsingMissingClosingTags:
    """Tests for format where </arg_value> closing tags are missing."""

    def test_single_argument_no_closing(self):
        text = "get_weather<arg_key>location</arg_key><arg_value>San Francisco"
        assert _parse_func_name(text) == "get_weather"
        assert _parse_args(text) == [("location", "San Francisco")]

    def test_multiple_arguments_no_closing(self):
        text = (
            "search<arg_key>query</arg_key><arg_value>python"
            "<arg_key>limit</arg_key><arg_value>10"
        )
        assert _parse_func_name(text) == "search"
        assert _parse_args(text) == [("query", "python"), ("limit", "10")]

    def test_mixed_closing_tags(self):
        """First arg has closing tag, second does not."""
        text = (
            "fn<arg_key>a</arg_key><arg_value>1</arg_value>"
            "<arg_key>b</arg_key><arg_value>2"
        )
        assert _parse_args(text) == [("a", "1"), ("b", "2")]

    def test_value_with_trailing_whitespace(self):
        # Trailing whitespace before EOF is stripped from the value.
        text = "fn<arg_key>x</arg_key><arg_value>hello world \n"
        assert _parse_args(text) == [("x", "hello world")]

    def test_value_with_newlines_no_closing(self):
        # Interior newlines are preserved in the value.
        text = "fn<arg_key>data</arg_key><arg_value>line1\nline2"
        assert _parse_args(text) == [("data", "line1\nline2")]
class TestGlmToolParsingEdgeCases:
    """Edge case tests for GLM tool call parsing."""

    def test_empty_value_with_closing(self):
        text = "fn<arg_key>empty</arg_key><arg_value></arg_value>"
        assert _parse_args(text) == [("empty", "")]

    def test_value_with_json_content(self):
        # JSON braces/quotes inside a value must not confuse the regex.
        text = 'fn<arg_key>data</arg_key><arg_value>{"key": "value"}</arg_value>'
        assert _parse_args(text) == [("data", '{"key": "value"}')]

    def test_value_with_json_no_closing(self):
        text = 'fn<arg_key>data</arg_key><arg_value>{"key": "value"}'
        assert _parse_args(text) == [("data", '{"key": "value"}')]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_runner/test_glm_tool_parsing.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_parse_tool_calls.py | """Tests for parse_tool_calls generator, especially unclosed tool call handling."""
from collections.abc import Generator
from typing import Any
from exo.shared.types.worker.runner_response import GenerationResponse, ToolCallResponse
from exo.worker.runner.llm_inference.runner import parse_tool_calls
from exo.worker.runner.llm_inference.tool_parsers import make_mlx_parser
def _make_responses(
    texts: list[str],
    finish_on_last: bool = True,
) -> Generator[GenerationResponse]:
    """Yield one GenerationResponse per string in *texts*.

    Token ids count up from zero. When *finish_on_last* is true the final
    response carries finish_reason "stop"; otherwise every finish_reason
    is None.
    """
    last_index = len(texts) - 1
    for token_id, chunk_text in enumerate(texts):
        reason = "stop" if (finish_on_last and token_id == last_index) else None
        yield GenerationResponse(
            text=chunk_text,
            token=token_id,
            finish_reason=reason,
            usage=None,
        )
def _dummier_parser(text: str) -> dict[str, Any]:
return {"name": "test_fn", "arguments": {"arg": text}}
# Shared parser fixture: _dummier_parser wrapped in <tool_call> delimiters.
_dummy_parser = make_mlx_parser("<tool_call>", "</tool_call>", _dummier_parser)
class TestParseToolCalls:
    """Tests for parse_tool_calls generator."""
    def test_closed_tool_call_works_normally(self):
        """Normal tool call flow should not be affected."""
        # Delimiters and payload arrive as separate streamed chunks.
        texts = ["<tool_call>", "test_fn", "</tool_call>"]
        results = list(
            parse_tool_calls(
                _make_responses(texts, finish_on_last=False),
                _dummy_parser,
            )
        )
        # The three chunks collapse into a single parsed tool call.
        assert len(results) == 1
        assert isinstance(results[0], ToolCallResponse)
    def test_no_tool_call_passes_through(self):
        """Responses without tool calls should pass through unchanged."""
        texts = ["Hello", " world"]
        results = list(
            parse_tool_calls(
                _make_responses(texts),
                _dummy_parser,
            )
        )
        assert len(results) == 2
        assert all(isinstance(r, GenerationResponse) for r in results)
        r0 = results[0]
        r1 = results[1]
        # isinstance re-checks narrow the type for the attribute asserts below.
        assert isinstance(r0, GenerationResponse)
        assert isinstance(r1, GenerationResponse)
        assert r0.text == "Hello"
        assert r1.text == " world"
        # finish_on_last defaults to True, so the last chunk carries "stop".
        assert r1.finish_reason == "stop"
    def test_failed_parse_yields_text(self):
        """When tool call parsing fails, the text should be yielded as-is."""
        def _failing_parser(text: str) -> dict[str, Any]:
            raise ValueError("parse failed")
        texts = ["<tool_call>", "bad content", "</tool_call>"]
        results = list(
            parse_tool_calls(
                _make_responses(texts, finish_on_last=False),
                make_mlx_parser("<tool_call>", "</tool_call>", _failing_parser),
            )
        )
        # The buffered chunks are emitted back as one plain-text response.
        assert len(results) == 1
        assert isinstance(results[0], GenerationResponse)
        assert results[0].text == "<tool_call>bad content</tool_call>"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_runner/test_parse_tool_calls.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/types/mlx.py | """Shared types for MLX-related functionality."""
from collections.abc import Sequence
from mlx import core as mx
from mlx import nn as nn
from mlx_lm.models.cache import (
ArraysCache,
CacheList,
KVCache,
QuantizedKVCache,
RotatingKVCache,
)
# This list contains one cache entry per transformer layer
KVCacheType = Sequence[
    KVCache | RotatingKVCache | QuantizedKVCache | ArraysCache | CacheList
]
# Model is a wrapper function to fix the fact that mlx is not strongly typed in the same way that EXO is.
# For example - MLX has no guarantee of the interface that nn.Module will expose. But we need a guarantee that it has a __call__() function
class Model(nn.Module):
    """Structural type for MLX models as used by EXO.

    Declares the minimal interface EXO relies on: a list of per-layer
    modules and a __call__ taking an input array, an optional per-layer
    KV cache, and optional precomputed input embeddings, returning an
    mx.array.
    """
    # One nn.Module per transformer layer, aligned with KVCacheType entries.
    layers: list[nn.Module]
    def __call__(
        self,
        x: mx.array,
        cache: KVCacheType | None,
        input_embeddings: mx.array | None = None,
    ) -> mx.array: ...
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/mlx.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_kv_prefix_cache.py | # type: ignore
import time
from typing import cast
from unittest.mock import patch
import mlx.core as mx
import pytest
from mlx_lm.models.cache import KVCache
from mlx_lm.sample_utils import make_sampler
from exo.shared.types.common import ModelId
from exo.shared.types.mlx import Model
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.worker.engines.mlx.cache import (
KVPrefixCache,
cache_length,
encode_prompt,
get_prefix_length,
make_kv_cache,
)
from exo.worker.engines.mlx.generator.generate import mlx_generate, prefill
from exo.worker.engines.mlx.utils_mlx import apply_chat_template
from exo.worker.tests.unittests.test_mlx.conftest import (
DEFAULT_GPT_OSS_CONFIG,
DEFAULT_GPT_OSS_MODEL_ID,
)
def _check_model_exists() -> bool:
    """Return True if the GPT-OSS test model weights are present on disk."""
    return DEFAULT_GPT_OSS_CONFIG.model_path.exists()
class TestGetPrefixLength:
    """Unit tests for get_prefix_length over pairs of token arrays."""

    def test_identical_arrays(self):
        lhs = mx.array([1, 2, 3, 4, 5])
        rhs = mx.array([1, 2, 3, 4, 5])
        assert get_prefix_length(lhs, rhs) == 5

    def test_no_common_prefix(self):
        lhs = mx.array([1, 2, 3])
        rhs = mx.array([4, 5, 6])
        assert get_prefix_length(lhs, rhs) == 0

    def test_partial_prefix(self):
        lhs = mx.array([1, 2, 3, 4, 5])
        rhs = mx.array([1, 2, 3, 7, 8])
        assert get_prefix_length(lhs, rhs) == 3

    def test_prompt_longer_than_cached(self):
        # Common prefix is capped by the shorter array.
        lhs = mx.array([1, 2, 3, 4, 5])
        rhs = mx.array([1, 2, 3])
        assert get_prefix_length(lhs, rhs) == 3

    def test_cached_longer_than_prompt(self):
        lhs = mx.array([1, 2, 3])
        rhs = mx.array([1, 2, 3, 4, 5])
        assert get_prefix_length(lhs, rhs) == 3

    def test_single_token_match(self):
        lhs = mx.array([1, 2, 3])
        rhs = mx.array([1, 5, 6])
        assert get_prefix_length(lhs, rhs) == 1

    def test_empty_prompt(self):
        lhs = mx.array([]).astype(mx.int32)
        rhs = mx.array([1, 2, 3])
        assert get_prefix_length(lhs, rhs) == 0

    def test_empty_cached(self):
        lhs = mx.array([1, 2, 3])
        rhs = mx.array([]).astype(mx.int32)
        assert get_prefix_length(lhs, rhs) == 0

    def test_both_empty(self):
        lhs = mx.array([]).astype(mx.int32)
        rhs = mx.array([]).astype(mx.int32)
        assert get_prefix_length(lhs, rhs) == 0
class TestKVPrefix:
    """Basic KVPrefixCache state-management tests (no real model needed)."""
    @pytest.fixture
    def mock_tokenizer(self):
        """Create a minimal mock tokenizer for tests that don't need real tokenization."""
        from unittest.mock import MagicMock
        tokenizer = MagicMock()
        tokenizer.encode.return_value = [1, 2, 3]
        return tokenizer
    # NOTE(review): the mock_tokenizer fixture is requested but never used by
    # the tests below — confirm whether it can be dropped from their signatures.
    def test_starts_empty(self, mock_tokenizer):
        cache = KVPrefixCache(None)
        assert len(cache.prompts) == 0
        assert len(cache.caches) == 0
    def test_clear_empties_cache(self, mock_tokenizer):
        # Seed one entry, then verify clear() removes both parallel lists.
        cache = KVPrefixCache(None)
        cache.prompts.append(mx.array([1, 2, 3]))
        cache.caches.append([KVCache()])
        cache.clear()
        assert len(cache.prompts) == 0
        assert len(cache.caches) == 0
    def test_clear_on_empty_cache(self, mock_tokenizer):
        # clear() on an already-empty cache must be a no-op, not an error.
        cache = KVPrefixCache(None)
        cache.clear()
        assert len(cache.prompts) == 0
def _load_gpt_oss() -> tuple[Model, object]:
    """Load the local GPT-OSS test model and its tokenizer.

    Imports are deferred so collecting this module stays cheap when the
    slow model-backed tests are skipped.
    """
    from mlx_lm.utils import load_model
    from exo.worker.engines.mlx.utils_mlx import load_tokenizer_for_model_id
    model_path = DEFAULT_GPT_OSS_CONFIG.model_path
    model_id = ModelId(DEFAULT_GPT_OSS_MODEL_ID)
    # lazy=False loads the weights immediately rather than on first use.
    model, _ = load_model(model_path, lazy=False)
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    return cast(Model, model), tokenizer
@pytest.mark.slow
@pytest.mark.skipif(
not _check_model_exists(),
reason=f"GPT-OSS model not found at {DEFAULT_GPT_OSS_CONFIG.model_path}",
)
class TestKVPrefixCacheWithModel:
    @pytest.fixture(scope="class")
    def model_and_tokenizer(self):
        """Load the GPT-OSS model and tokenizer once, shared across this class."""
        model, tokenizer = _load_gpt_oss()
        return model, tokenizer
    def test_prefill_populates_cache(self, model_and_tokenizer):
        """Prefill fills the KV cache with every prompt token except the last."""
        model, tokenizer = model_and_tokenizer
        task = TextGenerationTaskParams(
            model=DEFAULT_GPT_OSS_MODEL_ID,
            input=[InputMessage(role="user", content="Hello!!")],
            max_output_tokens=1,
        )
        prompt = apply_chat_template(tokenizer, task)
        tokens = encode_prompt(tokenizer, prompt)
        cache = make_kv_cache(model)
        # Single-node prefill: no distributed group, no progress callbacks.
        _, _, snapshots = prefill(
            model,
            tokenizer,
            make_sampler(0.0),
            tokens,
            cache,
            group=None,
            on_prefill_progress=None,
            distributed_prompt_progress_callback=None,
        )
        # Cache should now hold the prompt tokens minus one
        assert cache_length(cache) == len(tokens) - 1
        # Snapshots should be available for models with non-KV caches
        assert len(snapshots) > 0
def test_add_and_get_exact_match(self, model_and_tokenizer):
model, tokenizer = model_and_tokenizer
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Test exact")],
max_output_tokens=1,
)
prompt = apply_chat_template(tokenizer, task)
tokens = encode_prompt(tokenizer, prompt)
cache = make_kv_cache(model)
_, _, snapshots = prefill(
model,
tokenizer,
make_sampler(0.0),
tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache = KVPrefixCache(None)
kv_prefix_cache.add_kv_cache(tokens, cache, snapshots)
assert len(kv_prefix_cache.prompts) == 1
stored_length = cache_length(kv_prefix_cache.caches[0])
assert stored_length > 0
# Retrieve with same prompt: exact match
result_cache, remaining_tokens, matched_index = kv_prefix_cache.get_kv_cache(
model, tokens
)
assert matched_index == 0
# Exact match returns last token(s) — for models with SSM/rotating caches,
# snapshot availability constrains how far back we can trim, so remaining
# may be 1 or 2 tokens depending on the model.
assert len(remaining_tokens) >= 1
assert mx.array_equal(remaining_tokens, tokens[-len(remaining_tokens) :])
def test_add_and_get_prefix_match(self, model_and_tokenizer):
"""get_kv_cache with a longer prompt sharing prefix should return partial match."""
model, tokenizer = model_and_tokenizer
short_task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Hi")],
max_output_tokens=1,
)
short_prompt = apply_chat_template(tokenizer, short_task)
short_tokens = encode_prompt(tokenizer, short_prompt)
cache = make_kv_cache(model)
_, _, snapshots = prefill(
model,
tokenizer,
make_sampler(0.0),
short_tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache = KVPrefixCache(None)
kv_prefix_cache.add_kv_cache(short_tokens, cache, snapshots)
# Query with longer prompt that shares the chat template prefix
long_task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Hi there, how are you?")],
max_output_tokens=1,
)
long_prompt = apply_chat_template(tokenizer, long_task)
long_tokens = encode_prompt(tokenizer, long_prompt)
# The prompts share a prefix (chat template preamble + "Hi")
expected_prefix = get_prefix_length(long_tokens, short_tokens)
assert expected_prefix > 0, (
"Prompts should share a prefix from the chat template"
)
result_cache, remaining_tokens, matched_index = kv_prefix_cache.get_kv_cache(
model, long_tokens
)
assert matched_index == 0
# remaining_tokens covers from snapshot restore position to end
assert len(remaining_tokens) >= len(long_tokens) - expected_prefix
def test_stored_cache_not_mutated_after_get_and_generation(
self, model_and_tokenizer
):
"""Getting a cache and then mutating it (as generation does) must not corrupt stored cache."""
model, tokenizer = model_and_tokenizer
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Mutation test")],
max_output_tokens=1,
)
prompt = apply_chat_template(tokenizer, task)
tokens = encode_prompt(tokenizer, prompt)
cache = make_kv_cache(model)
_, _, snapshots = prefill(
model,
tokenizer,
make_sampler(0.0),
tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache = KVPrefixCache(None)
kv_prefix_cache.add_kv_cache(tokens, cache, snapshots)
stored_length = cache_length(kv_prefix_cache.caches[0])
# Get cache and mutate it (simulating what generation does)
result_cache, _, matched_index = kv_prefix_cache.get_kv_cache(model, tokens)
assert matched_index == 0
# Simulate generation: feed many additional tokens through the cache
head_dim = result_cache[0].keys.shape[-1]
num_heads = result_cache[0].keys.shape[1]
extra_keys = mx.random.normal((1, num_heads, 50, head_dim))
extra_values = mx.random.normal((1, num_heads, 50, head_dim))
for layer_cache in result_cache:
layer_cache.update_and_fetch(extra_keys, extra_values)
mx.eval([c.keys for c in result_cache])
# Stored cache must be unchanged
assert cache_length(kv_prefix_cache.caches[0]) == stored_length
def test_stored_cache_survives_repeated_get_mutate_cycles(
self, model_and_tokenizer
):
"""Multiple get+mutate cycles (like repeated user requests) must not corrupt cache."""
model, tokenizer = model_and_tokenizer
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Repeat test")],
max_output_tokens=1,
)
prompt = apply_chat_template(tokenizer, task)
tokens = encode_prompt(tokenizer, prompt)
cache = make_kv_cache(model)
_, _, snapshots = prefill(
model,
tokenizer,
make_sampler(0.0),
tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache = KVPrefixCache(None)
kv_prefix_cache.add_kv_cache(tokens, cache, snapshots)
stored_length = cache_length(kv_prefix_cache.caches[0])
for i in range(3):
result_cache, _, _ = kv_prefix_cache.get_kv_cache(model, tokens)
head_dim = result_cache[0].keys.shape[-1]
num_heads = result_cache[0].keys.shape[1]
extra = mx.random.normal((1, num_heads, 30, head_dim))
for layer_cache in result_cache:
layer_cache.update_and_fetch(extra, extra)
mx.eval([c.keys for c in result_cache])
assert cache_length(kv_prefix_cache.caches[0]) == stored_length, (
f"Failed on loop {i}"
)
def test_mlx_generate_populates_cache(self, model_and_tokenizer):
"""mlx_generate should save the cache after generation completes."""
model, tokenizer = model_and_tokenizer
kv_prefix_cache = KVPrefixCache(None)
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Hello")],
max_output_tokens=5,
)
prompt = apply_chat_template(tokenizer, task)
prompt_tokens = encode_prompt(tokenizer, prompt)
# Consume the entire generator so the cache-saving code after yield runs
generated_tokens = 0
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task,
prompt=prompt,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
generated_tokens += 1
assert len(kv_prefix_cache.prompts) == 1
assert len(kv_prefix_cache.caches) == 1
# Cache should contain prompt + generated tokens
expected_length = len(prompt_tokens) + generated_tokens
assert cache_length(kv_prefix_cache.caches[0]) == expected_length
def test_mlx_generate_second_call_gets_prefix_hit(self, model_and_tokenizer):
"""Second mlx_generate call with same prompt should get a prefix hit from stored cache."""
model, tokenizer = model_and_tokenizer
kv_prefix_cache = KVPrefixCache(None)
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Reuse test")],
max_output_tokens=5,
)
prompt = apply_chat_template(tokenizer, task)
prompt_tokens = encode_prompt(tokenizer, prompt)
# First generation populates cache
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task,
prompt=prompt,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
pass
assert len(kv_prefix_cache.prompts) == 1
# Second call should find a prefix match (the stored cache contains
# prompt + generated tokens, which shares the prompt prefix)
result_cache, remaining_tokens, matched_index = kv_prefix_cache.get_kv_cache(
model, prompt_tokens
)
# The stored cache is longer than the prompt (it includes generated tokens),
# so this is a prefix match where our prompt is fully contained
assert matched_index == 0
# Exact match: remaining_tokens is just the last token and the one before
assert len(remaining_tokens) == 2
assert mx.array_equal(remaining_tokens, prompt_tokens[-2:])
def test_mlx_generate_long_prompt_updates_cache_in_place(self, model_and_tokenizer):
"""With a prompt > 1000 tokens, second generation should update the cache entry in-place."""
model, tokenizer = model_and_tokenizer
kv_prefix_cache = KVPrefixCache(None)
# Build a long user message (> 1000 tokens) to exceed _MIN_PREFIX_HIT_TO_UPDATE
base_text = "The quick brown fox jumps over the lazy dog. "
base_tokens = tokenizer.encode(base_text)
repeats = (1200 // len(base_tokens)) + 2
long_content = base_text * repeats
task1 = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content=long_content)],
max_output_tokens=5,
)
prompt1 = apply_chat_template(tokenizer, task1)
prompt1_tokens = encode_prompt(tokenizer, prompt1)
assert len(prompt1_tokens) > 1000, (
"Prompt must exceed _MIN_PREFIX_HIT_TO_UPDATE"
)
# First generation populates the cache (must prefill all tokens)
t0 = time.perf_counter()
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task1,
prompt=prompt1,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
pass
first_gen_time = time.perf_counter() - t0
assert len(kv_prefix_cache.prompts) == 1
first_cache_length = cache_length(kv_prefix_cache.caches[0])
# Second generation: same long prompt + extra content (simulating multi-turn)
task2 = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[
InputMessage(role="user", content=long_content),
InputMessage(role="assistant", content="Sure, I can help."),
InputMessage(role="user", content="Tell me more."),
],
max_output_tokens=5,
)
prompt2 = apply_chat_template(tokenizer, task2)
prompt2_tokens = encode_prompt(tokenizer, prompt2)
# Verify the prompts share a long prefix
prefix_len = get_prefix_length(prompt2_tokens, prompt1_tokens)
assert prefix_len > 1000, "Prompts must share > 1000 token prefix"
# Second generation should reuse the cached prefix (only prefill new tokens)
t0 = time.perf_counter()
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task2,
prompt=prompt2,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
pass
second_gen_time = time.perf_counter() - t0
# Second generation should be significantly faster due to prefix cache hit - hopefully not flaky
assert second_gen_time < first_gen_time * 0.5, (
f"Expected prefix cache speedup: "
f"first={first_gen_time:.2f}s, second={second_gen_time:.2f}s"
)
# With prefix_hit > 1000, should update in-place (not add a second entry)
assert len(kv_prefix_cache.prompts) == 1
# Updated cache should be longer (prompt2 + generated > prompt1 + generated)
updated_cache_length = cache_length(kv_prefix_cache.caches[0])
assert updated_cache_length > first_cache_length
def test_mlx_generate_stored_cache_not_mutated(self, model_and_tokenizer):
"""After mlx_generate saves a cache, a second generation must not corrupt the stored copy."""
model, tokenizer = model_and_tokenizer
kv_prefix_cache = KVPrefixCache(None)
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="Immutable test")],
max_output_tokens=5,
)
prompt = apply_chat_template(tokenizer, task)
# First generation populates cache
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task,
prompt=prompt,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
pass
firstcache_length = cache_length(kv_prefix_cache.caches[0])
# Second generation gets the cache and mutates it during generation
for _response in mlx_generate(
model=model,
tokenizer=tokenizer,
task=task,
prompt=prompt,
kv_prefix_cache=kv_prefix_cache,
group=None,
):
pass
# The first stored cache must not have been mutated by the second generation
assert cache_length(kv_prefix_cache.caches[0]) == firstcache_length
def test_evicts_lru_entry_under_memory_pressure(self, model_and_tokenizer):
"""Under memory pressure, adding a new cache entry evicts the least recently used one."""
model, tokenizer = model_and_tokenizer
kv_prefix_cache = KVPrefixCache(None)
# Add three cache entries with different prompts
prompts = ["First entry", "Second entry", "Third entry"]
for i, content in enumerate(prompts):
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content=content)],
max_output_tokens=1,
)
prompt = apply_chat_template(tokenizer, task)
tokens = encode_prompt(tokenizer, prompt)
cache = make_kv_cache(model)
prefill(
model,
tokenizer,
make_sampler(0.0),
tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache.add_kv_cache(tokens, cache)
# Stagger _last_used so LRU order is deterministic
kv_prefix_cache._last_used[i] = float(i)
assert len(kv_prefix_cache.prompts) == 3
# Access the third entry to make it most recently used
kv_prefix_cache._last_used[2] = 100.0
# Entry 0 (_last_used=0.0) is LRU, entry 1 (_last_used=1.0) is next
# Simulate memory pressure: return usage above _MEMORY_THRESHOLD (0.9)
with patch(
"exo.worker.engines.mlx.cache.get_memory_used_percentage",
return_value=0.95,
):
# Trigger eviction by adding a new entry
task = TextGenerationTaskParams(
model=DEFAULT_GPT_OSS_MODEL_ID,
input=[InputMessage(role="user", content="New entry")],
max_output_tokens=1,
)
prompt = apply_chat_template(tokenizer, task)
tokens = encode_prompt(tokenizer, prompt)
cache = make_kv_cache(model)
prefill(
model,
tokenizer,
make_sampler(0.0),
tokens,
cache,
group=None,
on_prefill_progress=None,
distributed_prompt_progress_callback=None,
)
kv_prefix_cache.add_kv_cache(tokens, cache)
# LRU entries should have been evicted (entries 0, 1, 2 in order of _last_used)
# Since fake_active stays above threshold after each eviction (we don't change it),
# all old entries get evicted, leaving only the newly added one
assert len(kv_prefix_cache.prompts) == 1
# The surviving entry should be the newly added one
assert get_prefix_length(kv_prefix_cache.prompts[0], tokens) == len(tokens)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_kv_prefix_cache.py",
"license": "Apache License 2.0",
"lines": 507,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/download/tests/test_download_verification.py | """Tests for download verification and cache behavior."""
import time
from collections.abc import AsyncIterator
from datetime import timedelta
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import aiofiles
import aiofiles.os as aios
import pytest
from pydantic import TypeAdapter
from exo.download.download_utils import (
delete_model,
fetch_file_list_with_cache,
)
from exo.shared.types.common import ModelId
from exo.shared.types.memory import Memory
from exo.shared.types.worker.downloads import FileListEntry, RepoFileDownloadProgress
@pytest.fixture
def model_id() -> ModelId:
    """A fixed test-only model identifier shared by the tests in this module."""
    repo = "test-org/test-model"
    return ModelId(repo)
@pytest.fixture
async def temp_models_dir(tmp_path: Path) -> AsyncIterator[Path]:
    """Yield a temporary models directory while EXO_MODELS_DIR points at it."""
    root = tmp_path / "models"
    await aios.makedirs(root, exist_ok=True)
    with patch("exo.download.download_utils.EXO_MODELS_DIR", root):
        yield root
class TestFileVerification:
    """Tests for file size verification in _download_file."""

    async def test_redownload_when_file_size_changes_upstream(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that files with mismatched sizes are re-downloaded."""
        # Import inside test to allow patching
        from exo.download.download_utils import (
            _download_file,  # pyright: ignore[reportPrivateUsage]
        )

        target_dir = tmp_path / "downloads"
        await aios.makedirs(target_dir, exist_ok=True)
        # Create a local file with wrong size
        local_file = target_dir / "test.safetensors"
        async with aiofiles.open(local_file, "wb") as f:
            await f.write(b"local content")  # 13 bytes
        remote_size = 1000  # Different from local
        remote_hash = "abc123"
        with (
            patch(
                "exo.download.download_utils.file_meta",
                new_callable=AsyncMock,
                return_value=(remote_size, remote_hash),
            ) as mock_file_meta,
            patch(
                "exo.download.download_utils.create_http_session"
            ) as mock_session_factory,
        ):
            # Set up mock HTTP response for re-download
            mock_response = MagicMock()
            mock_response.status = 200
            # side_effect: one full-body chunk, then b"" to signal EOF.
            mock_response.content.read = AsyncMock(  # pyright: ignore[reportAny]
                side_effect=[b"x" * remote_size, b""]
            )
            mock_session = MagicMock()
            # Wire up the async context-manager protocol by hand, since
            # MagicMock does not implement __aenter__/__aexit__ itself.
            mock_session.get.return_value.__aenter__ = AsyncMock(  # pyright: ignore[reportAny]
                return_value=mock_response
            )
            mock_session.get.return_value.__aexit__ = AsyncMock(  # pyright: ignore[reportAny]
                return_value=None
            )
            mock_session_factory.return_value.__aenter__ = AsyncMock(  # pyright: ignore[reportAny]
                return_value=mock_session
            )
            mock_session_factory.return_value.__aexit__ = AsyncMock(  # pyright: ignore[reportAny]
                return_value=None
            )
            # Mock calc_hash to return the expected hash
            with patch(
                "exo.download.download_utils.calc_hash",
                new_callable=AsyncMock,
                return_value=remote_hash,
            ):
                await _download_file(model_id, "main", "test.safetensors", target_dir)
            # file_meta should be called twice: once for verification, once for download
            assert mock_file_meta.call_count == 2

    async def test_skip_download_when_file_size_matches(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that files with matching sizes are not re-downloaded."""
        from exo.download.download_utils import (
            _download_file,  # pyright: ignore[reportPrivateUsage]
        )

        target_dir = tmp_path / "downloads"
        await aios.makedirs(target_dir, exist_ok=True)
        # Create a local file
        local_file = target_dir / "test.safetensors"
        local_content = b"local content"
        async with aiofiles.open(local_file, "wb") as f:
            await f.write(local_content)
        remote_size = len(local_content)  # Same as local
        remote_hash = "abc123"
        with (
            patch(
                "exo.download.download_utils.file_meta",
                new_callable=AsyncMock,
                return_value=(remote_size, remote_hash),
            ) as mock_file_meta,
            patch(
                "exo.download.download_utils.create_http_session"
            ) as mock_session_factory,
        ):
            result = await _download_file(
                model_id, "main", "test.safetensors", target_dir
            )
            # Should return immediately without downloading
            assert result == local_file
            mock_file_meta.assert_called_once()
            mock_session_factory.assert_not_called()

    async def test_offline_fallback_uses_local_file(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that local files are used when network is unavailable."""
        from exo.download.download_utils import (
            _download_file,  # pyright: ignore[reportPrivateUsage]
        )

        target_dir = tmp_path / "downloads"
        await aios.makedirs(target_dir, exist_ok=True)
        # Create a local file
        local_file = target_dir / "test.safetensors"
        async with aiofiles.open(local_file, "wb") as f:
            await f.write(b"local content")
        with (
            patch(
                "exo.download.download_utils.file_meta",
                new_callable=AsyncMock,
                side_effect=Exception("Network error"),
            ),
            patch(
                "exo.download.download_utils.create_http_session"
            ) as mock_session_factory,
        ):
            result = await _download_file(
                model_id, "main", "test.safetensors", target_dir
            )
            # Should return local file without attempting download
            assert result == local_file
            mock_session_factory.assert_not_called()
class TestFileListCache:
    """Tests for file list caching behavior."""

    async def test_fetch_fresh_and_update_cache(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that fresh data is fetched and cache is updated."""
        models_dir = tmp_path / "models"
        file_list = [
            FileListEntry(type="file", path="model.safetensors", size=1000),
            FileListEntry(type="file", path="config.json", size=100),
        ]
        with (
            patch("exo.download.download_utils.EXO_MODELS_DIR", models_dir),
            patch(
                "exo.download.download_utils.fetch_file_list_with_retry",
                new_callable=AsyncMock,
                return_value=file_list,
            ) as mock_fetch,
        ):
            result = await fetch_file_list_with_cache(model_id, "main")
            assert result == file_list
            mock_fetch.assert_called_once()
            # Verify cache was written
            cache_file = (
                models_dir
                / "caches"
                / model_id.normalize()
                / f"{model_id.normalize()}--main--file_list.json"
            )
            assert await aios.path.exists(cache_file)
            async with aiofiles.open(cache_file, "r") as f:
                cached_data = TypeAdapter(list[FileListEntry]).validate_json(
                    await f.read()
                )
            assert cached_data == file_list

    async def test_fallback_to_cache_when_fetch_fails(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that cached data is used when fetch fails."""
        models_dir = tmp_path / "models"
        cache_dir = models_dir / "caches" / model_id.normalize()
        await aios.makedirs(cache_dir, exist_ok=True)
        # Create cache file
        cached_file_list = [
            FileListEntry(type="file", path="model.safetensors", size=1000),
        ]
        cache_file = cache_dir / f"{model_id.normalize()}--main--file_list.json"
        async with aiofiles.open(cache_file, "w") as f:
            await f.write(
                TypeAdapter(list[FileListEntry]).dump_json(cached_file_list).decode()
            )
        with (
            patch("exo.download.download_utils.EXO_MODELS_DIR", models_dir),
            patch(
                "exo.download.download_utils.fetch_file_list_with_retry",
                new_callable=AsyncMock,
                side_effect=Exception("Network error"),
            ),
        ):
            result = await fetch_file_list_with_cache(model_id, "main")
            assert result == cached_file_list

    async def test_error_propagates_when_no_cache(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test that errors propagate when fetch fails and no cache exists."""
        models_dir = tmp_path / "models"
        with (
            patch("exo.download.download_utils.EXO_MODELS_DIR", models_dir),
            patch(
                "exo.download.download_utils.fetch_file_list_with_retry",
                new_callable=AsyncMock,
                side_effect=Exception("Network error"),
            ),
            pytest.raises(Exception, match="Network error"),
        ):
            await fetch_file_list_with_cache(model_id, "main")
class TestModelDeletion:
    """Tests for model deletion including cache cleanup."""

    async def test_delete_model_clears_cache(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Deleting a model removes both its weights directory and its cache."""
        models_root = tmp_path / "models"
        weights_dir = models_root / model_id.normalize()
        caches_dir = models_root / "caches" / model_id.normalize()
        # Lay down both the model directory and its cache directory with content.
        for directory in (weights_dir, caches_dir):
            await aios.makedirs(directory, exist_ok=True)
        async with aiofiles.open(weights_dir / "model.safetensors", "w") as fh:
            await fh.write("model data")
        async with aiofiles.open(caches_dir / "file_list.json", "w") as fh:
            await fh.write("[]")
        with patch("exo.download.download_utils.EXO_MODELS_DIR", models_root):
            deleted = await delete_model(model_id)
        assert deleted is True
        assert not await aios.path.exists(weights_dir)
        assert not await aios.path.exists(caches_dir)

    async def test_delete_model_only_cache_exists(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Test deleting when only cache exists (model already deleted)."""
        models_root = tmp_path / "models"
        caches_dir = models_root / "caches" / model_id.normalize()
        # Only the cache directory exists; the model directory was never created.
        await aios.makedirs(caches_dir, exist_ok=True)
        async with aiofiles.open(caches_dir / "file_list.json", "w") as fh:
            await fh.write("[]")
        with patch("exo.download.download_utils.EXO_MODELS_DIR", models_root):
            deleted = await delete_model(model_id)
        # False: there was no model directory to remove ...
        assert deleted is False
        # ... but the stale cache is still swept away.
        assert not await aios.path.exists(caches_dir)

    async def test_delete_nonexistent_model(
        self, model_id: ModelId, tmp_path: Path
    ) -> None:
        """Deleting a model with neither weights nor cache reports False."""
        models_root = tmp_path / "models"
        await aios.makedirs(models_root, exist_ok=True)
        with patch("exo.download.download_utils.EXO_MODELS_DIR", models_root):
            deleted = await delete_model(model_id)
        assert deleted is False
class TestProgressResetOnRedownload:
    """Tests for progress tracking when files are re-downloaded."""

    async def test_progress_resets_correctly_on_redownload(
        self, model_id: ModelId
    ) -> None:
        """Test that progress tracking resets when a file is re-downloaded.

        When a file is deleted and re-downloaded (due to size mismatch),
        the progress tracking should reset rather than calculating negative
        downloaded_this_session values.
        """
        # Simulate file_progress dict as it exists in download_shard
        file_progress: dict[str, RepoFileDownloadProgress] = {}
        # Initialize with old file progress (simulating existing large file)
        old_file_size = 1_500_000_000  # 1.5 GB
        file_progress["model.safetensors"] = RepoFileDownloadProgress(
            repo_id=model_id,
            repo_revision="main",
            file_path="model.safetensors",
            downloaded=Memory.from_bytes(old_file_size),
            downloaded_this_session=Memory.from_bytes(0),
            total=Memory.from_bytes(old_file_size),
            speed=0,
            eta=timedelta(0),
            status="not_started",
            start_time=time.time() - 10,  # Started 10 seconds ago
        )
        # Simulate the logic from on_progress_wrapper after re-download starts
        # This is the exact logic from the fixed on_progress_wrapper
        curr_bytes = 100_000  # 100 KB - new download just started
        previous_progress = file_progress.get("model.safetensors")
        # Detect re-download: curr_bytes < previous downloaded
        is_redownload = (
            previous_progress is not None
            and curr_bytes < previous_progress.downloaded.in_bytes
        )
        if is_redownload or previous_progress is None:
            # Fresh download or re-download: reset tracking
            start_time = time.time()
            downloaded_this_session = curr_bytes
        else:
            # Continuing download: accumulate
            start_time = previous_progress.start_time
            downloaded_this_session = (
                previous_progress.downloaded_this_session.in_bytes
                + (curr_bytes - previous_progress.downloaded.in_bytes)
            )
        # Key assertions
        assert is_redownload is True, "Should detect re-download scenario"
        assert downloaded_this_session == curr_bytes, (
            "downloaded_this_session should equal curr_bytes on re-download"
        )
        assert downloaded_this_session > 0, (
            "downloaded_this_session should be positive, not negative"
        )
        # Calculate speed (should be positive)
        elapsed = time.time() - start_time
        speed = downloaded_this_session / elapsed if elapsed > 0 else 0
        assert speed >= 0, "Speed should be non-negative"

    async def test_progress_accumulates_on_continuing_download(
        self, model_id: ModelId
    ) -> None:
        """Test that progress accumulates correctly for continuing downloads.

        When a download continues from where it left off (resume),
        the progress should accumulate correctly.
        """
        file_progress: dict[str, RepoFileDownloadProgress] = {}
        # Initialize with partial download progress
        initial_downloaded = 500_000  # 500 KB already downloaded
        start_time = time.time() - 5  # Started 5 seconds ago
        file_progress["model.safetensors"] = RepoFileDownloadProgress(
            repo_id=model_id,
            repo_revision="main",
            file_path="model.safetensors",
            downloaded=Memory.from_bytes(initial_downloaded),
            downloaded_this_session=Memory.from_bytes(initial_downloaded),
            total=Memory.from_bytes(1_000_000),
            speed=100_000,
            eta=timedelta(seconds=5),
            status="in_progress",
            start_time=start_time,
        )
        # Progress callback with more bytes downloaded
        curr_bytes = 600_000  # 600 KB - continuing download
        previous_progress = file_progress.get("model.safetensors")
        # This is NOT a re-download (curr_bytes > previous downloaded)
        is_redownload = (
            previous_progress is not None
            and curr_bytes < previous_progress.downloaded.in_bytes
        )
        if is_redownload or previous_progress is None:
            downloaded_this_session = curr_bytes
            used_start_time = time.time()
        else:
            used_start_time = previous_progress.start_time
            downloaded_this_session = (
                previous_progress.downloaded_this_session.in_bytes
                + (curr_bytes - previous_progress.downloaded.in_bytes)
            )
        # Key assertions
        assert is_redownload is False, (
            "Should NOT detect re-download for continuing download"
        )
        assert used_start_time == start_time, "Should preserve original start_time"
        expected_session = initial_downloaded + (curr_bytes - initial_downloaded)
        assert downloaded_this_session == expected_session, (
            f"Should accumulate: {downloaded_this_session} == {expected_session}"
        )
        assert downloaded_this_session == 600_000, (
            "downloaded_this_session should equal total downloaded so far"
        )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/download/tests/test_download_verification.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/download/coordinator.py | import asyncio
from dataclasses import dataclass, field
import anyio
from anyio import current_time
from loguru import logger
from exo.download.download_utils import (
RepoDownloadProgress,
delete_model,
map_repo_download_progress_to_download_progress_data,
resolve_model_in_path,
)
from exo.download.shard_downloader import ShardDownloader
from exo.shared.constants import EXO_MODELS_DIR, EXO_MODELS_PATH
from exo.shared.models.model_cards import ModelId, get_model_cards
from exo.shared.types.commands import (
CancelDownload,
DeleteDownload,
ForwarderDownloadCommand,
StartDownload,
)
from exo.shared.types.common import NodeId
from exo.shared.types.events import (
Event,
NodeDownloadProgress,
)
from exo.shared.types.worker.downloads import (
DownloadCompleted,
DownloadFailed,
DownloadOngoing,
DownloadPending,
DownloadProgress,
)
from exo.shared.types.worker.shards import PipelineShardMetadata, ShardMetadata
from exo.utils.channels import Receiver, Sender
from exo.utils.task_group import TaskGroup
@dataclass
class DownloadCoordinator:
    """Per-node coordinator: consumes download commands addressed to this
    node, drives the shard downloader, and publishes download-progress events.
    """

    node_id: NodeId
    shard_downloader: ShardDownloader
    # Inbound stream of commands (start/cancel/delete) from the forwarder.
    download_command_receiver: Receiver[ForwarderDownloadCommand]
    # Outbound stream for NodeDownloadProgress events.
    event_sender: Sender[Event]
    # When True, never hit the network; only locally present models are usable.
    offline: bool = False
    # Local state
    download_status: dict[ModelId, DownloadProgress] = field(default_factory=dict)
    active_downloads: dict[ModelId, asyncio.Task[None]] = field(default_factory=dict)
    _tg: TaskGroup = field(init=False, default_factory=TaskGroup)
    # Per-model throttle for download progress events
    _last_progress_time: dict[ModelId, float] = field(default_factory=dict)
def __post_init__(self) -> None:
    # Route downloader progress callbacks into this coordinator's event stream.
    self.shard_downloader.on_progress(self._download_progress_callback)
def _model_dir(self, model_id: ModelId) -> str:
    """Local directory (as a string path) where *model_id*'s files live."""
    target = EXO_MODELS_DIR / model_id.normalize()
    return str(target)
async def _download_progress_callback(
    self, callback_shard: ShardMetadata, progress: RepoDownloadProgress
) -> None:
    """Translate downloader progress into NodeDownloadProgress events.

    Completion is always forwarded (and the finished task is dropped from
    active_downloads); in-progress updates are throttled to at most one
    event per model per second.

    NOTE(review): statuses other than "complete"/"in_progress" are ignored
    here — presumably failures are reported through another path; confirm.
    """
    model_id = callback_shard.model_card.model_id
    throttle_interval_secs = 1.0
    if progress.status == "complete":
        completed = DownloadCompleted(
            shard_metadata=callback_shard,
            node_id=self.node_id,
            total=progress.total,
            model_directory=self._model_dir(model_id),
        )
        self.download_status[model_id] = completed
        await self.event_sender.send(
            NodeDownloadProgress(download_progress=completed)
        )
        if model_id in self.active_downloads:
            del self.active_downloads[model_id]
        # Drop the throttle timestamp so a future re-download starts fresh.
        self._last_progress_time.pop(model_id, None)
    elif (
        progress.status == "in_progress"
        and current_time() - self._last_progress_time.get(model_id, 0.0)
        > throttle_interval_secs
    ):
        ongoing = DownloadOngoing(
            node_id=self.node_id,
            shard_metadata=callback_shard,
            download_progress=map_repo_download_progress_to_download_progress_data(
                progress
            ),
            model_directory=self._model_dir(model_id),
        )
        self.download_status[model_id] = ongoing
        await self.event_sender.send(
            NodeDownloadProgress(download_progress=ongoing)
        )
        self._last_progress_time[model_id] = current_time()
async def run(self) -> None:
    """Main loop: run the command processor and initial-progress emitter.

    On exit (normal or via cancellation) every still-active download task
    is cancelled so no download outlives the coordinator.
    """
    logger.info(
        f"Starting DownloadCoordinator{' (offline mode)' if self.offline else ''}"
    )
    try:
        async with self._tg as tg:
            tg.start_soon(self._command_processor)
            tg.start_soon(self._emit_existing_download_progress)
    finally:
        # Best-effort cleanup of in-flight downloads.
        for task in self.active_downloads.values():
            task.cancel()
def shutdown(self) -> None:
    """Stop the coordinator by cancelling its internal task group."""
    self._tg.cancel_tasks()
async def _command_processor(self) -> None:
    """Consume forwarded download commands and dispatch those for this node."""
    with self.download_command_receiver as commands:
        async for cmd in commands:
            # Only process commands targeting this node
            if cmd.command.target_node_id != self.node_id:
                continue
            match cmd.command:
                case StartDownload(shard_metadata=shard):
                    await self._start_download(shard)
                case DeleteDownload(model_id=model_id):
                    await self._delete_download(model_id)
                case CancelDownload(model_id=model_id):
                    await self._cancel_download(model_id)
async def _cancel_download(self, model_id: ModelId) -> None:
    """Cancel the in-flight download task for *model_id*, if one exists."""
    if model_id in self.active_downloads and model_id in self.download_status:
        logger.info(f"Cancelling download for {model_id}")
        # NOTE(review): the download_status entry is left as-is here; since
        # _start_download skips models whose status is DownloadOngoing, a
        # cancelled download may not be restartable — confirm this is intended.
        self.active_downloads.pop(model_id).cancel()
    async def _start_download(self, shard: ShardMetadata) -> None:
        """Begin downloading the model behind ``shard`` unless already handled.

        Resolution order: skip if a download is ongoing/complete/failed; serve
        read-only from EXO_MODELS_PATH when present; emit pending; short-circuit
        if the downloader reports the files complete; fail fast in offline mode;
        otherwise spawn the background download task.
        """
        model_id = shard.model_card.model_id
        # Check if already downloading, complete, or recently failed
        if model_id in self.download_status:
            status = self.download_status[model_id]
            if isinstance(status, (DownloadOngoing, DownloadCompleted, DownloadFailed)):
                logger.debug(
                    f"Download for {model_id} already in progress, complete, or failed, skipping"
                )
                return
        # Check EXO_MODELS_PATH for pre-downloaded models
        found_path = resolve_model_in_path(model_id)
        if found_path is not None:
            logger.info(
                f"DownloadCoordinator: Model {model_id} found in EXO_MODELS_PATH at {found_path}"
            )
            # read_only marks the model as externally managed; _delete_download refuses to remove it.
            completed = DownloadCompleted(
                shard_metadata=shard,
                node_id=self.node_id,
                total=shard.model_card.storage_size,
                model_directory=str(found_path),
                read_only=True,
            )
            self.download_status[model_id] = completed
            await self.event_sender.send(
                NodeDownloadProgress(download_progress=completed)
            )
            return
        # Emit pending status
        progress = DownloadPending(
            shard_metadata=shard,
            node_id=self.node_id,
            model_directory=self._model_dir(model_id),
        )
        self.download_status[model_id] = progress
        await self.event_sender.send(NodeDownloadProgress(download_progress=progress))
        # Check initial status from downloader
        initial_progress = (
            await self.shard_downloader.get_shard_download_status_for_shard(shard)
        )
        if initial_progress.status == "complete":
            # Files already fully on disk — report completion without downloading.
            completed = DownloadCompleted(
                shard_metadata=shard,
                node_id=self.node_id,
                total=initial_progress.total,
                model_directory=self._model_dir(model_id),
            )
            self.download_status[model_id] = completed
            await self.event_sender.send(
                NodeDownloadProgress(download_progress=completed)
            )
            return
        if self.offline:
            # In offline mode a missing model is a terminal failure, not a retry.
            logger.warning(
                f"Offline mode: model {model_id} is not fully available locally, cannot download"
            )
            failed = DownloadFailed(
                shard_metadata=shard,
                node_id=self.node_id,
                error_message=f"Model files not found locally in offline mode: {model_id}",
                model_directory=self._model_dir(model_id),
            )
            self.download_status[model_id] = failed
            await self.event_sender.send(NodeDownloadProgress(download_progress=failed))
            return
        # Start actual download
        self._start_download_task(shard, initial_progress)
    def _start_download_task(
        self, shard: ShardMetadata, initial_progress: RepoDownloadProgress
    ) -> None:
        """Spawn the background task that performs the actual shard download.

        Emits a DownloadOngoing event immediately and registers the created
        task in ``self.active_downloads`` so it can be cancelled later.
        """
        model_id = shard.model_card.model_id
        # Emit ongoing status
        status = DownloadOngoing(
            node_id=self.node_id,
            shard_metadata=shard,
            download_progress=map_repo_download_progress_to_download_progress_data(
                initial_progress
            ),
            model_directory=self._model_dir(model_id),
        )
        self.download_status[model_id] = status
        # send_nowait: this method is synchronous, so it cannot await the sender.
        self.event_sender.send_nowait(NodeDownloadProgress(download_progress=status))
        async def download_wrapper() -> None:
            # Run the download; on failure record and emit DownloadFailed, and
            # always drop the task from active_downloads when it finishes.
            try:
                await self.shard_downloader.ensure_shard(shard)
            except Exception as e:
                logger.error(f"Download failed for {model_id}: {e}")
                failed = DownloadFailed(
                    shard_metadata=shard,
                    node_id=self.node_id,
                    error_message=str(e),
                    model_directory=self._model_dir(model_id),
                )
                self.download_status[model_id] = failed
                await self.event_sender.send(
                    NodeDownloadProgress(download_progress=failed)
                )
            finally:
                if model_id in self.active_downloads:
                    del self.active_downloads[model_id]
        task = asyncio.create_task(download_wrapper())
        self.active_downloads[model_id] = task
    async def _delete_download(self, model_id: ModelId) -> None:
        """Delete a model's files from disk and reset its tracked status.

        Read-only models (resolved from EXO_MODELS_PATH) are never deleted.
        An active download is cancelled first; afterwards a DownloadPending
        event is emitted so listeners reset their view of the model.
        """
        # Protect read-only models (from EXO_MODELS_PATH) from deletion
        if model_id in self.download_status:
            current = self.download_status[model_id]
            if isinstance(current, DownloadCompleted) and current.read_only:
                logger.warning(
                    f"Refusing to delete read-only model {model_id} (from EXO_MODELS_PATH)"
                )
                return
        # Cancel if active
        if model_id in self.active_downloads:
            logger.info(f"Cancelling active download for {model_id} before deletion")
            self.active_downloads[model_id].cancel()
            del self.active_downloads[model_id]
        # Delete from disk
        logger.info(f"Deleting model files for {model_id}")
        deleted = await delete_model(model_id)
        if deleted:
            logger.info(f"Successfully deleted model {model_id}")
        else:
            logger.warning(f"Model {model_id} was not found on disk")
        # Emit pending status to reset UI state, then remove from local tracking
        if model_id in self.download_status:
            current_status = self.download_status[model_id]
            pending = DownloadPending(
                shard_metadata=current_status.shard_metadata,
                node_id=self.node_id,
                model_directory=self._model_dir(model_id),
            )
            await self.event_sender.send(
                NodeDownloadProgress(download_progress=pending)
            )
            del self.download_status[model_id]
    async def _emit_existing_download_progress(self) -> None:
        """Periodically (every 60s) re-emit download status for known models.

        Covers models already present in the local cache and, when
        EXO_MODELS_PATH is set, pre-downloaded read-only models found there.
        Models with an active download task are skipped because their progress
        is emitted by the download callback.
        """
        try:
            while True:
                logger.debug(
                    "DownloadCoordinator: Fetching and emitting existing download progress..."
                )
                async for (
                    _,
                    progress,
                ) in self.shard_downloader.get_shard_download_status():
                    model_id = progress.shard.model_card.model_id
                    # Active downloads emit progress via the callback — don't overwrite
                    if model_id in self.active_downloads:
                        continue
                    if progress.status == "complete":
                        status: DownloadProgress = DownloadCompleted(
                            node_id=self.node_id,
                            shard_metadata=progress.shard,
                            total=progress.total,
                            model_directory=self._model_dir(
                                progress.shard.model_card.model_id
                            ),
                        )
                    elif progress.status in ["in_progress", "not_started"]:
                        # Nothing fetched this session -> still pending; otherwise ongoing.
                        if progress.downloaded_this_session.in_bytes == 0:
                            status = DownloadPending(
                                node_id=self.node_id,
                                shard_metadata=progress.shard,
                                model_directory=self._model_dir(
                                    progress.shard.model_card.model_id
                                ),
                                downloaded=progress.downloaded,
                                total=progress.total,
                            )
                        else:
                            status = DownloadOngoing(
                                node_id=self.node_id,
                                shard_metadata=progress.shard,
                                download_progress=map_repo_download_progress_to_download_progress_data(
                                    progress
                                ),
                                model_directory=self._model_dir(
                                    progress.shard.model_card.model_id
                                ),
                            )
                    else:
                        # Unknown status values are ignored rather than reported.
                        continue
                    self.download_status[progress.shard.model_card.model_id] = status
                    await self.event_sender.send(
                        NodeDownloadProgress(download_progress=status)
                    )
                # Scan EXO_MODELS_PATH for pre-downloaded models
                if EXO_MODELS_PATH is not None:
                    for card in await get_model_cards():
                        mid = card.model_id
                        if mid in self.active_downloads:
                            continue
                        if isinstance(
                            self.download_status.get(mid),
                            (DownloadCompleted, DownloadOngoing, DownloadFailed),
                        ):
                            continue
                        found = resolve_model_in_path(mid)
                        if found is not None:
                            # Synthesize a whole-model shard covering all layers on this node.
                            path_shard = PipelineShardMetadata(
                                model_card=card,
                                device_rank=0,
                                world_size=1,
                                start_layer=0,
                                end_layer=card.n_layers,
                                n_layers=card.n_layers,
                            )
                            path_completed: DownloadProgress = DownloadCompleted(
                                node_id=self.node_id,
                                shard_metadata=path_shard,
                                total=card.storage_size,
                                model_directory=str(found),
                                read_only=True,
                            )
                            self.download_status[mid] = path_completed
                            await self.event_sender.send(
                                NodeDownloadProgress(download_progress=path_completed)
                            )
                logger.debug(
                    "DownloadCoordinator: Done emitting existing download progress."
                )
                await anyio.sleep(60)
        except Exception as e:
            # NOTE(review): any exception here ends the periodic loop permanently
            # (the while-loop is inside the try); it is only logged, not re-raised.
            logger.error(
                f"DownloadCoordinator: Error emitting existing download progress: {e}"
            )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/download/coordinator.py",
"license": "Apache License 2.0",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/utils/keyed_backoff.py | import time
from typing import Generic, TypeVar
K = TypeVar("K")


class KeyedBackoff(Generic[K]):
    """Per-key exponential backoff tracker.

    Every key carries an attempt counter and the monotonic timestamp of its
    last attempt; the required delay doubles with each attempt, capped.
    """

    def __init__(self, base: float = 0.5, cap: float = 10.0):
        self._base = base
        self._cap = cap
        self._attempts: dict[K, int] = {}
        self._last_time: dict[K, float] = {}

    def should_proceed(self, key: K) -> bool:
        """Return True once the key's current backoff delay has elapsed."""
        tries = self._attempts.get(key, 0)
        required = min(self._cap, self._base * (2.0**tries))
        elapsed = time.monotonic() - self._last_time.get(key, 0.0)
        return elapsed >= required

    def record_attempt(self, key: K) -> None:
        """Note that an attempt just happened, doubling the next delay."""
        self._attempts[key] = self._attempts.get(key, 0) + 1
        self._last_time[key] = time.monotonic()

    def reset(self, key: K) -> None:
        """Clear all backoff state for a key (e.g., on success)."""
        for table in (self._attempts, self._last_time):
            table.pop(key, None)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/keyed_backoff.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/master/image_store.py | import time
from pathlib import Path
from pydantic import BaseModel
from exo.shared.types.common import Id
class StoredImage(BaseModel, frozen=True):
    """Immutable record describing one image persisted to disk."""
    image_id: Id  # unique identifier; also used as the on-disk file stem
    file_path: Path  # location of the stored image bytes
    content_type: str  # MIME type, e.g. "image/png"
    expires_at: float  # time.time() after which the image counts as expired
class ImageStore:
    """Disk-backed store for generated images with per-image expiry."""

    def __init__(self, storage_dir: Path, default_expiry_seconds: int = 3600) -> None:
        self._storage_dir = storage_dir
        self._default_expiry_seconds = default_expiry_seconds
        self._images: dict[Id, StoredImage] = {}
        self._storage_dir.mkdir(parents=True, exist_ok=True)

    def store(self, image_bytes: bytes, content_type: str) -> StoredImage:
        """Write the bytes to disk and register them under a fresh id."""
        new_id = Id()
        suffix = _content_type_to_extension(content_type)
        target = self._storage_dir / f"{new_id}{suffix}"
        target.write_bytes(image_bytes)
        record = StoredImage(
            image_id=new_id,
            file_path=target,
            content_type=content_type,
            expires_at=time.time() + self._default_expiry_seconds,
        )
        self._images[new_id] = record
        return record

    def get(self, image_id: Id) -> StoredImage | None:
        """Return the stored image, or None if unknown or expired (expired entries are purged)."""
        record = self._images.get(image_id)
        if record is None:
            return None
        if time.time() <= record.expires_at:
            return record
        self._remove(image_id)
        return None

    def list_images(self) -> list[StoredImage]:
        """Return every image that has not yet expired."""
        cutoff = time.time()
        return [rec for rec in self._images.values() if cutoff <= rec.expires_at]

    def cleanup_expired(self) -> int:
        """Remove expired images from disk and memory; return how many were removed."""
        cutoff = time.time()
        stale = [
            image_id
            for image_id, rec in self._images.items()
            if cutoff > rec.expires_at
        ]
        for image_id in stale:
            self._remove(image_id)
        return len(stale)

    def _remove(self, image_id: Id) -> None:
        # Drop the in-memory record, then delete the file if it still exists.
        record = self._images.pop(image_id, None)
        if record is not None and record.file_path.exists():
            record.file_path.unlink()
def _content_type_to_extension(
content_type: str,
) -> str:
ext = f"{content_type.split('/')[1]}"
if ext == "jpeg":
ext = "jpg"
return f".{ext}"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/image_store.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/config.py | from enum import Enum
from pydantic import BaseModel
class BlockType(Enum):
    """Kind of transformer block in an image model's backbone."""
    JOINT = "joint"  # Separate image/text streams
    SINGLE = "single"  # Concatenated streams
class TransformerBlockConfig(BaseModel):
    """Immutable description of one homogeneous run of transformer blocks."""
    model_config = {"frozen": True}
    block_type: BlockType  # JOINT or SINGLE (see BlockType)
    count: int  # number of consecutive blocks of this type
    has_separate_text_output: bool  # True for joint blocks that output text separately
class ImageModelConfig(BaseModel):
    """Static architecture/config description for one image-model family."""

    model_family: str
    block_configs: tuple[TransformerBlockConfig, ...]
    default_steps: dict[str, int]  # {"low": X, "medium": Y, "high": Z}
    num_sync_steps: int  # Number of sync steps for distributed inference
    guidance_scale: float | None = None  # None or <= 1.0 disables CFG

    def _count_blocks(self, kind: BlockType | None = None) -> int:
        # Sum block counts, optionally restricted to one block type.
        return sum(
            cfg.count
            for cfg in self.block_configs
            if kind is None or cfg.block_type == kind
        )

    @property
    def total_blocks(self) -> int:
        return self._count_blocks()

    @property
    def joint_block_count(self) -> int:
        return self._count_blocks(BlockType.JOINT)

    @property
    def single_block_count(self) -> int:
        return self._count_blocks(BlockType.SINGLE)

    def get_steps_for_quality(self, quality: str) -> int:
        return self.default_steps[quality]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/config.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/distributed_model.py | from collections.abc import Generator
from pathlib import Path
from typing import Any, Literal, Optional
import mlx.core as mx
from mflux.models.common.config.config import Config
from PIL import Image
from exo.download.download_utils import build_model_path
from exo.shared.types.api import AdvancedImageParams
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.shards import CfgShardMetadata, PipelineShardMetadata
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models import (
create_adapter_for_model,
get_config_for_model,
)
from exo.worker.engines.image.models.base import ModelAdapter
from exo.worker.engines.image.pipeline import DiffusionRunner
from exo.worker.engines.mlx.utils_mlx import mlx_distributed_init, mx_barrier
from exo.worker.runner.bootstrap import logger
class DistributedImageModel:
    """Wraps an mflux diffusion model for single-node or distributed generation.

    Builds a model adapter and a DiffusionRunner; in distributed mode the
    transformer blocks are sliced to the shard's layer range and the mlx
    group is synchronized before use.
    """
    _config: ImageModelConfig
    _adapter: ModelAdapter[Any, Any]
    _runner: DiffusionRunner
    def __init__(
        self,
        model_id: str,
        local_path: Path,
        shard_metadata: PipelineShardMetadata | CfgShardMetadata,
        group: Optional[mx.distributed.Group] = None,
        quantize: int | None = None,
    ):
        """Create adapter and runner; slice blocks when the shard is partial."""
        config = get_config_for_model(model_id)
        adapter = create_adapter_for_model(config, model_id, local_path, quantize)
        # A shard spanning [0, n_layers) holds the whole model — no slicing needed.
        has_layer_sharding = (
            shard_metadata.start_layer != 0
            or shard_metadata.end_layer != shard_metadata.n_layers
        )
        if group is not None and has_layer_sharding:
            # Slice before evaluation below so unused weights are never loaded.
            adapter.slice_transformer_blocks(
                start_layer=shard_metadata.start_layer,
                end_layer=shard_metadata.end_layer,
            )
        runner = DiffusionRunner(
            config=config,
            adapter=adapter,
            group=group,
            shard_metadata=shard_metadata,
        )
        if group is not None:
            logger.info("Initialized distributed diffusion runner")
            # Force weight materialization, then synchronize all ranks.
            mx.eval(adapter.model.parameters())  # pyright: ignore[reportAny]
            # TODO(ciaran): Do we need this?
            mx.eval(adapter.model)  # pyright: ignore[reportAny]
            mx_barrier(group)
            logger.info(f"Transformer sharded for rank {group.rank()}")
        else:
            logger.info("Single-node initialization")
        self._config = config
        self._adapter = adapter
        self._runner = runner
    @classmethod
    def from_bound_instance(
        cls, bound_instance: BoundInstance
    ) -> "DistributedImageModel":
        """Build a model from a bound instance; init mlx distributed comms only
        when more than one runner participates in the instance."""
        model_id = bound_instance.bound_shard.model_card.model_id
        model_path = build_model_path(model_id)
        shard_metadata = bound_instance.bound_shard
        if not isinstance(shard_metadata, (PipelineShardMetadata, CfgShardMetadata)):
            raise ValueError(
                "Expected PipelineShardMetadata or CfgShardMetadata for image generation"
            )
        is_distributed = (
            len(bound_instance.instance.shard_assignments.node_to_runner) > 1
        )
        if is_distributed:
            logger.info("Starting distributed init for image model")
            group = mlx_distributed_init(bound_instance)
        else:
            group = None
        return cls(
            model_id=model_id,
            local_path=model_path,
            shard_metadata=shard_metadata,
            group=group,
        )
    def get_steps_for_quality(self, quality: Literal["low", "medium", "high"]) -> int:
        """Get the number of inference steps for a quality level."""
        return self._config.get_steps_for_quality(quality)
    def generate(
        self,
        prompt: str,
        height: int,
        width: int,
        quality: Literal["low", "medium", "high"] = "medium",
        seed: int = 2,
        image_path: Path | None = None,
        partial_images: int = 0,
        advanced_params: AdvancedImageParams | None = None,
    ) -> Generator[Image.Image | tuple[Image.Image, int, int], None, None]:
        """Yield partial results as (image, index, total) tuples and final images.

        Advanced params, when provided, override the quality-derived step
        count, guidance, negative prompt, and sync-step count.
        """
        if (
            advanced_params is not None
            and advanced_params.num_inference_steps is not None
        ):
            steps = advanced_params.num_inference_steps
        else:
            steps = self._config.get_steps_for_quality(quality)
        guidance_override: float | None = None
        if advanced_params is not None and advanced_params.guidance is not None:
            guidance_override = advanced_params.guidance
        negative_prompt: str | None = None
        if advanced_params is not None and advanced_params.negative_prompt is not None:
            negative_prompt = advanced_params.negative_prompt
        # For edit mode: compute dimensions from input image
        # This also stores image_paths in the adapter for encode_prompt()
        if image_path is not None:
            computed_dims = self._adapter.set_image_dimensions(image_path)
            if computed_dims is not None:
                # Override user-provided dimensions with computed ones
                width, height = computed_dims
        config = Config(
            num_inference_steps=steps,
            height=height,
            width=width,
            image_path=image_path,
            model_config=self._adapter.model.model_config,  # pyright: ignore[reportAny]
            # 4.0 is the fallback guidance when no override is supplied
            # NOTE(review): confirm this default is appropriate for every model family.
            guidance=guidance_override if guidance_override is not None else 4.0,
        )
        if advanced_params is not None and advanced_params.num_sync_steps is not None:
            num_sync_steps = advanced_params.num_sync_steps
        else:
            num_sync_steps = self._config.num_sync_steps
        for result in self._runner.generate_image(
            runtime_config=config,
            prompt=prompt,
            seed=seed,
            partial_images=partial_images,
            guidance_override=guidance_override,
            negative_prompt=negative_prompt,
            num_sync_steps=num_sync_steps,
        ):
            if isinstance(result, tuple):
                # Partial image: (GeneratedImage, partial_index, total_partials)
                image, partial_idx, total_partials = result
                yield (image, partial_idx, total_partials)
            else:
                logger.info("generated image")
                yield result
def initialize_image_model(bound_instance: BoundInstance) -> DistributedImageModel:
    """Construct a (possibly distributed) image model for the given bound instance."""
    model = DistributedImageModel.from_bound_instance(bound_instance)
    return model
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/distributed_model.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/generate.py | import base64
import io
import random
import tempfile
import time
from pathlib import Path
from typing import Generator, Literal
import mlx.core as mx
from PIL import Image
from exo.shared.types.api import (
AdvancedImageParams,
ImageEditsTaskParams,
ImageGenerationStats,
ImageGenerationTaskParams,
ImageSize,
)
from exo.shared.types.memory import Memory
from exo.shared.types.worker.runner_response import (
ImageGenerationResponse,
PartialImageResponse,
)
from exo.worker.engines.image.distributed_model import DistributedImageModel
def parse_size(size_str: "ImageSize") -> tuple[int, int]:
    """Parse size parameter like '1024x1024' to (width, height) tuple.

    "auto" maps to (1024, 1024). Parsing is tolerant of surrounding
    whitespace and an uppercase "X" separator; anything else malformed,
    or non-positive dimensions, raises ValueError.
    """
    if size_str == "auto":
        return (1024, 1024)
    try:
        # Normalize case/whitespace so "1024X768" and " 1024x768 " also parse.
        parts = size_str.strip().lower().split("x")
        if len(parts) == 2:
            width, height = int(parts[0]), int(parts[1])
            if width > 0 and height > 0:
                return (width, height)
    except (ValueError, AttributeError):
        # Fall through to the uniform error below (non-numeric parts, non-str input).
        pass
    raise ValueError(
        f"Invalid size format: '{size_str}'. Expected 'WIDTHxHEIGHT' (e.g., '1024x1024')"
    )
def warmup_image_generator(model: DistributedImageModel) -> Image.Image | None:
    """Warmup the image generator with a small image."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Edit models need an input image, so synthesize a tiny grey one.
        warmup_input = Path(tmpdir) / "warmup.png"
        Image.new("RGB", (256, 256), color=(128, 128, 128)).save(warmup_input)
        results = model.generate(
            prompt="Warmup",
            height=256,
            width=256,
            quality="low",
            image_path=warmup_input,
            advanced_params=AdvancedImageParams(num_inference_steps=2),
        )
        # Tuples are partial previews; the first non-tuple is the final image.
        return next((r for r in results if not isinstance(r, tuple)), None)
def generate_image(
    model: DistributedImageModel,
    task: ImageGenerationTaskParams | ImageEditsTaskParams,
) -> Generator[ImageGenerationResponse | PartialImageResponse, None, None]:
    """Generate image(s), optionally yielding partial results.
    When partial_images > 0 or stream=True, yields PartialImageResponse for
    intermediate images, then ImageGenerationResponse for the final image.
    Yields:
        PartialImageResponse for intermediate images (if partial_images > 0, first image only)
        ImageGenerationResponse for final complete images
    """
    width, height = parse_size(task.size)
    quality: Literal["low", "medium", "high"] = task.quality or "medium"
    advanced_params = task.advanced_params
    # Use the caller-provided seed when given, otherwise a random base seed.
    if advanced_params is not None and advanced_params.seed is not None:
        base_seed = advanced_params.seed
    else:
        base_seed = random.randint(0, 2**32 - 1)
    is_bench = getattr(task, "bench", False)
    num_images = task.n or 1
    generation_start_time: float = 0.0
    if is_bench:
        # Benchmark mode: track peak memory and wall time across all images.
        mx.reset_peak_memory()
        generation_start_time = time.perf_counter()
    # Partial previews are only emitted when the client asked for streaming.
    partial_images = (
        task.partial_images
        if task.partial_images is not None and task.stream is not None and task.stream
        else 0
    )
    image_path: Path | None = None
    with tempfile.TemporaryDirectory() as tmpdir:
        if isinstance(task, ImageEditsTaskParams):
            # Decode base64 image data and save to temp file
            image_path = Path(tmpdir) / "input.png"
            image_path.write_bytes(base64.b64decode(task.image_data))
            if task.size == "auto":
                # "auto" in edit mode: adopt the input image's dimensions.
                with Image.open(image_path) as img:
                    width, height = img.size
        for image_num in range(num_images):
            # Increment seed for each image to ensure unique results
            current_seed = base_seed + image_num
            for result in model.generate(
                prompt=task.prompt,
                height=height,
                width=width,
                quality=quality,
                seed=current_seed,
                image_path=image_path,
                partial_images=partial_images,
                advanced_params=advanced_params,
            ):
                if isinstance(result, tuple):
                    # Partial image: (Image, partial_index, total_partials)
                    image, partial_idx, total_partials = result
                    buffer = io.BytesIO()
                    image_format = task.output_format.upper()
                    if image_format == "JPG":
                        image_format = "JPEG"
                    if image_format == "JPEG" and image.mode == "RGBA":
                        # JPEG cannot carry an alpha channel.
                        image = image.convert("RGB")
                    image.save(buffer, format=image_format)
                    yield PartialImageResponse(
                        image_data=buffer.getvalue(),
                        format=task.output_format,
                        partial_index=partial_idx,
                        total_partials=total_partials,
                        image_index=image_num,
                    )
                else:
                    image = result
                    # Only include stats on the final image
                    stats: ImageGenerationStats | None = None
                    if is_bench and image_num == num_images - 1:
                        generation_end_time = time.perf_counter()
                        total_generation_time = (
                            generation_end_time - generation_start_time
                        )
                        num_inference_steps = model.get_steps_for_quality(quality)
                        total_steps = num_inference_steps * num_images
                        seconds_per_step = (
                            total_generation_time / total_steps
                            if total_steps > 0
                            else 0.0
                        )
                        peak_memory = Memory.from_bytes(mx.get_peak_memory())
                        stats = ImageGenerationStats(
                            seconds_per_step=seconds_per_step,
                            total_generation_time=total_generation_time,
                            num_inference_steps=num_inference_steps,
                            num_images=num_images,
                            image_width=width,
                            image_height=height,
                            peak_memory_usage=peak_memory,
                        )
                    buffer = io.BytesIO()
                    image_format = task.output_format.upper()
                    if image_format == "JPG":
                        image_format = "JPEG"
                    if image_format == "JPEG" and image.mode == "RGBA":
                        # JPEG cannot carry an alpha channel.
                        image = image.convert("RGB")
                    image.save(buffer, format=image_format)
                    yield ImageGenerationResponse(
                        image_data=buffer.getvalue(),
                        format=task.output_format,
                        stats=stats,
                        image_index=image_num,
                    )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/generate.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/models/base.py | from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generic, TypeVar
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.models.common.latent_creator.latent_creator import Img2Img, LatentCreator
from mflux.utils.image_util import ImageUtil
from PIL import Image
from exo.worker.engines.image.config import ImageModelConfig
if TYPE_CHECKING:
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
ModelT = TypeVar("ModelT")  # concrete mflux model type wrapped by a ModelAdapter
TransformerT = TypeVar("TransformerT")  # transformer submodule type exposed by the adapter
RotaryEmbeddings = mx.array | tuple[mx.array, mx.array]  # one array or a pair (model-specific)
class PromptData(ABC):
    """Model-specific bundle of encoded prompt tensors consumed by the pipeline.

    Concrete subclasses hold the text-encoder outputs for a prompt (and, for
    CFG-capable models, the negative prompt) plus any image-conditioning data.
    """
    @property
    @abstractmethod
    def prompt_embeds(self) -> mx.array:
        """Text-encoder embeddings for the positive prompt."""
        ...
    @property
    @abstractmethod
    def pooled_prompt_embeds(self) -> mx.array:
        """Pooled text embeddings for the positive prompt."""
        ...
    @property
    @abstractmethod
    def negative_prompt_embeds(self) -> mx.array | None:
        """Text embeddings for the negative prompt, or None without CFG."""
        ...
    @property
    @abstractmethod
    def negative_pooled_prompt_embeds(self) -> mx.array | None:
        """Pooled negative-prompt embeddings, or None without CFG."""
        ...
    @abstractmethod
    def get_encoder_hidden_states_mask(
        self, positive: bool = True
    ) -> mx.array | None:
        """Attention mask over encoder hidden states for the chosen branch, if any."""
        ...
    @property
    @abstractmethod
    def cond_image_grid(
        self,
    ) -> tuple[int, int, int] | list[tuple[int, int, int]] | None:
        """Conditioning image grid dimensions for edit mode.
        Returns:
            Grid dimensions (edit) or None (standard generation).
        """
        ...
    @property
    @abstractmethod
    def conditioning_latents(self) -> mx.array | None:
        """Conditioning latents for edit mode.
        Returns:
            Conditioning latents array for image editing, None for standard generation.
        """
        ...
    @property
    @abstractmethod
    def kontext_image_ids(self) -> mx.array | None:
        """Kontext-style position IDs for image conditioning.
        For FLUX.1-Kontext models, returns position IDs with first_coord=1
        to distinguish conditioning tokens from generation tokens (first_coord=0).
        Returns:
            Position IDs array [1, seq_len, 3] for Kontext, None for other models.
        """
        ...
    @abstractmethod
    def get_batched_cfg_data(
        self,
    ) -> tuple[mx.array, mx.array, mx.array | None, mx.array | None] | None:
        """Get embeddings for CFG with batch_size=2.
        Combines positive and negative embeddings into batched tensors for
        a single forward pass. Pads shorter sequences to max length. Attention
        mask is used to mask padding.
        Returns:
            None if model doesn't support CFG, otherwise tuple of:
            - batched_embeds: [2, max_seq, hidden] (positive then negative)
            - batched_mask: [2, max_seq] attention mask
            - batched_pooled: [2, hidden] pooled embeddings or None
            - conditioning_latents: [2, latent_seq, latent_dim] or None
        TODO(ciaran): type this
        """
        ...
    @abstractmethod
    def get_cfg_branch_data(
        self, positive: bool
    ) -> tuple[mx.array, mx.array | None, mx.array | None, mx.array | None]:
        """Get embeddings for a single CFG branch (positive or negative).
        Used for sequential CFG and CFG parallel modes where we process
        one branch at a time instead of batching.
        Args:
            positive: True for positive prompt, False for negative prompt
        Returns:
            Tuple of:
            - embeds: [1, seq, hidden] prompt embeddings
            - mask: [1, seq] attention mask or None
            - pooled: [1, hidden] pooled embeddings or None
            - conditioning_latents: [1, latent_seq, latent_dim] or None
        """
        ...
class ModelAdapter(ABC, Generic[ModelT, TransformerT]):
    """Abstract bridge between a concrete mflux model and the diffusion pipeline.

    Subclasses expose the model's transformer blocks as wrappers, encode
    prompts into PromptData, and supply the model-specific embedding, latent,
    guidance, and projection operations the runner composes.
    """
    _config: ImageModelConfig
    _model: ModelT
    _transformer: TransformerT
    @property
    def config(self) -> ImageModelConfig:
        return self._config
    @property
    def model(self) -> ModelT:
        return self._model
    @property
    def transformer(self) -> TransformerT:
        return self._transformer
    @property
    @abstractmethod
    def hidden_dim(self) -> int:
        """Hidden dimension of the transformer's token stream."""
        ...
    @property
    @abstractmethod
    def needs_cfg(self) -> bool:
        """Whether this model uses classifier-free guidance."""
        ...
    @abstractmethod
    def _get_latent_creator(self) -> type:
        """Model-specific LatentCreator class used to pack/unpack latents."""
        ...
    @abstractmethod
    def get_joint_block_wrappers(
        self,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ) -> list["JointBlockWrapper[Any]"]:
        """Create wrapped joint transformer blocks with pipefusion support.
        Args:
            text_seq_len: Number of text tokens (constant for generation)
            encoder_hidden_states_mask: Attention mask for text (Qwen only)
        Returns:
            List of wrapped joint blocks ready for pipefusion
        """
        ...
    @abstractmethod
    def get_single_block_wrappers(
        self,
        text_seq_len: int,
    ) -> list["SingleBlockWrapper[Any]"]:
        """Create wrapped single transformer blocks with pipefusion support.
        Args:
            text_seq_len: Number of text tokens (constant for generation)
        Returns:
            List of wrapped single blocks ready for pipefusion
        """
        ...
    @abstractmethod
    def slice_transformer_blocks(
        self,
        start_layer: int,
        end_layer: int,
    ):
        """Remove transformer blocks outside the assigned range.
        This should be called BEFORE mx.eval() to avoid loading unused weights
        in distributed mode.
        Args:
            start_layer: First layer index (inclusive) assigned to this node
            end_layer: Last layer index (exclusive) assigned to this node
        """
        ...
    def set_image_dimensions(self, image_path: Path) -> tuple[int, int] | None:
        """Default implementation: no dimension computation needed.
        Override in edit adapters to compute dimensions from input image.
        TODO(ciaran): this is a hack
        Returns:
            None (use user-specified dimensions)
        """
        return None
    def create_latents(self, seed: int, runtime_config: Config) -> mx.array:
        """Create initial latents. Uses model-specific latent creator."""
        model: Any = self.model
        return LatentCreator.create_for_txt2img_or_img2img(
            seed=seed,
            height=runtime_config.height,
            width=runtime_config.width,
            img2img=Img2Img(
                vae=model.vae,  # pyright: ignore[reportAny]
                latent_creator=self._get_latent_creator(),
                sigmas=runtime_config.scheduler.sigmas,  # pyright: ignore[reportAny]
                init_time_step=runtime_config.init_time_step,
                image_path=runtime_config.image_path,
            ),
        )
    def decode_latents(
        self,
        latents: mx.array,
        runtime_config: Config,
        seed: int,
        prompt: str,
    ) -> Image.Image:
        """Unpack latents, decode via the VAE, and wrap them as a PIL image."""
        model: Any = self.model  # Allow attribute access on model
        latents = self._get_latent_creator().unpack_latents(  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType]
            latents=latents,
            height=runtime_config.height,
            width=runtime_config.width,
        )
        decoded = model.vae.decode(latents)  # pyright: ignore[reportAny]
        # TODO(ciaran):
        # from mflux.models.common.vae.vae_util import VAEUtil
        # VAEUtil.decode(vae=model.vae, latents=latents, tiling_config=self.tiling_config)
        generated_image = ImageUtil.to_image(
            decoded_latents=decoded,  # pyright: ignore[reportAny]
            config=runtime_config,
            seed=seed,
            prompt=prompt,
            quantization=model.bits,  # pyright: ignore[reportAny]
            lora_paths=model.lora_paths,  # pyright: ignore[reportAny]
            lora_scales=model.lora_scales,  # pyright: ignore[reportAny]
            image_path=runtime_config.image_path,
            image_strength=runtime_config.image_strength,
            generation_time=0,
        )
        return generated_image.image
    @abstractmethod
    def encode_prompt(
        self, prompt: str, negative_prompt: str | None = None
    ) -> "PromptData":
        """Encode prompt (and optional negative prompt) into PromptData."""
        ...
    @abstractmethod
    def compute_embeddings(
        self,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Project latent and prompt tensors into the transformer's hidden space."""
        ...
    @abstractmethod
    def compute_text_embeddings(
        self,
        t: int,
        runtime_config: Config,
        pooled_prompt_embeds: mx.array | None = None,
        hidden_states: mx.array | None = None,
    ) -> mx.array:
        """Compute the timestep/text conditioning embedding for step ``t``."""
        ...
    @abstractmethod
    def compute_rotary_embeddings(
        self,
        prompt_embeds: mx.array,
        runtime_config: Config,
        encoder_hidden_states_mask: mx.array | None = None,
        cond_image_grid: tuple[int, int, int]
        | list[tuple[int, int, int]]
        | None = None,
        kontext_image_ids: mx.array | None = None,
    ) -> RotaryEmbeddings:
        """Build rotary position embeddings for the text+image token sequence."""
        ...
    def merge_streams(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
    ) -> mx.array:
        # Default merge: text tokens first, image tokens second, along the sequence axis.
        return mx.concatenate([encoder_hidden_states, hidden_states], axis=1)
    @abstractmethod
    def apply_guidance(
        self,
        noise_positive: mx.array,
        noise_negative: mx.array,
        guidance_scale: float,
    ) -> mx.array:
        """Apply classifier-free guidance to combine positive/negative predictions.
        Only called when needs_cfg is True.
        Args:
            noise_positive: Noise prediction from positive prompt
            noise_negative: Noise prediction from negative prompt
            guidance_scale: Guidance strength
        Returns:
            Guided noise prediction
        """
        ...
    def final_projection(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
    ) -> mx.array:
        """Apply the transformer's output norm and projection to the hidden states."""
        transformer: Any = self.transformer
        hidden_states = transformer.norm_out(hidden_states, text_embeddings)  # pyright: ignore[reportAny]
        return transformer.proj_out(hidden_states)  # pyright: ignore[reportAny]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/base.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/models/flux/adapter.py | from pathlib import Path
from typing import Any
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.latent_creator.flux_latent_creator import FluxLatentCreator
from mflux.models.flux.model.flux_text_encoder.prompt_encoder import PromptEncoder
from mflux.models.flux.model.flux_transformer.transformer import Transformer
from mflux.models.flux.variants.txt2img.flux import Flux1
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models.base import (
ModelAdapter,
PromptData,
RotaryEmbeddings,
)
from exo.worker.engines.image.models.flux.wrappers import (
FluxJointBlockWrapper,
FluxSingleBlockWrapper,
)
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
class FluxPromptData(PromptData):
    """Prompt-encoding results for Flux txt2img generation.

    Flux does not perform classifier-free guidance, so every
    negative-prompt accessor and CFG helper reports "no data", and
    ``get_cfg_branch_data`` always hands back the positive embeddings.
    """

    def __init__(self, prompt_embeds: mx.array, pooled_prompt_embeds: mx.array):
        self._prompt_embeds = prompt_embeds
        self._pooled_prompt_embeds = pooled_prompt_embeds

    @property
    def prompt_embeds(self) -> mx.array:
        """Sequence embeddings for the positive prompt."""
        return self._prompt_embeds

    @property
    def pooled_prompt_embeds(self) -> mx.array:
        """Pooled embedding for the positive prompt."""
        return self._pooled_prompt_embeds

    @property
    def negative_prompt_embeds(self) -> mx.array | None:
        return None  # no negative prompt for Flux

    @property
    def negative_pooled_prompt_embeds(self) -> mx.array | None:
        return None  # no negative prompt for Flux

    def get_encoder_hidden_states_mask(self, positive: bool = True) -> mx.array | None:
        # Flux does not use a text attention mask.
        return None

    @property
    def cond_image_grid(
        self,
    ) -> tuple[int, int, int] | list[tuple[int, int, int]] | None:
        return None  # txt2img: no conditioning image

    @property
    def conditioning_latents(self) -> mx.array | None:
        return None  # txt2img: no conditioning image

    @property
    def kontext_image_ids(self) -> mx.array | None:
        return None

    def get_batched_cfg_data(
        self,
    ) -> tuple[mx.array, mx.array, mx.array | None, mx.array | None] | None:
        # No CFG, so there is nothing to batch.
        return None

    def get_cfg_branch_data(
        self, positive: bool
    ) -> tuple[mx.array, mx.array | None, mx.array | None, mx.array | None]:
        """Flux doesn't use CFG, but we return positive data for compatibility."""
        # Both branches resolve to the positive embeddings.
        return (self._prompt_embeds, None, self._pooled_prompt_embeds, None)
class FluxModelAdapter(ModelAdapter[Flux1, Transformer]):
    """Adapter exposing the mflux Flux1 txt2img model to the image pipeline.

    Flux has 19 joint (dual-stream) blocks followed by 38 single-stream
    blocks and does not use classifier-free guidance.
    """

    def __init__(
        self,
        config: ImageModelConfig,
        model_id: str,
        local_path: Path,
        quantize: int | None = None,
    ):
        """Load the Flux1 model from `local_path` and cache its transformer.

        Args:
            config: Engine-level model configuration.
            model_id: mflux model name used to resolve the ModelConfig.
            local_path: Directory holding the model weights.
            quantize: Optional quantization bit width passed through to mflux.
        """
        self._config = config
        self._model = Flux1(
            model_config=ModelConfig.from_name(model_name=model_id, base_model=None),
            model_path=str(local_path),
            quantize=quantize,
        )
        self._transformer = self._model.transformer

    @property
    def hidden_dim(self) -> int:
        # Output dimension of the image-embedding projection.
        return self._transformer.x_embedder.weight.shape[0]  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType]

    @property
    def needs_cfg(self) -> bool:
        # Flux never runs classifier-free guidance.
        return False

    def _get_latent_creator(self) -> type:
        return FluxLatentCreator

    def get_joint_block_wrappers(
        self,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ) -> list[JointBlockWrapper[Any]]:
        """Create wrapped joint blocks for Flux."""
        # encoder_hidden_states_mask is unused: Flux has no text mask.
        return [
            FluxJointBlockWrapper(block, text_seq_len)
            for block in self._transformer.transformer_blocks
        ]

    def get_single_block_wrappers(
        self,
        text_seq_len: int,
    ) -> list[SingleBlockWrapper[Any]]:
        """Create wrapped single blocks for Flux."""
        return [
            FluxSingleBlockWrapper(block, text_seq_len)
            for block in self._transformer.single_transformer_blocks
        ]

    def slice_transformer_blocks(
        self,
        start_layer: int,
        end_layer: int,
    ):
        """Keep only layers [start_layer, end_layer) of the combined stack.

        Layers are indexed over the concatenation of joint blocks followed
        by single blocks, so the assigned range may fall entirely within
        one kind or span both.
        """
        all_joint = list(self._transformer.transformer_blocks)
        all_single = list(self._transformer.single_transformer_blocks)
        total_joint_blocks = len(all_joint)
        if end_layer <= total_joint_blocks:
            # All assigned are joint blocks
            joint_start, joint_end = start_layer, end_layer
            single_start, single_end = 0, 0
        elif start_layer >= total_joint_blocks:
            # All assigned are single blocks
            joint_start, joint_end = 0, 0
            single_start = start_layer - total_joint_blocks
            single_end = end_layer - total_joint_blocks
        else:
            # Spans both joint and single
            joint_start, joint_end = start_layer, total_joint_blocks
            single_start = 0
            single_end = end_layer - total_joint_blocks
        self._transformer.transformer_blocks = all_joint[joint_start:joint_end]
        self._transformer.single_transformer_blocks = all_single[
            single_start:single_end
        ]

    def encode_prompt(
        self, prompt: str, negative_prompt: str | None = None
    ) -> FluxPromptData:
        """Encode `prompt` with the T5 and CLIP encoders.

        The negative prompt is discarded — Flux does not use CFG.
        """
        del negative_prompt
        assert isinstance(self.model.prompt_cache, dict)
        assert isinstance(self.model.tokenizers, dict)
        prompt_embeds, pooled_prompt_embeds = PromptEncoder.encode_prompt(
            prompt=prompt,
            prompt_cache=self.model.prompt_cache,
            t5_tokenizer=self.model.tokenizers["t5"],  # pyright: ignore[reportAny]
            clip_tokenizer=self.model.tokenizers["clip"],  # pyright: ignore[reportAny]
            t5_text_encoder=self.model.t5_text_encoder,
            clip_text_encoder=self.model.clip_text_encoder,
        )
        return FluxPromptData(
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
        )

    def compute_embeddings(
        self,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Project image latents and text embeddings into the model width."""
        embedded_hidden = self._transformer.x_embedder(hidden_states)
        embedded_encoder = self._transformer.context_embedder(prompt_embeds)
        return embedded_hidden, embedded_encoder

    def compute_text_embeddings(
        self,
        t: int,
        runtime_config: Config,
        pooled_prompt_embeds: mx.array | None = None,
        hidden_states: mx.array | None = None,  # Ignored by Flux
    ) -> mx.array:
        """Compute the timestep/text conditioning vector for step `t`.

        Raises:
            ValueError: If `pooled_prompt_embeds` is missing — Flux requires
                the pooled CLIP embedding here.
        """
        if pooled_prompt_embeds is None:
            raise ValueError(
                "pooled_prompt_embeds is required for Flux text embeddings"
            )
        # hidden_states is ignored - Flux uses pooled_prompt_embeds instead
        return Transformer.compute_text_embeddings(
            t, pooled_prompt_embeds, self._transformer.time_text_embed, runtime_config
        )

    def compute_rotary_embeddings(
        self,
        prompt_embeds: mx.array,
        runtime_config: Config,
        encoder_hidden_states_mask: mx.array | None = None,
        cond_image_grid: tuple[int, int, int]
        | list[tuple[int, int, int]]
        | None = None,
        kontext_image_ids: mx.array | None = None,
    ) -> RotaryEmbeddings:
        """Delegate RoPE computation to the mflux Transformer.

        `encoder_hidden_states_mask` and `cond_image_grid` are accepted for
        interface compatibility but not used by Flux.
        """
        return Transformer.compute_rotary_embeddings(
            prompt_embeds,
            self._transformer.pos_embed,
            runtime_config,
            kontext_image_ids,
        )

    def apply_guidance(
        self,
        noise_positive: mx.array,
        noise_negative: mx.array,
        guidance_scale: float,
    ) -> mx.array:
        """Never called for Flux (`needs_cfg` is False); raises if invoked."""
        raise NotImplementedError("Flux does not use classifier-free guidance")
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/flux/adapter.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/models/flux/config.py | from exo.worker.engines.image.config import (
BlockType,
ImageModelConfig,
TransformerBlockConfig,
)
# Flux Schnell: distilled variant configured for very few denoising steps.
# Architecture: 19 joint (dual-stream) blocks then 38 single-stream blocks.
FLUX_SCHNELL_CONFIG = ImageModelConfig(
    model_family="flux",
    block_configs=(
        TransformerBlockConfig(
            block_type=BlockType.JOINT, count=19, has_separate_text_output=True
        ),
        TransformerBlockConfig(
            block_type=BlockType.SINGLE, count=38, has_separate_text_output=False
        ),
    ),
    # Steps per quality preset.
    default_steps={"low": 1, "medium": 2, "high": 4},
    # NOTE(review): num_sync_steps presumably counts fully-synchronous steps
    # before pipelined execution — confirm against the pipeline code.
    num_sync_steps=1,
)
# Flux Dev: same architecture as Schnell, tuned for more denoising steps.
FLUX_DEV_CONFIG = ImageModelConfig(
    model_family="flux",
    block_configs=(
        TransformerBlockConfig(
            block_type=BlockType.JOINT, count=19, has_separate_text_output=True
        ),
        TransformerBlockConfig(
            block_type=BlockType.SINGLE, count=38, has_separate_text_output=False
        ),
    ),
    default_steps={"low": 10, "medium": 25, "high": 50},
    num_sync_steps=4,
)
# Flux Kontext: image-conditioned variant; sets a guidance scale.
FLUX_KONTEXT_CONFIG = ImageModelConfig(
    model_family="flux-kontext",
    block_configs=(
        TransformerBlockConfig(
            block_type=BlockType.JOINT, count=19, has_separate_text_output=True
        ),
        TransformerBlockConfig(
            block_type=BlockType.SINGLE, count=38, has_separate_text_output=False
        ),
    ),
    default_steps={"low": 10, "medium": 25, "high": 50},
    num_sync_steps=4,
    guidance_scale=4.0,
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/flux/config.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/models/flux/wrappers.py | from typing import final
import mlx.core as mx
from mflux.models.flux.model.flux_transformer.common.attention_utils import (
AttentionUtils,
)
from mflux.models.flux.model.flux_transformer.joint_transformer_block import (
JointTransformerBlock,
)
from mflux.models.flux.model.flux_transformer.single_transformer_block import (
SingleTransformerBlock,
)
from pydantic import BaseModel, ConfigDict
from exo.worker.engines.image.models.base import RotaryEmbeddings
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
@final
class FluxModulationParams(BaseModel):
    """Modulation tensors produced by a joint block's norm layers.

    Stored between `_compute_qkv` and `_apply_output` so the feed-forward
    stage can reuse the gates/shift/scale computed during normalization.
    """

    model_config = ConfigDict(frozen=True, strict=True, arbitrary_types_allowed=True)
    gate_msa: mx.array
    shift_mlp: mx.array
    scale_mlp: mx.array
    gate_mlp: mx.array
@final
class FluxNormGateState(BaseModel):
    """Normalized hidden states and gate saved by a single block's norm.

    Stored between `_compute_qkv` and `_apply_output` of
    FluxSingleBlockWrapper.
    """

    model_config = ConfigDict(frozen=True, strict=True, arbitrary_types_allowed=True)
    norm_hidden: mx.array
    gate: mx.array
class FluxJointBlockWrapper(JointBlockWrapper[JointTransformerBlock]):
    """Splits a Flux joint transformer block into QKV / attention / output
    phases so the pipeline can interleave them (e.g. for pipefusion).

    The concatenated attention sequence is laid out as
    [text tokens, image tokens]; `_text_seq_len` marks the boundary.
    """

    def __init__(self, block: JointTransformerBlock, text_seq_len: int):
        super().__init__(block, text_seq_len)
        self._num_heads = block.attn.num_heads
        self._head_dim = block.attn.head_dimension
        # Intermediate state stored between _compute_qkv and _apply_output
        self._hidden_mod: FluxModulationParams | None = None
        self._context_mod: FluxModulationParams | None = None

    def _compute_qkv(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
        patch_mode: bool = False,
    ) -> tuple[mx.array, mx.array, mx.array]:
        """Normalize both streams, project to Q/K/V, and apply RoPE.

        Side effect: stores the modulation params from norm1/norm1_context
        for later use in `_apply_output`.
        """
        assert isinstance(rotary_embeddings, mx.array)
        attn = self.block.attn
        # Image-stream norm; returns modulation params alongside the output.
        (
            norm_hidden,
            gate_msa,
            shift_mlp,
            scale_mlp,
            gate_mlp,
        ) = self.block.norm1(
            hidden_states=hidden_states,
            text_embeddings=text_embeddings,
        )
        self._hidden_mod = FluxModulationParams(
            gate_msa=gate_msa,
            shift_mlp=shift_mlp,
            scale_mlp=scale_mlp,
            gate_mlp=gate_mlp,
        )
        # Text-stream norm (same structure as above).
        (
            norm_encoder,
            c_gate_msa,
            c_shift_mlp,
            c_scale_mlp,
            c_gate_mlp,
        ) = self.block.norm1_context(
            hidden_states=encoder_hidden_states,
            text_embeddings=text_embeddings,
        )
        self._context_mod = FluxModulationParams(
            gate_msa=c_gate_msa,
            shift_mlp=c_shift_mlp,
            scale_mlp=c_scale_mlp,
            gate_mlp=c_gate_mlp,
        )
        img_query, img_key, img_value = AttentionUtils.process_qkv(
            hidden_states=norm_hidden,
            to_q=attn.to_q,
            to_k=attn.to_k,
            to_v=attn.to_v,
            norm_q=attn.norm_q,
            norm_k=attn.norm_k,
            num_heads=self._num_heads,
            head_dim=self._head_dim,
        )
        txt_query, txt_key, txt_value = AttentionUtils.process_qkv(
            hidden_states=norm_encoder,
            to_q=attn.add_q_proj,
            to_k=attn.add_k_proj,
            to_v=attn.add_v_proj,
            norm_q=attn.norm_added_q,
            norm_k=attn.norm_added_k,
            num_heads=self._num_heads,
            head_dim=self._head_dim,
        )
        # Text tokens first, then image tokens (matches _apply_output's split).
        query = mx.concatenate([txt_query, img_query], axis=2)
        key = mx.concatenate([txt_key, img_key], axis=2)
        value = mx.concatenate([txt_value, img_value], axis=2)
        if patch_mode:
            # Only this patch's image positions are present, so restrict the
            # RoPE table to the text span plus the patch's image span.
            # (_patch_start/_patch_end are managed by the base wrapper.)
            text_rope = rotary_embeddings[:, :, : self._text_seq_len, ...]
            patch_img_rope = rotary_embeddings[
                :,
                :,
                self._text_seq_len + self._patch_start : self._text_seq_len
                + self._patch_end,
                ...,
            ]
            rope = mx.concatenate([text_rope, patch_img_rope], axis=2)
        else:
            rope = rotary_embeddings
        query, key = AttentionUtils.apply_rope(xq=query, xk=key, freqs_cis=rope)
        return query, key, value

    def _compute_attention(
        self, query: mx.array, key: mx.array, value: mx.array
    ) -> mx.array:
        """Run scaled attention over the concatenated text+image sequence."""
        batch_size = query.shape[0]
        return AttentionUtils.compute_attention(
            query=query,
            key=key,
            value=value,
            batch_size=batch_size,
            num_heads=self._num_heads,
            head_dim=self._head_dim,
        )

    def _apply_output(
        self,
        attn_out: mx.array,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Split attention output back into streams and run feed-forward.

        Uses the modulation params cached by `_compute_qkv`; returns
        (encoder_hidden_states, hidden_states).
        """
        attn = self.block.attn
        # First _text_seq_len positions are text; the rest are image tokens.
        context_attn_output = attn_out[:, : self._text_seq_len, :]
        hidden_attn_output = attn_out[:, self._text_seq_len :, :]
        hidden_attn_output = attn.to_out[0](hidden_attn_output)  # pyright: ignore[reportAny]
        context_attn_output = attn.to_add_out(context_attn_output)
        assert self._hidden_mod is not None
        assert self._context_mod is not None
        hidden_states = JointTransformerBlock.apply_norm_and_feed_forward(
            hidden_states=hidden_states,
            attn_output=hidden_attn_output,  # pyright: ignore[reportAny]
            gate_mlp=self._hidden_mod.gate_mlp,
            gate_msa=self._hidden_mod.gate_msa,
            scale_mlp=self._hidden_mod.scale_mlp,
            shift_mlp=self._hidden_mod.shift_mlp,
            norm_layer=self.block.norm2,
            ff_layer=self.block.ff,
        )
        encoder_hidden_states = JointTransformerBlock.apply_norm_and_feed_forward(
            hidden_states=encoder_hidden_states,
            attn_output=context_attn_output,
            gate_mlp=self._context_mod.gate_mlp,
            gate_msa=self._context_mod.gate_msa,
            scale_mlp=self._context_mod.scale_mlp,
            shift_mlp=self._context_mod.shift_mlp,
            norm_layer=self.block.norm2_context,
            ff_layer=self.block.ff_context,
        )
        return encoder_hidden_states, hidden_states
class FluxSingleBlockWrapper(SingleBlockWrapper[SingleTransformerBlock]):
"""Flux-specific single block wrapper with pipefusion support."""
def __init__(self, block: SingleTransformerBlock, text_seq_len: int):
super().__init__(block, text_seq_len)
self._num_heads = block.attn.num_heads
self._head_dim = block.attn.head_dimension
# Intermediate state stored between _compute_qkv and _apply_output
self._norm_state: FluxNormGateState | None = None
def _compute_qkv(
self,
hidden_states: mx.array,
text_embeddings: mx.array,
rotary_embeddings: RotaryEmbeddings,
patch_mode: bool = False,
) -> tuple[mx.array, mx.array, mx.array]:
assert isinstance(rotary_embeddings, mx.array)
attn = self.block.attn
norm_hidden, gate = self.block.norm(
hidden_states=hidden_states,
text_embeddings=text_embeddings,
)
self._norm_state = FluxNormGateState(norm_hidden=norm_hidden, gate=gate)
query, key, value = AttentionUtils.process_qkv(
hidden_states=norm_hidden,
to_q=attn.to_q,
to_k=attn.to_k,
to_v=attn.to_v,
norm_q=attn.norm_q,
norm_k=attn.norm_k,
num_heads=self._num_heads,
head_dim=self._head_dim,
)
if patch_mode:
text_rope = rotary_embeddings[:, :, : self._text_seq_len, ...]
patch_img_rope = rotary_embeddings[
:,
:,
self._text_seq_len + self._patch_start : self._text_seq_len
+ self._patch_end,
...,
]
rope = mx.concatenate([text_rope, patch_img_rope], axis=2)
else:
rope = rotary_embeddings
query, key = AttentionUtils.apply_rope(xq=query, xk=key, freqs_cis=rope)
return query, key, value
def _compute_attention(
self, query: mx.array, key: mx.array, value: mx.array
) -> mx.array:
batch_size = query.shape[0]
return AttentionUtils.compute_attention(
query=query,
key=key,
value=value,
batch_size=batch_size,
num_heads=self._num_heads,
head_dim=self._head_dim,
)
def _apply_output(
self,
attn_out: mx.array,
hidden_states: mx.array,
text_embeddings: mx.array,
) -> mx.array:
residual = hidden_states
assert self._norm_state is not None
output = self.block._apply_feed_forward_and_projection(
norm_hidden_states=self._norm_state.norm_hidden,
attn_output=attn_out,
gate=self._norm_state.gate,
)
return residual + output
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/flux/wrappers.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/models/qwen/adapter.py | from pathlib import Path
from typing import Any
import mlx.core as mx
from mflux.models.common.config import ModelConfig
from mflux.models.common.config.config import Config
from mflux.models.qwen.latent_creator.qwen_latent_creator import QwenLatentCreator
from mflux.models.qwen.model.qwen_text_encoder.qwen_prompt_encoder import (
QwenPromptEncoder,
)
from mflux.models.qwen.model.qwen_transformer.qwen_transformer import QwenTransformer
from mflux.models.qwen.variants.txt2img.qwen_image import QwenImage
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models.base import (
ModelAdapter,
PromptData,
RotaryEmbeddings,
)
from exo.worker.engines.image.models.qwen.wrappers import QwenJointBlockWrapper
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
class QwenPromptData(PromptData):
    """Prompt-encoding results for Qwen-Image.

    Qwen uses a single text encoder with attention masks and supports
    classifier-free guidance with a real negative prompt, so both the
    positive and negative embeddings/masks are carried here.
    """

    def __init__(
        self,
        prompt_embeds: mx.array,
        prompt_mask: mx.array,
        negative_prompt_embeds: mx.array,
        negative_prompt_mask: mx.array,
    ):
        self._prompt_embeds = prompt_embeds
        self._prompt_mask = prompt_mask
        self._negative_prompt_embeds = negative_prompt_embeds
        self._negative_prompt_mask = negative_prompt_mask

    @property
    def prompt_embeds(self) -> mx.array:
        return self._prompt_embeds

    @property
    def pooled_prompt_embeds(self) -> mx.array:
        # Qwen has no pooled embedding; the sequence embeddings stand in.
        return self._prompt_embeds

    @property
    def negative_prompt_embeds(self) -> mx.array:
        return self._negative_prompt_embeds

    @property
    def negative_pooled_prompt_embeds(self) -> mx.array:
        # Same stand-in as pooled_prompt_embeds, for the negative prompt.
        return self._negative_prompt_embeds

    def get_encoder_hidden_states_mask(self, positive: bool = True) -> mx.array:
        """Return the attention mask for the requested CFG branch."""
        if positive:
            return self._prompt_mask
        else:
            return self._negative_prompt_mask

    @property
    def cond_image_grid(
        self,
    ) -> tuple[int, int, int] | list[tuple[int, int, int]] | None:
        return None  # txt2img: no conditioning image

    @property
    def conditioning_latents(self) -> mx.array | None:
        return None  # txt2img: no conditioning image

    @property
    def kontext_image_ids(self) -> mx.array | None:
        return None  # Flux-Kontext concept; not used by Qwen

    def get_batched_cfg_data(
        self,
    ) -> tuple[mx.array, mx.array, mx.array | None, mx.array | None] | None:
        """Batch positive and negative embeddings for CFG with batch_size=2.

        Pads shorter sequence to max length using zeros for embeddings
        and zeros (masked) for attention mask.

        Returns:
            Tuple of (batched_embeds, batched_mask, None, conditioning_latents)
            - batched_embeds: [2, max_seq, hidden]
            - batched_mask: [2, max_seq]
            - None for pooled (Qwen doesn't use it)
            - conditioning_latents: [2, latent_seq, latent_dim] or None
        """
        pos_embeds = self._prompt_embeds
        neg_embeds = self._negative_prompt_embeds
        pos_mask = self._prompt_mask
        neg_mask = self._negative_prompt_mask
        pos_seq_len = pos_embeds.shape[1]
        neg_seq_len = neg_embeds.shape[1]
        max_seq_len = max(pos_seq_len, neg_seq_len)
        hidden_dim = pos_embeds.shape[2]
        # At most one of the two branches below fires: only the shorter of
        # the two sequences needs zero-padding up to max_seq_len.
        if pos_seq_len < max_seq_len:
            pad_len = max_seq_len - pos_seq_len
            pos_embeds = mx.concatenate(
                [
                    pos_embeds,
                    mx.zeros((1, pad_len, hidden_dim), dtype=pos_embeds.dtype),
                ],
                axis=1,
            )
            pos_mask = mx.concatenate(
                [pos_mask, mx.zeros((1, pad_len), dtype=pos_mask.dtype)],
                axis=1,
            )
        elif neg_seq_len < max_seq_len:
            pad_len = max_seq_len - neg_seq_len
            neg_embeds = mx.concatenate(
                [
                    neg_embeds,
                    mx.zeros((1, pad_len, hidden_dim), dtype=neg_embeds.dtype),
                ],
                axis=1,
            )
            neg_mask = mx.concatenate(
                [neg_mask, mx.zeros((1, pad_len), dtype=neg_mask.dtype)],
                axis=1,
            )
        # Row 0 = positive branch, row 1 = negative branch.
        batched_embeds = mx.concatenate([pos_embeds, neg_embeds], axis=0)
        batched_mask = mx.concatenate([pos_mask, neg_mask], axis=0)
        # TODO(ciaran): currently None but maybe we will deduplicate with edit
        # adapter
        cond_latents = self.conditioning_latents
        if cond_latents is not None:
            cond_latents = mx.concatenate([cond_latents, cond_latents], axis=0)
        return batched_embeds, batched_mask, None, cond_latents

    def get_cfg_branch_data(
        self, positive: bool
    ) -> tuple[mx.array, mx.array | None, mx.array | None, mx.array | None]:
        """Return (embeds, mask, pooled, cond_latents) for one CFG branch."""
        if positive:
            return (
                self._prompt_embeds,
                self._prompt_mask,
                None,
                self.conditioning_latents,
            )
        else:
            return (
                self._negative_prompt_embeds,
                self._negative_prompt_mask,
                None,
                self.conditioning_latents,
            )
class QwenModelAdapter(ModelAdapter[QwenImage, QwenTransformer]):
    """Adapter for Qwen-Image model.

    Key differences from Flux:
    - Single text encoder (vs dual T5+CLIP)
    - 60 joint-style blocks, no single blocks
    - 3D RoPE returning ((img_cos, img_sin), (txt_cos, txt_sin))
    - Norm-preserving CFG with negative prompts
    - Uses attention mask for variable-length text
    """

    def __init__(
        self,
        config: ImageModelConfig,
        model_id: str,
        local_path: Path,
        quantize: int | None = None,
    ):
        """Load the QwenImage model from `local_path` and cache its transformer."""
        self._config = config
        self._model = QwenImage(
            model_config=ModelConfig.from_name(model_name=model_id, base_model=None),
            model_path=str(local_path),
            quantize=quantize,
        )
        self._transformer = self._model.transformer

    @property
    def hidden_dim(self) -> int:
        return self._transformer.inner_dim

    @property
    def needs_cfg(self) -> bool:
        # CFG is active only for a configured guidance scale above 1.0.
        gs = self._config.guidance_scale
        return gs is not None and gs > 1.0

    def _get_latent_creator(self) -> type:
        return QwenLatentCreator

    def get_joint_block_wrappers(
        self,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ) -> list[JointBlockWrapper[Any]]:
        """Create wrapped joint blocks for Qwen."""
        return [
            QwenJointBlockWrapper(block, text_seq_len, encoder_hidden_states_mask)
            for block in self._transformer.transformer_blocks
        ]

    def get_single_block_wrappers(
        self,
        text_seq_len: int,
    ) -> list[SingleBlockWrapper[Any]]:
        """Qwen has no single blocks."""
        return []

    def slice_transformer_blocks(
        self,
        start_layer: int,
        end_layer: int,
    ):
        """Keep only joint blocks [start_layer, end_layer)."""
        self._transformer.transformer_blocks = self._transformer.transformer_blocks[
            start_layer:end_layer
        ]

    def encode_prompt(
        self, prompt: str, negative_prompt: str | None = None
    ) -> QwenPromptData:
        """Encode positive and negative prompts with the Qwen text encoder.

        An absent/empty negative prompt is replaced by a single space so the
        encoder always receives non-empty text.
        """
        assert isinstance(self.model.prompt_cache, dict)
        assert isinstance(self.model.tokenizers, dict)
        if negative_prompt is None or negative_prompt == "":
            negative_prompt = " "
        prompt_embeds, prompt_mask, neg_embeds, neg_mask = (
            QwenPromptEncoder.encode_prompt(
                prompt=prompt,
                negative_prompt=negative_prompt,
                prompt_cache=self.model.prompt_cache,
                qwen_tokenizer=self.model.tokenizers["qwen"],  # pyright: ignore[reportAny]
                qwen_text_encoder=self.model.text_encoder,
            )
        )
        return QwenPromptData(
            prompt_embeds=prompt_embeds,
            prompt_mask=prompt_mask,
            negative_prompt_embeds=neg_embeds,
            negative_prompt_mask=neg_mask,
        )

    def compute_embeddings(
        self,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Project latents and (normalized) text embeddings into model width."""
        embedded_hidden = self._transformer.img_in(hidden_states)
        encoder_hidden_states = self._transformer.txt_norm(prompt_embeds)
        embedded_encoder = self._transformer.txt_in(encoder_hidden_states)
        return embedded_hidden, embedded_encoder

    def compute_text_embeddings(
        self,
        t: int,
        runtime_config: Config,
        pooled_prompt_embeds: mx.array | None = None,
        hidden_states: mx.array | None = None,
    ) -> mx.array:
        """Compute the timestep conditioning vector for step `t`.

        Raises:
            ValueError: If neither `hidden_states` nor `pooled_prompt_embeds`
                is provided (one is needed to supply batch size/dtype).
        """
        # Use hidden_states if provided, otherwise fall back to pooled_prompt_embeds
        # (which for Qwen is the same as prompt_embeds)
        ref_tensor = (
            hidden_states if hidden_states is not None else pooled_prompt_embeds
        )
        if ref_tensor is None:
            raise ValueError(
                "Either hidden_states or pooled_prompt_embeds is required "
                "for Qwen text embeddings"
            )
        timestep = QwenTransformer._compute_timestep(t, runtime_config)  # noqa: SLF001
        # Broadcast the scalar timestep across the batch of ref_tensor.
        batch_size = ref_tensor.shape[0]
        timestep = mx.broadcast_to(timestep, (batch_size,)).astype(mx.float32)
        return self._transformer.time_text_embed(timestep, ref_tensor)  # pyright: ignore[reportAny]

    def compute_rotary_embeddings(
        self,
        prompt_embeds: mx.array,
        runtime_config: Config,
        encoder_hidden_states_mask: mx.array | None = None,
        cond_image_grid: tuple[int, int, int]
        | list[tuple[int, int, int]]
        | None = None,
        kontext_image_ids: mx.array | None = None,
    ) -> RotaryEmbeddings:
        """Delegate RoPE computation to QwenTransformer.

        Raises:
            ValueError: If `encoder_hidden_states_mask` is missing — Qwen
                needs it to size the text portion of the RoPE.
        """
        if encoder_hidden_states_mask is None:
            raise ValueError(
                "encoder_hidden_states_mask is required for Qwen RoPE computation"
            )
        return QwenTransformer._compute_rotary_embeddings(
            encoder_hidden_states_mask=encoder_hidden_states_mask,
            pos_embed=self._transformer.pos_embed,  # pyright: ignore[reportAny]
            config=runtime_config,
            cond_image_grid=cond_image_grid,
        )

    def apply_guidance(
        self,
        noise_positive: mx.array,
        noise_negative: mx.array,
        guidance_scale: float,
    ) -> mx.array:
        """Combine CFG branches using the model's guided-noise computation."""
        return self._model.compute_guided_noise(
            noise=noise_positive,
            noise_negative=noise_negative,
            guidance=guidance_scale,
        )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/qwen/adapter.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/models/qwen/config.py | from exo.worker.engines.image.config import (
BlockType,
ImageModelConfig,
TransformerBlockConfig,
)
# Qwen-Image txt2img: 60 joint-style blocks, no single blocks.
QWEN_IMAGE_CONFIG = ImageModelConfig(
    model_family="qwen",
    block_configs=(
        TransformerBlockConfig(
            block_type=BlockType.JOINT, count=60, has_separate_text_output=True
        ),
    ),
    # Steps per quality preset.
    default_steps={"low": 10, "medium": 25, "high": 50},
    num_sync_steps=7,
    guidance_scale=3.5,  # Set to None or < 1.0 to disable CFG
)
# Qwen-Image-Edit: same transformer layout; prompts are encoded together
# with the input image (see QwenEditModelAdapter).
QWEN_IMAGE_EDIT_CONFIG = ImageModelConfig(
    model_family="qwen-edit",
    block_configs=(
        TransformerBlockConfig(
            block_type=BlockType.JOINT, count=60, has_separate_text_output=True
        ),
    ),
    default_steps={"low": 10, "medium": 25, "high": 50},
    num_sync_steps=7,
    guidance_scale=3.5,
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/qwen/config.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/models/qwen/edit_adapter.py | import math
from dataclasses import dataclass
from pathlib import Path
from typing import Any
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.models.qwen.latent_creator.qwen_latent_creator import QwenLatentCreator
from mflux.models.qwen.model.qwen_transformer.qwen_transformer import QwenTransformer
from mflux.models.qwen.variants.edit.qwen_edit_util import QwenEditUtil
from mflux.models.qwen.variants.edit.qwen_image_edit import QwenImageEdit
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models.base import (
ModelAdapter,
PromptData,
RotaryEmbeddings,
)
from exo.worker.engines.image.models.qwen.wrappers import QwenJointBlockWrapper
from exo.worker.engines.image.pipeline.block_wrapper import (
JointBlockWrapper,
SingleBlockWrapper,
)
@dataclass(frozen=True)
class EditImageDimensions:
    """Dimensions derived from the edit input image.

    vl_* feed the vision-language prompt encoder, vae_* feed the VAE
    conditioning-latent creation; image_paths holds the source image path(s).
    """

    vl_width: int
    vl_height: int
    vae_width: int
    vae_height: int
    image_paths: list[str]
class QwenEditPromptData(PromptData):
    """Prompt-encoding results for Qwen-Image-Edit.

    In addition to the positive/negative embeddings and masks, carries the
    conditioning latents built from the input image, the image-token ids,
    and the conditioning-image patch grid.
    """

    def __init__(
        self,
        prompt_embeds: mx.array,
        prompt_mask: mx.array,
        negative_prompt_embeds: mx.array,
        negative_prompt_mask: mx.array,
        conditioning_latents: mx.array,
        qwen_image_ids: mx.array,
        cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]],
    ):
        self._prompt_embeds = prompt_embeds
        self._prompt_mask = prompt_mask
        self._negative_prompt_embeds = negative_prompt_embeds
        self._negative_prompt_mask = negative_prompt_mask
        self._conditioning_latents = conditioning_latents
        self._qwen_image_ids = qwen_image_ids
        self._cond_image_grid = cond_image_grid

    @property
    def prompt_embeds(self) -> mx.array:
        return self._prompt_embeds

    @property
    def pooled_prompt_embeds(self) -> mx.array:
        # Qwen has no pooled embedding; the sequence embeddings stand in.
        return self._prompt_embeds

    @property
    def negative_prompt_embeds(self) -> mx.array:
        return self._negative_prompt_embeds

    @property
    def negative_pooled_prompt_embeds(self) -> mx.array:
        return self._negative_prompt_embeds

    def get_encoder_hidden_states_mask(self, positive: bool = True) -> mx.array:
        """Return the attention mask for the requested CFG branch."""
        if positive:
            return self._prompt_mask
        else:
            return self._negative_prompt_mask

    @property
    def cond_image_grid(self) -> tuple[int, int, int] | list[tuple[int, int, int]]:
        return self._cond_image_grid

    @property
    def conditioning_latents(self) -> mx.array:
        return self._conditioning_latents

    @property
    def qwen_image_ids(self) -> mx.array:
        return self._qwen_image_ids

    @property
    def kontext_image_ids(self) -> mx.array | None:
        return None  # Flux-Kontext concept; not used by Qwen

    @property
    def is_edit_mode(self) -> bool:
        return True

    def get_batched_cfg_data(
        self,
    ) -> tuple[mx.array, mx.array, mx.array | None, mx.array | None] | None:
        """Batch positive and negative embeddings for CFG with batch_size=2.

        Pads shorter sequence to max length using zeros for embeddings
        and zeros (masked) for attention mask. Duplicates conditioning
        latents for both positive and negative passes.

        Returns:
            Tuple of (batched_embeds, batched_mask, None, batched_cond_latents)
            - batched_embeds: [2, max_seq, hidden]
            - batched_mask: [2, max_seq]
            - None for pooled (Qwen doesn't use it)
            - batched_cond_latents: [2, latent_seq, latent_dim]
        TODO(ciaran): type this
        """
        pos_embeds = self._prompt_embeds
        neg_embeds = self._negative_prompt_embeds
        pos_mask = self._prompt_mask
        neg_mask = self._negative_prompt_mask
        pos_seq_len = pos_embeds.shape[1]
        neg_seq_len = neg_embeds.shape[1]
        max_seq_len = max(pos_seq_len, neg_seq_len)
        hidden_dim = pos_embeds.shape[2]
        # Zero-pad whichever sequence is shorter up to max_seq_len.
        if pos_seq_len < max_seq_len:
            pad_len = max_seq_len - pos_seq_len
            pos_embeds = mx.concatenate(
                [
                    pos_embeds,
                    mx.zeros((1, pad_len, hidden_dim), dtype=pos_embeds.dtype),
                ],
                axis=1,
            )
            pos_mask = mx.concatenate(
                [pos_mask, mx.zeros((1, pad_len), dtype=pos_mask.dtype)],
                axis=1,
            )
        if neg_seq_len < max_seq_len:
            pad_len = max_seq_len - neg_seq_len
            neg_embeds = mx.concatenate(
                [
                    neg_embeds,
                    mx.zeros((1, pad_len, hidden_dim), dtype=neg_embeds.dtype),
                ],
                axis=1,
            )
            neg_mask = mx.concatenate(
                [neg_mask, mx.zeros((1, pad_len), dtype=neg_mask.dtype)],
                axis=1,
            )
        # Row 0 = positive branch, row 1 = negative branch.
        batched_embeds = mx.concatenate([pos_embeds, neg_embeds], axis=0)
        batched_mask = mx.concatenate([pos_mask, neg_mask], axis=0)
        batched_cond_latents = mx.concatenate(
            [self._conditioning_latents, self._conditioning_latents], axis=0
        )
        return batched_embeds, batched_mask, None, batched_cond_latents

    def get_cfg_branch_data(
        self, positive: bool
    ) -> tuple[mx.array, mx.array | None, mx.array | None, mx.array | None]:
        """Return (embeds, mask, pooled, cond_latents) for one CFG branch."""
        if positive:
            return (
                self._prompt_embeds,
                self._prompt_mask,
                None,
                self._conditioning_latents,
            )
        else:
            return (
                self._negative_prompt_embeds,
                self._negative_prompt_mask,
                None,
                self._conditioning_latents,
            )
class QwenEditModelAdapter(ModelAdapter[QwenImageEdit, QwenTransformer]):
"""Adapter for Qwen-Image-Edit model.
Key differences from standard QwenModelAdapter:
- Uses QwenImageEdit model with vision-language components
- Encodes prompts WITH input images via VL tokenizer/encoder
- Creates conditioning latents from input images
- Supports image editing with concatenated latents during diffusion
"""
    def __init__(
        self,
        config: ImageModelConfig,
        model_id: str,
        local_path: Path,
        quantize: int | None = None,
    ):
        """Load the QwenImageEdit model from `local_path`.

        Note: `model_id` is accepted for interface parity but not used —
        QwenImageEdit is constructed from the path alone.
        """
        self._config = config
        self._model = QwenImageEdit(
            quantize=quantize,
            model_path=str(local_path),
        )
        self._transformer = self._model.transformer
        # Populated by set_image_dimensions(); required before encode_prompt().
        self._edit_dimensions: EditImageDimensions | None = None
    @property
    def config(self) -> ImageModelConfig:
        """Engine-level model configuration."""
        return self._config

    @property
    def model(self) -> QwenImageEdit:
        """The underlying mflux QwenImageEdit model."""
        return self._model

    @property
    def transformer(self) -> QwenTransformer:
        """The model's transformer."""
        return self._transformer

    @property
    def hidden_dim(self) -> int:
        return self._transformer.inner_dim

    @property
    def needs_cfg(self) -> bool:
        # CFG is active only for a configured guidance scale above 1.0.
        gs = self._config.guidance_scale
        return gs is not None and gs > 1.0

    def _get_latent_creator(self) -> type[QwenLatentCreator]:
        return QwenLatentCreator
    def get_joint_block_wrappers(
        self,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ) -> list[JointBlockWrapper[Any]]:
        """Create wrapped joint blocks for Qwen Edit."""
        return [
            QwenJointBlockWrapper(block, text_seq_len, encoder_hidden_states_mask)
            for block in self._transformer.transformer_blocks
        ]

    def get_single_block_wrappers(
        self,
        text_seq_len: int,
    ) -> list[SingleBlockWrapper[Any]]:
        """Qwen has no single blocks."""
        return []
    def slice_transformer_blocks(
        self,
        start_layer: int,
        end_layer: int,
    ):
        """Keep only joint blocks [start_layer, end_layer)."""
        self._transformer.transformer_blocks = self._transformer.transformer_blocks[
            start_layer:end_layer
        ]
    def set_image_dimensions(self, image_path: Path) -> tuple[int, int]:
        """Compute and store dimensions from input image.

        Also stores image_paths for use in encode_prompt().

        Returns:
            (output_width, output_height) for runtime config
        """
        vl_w, vl_h, vae_w, vae_h, out_w, out_h = self._compute_dimensions_from_image(
            image_path
        )
        self._edit_dimensions = EditImageDimensions(
            vl_width=vl_w,
            vl_height=vl_h,
            vae_width=vae_w,
            vae_height=vae_h,
            image_paths=[str(image_path)],
        )
        return out_w, out_h
    def create_latents(self, seed: int, runtime_config: Config) -> mx.array:
        """Create initial noise latents (pure noise for edit mode)."""
        return QwenLatentCreator.create_noise(
            seed=seed,
            height=runtime_config.height,
            width=runtime_config.width,
        )
def encode_prompt(
    self, prompt: str, negative_prompt: str | None = None
) -> QwenEditPromptData:
    """Encode prompt + reference image(s) into embeddings and conditioning latents.

    Requires set_image_dimensions() to have been called first so the VL/VAE
    resolutions and image paths are available.

    Raises:
        RuntimeError: if set_image_dimensions() was not called beforehand.
    """
    dims = self._edit_dimensions
    if dims is None:
        raise RuntimeError(
            "set_image_dimensions() must be called before encode_prompt() "
            "for QwenEditModelAdapter"
        )
    # A single space stands in for a missing/empty negative prompt so the
    # text encoder still receives a non-empty string.
    if negative_prompt is None or negative_prompt == "":
        negative_prompt = " "
    # TODO(ciaran): config is untyped and unused, unsure if Config or RuntimeConfig is intended
    (
        prompt_embeds,
        prompt_mask,
        negative_prompt_embeds,
        negative_prompt_mask,
    ) = self._model._encode_prompts_with_images(
        prompt,
        negative_prompt,
        dims.image_paths,
        self._config,  # pyright: ignore[reportArgumentType]
        dims.vl_width,
        dims.vl_height,
    )
    # Build conditioning latents from the reference image(s) via the VAE,
    # plus the patch-grid bookkeeping needed for rotary embeddings.
    (
        conditioning_latents,
        qwen_image_ids,
        cond_h_patches,
        cond_w_patches,
        num_images,
    ) = QwenEditUtil.create_image_conditioning_latents(  # pyright: ignore[reportUnknownMemberType]
        vae=self._model.vae,
        height=dims.vae_height,
        width=dims.vae_width,
        image_paths=dims.image_paths,
        vl_width=dims.vl_width,
        vl_height=dims.vl_height,
    )
    # One grid tuple per image for multi-image conditioning; a bare tuple
    # for the single-image case.
    if num_images > 1:
        cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] = [
            (1, cond_h_patches, cond_w_patches) for _ in range(num_images)
        ]
    else:
        cond_image_grid = (1, cond_h_patches, cond_w_patches)
    return QwenEditPromptData(
        prompt_embeds=prompt_embeds,
        prompt_mask=prompt_mask,
        negative_prompt_embeds=negative_prompt_embeds,
        negative_prompt_mask=negative_prompt_mask,
        conditioning_latents=conditioning_latents,
        qwen_image_ids=qwen_image_ids,
        cond_image_grid=cond_image_grid,
    )
def compute_embeddings(
    self,
    hidden_states: mx.array,
    prompt_embeds: mx.array,
) -> tuple[mx.array, mx.array]:
    """Project image latents and (normalized) text embeddings into the model dim."""
    transformer = self._transformer
    image_stream = transformer.img_in(hidden_states)
    text_stream = transformer.txt_in(transformer.txt_norm(prompt_embeds))
    return image_stream, text_stream
def compute_text_embeddings(
    self,
    t: int,
    runtime_config: Config,
    pooled_prompt_embeds: mx.array | None = None,
    hidden_states: mx.array | None = None,
) -> mx.array:
    """Compute the time/text conditioning vector for inference step t.

    Either hidden_states (preferred) or pooled_prompt_embeds supplies the
    reference tensor for batch size and the time/text embedder input.
    """
    if hidden_states is not None:
        ref = hidden_states
    elif pooled_prompt_embeds is not None:
        ref = pooled_prompt_embeds
    else:
        raise ValueError(
            "Either hidden_states or pooled_prompt_embeds is required "
            "for Qwen text embeddings"
        )
    timestep = QwenTransformer._compute_timestep(t, runtime_config)  # noqa: SLF001
    timestep = mx.broadcast_to(timestep, (ref.shape[0],)).astype(mx.float32)
    return self._transformer.time_text_embed(timestep, ref)  # pyright: ignore[reportAny]
def compute_rotary_embeddings(
    self,
    prompt_embeds: mx.array,
    runtime_config: Config,
    encoder_hidden_states_mask: mx.array | None = None,
    cond_image_grid: tuple[int, int, int]
    | list[tuple[int, int, int]]
    | None = None,
    kontext_image_ids: mx.array | None = None,
) -> RotaryEmbeddings:
    """Compute RoPE tables for the joint text+image sequence.

    NOTE(review): prompt_embeds and kontext_image_ids are accepted for
    interface compatibility but are not used in this Qwen implementation.

    Raises:
        ValueError: if encoder_hidden_states_mask is None.
    """
    if encoder_hidden_states_mask is None:
        raise ValueError(
            "encoder_hidden_states_mask is required for Qwen RoPE computation"
        )
    return QwenTransformer._compute_rotary_embeddings(
        encoder_hidden_states_mask=encoder_hidden_states_mask,
        pos_embed=self._transformer.pos_embed,  # pyright: ignore[reportAny]
        config=runtime_config,
        cond_image_grid=cond_image_grid,
    )
def apply_guidance(
    self,
    noise_positive: mx.array,
    noise_negative: mx.array,
    guidance_scale: float,
) -> mx.array:
    """Blend positive/negative noise predictions via classifier-free guidance."""
    # Imported locally — presumably to avoid an import cycle with the
    # txt2img variant module; confirm before hoisting to module level.
    from mflux.models.qwen.variants.txt2img.qwen_image import QwenImage

    guided = QwenImage.compute_guided_noise(
        noise=noise_positive,
        noise_negative=noise_negative,
        guidance=guidance_scale,
    )
    return guided
def _compute_dimensions_from_image(
    self, image_path: Path
) -> tuple[int, int, int, int, int, int]:
    """Derive VL, VAE, and output resolutions from the input image's aspect ratio.

    All three resolutions preserve the input aspect ratio while targeting a
    fixed pixel area, with each side rounded to a multiple of 32:
    - vision-language encoder: 384x384 target area
    - VAE conditioning:        1024x1024 target area
    - output image:            1024x1024 target area, additionally floored
                               to a multiple of 16 (VAE scale factor 8 x 2)

    Returns:
        (vl_width, vl_height, vae_width, vae_height, output_width, output_height)
    """
    from mflux.utils.image_util import ImageUtil

    pil_image = ImageUtil.load_image(str(image_path)).convert("RGB")
    width, height = pil_image.size
    ratio = width / height

    vl_width, vl_height = self._fit_to_area(384 * 384, ratio)
    vae_width, vae_height = self._fit_to_area(1024 * 1024, ratio)
    output_width, output_height = self._fit_to_area(1024 * 1024, ratio)

    # Ensure multiple of 16 for VAE (a no-op after rounding to 32, kept for
    # safety if the rounding granularity ever changes).
    vae_scale_factor = 8
    multiple_of = vae_scale_factor * 2
    output_width = output_width // multiple_of * multiple_of
    output_height = output_height // multiple_of * multiple_of

    return (
        vl_width,
        vl_height,
        vae_width,
        vae_height,
        output_width,
        output_height,
    )

@staticmethod
def _fit_to_area(target_area: int, ratio: float) -> tuple[int, int]:
    """Scale a width/height ratio to a target pixel area, each side rounded
    to the nearest multiple of 32."""
    raw_width = math.sqrt(target_area * ratio)
    raw_height = raw_width / ratio
    return round(raw_width / 32) * 32, round(raw_height / 32) * 32
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/qwen/edit_adapter.py",
"license": "Apache License 2.0",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/models/qwen/wrappers.py | from typing import final
import mlx.core as mx
from mflux.models.qwen.model.qwen_transformer.qwen_attention import QwenAttention
from mflux.models.qwen.model.qwen_transformer.qwen_transformer_block import (
QwenTransformerBlock,
)
from pydantic import BaseModel, ConfigDict
from exo.worker.engines.image.models.base import RotaryEmbeddings
from exo.worker.engines.image.pipeline.block_wrapper import JointBlockWrapper
@final
class QwenStreamModulation(BaseModel):
    """Per-stream (image or text) modulation tensors for one transformer block.

    Produced in QwenJointBlockWrapper._compute_qkv and consumed later in
    _apply_output of the same forward pass.
    """

    # frozen + strict: instances are immutable snapshots; arbitrary types are
    # required because mx.array is not a pydantic-native type.
    model_config = ConfigDict(frozen=True, strict=True, arbitrary_types_allowed=True)
    # mod1: pre-attention shift/scale; mod2: pre-MLP shift/scale;
    # gate1: gate applied to the attention output residual.
    mod1: mx.array
    mod2: mx.array
    gate1: mx.array
class QwenJointBlockWrapper(JointBlockWrapper[QwenTransformerBlock]):
    """Pipefusion wrapper around a Qwen joint (dual-stream) transformer block.

    Splits the block's forward pass into QKV computation, attention, and
    output application so the base class can insert KV-cache handling
    between those phases.
    """

    def __init__(
        self,
        block: QwenTransformerBlock,
        text_seq_len: int,
        encoder_hidden_states_mask: mx.array | None = None,
    ):
        super().__init__(block, text_seq_len)
        self._encoder_hidden_states_mask = encoder_hidden_states_mask
        self._num_heads = block.attn.num_heads
        self._head_dim = block.attn.head_dim
        # Intermediate state stored between _compute_qkv and _apply_output
        self._img_mod: QwenStreamModulation | None = None
        self._txt_mod: QwenStreamModulation | None = None

    def set_encoder_mask(self, mask: mx.array | None) -> None:
        """Set the encoder hidden states mask for attention."""
        self._encoder_hidden_states_mask = mask

    def _compute_qkv(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
        patch_mode: bool = False,
    ) -> tuple[mx.array, mx.array, mx.array]:
        """Compute joint Q/K/V over the concatenated [text, image] sequence.

        Also stashes the per-stream modulation tensors for _apply_output.
        Returned tensors have shape [batch, heads, text+image, head_dim].
        """
        assert isinstance(rotary_embeddings, tuple)
        batch_size = hidden_states.shape[0]
        img_seq_len = hidden_states.shape[1]
        attn = self.block.attn
        # Modulation parameters for both streams, derived from text_embeddings.
        img_mod_params = self.block.img_mod_linear(
            self.block.img_mod_silu(text_embeddings)  # pyright: ignore[reportUnknownArgumentType]
        )
        txt_mod_params = self.block.txt_mod_linear(
            self.block.txt_mod_silu(text_embeddings)  # pyright: ignore[reportUnknownArgumentType]
        )
        img_mod1, img_mod2 = mx.split(img_mod_params, 2, axis=-1)
        txt_mod1, txt_mod2 = mx.split(txt_mod_params, 2, axis=-1)
        # Image stream: norm + modulate; mod2/gate1 are saved for _apply_output.
        img_normed = self.block.img_norm1(hidden_states)
        img_modulated, img_gate1 = QwenTransformerBlock._modulate(img_normed, img_mod1)
        self._img_mod = QwenStreamModulation(
            mod1=img_mod1, mod2=img_mod2, gate1=img_gate1
        )
        # Text stream: same treatment.
        txt_normed = self.block.txt_norm1(encoder_hidden_states)
        txt_modulated, txt_gate1 = QwenTransformerBlock._modulate(txt_normed, txt_mod1)
        self._txt_mod = QwenStreamModulation(
            mod1=txt_mod1, mod2=txt_mod2, gate1=txt_gate1
        )
        # Per-stream Q/K/V projections (separate weights for text via add_*).
        img_query = attn.to_q(img_modulated)
        img_key = attn.to_k(img_modulated)
        img_value = attn.to_v(img_modulated)
        txt_query = attn.add_q_proj(txt_modulated)
        txt_key = attn.add_k_proj(txt_modulated)
        txt_value = attn.add_v_proj(txt_modulated)
        # Split out the head dimension: [batch, seq, heads, head_dim].
        img_query = mx.reshape(
            img_query, (batch_size, img_seq_len, self._num_heads, self._head_dim)
        )
        img_key = mx.reshape(
            img_key, (batch_size, img_seq_len, self._num_heads, self._head_dim)
        )
        img_value = mx.reshape(
            img_value, (batch_size, img_seq_len, self._num_heads, self._head_dim)
        )
        txt_query = mx.reshape(
            txt_query,
            (batch_size, self._text_seq_len, self._num_heads, self._head_dim),
        )
        txt_key = mx.reshape(
            txt_key, (batch_size, self._text_seq_len, self._num_heads, self._head_dim)
        )
        txt_value = mx.reshape(
            txt_value, (batch_size, self._text_seq_len, self._num_heads, self._head_dim)
        )
        # Q/K normalization for both streams.
        img_query = attn.norm_q(img_query)
        img_key = attn.norm_k(img_key)
        txt_query = attn.norm_added_q(txt_query)
        txt_key = attn.norm_added_k(txt_key)
        (img_cos, img_sin), (txt_cos, txt_sin) = rotary_embeddings
        if patch_mode:
            # Slice image RoPE for patch, keep full text RoPE
            img_cos = img_cos[self._patch_start : self._patch_end]
            img_sin = img_sin[self._patch_start : self._patch_end]
        img_query = QwenAttention._apply_rope_qwen(img_query, img_cos, img_sin)
        img_key = QwenAttention._apply_rope_qwen(img_key, img_cos, img_sin)
        txt_query = QwenAttention._apply_rope_qwen(txt_query, txt_cos, txt_sin)
        txt_key = QwenAttention._apply_rope_qwen(txt_key, txt_cos, txt_sin)
        # Move heads forward: [batch, heads, seq, head_dim].
        img_query = mx.transpose(img_query, (0, 2, 1, 3))
        img_key = mx.transpose(img_key, (0, 2, 1, 3))
        img_value = mx.transpose(img_value, (0, 2, 1, 3))
        txt_query = mx.transpose(txt_query, (0, 2, 1, 3))
        txt_key = mx.transpose(txt_key, (0, 2, 1, 3))
        txt_value = mx.transpose(txt_value, (0, 2, 1, 3))
        # Joint sequence layout: text tokens first, then image tokens.
        query = mx.concatenate([txt_query, img_query], axis=2)
        key = mx.concatenate([txt_key, img_key], axis=2)
        value = mx.concatenate([txt_value, img_value], axis=2)
        return query, key, value

    def _compute_attention(
        self, query: mx.array, key: mx.array, value: mx.array
    ) -> mx.array:
        """Run masked attention over the joint [text, image] sequence.

        Inputs arrive as [batch, heads, seq, head_dim] and are transposed to
        the [batch, seq, heads, head_dim] layout before the kernel call.
        """
        attn = self.block.attn
        mask = QwenAttention._convert_mask_for_qwen(
            mask=self._encoder_hidden_states_mask,
            joint_seq_len=key.shape[2],
            txt_seq_len=self._text_seq_len,
        )
        query_bshd = mx.transpose(query, (0, 2, 1, 3))
        key_bshd = mx.transpose(key, (0, 2, 1, 3))
        value_bshd = mx.transpose(value, (0, 2, 1, 3))
        return attn._compute_attention_qwen(
            query=query_bshd,
            key=key_bshd,
            value=value_bshd,
            mask=mask,
            block_idx=None,
        )

    def _apply_output(
        self,
        attn_out: mx.array,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
    ) -> tuple[mx.array, mx.array]:
        """Project attention output and apply gated residual + MLP per stream.

        Consumes the modulation state stashed by _compute_qkv. Note the
        return order: (encoder_hidden_states, hidden_states) — text first.
        text_embeddings is unused here (modulation was captured earlier).
        """
        attn = self.block.attn
        assert self._img_mod is not None
        assert self._txt_mod is not None
        # attn_out is the joint sequence: text tokens first, image after.
        txt_attn_output = attn_out[:, : self._text_seq_len, :]
        img_attn_output = attn_out[:, self._text_seq_len :, :]
        img_attn_output = attn.attn_to_out[0](img_attn_output)  # pyright: ignore[reportAny]
        txt_attn_output = attn.to_add_out(txt_attn_output)
        # Gated residual add using gate1 captured in _compute_qkv.
        hidden_states = hidden_states + self._img_mod.gate1 * img_attn_output  # pyright: ignore[reportAny]
        encoder_hidden_states = (
            encoder_hidden_states + self._txt_mod.gate1 * txt_attn_output
        )
        # Image stream: norm2 + modulation (mod2), feed-forward, gated residual.
        img_normed2 = self.block.img_norm2(hidden_states)
        img_modulated2, img_gate2 = QwenTransformerBlock._modulate(
            img_normed2, self._img_mod.mod2
        )
        img_mlp_output = self.block.img_ff(img_modulated2)  # pyright: ignore[reportAny]
        hidden_states = hidden_states + img_gate2 * img_mlp_output  # pyright: ignore[reportAny]
        # Text stream: same pattern.
        txt_normed2 = self.block.txt_norm2(encoder_hidden_states)
        txt_modulated2, txt_gate2 = QwenTransformerBlock._modulate(
            txt_normed2, self._txt_mod.mod2
        )
        txt_mlp_output = self.block.txt_ff(txt_modulated2)  # pyright: ignore[reportAny]
        encoder_hidden_states = encoder_hidden_states + txt_gate2 * txt_mlp_output  # pyright: ignore[reportAny]
        return encoder_hidden_states, hidden_states
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/models/qwen/wrappers.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/pipeline/block_wrapper.py | from abc import ABC, abstractmethod
from enum import Enum
from typing import Generic, Self, TypeVar
import mlx.core as mx
from exo.worker.engines.image.models.base import RotaryEmbeddings
from exo.worker.engines.image.pipeline.kv_cache import ImagePatchKVCache
# Type of the wrapped transformer block (model-specific, e.g. QwenTransformerBlock).
BlockT = TypeVar("BlockT")
class BlockWrapperMode(Enum):
    """Execution mode for a block wrapper's forward pass."""

    CACHING = "caching"  # Sync mode: compute full attention, populate cache
    PATCHED = "patched"  # Async mode: compute patch attention, use cached KV
class BlockWrapperMixin:
    """Shared cache-management state and helpers for block wrappers.

    Covers KV-cache creation/reset, CACHING-vs-PATCHED mode selection, and
    the currently active patch token range.
    """

    _text_seq_len: int
    _kv_cache: ImagePatchKVCache | None
    _mode: BlockWrapperMode
    _patch_start: int
    _patch_end: int

    def _init_cache_state(self, text_seq_len: int) -> None:
        """Initialise cache state; called from the concrete wrapper __init__."""
        self._text_seq_len = text_seq_len
        self._kv_cache = None
        self._mode = BlockWrapperMode.CACHING
        self._patch_start = 0
        self._patch_end = 0

    def set_patch(
        self,
        mode: BlockWrapperMode,
        patch_start: int = 0,
        patch_end: int = 0,
    ) -> Self:
        """Set mode and patch range.

        Args:
            mode: CACHING (full attention) or PATCHED (use cached KV)
            patch_start: Start token index within image (for PATCHED mode)
            patch_end: End token index within image (for PATCHED mode)

        Returns:
            Self for method chaining
        """
        self._mode = mode
        self._patch_start = patch_start
        self._patch_end = patch_end
        return self

    def set_text_seq_len(self, text_seq_len: int) -> None:
        """Update the text sequence length used for K/V splitting."""
        self._text_seq_len = text_seq_len

    def _get_active_cache(self) -> ImagePatchKVCache | None:
        return self._kv_cache

    def _ensure_cache(self, img_key: mx.array) -> None:
        """Lazily allocate the cache, sized from the first full image K tensor."""
        if self._kv_cache is not None:
            return
        batch, heads, seq_len, dim = img_key.shape
        self._kv_cache = ImagePatchKVCache(
            batch_size=batch,
            num_heads=heads,
            image_seq_len=seq_len,
            head_dim=dim,
        )

    def _cache_full_image_kv(self, img_key: mx.array, img_value: mx.array) -> None:
        """Store K/V for the entire image (CACHING mode)."""
        self._ensure_cache(img_key)
        cache = self._get_active_cache()
        assert cache is not None
        cache.update_image_patch(0, img_key.shape[2], img_key, img_value)

    def _cache_patch_kv(self, img_key: mx.array, img_value: mx.array) -> None:
        """Store K/V for the current patch slice only (PATCHED mode)."""
        cache = self._get_active_cache()
        assert cache is not None
        cache.update_image_patch(self._patch_start, self._patch_end, img_key, img_value)

    def _get_full_kv(
        self, text_key: mx.array, text_value: mx.array
    ) -> tuple[mx.array, mx.array]:
        """Return fresh text K/V concatenated with the cached image K/V."""
        cache = self._get_active_cache()
        assert cache is not None
        return cache.get_full_kv(text_key, text_value)

    def reset_cache(self) -> None:
        """Drop the KV cache so the next CACHING pass rebuilds it."""
        self._kv_cache = None
class JointBlockWrapper(BlockWrapperMixin, ABC, Generic[BlockT]):
    """Base class for joint transformer block wrappers with pipefusion support.

    The wrapper:
    - Owns its KV cache (created lazily on first CACHING forward)
    - Controls the forward pass flow (CACHING vs PATCHED mode)
    - Handles patch slicing and cache operations

    Subclasses implement the model-specific _compute_qkv, _compute_attention,
    and _apply_output phases; K/V tensors use [batch, heads, seq, head_dim]
    layout with text tokens preceding image tokens along the seq axis.
    """

    # The wrapped model-specific transformer block.
    block: BlockT

    def __init__(self, block: BlockT, text_seq_len: int):
        self.block = block
        self._init_cache_state(text_seq_len)

    def set_encoder_mask(self, mask: mx.array | None) -> None:  # noqa: B027
        """Set the encoder hidden states mask for attention.

        Override in subclasses that use attention masks
        Default is a no-op for models that don't use masks
        """
        del mask  # Unused in base class

    def __call__(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> tuple[mx.array, mx.array]:
        """Dispatch to the CACHING or PATCHED forward path based on mode."""
        if self._mode == BlockWrapperMode.CACHING:
            return self._forward_caching(
                hidden_states, encoder_hidden_states, text_embeddings, rotary_embeddings
            )
        return self._forward_patched(
            hidden_states, encoder_hidden_states, text_embeddings, rotary_embeddings
        )

    def _forward_caching(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> tuple[mx.array, mx.array]:
        """CACHING mode: Full attention, store image K/V in cache."""
        query, key, value = self._compute_qkv(
            hidden_states, encoder_hidden_states, text_embeddings, rotary_embeddings
        )
        # Cache only the image portion; text K/V is recomputed every pass.
        img_key = key[:, :, self._text_seq_len :, :]
        img_value = value[:, :, self._text_seq_len :, :]
        self._cache_full_image_kv(img_key, img_value)
        attn_out = self._compute_attention(query, key, value)
        return self._apply_output(
            attn_out, hidden_states, encoder_hidden_states, text_embeddings
        )

    def _forward_patched(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> tuple[mx.array, mx.array]:
        """PATCHED mode: compute patch Q/K/V, attend against cached image K/V."""
        # hidden_states is already the patch (provided by runner)
        patch_hidden = hidden_states
        query, key, value = self._compute_qkv(
            patch_hidden,
            encoder_hidden_states,
            text_embeddings,
            rotary_embeddings,
            patch_mode=True,
        )
        # Refresh this patch's slice of the cache, then attend over fresh
        # text K/V + the full (partly stale) cached image K/V.
        text_key = key[:, :, : self._text_seq_len, :]
        text_value = value[:, :, : self._text_seq_len, :]
        img_key = key[:, :, self._text_seq_len :, :]
        img_value = value[:, :, self._text_seq_len :, :]
        self._cache_patch_kv(img_key, img_value)
        full_key, full_value = self._get_full_kv(text_key, text_value)
        attn_out = self._compute_attention(query, full_key, full_value)
        return self._apply_output(
            attn_out, patch_hidden, encoder_hidden_states, text_embeddings
        )

    @abstractmethod
    def _compute_qkv(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
        patch_mode: bool = False,
    ) -> tuple[mx.array, mx.array, mx.array]: ...

    @abstractmethod
    def _compute_attention(
        self, query: mx.array, key: mx.array, value: mx.array
    ) -> mx.array: ...

    @abstractmethod
    def _apply_output(
        self,
        attn_out: mx.array,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
    ) -> tuple[mx.array, mx.array]: ...
class SingleBlockWrapper(BlockWrapperMixin, ABC, Generic[BlockT]):
    """Base class for single-stream transformer block wrappers.

    Similar to JointBlockWrapper but for blocks that operate on a single
    concatenated [text, image] stream rather than separate streams.
    """

    # The wrapped model-specific transformer block.
    block: BlockT

    def __init__(self, block: BlockT, text_seq_len: int):
        self.block = block
        self._init_cache_state(text_seq_len)

    def __call__(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> mx.array:
        """Dispatch to the CACHING or PATCHED forward path based on mode."""
        if self._mode == BlockWrapperMode.CACHING:
            return self._forward_caching(
                hidden_states, text_embeddings, rotary_embeddings
            )
        return self._forward_patched(hidden_states, text_embeddings, rotary_embeddings)

    def _forward_caching(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> mx.array:
        """CACHING mode: Full attention, store image K/V in cache."""
        query, key, value = self._compute_qkv(
            hidden_states, text_embeddings, rotary_embeddings
        )
        # Cache only the image portion; text K/V is recomputed every pass.
        img_key = key[:, :, self._text_seq_len :, :]
        img_value = value[:, :, self._text_seq_len :, :]
        self._cache_full_image_kv(img_key, img_value)
        attn_out = self._compute_attention(query, key, value)
        return self._apply_output(attn_out, hidden_states, text_embeddings)

    def _forward_patched(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
    ) -> mx.array:
        """PATCHED mode: Compute patch Q/K/V, use cached image K/V for attention."""
        query, key, value = self._compute_qkv(
            hidden_states, text_embeddings, rotary_embeddings, patch_mode=True
        )
        # Refresh this patch's cache slice, then attend over fresh text K/V
        # plus the full cached image K/V.
        text_key = key[:, :, : self._text_seq_len, :]
        text_value = value[:, :, : self._text_seq_len, :]
        img_key = key[:, :, self._text_seq_len :, :]
        img_value = value[:, :, self._text_seq_len :, :]
        self._cache_patch_kv(img_key, img_value)
        full_key, full_value = self._get_full_kv(text_key, text_value)
        attn_out = self._compute_attention(query, full_key, full_value)
        return self._apply_output(attn_out, hidden_states, text_embeddings)

    @abstractmethod
    def _compute_qkv(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: RotaryEmbeddings,
        patch_mode: bool = False,
    ) -> tuple[mx.array, mx.array, mx.array]: ...

    @abstractmethod
    def _compute_attention(
        self, query: mx.array, key: mx.array, value: mx.array
    ) -> mx.array: ...

    @abstractmethod
    def _apply_output(
        self,
        attn_out: mx.array,
        hidden_states: mx.array,
        text_embeddings: mx.array,
    ) -> mx.array: ...
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/pipeline/block_wrapper.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/image/pipeline/kv_cache.py | import mlx.core as mx
class ImagePatchKVCache:
    """KV cache that stores only IMAGE K/V with patch-level updates.

    Only caches image K/V since:
    - Text K/V is always computed fresh (same for all patches)
    - Only image portion needs stale/fresh cache management across patches
    """

    def __init__(
        self,
        batch_size: int,
        num_heads: int,
        image_seq_len: int,
        head_dim: int,
        dtype: mx.Dtype = mx.float32,
    ):
        """Allocate zeroed key/value buffers of shape
        [batch_size, num_heads, image_seq_len, head_dim]."""
        self.batch_size = batch_size
        self.num_heads = num_heads
        self.image_seq_len = image_seq_len
        self.head_dim = head_dim
        self._dtype = dtype
        # Shared allocation helper keeps __init__ and reset() in sync.
        self.key_cache = self._zeros()
        self.value_cache = self._zeros()

    def _zeros(self) -> mx.array:
        """Freshly allocated zero tensor with the cache's full shape/dtype."""
        return mx.zeros(
            (self.batch_size, self.num_heads, self.image_seq_len, self.head_dim),
            dtype=self._dtype,
        )

    def update_image_patch(
        self, patch_start: int, patch_end: int, key: mx.array, value: mx.array
    ) -> None:
        """Update cache with fresh K/V for an image patch slice.

        Args:
            patch_start: Start token index within image portion (0-indexed)
            patch_end: End token index within image portion
            key: Fresh key tensor [batch, heads, patch_seq_len, head_dim]
            value: Fresh value tensor [batch, heads, patch_seq_len, head_dim]
        """
        self.key_cache[:, :, patch_start:patch_end, :] = key
        self.value_cache[:, :, patch_start:patch_end, :] = value

    def get_full_kv(
        self, text_key: mx.array, text_value: mx.array
    ) -> tuple[mx.array, mx.array]:
        """Return full K/V by concatenating fresh text K/V with cached image K/V.

        Args:
            text_key: Fresh text key tensor [batch, heads, text_seq_len, head_dim]
            text_value: Fresh text value tensor [batch, heads, text_seq_len, head_dim]

        Returns:
            Tuple of (full_key, full_value) with shape [batch, heads, text+image, head_dim]
        """
        full_key = mx.concatenate([text_key, self.key_cache], axis=2)
        full_value = mx.concatenate([text_value, self.value_cache], axis=2)
        return full_key, full_value

    def reset(self) -> None:
        """Reset cache to zeros."""
        self.key_cache = self._zeros()
        self.value_cache = self._zeros()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/pipeline/kv_cache.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/image/pipeline/runner.py | from collections.abc import Iterator
from dataclasses import dataclass
from math import ceil
from typing import Any, Optional, final
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.utils.exceptions import StopImageGenerationException
from tqdm import tqdm
from exo.shared.constants import EXO_TRACING_ENABLED
from exo.shared.tracing import (
clear_trace_buffer,
trace,
)
from exo.shared.types.worker.shards import CfgShardMetadata, PipelineShardMetadata
from exo.worker.engines.image.config import ImageModelConfig
from exo.worker.engines.image.models.base import (
ModelAdapter,
PromptData,
RotaryEmbeddings,
)
from exo.worker.engines.image.pipeline.block_wrapper import (
BlockWrapperMode,
JointBlockWrapper,
SingleBlockWrapper,
)
@final
@dataclass(frozen=True)
class CfgBranch:
    """One classifier-free-guidance branch (positive or negative prompt)."""

    positive: bool  # True for the positive-prompt branch
    embeds: mx.array  # prompt embeddings for this branch
    mask: mx.array | None  # optional attention mask over the embeddings
    pooled: mx.array | None  # optional pooled text embedding
    cond_latents: mx.array | None  # optional conditioning latents (e.g. edit mode)
def calculate_patch_heights(
    latent_height: int, num_patches: int
) -> tuple[list[int], int]:
    """Split latent_height rows into near-equal horizontal patches.

    All patches share the same height except possibly a smaller final one;
    the heights always sum to latent_height. Note the actual patch count may
    be lower than the requested num_patches.

    Returns:
        (patch_heights, actual_num_patches)
    """
    base_height = ceil(latent_height / num_patches)
    count = ceil(latent_height / base_height)
    final_height = latent_height - base_height * (count - 1)
    heights = [base_height] * (count - 1) + [final_height]
    return heights, count
def calculate_token_indices(
    patch_heights: list[int], latent_width: int
) -> list[tuple[int, int]]:
    """Map per-patch row heights to (start, end) token index ranges.

    Tokens are laid out row-major with latent_width tokens per row, so a
    patch covering rows [r, r+h) owns tokens [r*width, (r+h)*width).
    """
    ranges: list[tuple[int, int]] = []
    row = 0
    for height in patch_heights:
        start = row * latent_width
        row += height
        ranges.append((start, row * latent_width))
    return ranges
class DiffusionRunner:
"""Orchestrates the diffusion loop for image generation.
In distributed mode, it implements PipeFusion with:
- Sync pipeline for initial timesteps (full image, all devices in lockstep)
- Async pipeline for later timesteps (patches processed independently)
"""
def __init__(
    self,
    config: ImageModelConfig,
    adapter: ModelAdapter[Any, Any],
    group: Optional[mx.distributed.Group],
    shard_metadata: PipelineShardMetadata | CfgShardMetadata,
    num_patches: Optional[int] = None,
):
    """Set up runner topology and block assignment.

    Args:
        config: Static model configuration (block counts, guidance scale).
        adapter: Model-specific adapter providing blocks and embeddings.
        group: Distributed group, or None for single-node execution.
        shard_metadata: Pipeline or CFG shard layout for this device.
        num_patches: Pipefusion patch count; defaults to the pipeline world
            size (minimum 1).
    """
    self.config = config
    self.adapter = adapter
    self.group = group
    self._init_cfg_topology(shard_metadata)
    # Default: one patch per pipeline stage, at least one overall.
    self.num_patches = (
        num_patches if num_patches else max(1, self.pipeline_world_size)
    )
    self.total_joint = config.joint_block_count
    self.total_single = config.single_block_count
    self.total_layers = config.total_blocks
    # Per-generation guidance override (None -> use config.guidance_scale).
    self._guidance_override: float | None = None
    self._compute_assigned_blocks()
def _init_cfg_topology(
    self, shard_metadata: PipelineShardMetadata | CfgShardMetadata
) -> None:
    """Initialize CFG and pipeline topology from shard metadata.

    Both CfgShardMetadata and PipelineShardMetadata represent pipeline parallel
    execution. CFG adds a second parallel pipeline for negative prompt processing,
    but within each pipeline group the communication pattern is identical.
    """
    if self.group is None:
        # Single node - no distributed communication
        self.rank = 0
        self.world_size = 1
        self.start_layer = 0
        self.end_layer = self.config.total_blocks
        self.cfg_rank = 0
        self.cfg_world_size = 1
        self.cfg_parallel = False
        self.pipeline_rank = 0
        self.pipeline_world_size = 1
        self.next_pipeline_rank: int | None = None
        self.prev_pipeline_rank: int | None = None
        self.cfg_peer_rank: int | None = None
        self.first_pipeline_rank: int = 0
        self.last_pipeline_rank: int = 0
        return
    # Common fields from base metadata
    self.rank = shard_metadata.device_rank
    self.world_size = shard_metadata.world_size
    self.start_layer = shard_metadata.start_layer
    self.end_layer = shard_metadata.end_layer
    if isinstance(shard_metadata, CfgShardMetadata):
        # CFG parallel: two independent pipelines
        self.cfg_rank = shard_metadata.cfg_rank
        self.cfg_world_size = shard_metadata.cfg_world_size
        self.cfg_parallel = True
        self.pipeline_rank = shard_metadata.pipeline_rank
        self.pipeline_world_size = shard_metadata.pipeline_world_size
    else:
        # Pure pipeline: single pipeline group, sequential CFG
        self.cfg_rank = 0
        self.cfg_world_size = 1
        self.cfg_parallel = False
        self.pipeline_rank = shard_metadata.device_rank
        self.pipeline_world_size = shard_metadata.world_size
    # Pipeline neighbor computation (same logic for both types).
    # Neighbors are device ranks in the ring, or None at the pipeline ends.
    is_first = self.pipeline_rank == 0
    is_last = self.pipeline_rank == self.pipeline_world_size - 1
    self.next_pipeline_rank = (
        None
        if is_last
        else self._device_rank_for(self.cfg_rank, self.pipeline_rank + 1)
    )
    self.prev_pipeline_rank = (
        None
        if is_first
        else self._device_rank_for(self.cfg_rank, self.pipeline_rank - 1)
    )
    # CFG peer is the corresponding last stage in the other CFG group
    # (only last stages exchange noise predictions).
    if self.cfg_parallel and is_last:
        other_cfg_rank = 1 - self.cfg_rank
        self.cfg_peer_rank = self._device_rank_for(
            other_cfg_rank, self.pipeline_rank
        )
    else:
        self.cfg_peer_rank = None
    # First/last pipeline ranks for ring communication (latent broadcast)
    self.first_pipeline_rank = self._device_rank_for(self.cfg_rank, 0)
    self.last_pipeline_rank = self._device_rank_for(
        self.cfg_rank, self.pipeline_world_size - 1
    )
def _device_rank_for(self, cfg_rank: int, pipeline_rank: int) -> int:
    """Convert (cfg_rank, pipeline_rank) to device_rank in the ring topology.

    Ring layout: [cfg0_pipe0, cfg0_pipe1, ..., cfg1_pipeN-1, cfg1_pipeN-2, ..., cfg1_pipe0]
    Group 0 is in ascending order, group 1 is reversed so last stages are neighbors.
    """
    if self.cfg_parallel and cfg_rank != 0:
        # CFG group 1 occupies the tail of the ring in reverse order.
        return self.world_size - 1 - pipeline_rank
    return pipeline_rank
def _compute_assigned_blocks(self) -> None:
    """Determine which joint/single blocks this stage owns."""
    start, end = self.start_layer, self.end_layer
    joint_total = self.total_joint
    if end <= joint_total:
        # Assignment lies entirely within the joint-block range.
        self.joint_start, self.joint_end = start, end
        self.single_start = 0
        self.single_end = 0
    elif start >= joint_total:
        # Assignment lies entirely within the single-block range.
        self.joint_start = 0
        self.joint_end = 0
        self.single_start = start - joint_total
        self.single_end = end - joint_total
    else:
        # Assignment straddles the joint/single boundary.
        self.joint_start, self.joint_end = start, joint_total
        self.single_start = 0
        self.single_end = end - joint_total
    self.has_joint_blocks = self.joint_end > self.joint_start
    self.has_single_blocks = self.single_end > self.single_start
    # The stage that finishes the joint blocks owns the concat transition.
    self.owns_concat_stage = self.has_joint_blocks and (
        self.has_single_blocks or self.end_layer == self.total_joint
    )
    # Wrappers created lazily on first forward (need text_seq_len)
    self.joint_block_wrappers: list[JointBlockWrapper[Any]] | None = None
    self.single_block_wrappers: list[SingleBlockWrapper[Any]] | None = None
    self._wrappers_initialized = False
    self._current_text_seq_len: int | None = None
@property
def is_first_stage(self) -> bool:
    """Whether this node runs the first pipeline stage."""
    return self.pipeline_rank == 0
@property
def is_last_stage(self) -> bool:
    """Whether this node runs the final pipeline stage."""
    return self.pipeline_rank + 1 == self.pipeline_world_size
@property
def is_distributed(self) -> bool:
    """Whether a distributed group was supplied (multi-node execution)."""
    return self.group is not None
def _get_effective_guidance_scale(self) -> float | None:
    """Per-generation override when set, otherwise the config default."""
    override = self._guidance_override
    return override if override is not None else self.config.guidance_scale
def _get_cfg_branches(self, prompt_data: PromptData) -> Iterator[CfgBranch]:
    """Yield the CFG branches this node should process.

    - No CFG: yields one branch (positive)
    - CFG parallel: yields one branch (our assigned branch)
    - Sequential CFG: yields two branches (positive, then negative)
    """

    def branch(positive: bool) -> CfgBranch:
        # All cases build a CfgBranch identically; only the
        # positive/negative selection differs.
        embeds, mask, pooled, cond = prompt_data.get_cfg_branch_data(positive)
        return CfgBranch(
            positive=positive,
            embeds=embeds,
            mask=mask,
            pooled=pooled,
            cond_latents=cond,
        )

    if not self.adapter.needs_cfg:
        yield branch(True)
    elif self.cfg_parallel:
        # CFG-parallel: group 0 handles the positive prompt, group 1 the negative.
        yield branch(self.cfg_rank == 0)
    else:
        # Sequential CFG on a single pipeline: positive first, then negative.
        yield branch(True)
        yield branch(False)
def _combine_cfg_results(self, results: list[tuple[bool, mx.array]]) -> mx.array:
    """Combine per-branch (is_positive, noise) results into the final noise.

    One entry means either no CFG (return as-is) or CFG-parallel on the last
    stage (exchange with the peer pipeline, then apply guidance). Two entries
    mean sequential CFG: apply guidance locally.
    """
    if len(results) == 1:
        positive, noise = results[0]
        if self.cfg_parallel and self.is_last_stage:
            # Force evaluation before the send/recv exchange with the peer.
            # TODO(ciaran): try to remove
            mx.eval(noise)
            return self._exchange_and_apply_guidance(noise, positive)
        return noise
    noise_neg = next(n for p, n in results if not p)
    noise_pos = next(n for p, n in results if p)
    return self._apply_guidance(noise_pos, noise_neg)
def _exchange_and_apply_guidance(
    self, noise: mx.array, is_positive: bool
) -> mx.array:
    """Swap noise with the CFG peer pipeline and apply guidance locally.

    Both last-stage peers call this: each sends its own branch's noise and
    receives the other's. The positive side sends first then receives; the
    negative side receives first then sends, so the two directions pair up.
    """
    assert self.group is not None
    assert self.cfg_peer_rank is not None
    if is_positive:
        # Kick off the send asynchronously, then block on the peer's
        # negative-branch noise.
        noise = mx.distributed.send(noise, self.cfg_peer_rank, group=self.group)
        mx.async_eval(noise)
        noise_neg = mx.distributed.recv_like(
            noise, self.cfg_peer_rank, group=self.group
        )
        mx.eval(noise_neg)
        noise_pos = noise
    else:
        # Mirror ordering: receive the positive noise first, then send ours.
        noise_pos = mx.distributed.recv_like(
            noise, self.cfg_peer_rank, group=self.group
        )
        mx.eval(noise_pos)
        noise = mx.distributed.send(noise, self.cfg_peer_rank, group=self.group)
        mx.async_eval(noise)
        noise_neg = noise
    return self._apply_guidance(noise_pos, noise_neg)
def _apply_guidance(self, noise_pos: mx.array, noise_neg: mx.array) -> mx.array:
scale = self._get_effective_guidance_scale()
assert scale is not None
return self.adapter.apply_guidance(noise_pos, noise_neg, scale)
def _ensure_wrappers(
self,
text_seq_len: int,
encoder_hidden_states_mask: mx.array | None = None,
) -> None:
"""Lazily create block wrappers on first forward pass.
Wrappers need text_seq_len which is only known after prompt encoding.
Re-initializes if text_seq_len changes (e.g., warmup vs real generation).
"""
if self._wrappers_initialized and self._current_text_seq_len == text_seq_len:
return
self.joint_block_wrappers = self.adapter.get_joint_block_wrappers(
text_seq_len=text_seq_len,
encoder_hidden_states_mask=encoder_hidden_states_mask,
)
self.single_block_wrappers = self.adapter.get_single_block_wrappers(
text_seq_len=text_seq_len,
)
self._wrappers_initialized = True
self._current_text_seq_len = text_seq_len
def _reset_all_caches(self) -> None:
"""Reset KV caches on all wrappers for a new generation."""
if self.joint_block_wrappers:
for wrapper in self.joint_block_wrappers:
wrapper.reset_cache()
if self.single_block_wrappers:
for wrapper in self.single_block_wrappers:
wrapper.reset_cache()
def _set_text_seq_len(self, text_seq_len: int) -> None:
if self.joint_block_wrappers:
for wrapper in self.joint_block_wrappers:
wrapper.set_text_seq_len(text_seq_len)
if self.single_block_wrappers:
for wrapper in self.single_block_wrappers:
wrapper.set_text_seq_len(text_seq_len)
def _calculate_capture_steps(
self,
partial_images: int,
init_time_step: int,
num_inference_steps: int,
) -> set[int]:
"""Calculate which timesteps should produce partial images.
Places the first partial after step 1 for fast initial feedback,
then evenly spaces remaining partials with equal gaps between them
and from the last partial to the final image.
Args:
partial_images: Number of partial images to capture
init_time_step: Starting timestep (for img2img this may not be 0)
num_inference_steps: Total inference steps
Returns:
Set of timestep indices to capture
"""
if partial_images <= 0:
return set()
total_steps = num_inference_steps - init_time_step
if total_steps <= 1:
return set()
if partial_images >= total_steps - 1:
return set(range(init_time_step, num_inference_steps - 1))
capture_steps: set[int] = set()
first_capture = init_time_step + 1
capture_steps.add(first_capture)
if partial_images == 1:
return capture_steps
final_step = num_inference_steps - 1
remaining_range = final_step - first_capture
for i in range(1, partial_images):
step_idx = first_capture + int(i * remaining_range / partial_images)
capture_steps.add(step_idx)
return capture_steps
    def generate_image(
        self,
        runtime_config: Config,
        prompt: str,
        seed: int,
        partial_images: int = 0,
        guidance_override: float | None = None,
        negative_prompt: str | None = None,
        num_sync_steps: int = 1,
    ):
        """Primary entry point for image generation.

        Orchestrates the full generation flow:
        1. Create initial latents from the seed
        2. Encode the prompt (and negative prompt, if any)
        3. Run the diffusion loop (yielding partials if requested)
        4. Decode the final latents to an image

        Args:
            runtime_config: Generation config (steps, height, width)
            prompt: Text prompt
            seed: Random seed
            partial_images: Number of intermediate images to yield (0 for none)
            guidance_override: Optional override for guidance scale (CFG)
            negative_prompt: Optional negative prompt for CFG
            num_sync_steps: Steps run synchronously before the async schedule

        Yields:
            Partial images as (GeneratedImage, partial_index, total_partials)
            tuples, then the final GeneratedImage (last pipeline stage only).
        """
        self._guidance_override = guidance_override
        latents = self.adapter.create_latents(seed, runtime_config)
        prompt_data = self.adapter.encode_prompt(prompt, negative_prompt)
        capture_steps = self._calculate_capture_steps(
            partial_images=partial_images,
            init_time_step=runtime_config.init_time_step,
            num_inference_steps=runtime_config.num_inference_steps,
        )
        diffusion_gen = self._run_diffusion_loop(
            latents=latents,
            prompt_data=prompt_data,
            runtime_config=runtime_config,
            seed=seed,
            prompt=prompt,
            capture_steps=capture_steps,
            num_sync_steps=num_sync_steps,
        )
        partial_index = 0
        total_partials = len(capture_steps)
        if capture_steps:
            try:
                while True:
                    partial_latents, _step = next(diffusion_gen)
                    # Only the last stage holds decodable latents.
                    if self.is_last_stage:
                        partial_image = self.adapter.decode_latents(
                            partial_latents, runtime_config, seed, prompt
                        )
                        yield (partial_image, partial_index, total_partials)
                    partial_index += 1
            except StopIteration as e:
                # The diffusion generator returns the final latents.
                latents = e.value  # pyright: ignore[reportAny]
        else:
            try:
                while True:
                    next(diffusion_gen)
            except StopIteration as e:
                latents = e.value  # pyright: ignore[reportAny]
        if self.is_last_stage:
            yield self.adapter.decode_latents(latents, runtime_config, seed, prompt)  # pyright: ignore[reportAny]
    def _run_diffusion_loop(
        self,
        latents: mx.array,
        prompt_data: PromptData,
        runtime_config: Config,
        seed: int,
        prompt: str,
        num_sync_steps: int,
        capture_steps: set[int] | None = None,
    ):
        """Run all denoising steps, yielding (latents, step) at capture steps.

        Returns the final latents via StopIteration.value. A KeyboardInterrupt
        inside a step notifies the callbacks and is re-raised as
        StopImageGenerationException.
        """
        if capture_steps is None:
            capture_steps = set()
        self._reset_all_caches()
        clear_trace_buffer()
        time_steps = tqdm(range(runtime_config.num_inference_steps))
        ctx = self.adapter.model.callbacks.start(  # pyright: ignore[reportAny]
            seed=seed, prompt=prompt, config=runtime_config
        )
        ctx.before_loop(  # pyright: ignore[reportAny]
            latents=latents,
        )
        for t in time_steps:
            try:
                latents = self._diffusion_step(
                    t=t,
                    config=runtime_config,
                    latents=latents,
                    prompt_data=prompt_data,
                    num_sync_steps=num_sync_steps,
                )
                ctx.in_loop(  # pyright: ignore[reportAny]
                    t=t,
                    latents=latents,
                    time_steps=time_steps,
                )
                mx.eval(latents)
                # Only the last pipeline stage holds latents worth decoding.
                if t in capture_steps and self.is_last_stage:
                    yield (latents, t)
            except KeyboardInterrupt:  # noqa: PERF203
                ctx.interruption(t=t, latents=latents)  # pyright: ignore[reportAny]
                raise StopImageGenerationException(
                    f"Stopping image generation at step {t + 1}/{len(time_steps)}"
                ) from None
        ctx.after_loop(latents=latents)  # pyright: ignore[reportAny]
        return latents
    def _forward_pass(
        self,
        latents: mx.array,
        prompt_embeds: mx.array,
        pooled_prompt_embeds: mx.array,
        t: int,
        config: Config,
        encoder_hidden_states_mask: mx.array | None = None,
        cond_image_grid: tuple[int, int, int]
        | list[tuple[int, int, int]]
        | None = None,
        conditioning_latents: mx.array | None = None,
        kontext_image_ids: mx.array | None = None,
    ) -> mx.array:
        """Run a single forward pass through the transformer.

        Args:
            latents: Input latents (already scaled by caller)
            prompt_embeds: Text embeddings
            pooled_prompt_embeds: Pooled text embeddings (Flux) or placeholder (Qwen)
            t: Current timestep
            config: Runtime configuration
            encoder_hidden_states_mask: Attention mask for text (Qwen)
            cond_image_grid: Conditioning image grid dimensions (Qwen edit)
            conditioning_latents: Conditioning latents for edit mode
            kontext_image_ids: Position IDs for Kontext conditioning (Flux Kontext)

        Returns:
            Noise prediction tensor
        """
        text_seq_len = prompt_embeds.shape[1]
        self._ensure_wrappers(text_seq_len, encoder_hidden_states_mask)
        if self.joint_block_wrappers and encoder_hidden_states_mask is not None:
            for wrapper in self.joint_block_wrappers:
                wrapper.set_encoder_mask(encoder_hidden_states_mask)
        scaled_latents = config.scheduler.scale_model_input(latents, t)  # pyright: ignore[reportAny]
        # For edit mode: concatenate with conditioning latents
        original_latent_tokens: int = scaled_latents.shape[1]  # pyright: ignore[reportAny]
        if conditioning_latents is not None:
            scaled_latents = mx.concatenate(
                [scaled_latents, conditioning_latents], axis=1
            )
        hidden_states, encoder_hidden_states = self.adapter.compute_embeddings(
            scaled_latents, prompt_embeds
        )
        text_embeddings = self.adapter.compute_text_embeddings(
            t, config, pooled_prompt_embeds, hidden_states=hidden_states
        )
        rotary_embeddings = self.adapter.compute_rotary_embeddings(
            prompt_embeds,
            config,
            encoder_hidden_states_mask=encoder_hidden_states_mask,
            cond_image_grid=cond_image_grid,
            kontext_image_ids=kontext_image_ids,
        )
        assert self.joint_block_wrappers is not None
        # Dual-stream (joint) blocks process image and text streams separately.
        for wrapper in self.joint_block_wrappers:
            encoder_hidden_states, hidden_states = wrapper(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                text_embeddings=text_embeddings,
                rotary_embeddings=rotary_embeddings,
            )
        if self.joint_block_wrappers:
            # Single-stream blocks operate on the concatenated text+image sequence.
            hidden_states = self.adapter.merge_streams(
                hidden_states, encoder_hidden_states
            )
        assert self.single_block_wrappers is not None
        for wrapper in self.single_block_wrappers:
            hidden_states = wrapper(
                hidden_states=hidden_states,
                text_embeddings=text_embeddings,
                rotary_embeddings=rotary_embeddings,
            )
        # Extract image portion
        hidden_states = hidden_states[:, text_seq_len:, ...]
        # For edit mode: extract only the generated portion (exclude conditioning latents)
        if conditioning_latents is not None:
            hidden_states = hidden_states[:, :original_latent_tokens, ...]
        return self.adapter.final_projection(hidden_states, text_embeddings)
def _diffusion_step(
self,
t: int,
config: Config,
latents: mx.array,
prompt_data: PromptData,
num_sync_steps: int,
) -> mx.array:
if self.group is None:
return self._single_node_step(t, config, latents, prompt_data)
elif (
self.pipeline_world_size == 1 or t < config.init_time_step + num_sync_steps
):
with trace(name=f"sync {t}", rank=self.rank, category="sync"):
return self._sync_pipeline_step(
t,
config,
latents,
prompt_data,
)
else:
with trace(name=f"async {t}", rank=self.rank, category="async"):
return self._async_pipeline_step(
t,
config,
latents,
prompt_data,
is_first_async_step=t == config.init_time_step + num_sync_steps,
)
    def _single_node_step(
        self,
        t: int,
        config: Config,
        latents: mx.array,
        prompt_data: PromptData,
    ) -> mx.array:
        """Run one denoising step locally (no pipeline/CFG parallelism).

        Iterates CFG branches sequentially, then combines them and advances
        the scheduler.
        """
        cond_image_grid = prompt_data.cond_image_grid
        kontext_image_ids = prompt_data.kontext_image_ids
        results: list[tuple[bool, mx.array]] = []
        for branch in self._get_cfg_branches(prompt_data):
            # Reset caches before each branch to ensure no state contamination
            self._reset_all_caches()
            # Models without pooled embeddings fall back to the raw embeds.
            pooled_embeds = (
                branch.pooled if branch.pooled is not None else branch.embeds
            )
            noise = self._forward_pass(
                latents,
                branch.embeds,
                pooled_embeds,
                t=t,
                config=config,
                encoder_hidden_states_mask=branch.mask,
                cond_image_grid=cond_image_grid,
                conditioning_latents=branch.cond_latents,
                kontext_image_ids=kontext_image_ids,
            )
            results.append((branch.positive, noise))
        noise = self._combine_cfg_results(results)
        return config.scheduler.step(noise=noise, timestep=t, latents=latents)  # pyright: ignore[reportAny]
def _create_patches(
self,
latents: mx.array,
config: Config,
) -> tuple[list[mx.array], list[tuple[int, int]]]:
latent_height = config.height // 16
latent_width = config.width // 16
patch_heights, _ = calculate_patch_heights(latent_height, self.num_patches)
token_indices = calculate_token_indices(patch_heights, latent_width)
patch_latents = [latents[:, start:end, :] for start, end in token_indices]
return patch_latents, token_indices
    def _run_sync_pass(
        self,
        t: int,
        config: Config,
        scaled_hidden_states: mx.array,
        prompt_embeds: mx.array,
        pooled_prompt_embeds: mx.array,
        encoder_hidden_states_mask: mx.array | None,
        cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] | None,
        kontext_image_ids: mx.array | None,
        num_img_tokens: int,
        original_latent_tokens: int,
        conditioning_latents: mx.array | None,
    ) -> mx.array | None:
        """Run this stage's share of one synchronous pipelined forward pass.

        Each stage receives activations from the previous stage, runs its own
        joint and/or single transformer blocks, and sends the result onward.
        Only the last stage returns a noise prediction; every other stage
        returns None. The recv shapes must mirror the peer's send shapes
        exactly, so the branching below follows stage ownership flags
        (has_joint_blocks / owns_concat_stage / has_single_blocks).
        """
        hidden_states = scaled_hidden_states
        batch_size = hidden_states.shape[0]
        text_seq_len = prompt_embeds.shape[1]
        hidden_dim = self.adapter.hidden_dim
        dtype = scaled_hidden_states.dtype
        self._set_text_seq_len(text_seq_len)
        if self.joint_block_wrappers:
            for wrapper in self.joint_block_wrappers:
                wrapper.set_encoder_mask(encoder_hidden_states_mask)
        encoder_hidden_states: mx.array | None = None
        if self.is_first_stage:
            # Only the first stage embeds raw latents and prompt embeddings.
            hidden_states, encoder_hidden_states = self.adapter.compute_embeddings(
                hidden_states, prompt_embeds
            )
        text_embeddings = self.adapter.compute_text_embeddings(
            t, config, pooled_prompt_embeds, hidden_states=hidden_states
        )
        image_rotary_embeddings = self.adapter.compute_rotary_embeddings(
            prompt_embeds,
            config,
            encoder_hidden_states_mask=encoder_hidden_states_mask,
            cond_image_grid=cond_image_grid,
            kontext_image_ids=kontext_image_ids,
        )
        if self.has_joint_blocks:
            if not self.is_first_stage:
                # Receive both streams (image, text) from the previous stage.
                assert self.prev_pipeline_rank is not None
                with trace(
                    name=f"recv {self.prev_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    hidden_states = mx.distributed.recv(
                        (batch_size, num_img_tokens, hidden_dim),
                        dtype,
                        self.prev_pipeline_rank,
                        group=self.group,
                    )
                    encoder_hidden_states = mx.distributed.recv(
                        (batch_size, text_seq_len, hidden_dim),
                        dtype,
                        self.prev_pipeline_rank,
                        group=self.group,
                    )
                    mx.eval(hidden_states, encoder_hidden_states)
            assert self.joint_block_wrappers is not None
            assert encoder_hidden_states is not None
            with trace(
                name="joint_blocks",
                rank=self.rank,
                category="compute",
            ):
                for wrapper in self.joint_block_wrappers:
                    wrapper.set_patch(BlockWrapperMode.CACHING)
                    encoder_hidden_states, hidden_states = wrapper(
                        hidden_states=hidden_states,
                        encoder_hidden_states=encoder_hidden_states,
                        text_embeddings=text_embeddings,
                        rotary_embeddings=image_rotary_embeddings,
                    )
                if EXO_TRACING_ENABLED:
                    mx.eval(encoder_hidden_states, hidden_states)
        if self.owns_concat_stage:
            # This stage merges the two streams into one sequence.
            assert encoder_hidden_states is not None
            concatenated = self.adapter.merge_streams(
                hidden_states, encoder_hidden_states
            )
            if self.has_single_blocks or self.is_last_stage:
                hidden_states = concatenated
            else:
                assert self.next_pipeline_rank is not None
                with trace(
                    name=f"send {self.next_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    concatenated = mx.distributed.send(
                        concatenated, self.next_pipeline_rank, group=self.group
                    )
                    mx.async_eval(concatenated)
        elif self.has_joint_blocks and not self.is_last_stage:
            # Still in dual-stream territory: forward both streams separately.
            assert encoder_hidden_states is not None
            assert self.next_pipeline_rank is not None
            with trace(
                name=f"send {self.next_pipeline_rank}",
                rank=self.rank,
                category="comms",
            ):
                hidden_states = mx.distributed.send(
                    hidden_states, self.next_pipeline_rank, group=self.group
                )
                encoder_hidden_states = mx.distributed.send(
                    encoder_hidden_states, self.next_pipeline_rank, group=self.group
                )
                mx.async_eval(hidden_states, encoder_hidden_states)
        if self.has_single_blocks:
            if not self.owns_concat_stage and not self.is_first_stage:
                # Receive the already-merged text+image sequence.
                assert self.prev_pipeline_rank is not None
                with trace(
                    name=f"recv {self.prev_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    hidden_states = mx.distributed.recv(
                        (batch_size, text_seq_len + num_img_tokens, hidden_dim),
                        dtype,
                        self.prev_pipeline_rank,
                        group=self.group,
                    )
                    mx.eval(hidden_states)
            assert self.single_block_wrappers is not None
            with trace(
                name="single blocks",
                rank=self.rank,
                category="compute",
            ):
                for wrapper in self.single_block_wrappers:
                    wrapper.set_patch(BlockWrapperMode.CACHING)
                    hidden_states = wrapper(
                        hidden_states=hidden_states,
                        text_embeddings=text_embeddings,
                        rotary_embeddings=image_rotary_embeddings,
                    )
                if EXO_TRACING_ENABLED:
                    mx.eval(hidden_states)
            if not self.is_last_stage:
                assert self.next_pipeline_rank is not None
                with trace(
                    name=f"send {self.next_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    hidden_states = mx.distributed.send(
                        hidden_states, self.next_pipeline_rank, group=self.group
                    )
                    mx.async_eval(hidden_states)
        # Drop the text tokens; for edit mode also drop conditioning tokens.
        hidden_states = hidden_states[:, text_seq_len:, ...]
        if conditioning_latents is not None:
            hidden_states = hidden_states[:, :original_latent_tokens, ...]
        if self.is_last_stage:
            return self.adapter.final_projection(hidden_states, text_embeddings)
        return None
    def _sync_pipeline_step(
        self,
        t: int,
        config: Config,
        hidden_states: mx.array,
        prompt_data: PromptData,
    ) -> mx.array:
        """Run one fully-synchronous pipelined denoising step.

        All stages process the full latent sequence; the last stage advances
        the scheduler and broadcasts the updated latents back to the first
        stage so the next step can begin.
        """
        prev_latents = hidden_states
        cond_image_grid = prompt_data.cond_image_grid
        kontext_image_ids = prompt_data.kontext_image_ids
        scaled_hidden_states = config.scheduler.scale_model_input(hidden_states, t)  # pyright: ignore[reportAny]
        original_latent_tokens: int = scaled_hidden_states.shape[1]  # pyright: ignore[reportAny]
        results: list[tuple[bool, mx.array]] = []
        for branch in self._get_cfg_branches(prompt_data):
            # Models without pooled embeddings fall back to the raw embeds.
            pooled_embeds = (
                branch.pooled if branch.pooled is not None else branch.embeds
            )
            cond_latents = branch.cond_latents
            if cond_latents is not None:
                num_img_tokens: int = original_latent_tokens + cond_latents.shape[1]
            else:
                num_img_tokens = original_latent_tokens
            step_latents: mx.array = scaled_hidden_states  # pyright: ignore[reportAny]
            if self.is_first_stage and cond_latents is not None:
                # Edit mode: append conditioning latents on the first stage.
                step_latents = mx.concatenate([step_latents, cond_latents], axis=1)
            text_seq_len = branch.embeds.shape[1]
            self._ensure_wrappers(text_seq_len, branch.mask)
            noise = self._run_sync_pass(
                t,
                config,
                step_latents,
                branch.embeds,
                pooled_embeds,
                branch.mask,
                cond_image_grid,
                kontext_image_ids,
                num_img_tokens,
                original_latent_tokens,
                cond_latents,
            )
            if self.is_last_stage:
                assert noise is not None
                results.append((branch.positive, noise))
        if self.is_last_stage:
            # Only the last stage has noise predictions to combine and step.
            noise = self._combine_cfg_results(results)
            hidden_states = config.scheduler.step(  # pyright: ignore[reportAny]
                noise=noise, timestep=t, latents=prev_latents
            )
            if not self.is_first_stage:
                # Ring: send updated latents back to the first stage.
                hidden_states = mx.distributed.send(
                    hidden_states, self.first_pipeline_rank, group=self.group
                )
                mx.async_eval(hidden_states)
        elif self.is_first_stage:
            hidden_states = mx.distributed.recv_like(
                prev_latents, src=self.last_pipeline_rank, group=self.group
            )
            mx.eval(hidden_states)
        else:
            # Middle stages keep their previous latents; they never see the
            # stepped result directly.
            hidden_states = prev_latents
        return hidden_states
    def _async_pipeline_step(
        self,
        t: int,
        config: Config,
        latents: mx.array,
        prompt_data: PromptData,
        is_first_async_step: bool,
    ) -> mx.array:
        """Run one denoising step using the async patch-pipelined schedule.

        The latent sequence is split into patches that flow through the
        pipeline stages independently, so stages can overlap work on
        different patches. The last stage steps the scheduler per patch and
        sends results back to the first stage for the next timestep.
        """
        patch_latents, token_indices = self._create_patches(latents, config)
        cond_image_grid = prompt_data.cond_image_grid
        kontext_image_ids = prompt_data.kontext_image_ids
        # Snapshot the pre-step patches; scheduler.step needs them unchanged.
        prev_patch_latents = [p for p in patch_latents]
        encoder_hidden_states: mx.array | None = None
        for patch_idx in range(len(patch_latents)):
            patch = patch_latents[patch_idx]
            if (
                self.is_first_stage
                and not self.is_last_stage
                and not is_first_async_step
            ):
                # Ring: receive this patch's latents stepped by the last stage
                # during the previous timestep.
                with trace(
                    name=f"recv {self.last_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    patch = mx.distributed.recv_like(
                        patch, src=self.last_pipeline_rank, group=self.group
                    )
                    mx.eval(patch)
            results: list[tuple[bool, mx.array]] = []
            for branch in self._get_cfg_branches(prompt_data):
                # Models without pooled embeddings fall back to the raw embeds.
                pooled_embeds = (
                    branch.pooled if branch.pooled is not None else branch.embeds
                )
                text_seq_len = branch.embeds.shape[1]
                self._ensure_wrappers(text_seq_len, branch.mask)
                self._set_text_seq_len(text_seq_len)
                if self.joint_block_wrappers:
                    for wrapper in self.joint_block_wrappers:
                        wrapper.set_encoder_mask(branch.mask)
                text_embeddings = self.adapter.compute_text_embeddings(
                    t, config, pooled_embeds
                )
                image_rotary_embeddings = self.adapter.compute_rotary_embeddings(
                    branch.embeds,
                    config,
                    encoder_hidden_states_mask=branch.mask,
                    cond_image_grid=cond_image_grid,
                    kontext_image_ids=kontext_image_ids,
                )
                noise, encoder_hidden_states = self._run_single_patch_pass(
                    patch=patch,
                    patch_idx=patch_idx,
                    token_indices=token_indices[patch_idx],
                    prompt_embeds=branch.embeds,
                    text_embeddings=text_embeddings,
                    image_rotary_embeddings=image_rotary_embeddings,
                    encoder_hidden_states=encoder_hidden_states,
                )
                if self.is_last_stage:
                    assert noise is not None
                    results.append((branch.positive, noise))
            if self.is_last_stage:
                noise = self._combine_cfg_results(results)
                patch_latents[patch_idx] = config.scheduler.step(  # pyright: ignore[reportAny]
                    noise=noise,
                    timestep=t,
                    latents=prev_patch_latents[patch_idx],
                )
                if not self.is_first_stage and t != config.num_inference_steps - 1:
                    # Ring: hand the stepped patch back to the first stage,
                    # except after the final step (no further work needed).
                    with trace(
                        name=f"send {self.first_pipeline_rank}",
                        rank=self.rank,
                        category="comms",
                    ):
                        patch_latents[patch_idx] = mx.distributed.send(
                            patch_latents[patch_idx],
                            self.first_pipeline_rank,
                            group=self.group,
                        )
                        mx.async_eval(patch_latents[patch_idx])
        return mx.concatenate(patch_latents, axis=1)
    def _run_single_patch_pass(
        self,
        patch: mx.array,
        patch_idx: int,
        token_indices: tuple[int, int],
        prompt_embeds: mx.array,
        text_embeddings: mx.array,
        image_rotary_embeddings: RotaryEmbeddings,
        encoder_hidden_states: mx.array | None,
    ) -> tuple[mx.array | None, mx.array | None]:
        """Process a single patch through the forward pipeline.

        Handles stage-to-stage communication (stage i -> stage i+1).
        Ring communication (last stage -> first stage) is handled by the caller.

        Args:
            patch: The patch latents to process
            patch_idx: Index of this patch (0-indexed)
            token_indices: (start_token, end_token) for this patch
            prompt_embeds: Text embeddings (for compute_embeddings on first stage)
            text_embeddings: Precomputed text embeddings
            image_rotary_embeddings: Precomputed rotary embeddings
            encoder_hidden_states: Encoder hidden states (passed between patches)

        Returns:
            (noise_prediction, encoder_hidden_states) - noise is None for non-last stages
        """
        start_token, end_token = token_indices
        batch_size = patch.shape[0]
        text_seq_len = prompt_embeds.shape[1]
        hidden_dim = self.adapter.hidden_dim
        if self.has_joint_blocks:
            if not self.is_first_stage:
                assert self.prev_pipeline_rank is not None
                patch_len = patch.shape[1]
                with trace(
                    name=f"recv {self.prev_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    patch = mx.distributed.recv(
                        (batch_size, patch_len, hidden_dim),
                        patch.dtype,
                        self.prev_pipeline_rank,
                        group=self.group,
                    )
                    mx.eval(patch)
                if patch_idx == 0:
                    # Text stream is identical for every patch, so it is only
                    # transferred once per step (with the first patch).
                    with trace(
                        name=f"recv {self.prev_pipeline_rank}",
                        rank=self.rank,
                        category="comms",
                    ):
                        encoder_hidden_states = mx.distributed.recv(
                            (batch_size, text_seq_len, hidden_dim),
                            patch.dtype,
                            self.prev_pipeline_rank,
                            group=self.group,
                        )
                        mx.eval(encoder_hidden_states)
            if self.is_first_stage:
                patch, encoder_hidden_states = self.adapter.compute_embeddings(
                    patch, prompt_embeds
                )
            assert self.joint_block_wrappers is not None
            assert encoder_hidden_states is not None
            with trace(
                name=f"joint patch {patch_idx}",
                rank=self.rank,
                category="compute",
            ):
                for wrapper in self.joint_block_wrappers:
                    # PATCHED mode attends the patch against cached KV for the
                    # rest of the sequence.
                    wrapper.set_patch(BlockWrapperMode.PATCHED, start_token, end_token)
                    encoder_hidden_states, patch = wrapper(
                        hidden_states=patch,
                        encoder_hidden_states=encoder_hidden_states,
                        text_embeddings=text_embeddings,
                        rotary_embeddings=image_rotary_embeddings,
                    )
                if EXO_TRACING_ENABLED:
                    mx.eval(encoder_hidden_states, patch)
        if self.owns_concat_stage:
            assert encoder_hidden_states is not None
            patch_concat = self.adapter.merge_streams(patch, encoder_hidden_states)
            if self.has_single_blocks or self.is_last_stage:
                patch = patch_concat
            else:
                assert self.next_pipeline_rank is not None
                with trace(
                    name=f"send {self.next_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    patch_concat = mx.distributed.send(
                        patch_concat, self.next_pipeline_rank, group=self.group
                    )
                    mx.async_eval(patch_concat)
        elif self.has_joint_blocks and not self.is_last_stage:
            assert self.next_pipeline_rank is not None
            with trace(
                name=f"send {self.next_pipeline_rank}",
                rank=self.rank,
                category="comms",
            ):
                patch = mx.distributed.send(
                    patch, self.next_pipeline_rank, group=self.group
                )
                mx.async_eval(patch)
            if patch_idx == 0:
                # Mirror of the recv above: text stream sent only once per step.
                assert encoder_hidden_states is not None
                with trace(
                    name=f"send {self.next_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    encoder_hidden_states = mx.distributed.send(
                        encoder_hidden_states, self.next_pipeline_rank, group=self.group
                    )
                    mx.async_eval(encoder_hidden_states)
        if self.has_single_blocks:
            if not self.owns_concat_stage and not self.is_first_stage:
                assert self.prev_pipeline_rank is not None
                patch_len = patch.shape[1]
                with trace(
                    name=f"recv {self.prev_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    patch = mx.distributed.recv(
                        (batch_size, text_seq_len + patch_len, hidden_dim),
                        patch.dtype,
                        self.prev_pipeline_rank,
                        group=self.group,
                    )
                    mx.eval(patch)
            assert self.single_block_wrappers is not None
            with trace(
                name=f"single patch {patch_idx}",
                rank=self.rank,
                category="compute",
            ):
                for wrapper in self.single_block_wrappers:
                    wrapper.set_patch(BlockWrapperMode.PATCHED, start_token, end_token)
                    patch = wrapper(
                        hidden_states=patch,
                        text_embeddings=text_embeddings,
                        rotary_embeddings=image_rotary_embeddings,
                    )
                if EXO_TRACING_ENABLED:
                    mx.eval(patch)
            if not self.is_last_stage:
                assert self.next_pipeline_rank is not None
                with trace(
                    name=f"send {self.next_pipeline_rank}",
                    rank=self.rank,
                    category="comms",
                ):
                    patch = mx.distributed.send(
                        patch, self.next_pipeline_rank, group=self.group
                    )
                    mx.async_eval(patch)
        noise: mx.array | None = None
        if self.is_last_stage:
            # Strip the text tokens before projecting to a noise prediction.
            patch_img_only = patch[:, text_seq_len:, :]
            noise = self.adapter.final_projection(patch_img_only, text_embeddings)
        return noise, encoder_hidden_states
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/image/pipeline/runner.py",
"license": "Apache License 2.0",
"lines": 1077,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_distributed_fix.py | import multiprocessing as mp
import os
from dataclasses import dataclass
from typing import Any, Callable
import pytest
from exo.worker.tests.unittests.test_mlx.conftest import (
DEFAULT_GPT_OSS_CONFIG,
create_hostfile,
run_gpt_oss_pipeline_device,
run_gpt_oss_tensor_parallel_device,
)
def _check_model_exists() -> bool:
    """Return True when the GPT-OSS model weights are present on disk."""
    model_path = DEFAULT_GPT_OSS_CONFIG.model_path
    return model_path.exists()
# Mark every test in this module as slow and skip the whole module when the
# GPT-OSS weights are not available locally.
pytestmark = [
    pytest.mark.slow,
    pytest.mark.skipif(
        not _check_model_exists(),
        reason=f"GPT-OSS model not found at {DEFAULT_GPT_OSS_CONFIG.model_path}",
    ),
]
@dataclass
class DistributedTestResult:
    """Outcome of one multi-process distributed test run."""

    # Whether any worker had to be terminated after the join timeout.
    timed_out: bool
    # Number of ranks that were launched.
    world_size: int
    # Per-rank (success, value) pairs collected from the result queue.
    results: dict[int, tuple[bool, str]]

    @property
    def all_success(self) -> bool:
        """True when every rank reported back and all of them succeeded."""
        if len(self.results) != self.world_size:
            return False
        return all(ok for ok, _ in self.results.values())
def run_distributed_test(
    world_size: int,
    port_offset: int,
    process_timeout: int,
    target: Callable[..., None],
    make_args: Callable[[int], tuple[Any, ...]],
) -> DistributedTestResult:
    """Spawn one process per rank, join them, and collect per-rank results.

    Args:
        world_size: Number of ranks/processes to launch.
        port_offset: Added to the base port so concurrent tests don't collide.
        process_timeout: Seconds to wait for each process before terminating.
        target: Process entry point; invoked as
            target(rank, world_size, hostfile_path, *make_args(rank), queue).
        make_args: Builds the per-rank extra arguments.

    Returns:
        DistributedTestResult with the timeout flag and per-rank outcomes.
    """
    ctx = mp.get_context("spawn")
    hostfile_path, _ = create_hostfile(
        world_size, DEFAULT_GPT_OSS_CONFIG.base_port + port_offset
    )
    try:
        result_queue: Any = ctx.Queue()
        processes: list[Any] = []
        for rank in range(world_size):
            args = make_args(rank)
            p = ctx.Process(
                target=target,
                args=(rank, world_size, hostfile_path, *args, result_queue),
            )
            p.start()
            processes.append(p)
        for p in processes:  # pyright: ignore[reportAny]
            p.join(timeout=process_timeout)  # pyright: ignore[reportAny]
        # Any process still alive after its join timed out means a hang.
        timed_out = any(p.is_alive() for p in processes)  # pyright: ignore[reportAny]
        for p in processes:  # pyright: ignore[reportAny]
            if p.is_alive():  # pyright: ignore[reportAny]
                p.terminate()  # pyright: ignore[reportAny]
                p.join(timeout=5)  # pyright: ignore[reportAny]
        results: dict[int, tuple[bool, str]] = {}
        while not result_queue.empty():  # pyright: ignore[reportAny]
            rank, success, value = result_queue.get()  # pyright: ignore[reportAny]
            results[rank] = (success, value)
        return DistributedTestResult(
            timed_out=timed_out, world_size=world_size, results=results
        )
    finally:
        os.unlink(hostfile_path)
def run_pipeline_test(
    layer_splits: list[tuple[int, int]],
    prompt_tokens: int,
    prefill_step_size: int,
    port_offset: int = 0,
    process_timeout: int = 60,
) -> DistributedTestResult:
    """Launch a pipeline-parallel GPT-OSS run with one process per split."""

    def make_args(rank: int) -> tuple[Any, ...]:
        # Every rank receives the same arguments; rank is intentionally unused.
        return (layer_splits, prompt_tokens, prefill_step_size)

    return run_distributed_test(
        world_size=len(layer_splits),
        port_offset=port_offset,
        process_timeout=process_timeout,
        target=run_gpt_oss_pipeline_device,
        make_args=make_args,
    )
def run_tensor_test(
    prompt_tokens: int,
    prefill_step_size: int,
    port_offset: int = 0,
    process_timeout: int = 60,
) -> DistributedTestResult:
    """Launch a 2-way tensor-parallel GPT-OSS run."""

    def make_args(rank: int) -> tuple[Any, ...]:
        # Identical arguments for both ranks; rank is intentionally unused.
        return (prompt_tokens, prefill_step_size)

    return run_distributed_test(
        world_size=2,
        port_offset=port_offset,
        process_timeout=process_timeout,
        target=run_gpt_oss_tensor_parallel_device,
        make_args=make_args,
    )
class TestPipelineParallelFix:
    """Regression test for the hang caused by a single-layer first stage."""

    # Split that previously triggered the bug: one layer on the first device.
    BUG_TRIGGER_SPLITS: list[tuple[int, int]] = [(0, 1), (1, 24)]

    def test_pipeline_single_layer_first_device(self) -> None:
        outcome = run_pipeline_test(
            layer_splits=self.BUG_TRIGGER_SPLITS,
            prompt_tokens=100,
            prefill_step_size=64,
            process_timeout=60,
        )
        assert not outcome.timed_out, "Unexpected timeout - fix may not be working"
        assert outcome.all_success, f"Failures: {outcome.results}"
class TestPipelineSplitConfigurations:
    """Pipeline parallelism across several layer-split layouts."""

    @pytest.mark.parametrize(
        "layer_splits",
        [
            [(0, 1), (1, 24)],
            [(0, 6), (6, 24)],
            [(0, 12), (12, 24)],
        ],
        ids=["1_23", "6_18", "12_12"],
    )
    def test_pipeline_splits(
        self,
        layer_splits: list[tuple[int, int]],
    ) -> None:
        outcome = run_pipeline_test(
            layer_splits=layer_splits,
            prompt_tokens=600,
            prefill_step_size=512,
            port_offset=100,
        )
        assert not outcome.timed_out, f"Timeout with {layer_splits}"
        assert outcome.all_success, f"Failures with {layer_splits}: {outcome.results}"
class TestPrefillStepSizeBoundaries:
    """Prompt lengths around the prefill step size boundary."""

    @pytest.mark.parametrize(
        "prefill_step_size,prompt_tokens",
        [
            (512, 511),
            (512, 512),
            (512, 513),
            (512, 1024),
        ],
        ids=["under", "exact", "over", "double"],
    )
    def test_boundary_conditions(
        self,
        prefill_step_size: int,
        prompt_tokens: int,
    ) -> None:
        outcome = run_pipeline_test(
            layer_splits=[(0, 12), (12, 24)],
            prompt_tokens=prompt_tokens,
            prefill_step_size=prefill_step_size,
            port_offset=200,
        )
        assert not outcome.timed_out, f"Timeout: {prompt_tokens=}, {prefill_step_size=}"
        assert outcome.all_success, f"Failures: {outcome.results}"
class TestTensorParallelFix:
    """Smoke test for the 2-way tensor-parallel fix."""

    def test_tensor_parallel(self) -> None:
        outcome = run_tensor_test(
            prompt_tokens=100,
            prefill_step_size=64,
            port_offset=400,
        )
        assert not outcome.timed_out, "Unexpected timeout"
        assert outcome.all_success, f"Failures: {outcome.results}"
class TestTensorParallelBoundaries:
    """Tensor-parallel runs at prompt lengths around the prefill boundary."""

    @pytest.mark.parametrize(
        "prefill_step_size,prompt_tokens",
        [
            (512, 511),
            (512, 512),
            (512, 513),
            (512, 1024),
        ],
        ids=["under", "exact", "over", "double"],
    )
    def test_tensor_parallel_boundaries(
        self,
        prefill_step_size: int,
        prompt_tokens: int,
    ) -> None:
        outcome = run_tensor_test(
            prompt_tokens=prompt_tokens,
            prefill_step_size=prefill_step_size,
            port_offset=500,
        )
        assert not outcome.timed_out, f"Timeout: {prompt_tokens=}, {prefill_step_size=}"
        assert outcome.all_success, f"Failures: {outcome.results}"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_distributed_fix.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/types/thunderbolt.py | import anyio
from pydantic import BaseModel, Field
from exo.utils.pydantic_ext import CamelCaseModel
class ThunderboltConnection(CamelCaseModel):
    """A directed Thunderbolt link between two domain UUIDs."""

    # Domain UUID of the local (source) end of the link.
    source_uuid: str
    # Domain UUID of the remote (sink) end of the link.
    sink_uuid: str
class ThunderboltIdentifier(CamelCaseModel):
    """Identifies a local Thunderbolt controller usable for RDMA."""

    # RDMA interface name, e.g. "rdma_en2".
    rdma_interface: str
    # Thunderbolt domain UUID reported by system_profiler.
    domain_uuid: str
    # Negotiated link speed string; empty when unknown.
    link_speed: str = ""
## Intentionally minimal, only collecting data we care about - there's a lot more
class _ReceptacleTag(BaseModel, extra="ignore"):
    """Receptacle sub-record of a system_profiler Thunderbolt entry."""

    # Receptacle number as a string, e.g. "1"; None when absent.
    receptacle_id_key: str | None = None
    # Current link speed string; None when absent.
    current_speed_key: str | None = None
class _ConnectivityItem(BaseModel, extra="ignore"):
    """A device attached to a Thunderbolt domain; only its UUID is kept."""

    domain_uuid_key: str | None = None
class ThunderboltConnectivityData(BaseModel, extra="ignore"):
    """One entry of `system_profiler SPThunderboltDataType` output.

    Intentionally minimal: only the fields we care about are modelled; the
    rest of system_profiler's output is ignored.
    """

    domain_uuid_key: str | None = None
    items: list[_ConnectivityItem] | None = Field(None, alias="_items")
    receptacle_1_tag: _ReceptacleTag | None = None

    def ident(self, ifaces: dict[str, str]) -> ThunderboltIdentifier | None:
        """Build a ThunderboltIdentifier for this controller.

        Args:
            ifaces: Mapping of hardware-port display names (e.g.
                "Thunderbolt 1") to BSD interface names (e.g. "en2").

        Returns:
            The identifier, or None when required fields are missing or the
            receptacle has no matching hardware port.
        """
        if (
            self.domain_uuid_key is None
            or self.receptacle_1_tag is None
            or self.receptacle_1_tag.receptacle_id_key is None
        ):
            return None
        tag = f"Thunderbolt {self.receptacle_1_tag.receptacle_id_key}"
        if tag not in ifaces:
            # system_profiler output is external input; fail soft rather than
            # assert (asserts are stripped under -O and would crash on
            # unexpected hardware).
            return None
        iface = f"rdma_{ifaces[tag]}"
        return ThunderboltIdentifier(
            rdma_interface=iface,
            domain_uuid=self.domain_uuid_key,
            link_speed=self.receptacle_1_tag.current_speed_key or "",
        )

    def conn(self) -> ThunderboltConnection | None:
        """Return the link from this domain to its first connected peer.

        Returns:
            The connection, or None when the local UUID or a peer is missing.
        """
        if self.domain_uuid_key is None or self.items is None:
            return None
        sink_key = next(
            (
                item.domain_uuid_key
                for item in self.items
                if item.domain_uuid_key is not None
            ),
            None,
        )
        if sink_key is None:
            return None
        return ThunderboltConnection(
            source_uuid=self.domain_uuid_key, sink_uuid=sink_key
        )
class ThunderboltConnectivity(BaseModel, extra="ignore"):
    """Top-level wrapper for `system_profiler SPThunderboltDataType -json`."""

    SPThunderboltDataType: list[ThunderboltConnectivityData] = []

    @classmethod
    async def gather(cls) -> list[ThunderboltConnectivityData] | None:
        """Run system_profiler and parse its Thunderbolt data.

        Returns None when the command exits non-zero.
        """
        proc = await anyio.run_process(
            ["system_profiler", "SPThunderboltDataType", "-json"], check=False
        )
        if proc.returncode != 0:
            return None
        # Saving you from PascalCase while avoiding too much pydantic
        parsed = cls.model_validate_json(proc.stdout)
        return parsed.SPThunderboltDataType
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/thunderbolt.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/info_gatherer/info_gatherer.py | import os
import shutil
import sys
import tomllib
from collections.abc import Sequence
from dataclasses import dataclass, field
from subprocess import CalledProcessError
from typing import Self, cast
import anyio
from anyio import fail_after, open_process, to_thread
from anyio.streams.buffered import BufferedByteReceiveStream
from anyio.streams.text import TextReceiveStream
from loguru import logger
from pydantic import ValidationError
from exo.shared.constants import EXO_CONFIG_FILE, EXO_MODELS_DIR
from exo.shared.types.memory import Memory
from exo.shared.types.profiling import (
DiskUsage,
MemoryUsage,
NetworkInterfaceInfo,
ThunderboltBridgeStatus,
)
from exo.shared.types.thunderbolt import (
ThunderboltConnection,
ThunderboltConnectivity,
ThunderboltIdentifier,
)
from exo.utils.channels import Sender
from exo.utils.pydantic_ext import TaggedModel
from exo.utils.task_group import TaskGroup
from .macmon import MacmonMetrics
from .system_info import (
get_friendly_name,
get_model_and_chip,
get_network_interfaces,
get_os_build_version,
get_os_version,
)
# True on macOS; gates Darwin-only tooling (macmon, system_profiler, networksetup).
IS_DARWIN = sys.platform == "darwin"
async def _get_thunderbolt_devices() -> set[str] | None:
    """Get Thunderbolt interface device names (e.g., en2, en3) from hardware ports.

    Returns None if the networksetup command fails.
    """
    result = await anyio.run_process(
        ["networksetup", "-listallhardwareports"],
        check=False,
    )
    if result.returncode != 0:
        logger.warning(
            f"networksetup -listallhardwareports failed with code "
            f"{result.returncode}: {result.stderr.decode()}"
        )
        return None
    # Output alternates "Hardware Port: <name>" / "Device: <bsd name>" lines.
    devices: set[str] = set()
    port_name: str | None = None
    for raw in result.stdout.decode().splitlines():
        stripped = raw.strip()
        if stripped.startswith("Hardware Port:"):
            port_name = stripped.split(":", 1)[1].strip()
            continue
        if not stripped.startswith("Device:") or not port_name:
            continue
        if "thunderbolt" in port_name.lower():
            devices.add(stripped.split(":", 1)[1].strip())
        port_name = None
    return devices
async def _get_bridge_services() -> dict[str, str] | None:
    """Get mapping of bridge device -> service name from network service order.

    Returns None if the networksetup command fails.
    """
    result = await anyio.run_process(
        ["networksetup", "-listnetworkserviceorder"],
        check=False,
    )
    if result.returncode != 0:
        logger.warning(
            f"networksetup -listnetworkserviceorder failed with code "
            f"{result.returncode}: {result.stderr.decode()}"
        )
        return None
    # The listing alternates "(N) Service Name" lines with
    # "(Hardware Port: ..., Device: deviceN)" detail lines.
    services: dict[str, str] = {}
    active_service: str | None = None
    for raw_line in result.stdout.decode().splitlines():
        entry = raw_line.strip()
        is_order_line = (
            entry.startswith("(")
            and ")" in entry
            and not entry.startswith("(Hardware Port:")
        )
        if is_order_line:
            # Covers "(N) Name" and "(*) Name" (disabled services).
            close = entry.index(")")
            if close + 2 <= len(entry):
                active_service = entry[close + 2 :]
        elif active_service and "Device: bridge" in entry:
            # Pull the device name out of "..., Device: bridge0)".
            start = entry.find("Device: ") + len("Device: ")
            end = entry.find(")", start)
            if end > start:
                services[entry[start:end]] = active_service
    return services
async def _get_bridge_members(bridge_device: str) -> set[str]:
    """Get member interfaces of a bridge device via ifconfig."""
    result = await anyio.run_process(
        ["ifconfig", bridge_device],
        check=False,
    )
    if result.returncode != 0:
        logger.debug(f"ifconfig {bridge_device} failed with code {result.returncode}")
        return set()
    # Member lines look like "member: en2 flags=...".
    members: set[str] = set()
    for raw in result.stdout.decode().splitlines():
        tokens = raw.strip().split()
        if len(tokens) >= 2 and tokens[0] == "member:":
            members.add(tokens[1])
    return members
async def _find_thunderbolt_bridge(
    bridge_services: dict[str, str], thunderbolt_devices: set[str]
) -> str | None:
    """Find the service name of a bridge containing Thunderbolt interfaces.

    Returns the service name if found, None otherwise.
    """
    for device, service in bridge_services.items():
        bridge_members = await _get_bridge_members(device)
        if not bridge_members.isdisjoint(thunderbolt_devices):
            # At least one member interface is a Thunderbolt device.
            return service
    return None
async def _is_service_enabled(service_name: str) -> bool | None:
    """Check if a network service is enabled.

    Returns True if enabled, False if disabled, None on error.
    """
    result = await anyio.run_process(
        ["networksetup", "-getnetworkserviceenabled", service_name],
        check=False,
    )
    if result.returncode == 0:
        return result.stdout.decode().strip().lower() == "enabled"
    logger.warning(
        f"networksetup -getnetworkserviceenabled '{service_name}' "
        f"failed with code {result.returncode}: {result.stderr.decode()}"
    )
    return None
class StaticNodeInformation(TaggedModel):
    """Node information that should NEVER change, to be gathered once at startup"""

    model: str  # hardware model identifier
    chip: str  # SoC/CPU name
    os_version: str
    os_build_version: str

    @classmethod
    async def gather(cls) -> Self:
        """Collect static facts via the system_info helpers."""
        model, chip = await get_model_and_chip()
        return cls(
            model=model,
            chip=chip,
            os_version=get_os_version(),
            os_build_version=await get_os_build_version(),
        )
class NodeNetworkInterfaces(TaggedModel):
    """Snapshot of the node's network interfaces."""

    ifaces: Sequence[NetworkInterfaceInfo]
class MacThunderboltIdentifiers(TaggedModel):
    """Local Thunderbolt port identifiers (macOS only)."""

    idents: Sequence[ThunderboltIdentifier]
class MacThunderboltConnections(TaggedModel):
    """Observed Thunderbolt connections to peer nodes (macOS only)."""

    conns: Sequence[ThunderboltConnection]
class RdmaCtlStatus(TaggedModel):
    """RDMA state as reported by the `rdma_ctl status` tool."""

    enabled: bool

    @classmethod
    async def gather(cls) -> Self | None:
        """Query `rdma_ctl status`; None when unavailable or unparseable."""
        if not IS_DARWIN:
            return None
        if shutil.which("rdma_ctl") is None:
            return None
        try:
            with anyio.fail_after(5):
                proc = await anyio.run_process(["rdma_ctl", "status"], check=False)
        except (TimeoutError, OSError):
            return None
        if proc.returncode != 0:
            return None
        text = proc.stdout.decode("utf-8").lower().strip()
        # "enabled" is checked before "disabled", preserving precedence.
        for needle, state in (("enabled", True), ("disabled", False)):
            if needle in text:
                return cls(enabled=state)
        return None
class ThunderboltBridgeInfo(TaggedModel):
    """Status of the macOS Thunderbolt Bridge network service."""

    status: ThunderboltBridgeStatus

    @classmethod
    async def gather(cls) -> Self | None:
        """Check if a Thunderbolt Bridge network service is enabled on this node.

        Detection approach: enumerate Thunderbolt interface devices from the
        hardware-port listing, find bridge devices via the network service
        order (bridges may not appear among hardware ports), inspect each
        bridge's members with ifconfig, and — if a bridge contains a
        Thunderbolt interface — report whether that service is enabled.
        """
        if not IS_DARWIN:
            return None

        def _absent() -> Self:
            # No Thunderbolt bridge service could be located.
            return cls(
                status=ThunderboltBridgeStatus(
                    enabled=False, exists=False, service_name=None
                )
            )

        try:
            tb_ifaces = await _get_thunderbolt_devices()
            if tb_ifaces is None:
                return _absent()
            bridges = await _get_bridge_services()
            if not bridges:
                return _absent()
            service = await _find_thunderbolt_bridge(bridges, tb_ifaces)
            if not service:
                return _absent()
            enabled = await _is_service_enabled(service)
            # An inconclusive enabled-check (None) is reported as
            # exists-but-disabled, matching the previous explicit branch.
            return cls(
                status=ThunderboltBridgeStatus(
                    enabled=bool(enabled),
                    exists=True,
                    service_name=service,
                )
            )
        except Exception as e:
            logger.warning(f"Failed to gather Thunderbolt Bridge info: {e}")
            return None
class NodeConfig(TaggedModel):
    """Node configuration from EXO_CONFIG_FILE.

    Reloaded from the file only at startup; other changes should come in
    through the API and propagate from there.
    """

    @classmethod
    async def gather(cls) -> Self | None:
        """Parse the TOML config file, creating it empty if absent."""
        path = anyio.Path(EXO_CONFIG_FILE)
        await path.parent.mkdir(parents=True, exist_ok=True)
        await path.touch(exist_ok=True)
        async with await path.open("rb") as fh:
            raw = await fh.read()
        try:
            return cls.model_validate(tomllib.loads(raw.decode("utf-8")))
        except (tomllib.TOMLDecodeError, UnicodeDecodeError, ValidationError):
            logger.warning("Invalid config file, skipping...")
            return None
class MiscData(TaggedModel):
    """Node information that may slowly change that doesn't fall into the other categories"""

    friendly_name: str  # user-visible machine name

    @classmethod
    async def gather(cls) -> Self:
        return cls(friendly_name=await get_friendly_name())
class NodeDiskUsage(TaggedModel):
    """Disk space information for the models directory."""

    disk_usage: DiskUsage

    @classmethod
    async def gather(cls) -> Self:
        # DiskUsage.from_path does blocking filesystem work; run it off-loop.
        return cls(
            disk_usage=await to_thread.run_sync(
                lambda: DiskUsage.from_path(EXO_MODELS_DIR)
            )
        )
async def _gather_iface_map() -> dict[str, str] | None:
    """Map hardware-port names (e.g. "Thunderbolt 1") to BSD device names.

    Parses `networksetup -listallhardwareports`; returns None if the
    command fails.
    """
    proc = await anyio.run_process(
        ["networksetup", "-listallhardwareports"], check=False
    )
    if proc.returncode != 0:
        return None
    ports: dict[str, str] = {}
    port = ""
    for line in proc.stdout.decode("utf-8").split("\n"):
        if line.startswith("Hardware Port:"):
            # split(":", 1) tolerates a bare "Hardware Port:" line, unlike
            # split(": ")[1] which raises IndexError; also matches the
            # parsing style of _get_thunderbolt_devices above.
            port = line.split(":", 1)[1].strip()
        elif line.startswith("Device:"):
            ports[port] = line.split(":", 1)[1].strip()
            port = ""
    # Drop any Device line seen before its Hardware Port header.
    ports.pop("", None)
    return ports
# Union of every message type the InfoGatherer can emit on its channel.
GatheredInfo = (
    MacmonMetrics
    | MemoryUsage
    | NodeNetworkInterfaces
    | MacThunderboltIdentifiers
    | MacThunderboltConnections
    | RdmaCtlStatus
    | ThunderboltBridgeInfo
    | NodeConfig
    | MiscData
    | StaticNodeInformation
    | NodeDiskUsage
)
@dataclass
class InfoGatherer:
    """Runs background monitors that stream GatheredInfo over `info_sender`.

    Each `_monitor_*` coroutine polls one data source at its own interval;
    an interval of None disables that monitor. Darwin-only sources default
    to None on other platforms, so starting every monitor is safe.
    """

    info_sender: Sender[GatheredInfo]
    interface_watcher_interval: float | None = 10
    misc_poll_interval: float | None = 60
    system_profiler_interval: float | None = 5 if IS_DARWIN else None
    # psutil-based memory polling is the non-Darwin path; on Darwin macmon
    # reports memory (unless macmon is missing — see run()).
    memory_poll_rate: float | None = None if IS_DARWIN else 1
    macmon_interval: float | None = 1 if IS_DARWIN else None
    thunderbolt_bridge_poll_interval: float | None = 10 if IS_DARWIN else None
    static_info_poll_interval: float | None = 60
    rdma_ctl_poll_interval: float | None = 10 if IS_DARWIN else None
    disk_poll_interval: float | None = 30
    _tg: TaskGroup = field(init=False, default_factory=TaskGroup)

    async def run(self) -> None:
        """Start all monitors, then send the startup NodeConfig if valid."""
        async with self._tg as tg:
            if IS_DARWIN:
                if (macmon_path := shutil.which("macmon")) is not None:
                    tg.start_soon(self._monitor_macmon, macmon_path)
                else:
                    # macmon not installed — fall back to psutil for memory
                    logger.warning(
                        "macmon not found, falling back to psutil for memory monitoring"
                    )
                    self.memory_poll_rate = 1
            tg.start_soon(self._monitor_system_profiler_thunderbolt_data)
            tg.start_soon(self._monitor_thunderbolt_bridge_status)
            tg.start_soon(self._monitor_rdma_ctl_status)
            tg.start_soon(self._watch_system_info)
            tg.start_soon(self._monitor_memory_usage)
            tg.start_soon(self._monitor_misc)
            tg.start_soon(self._monitor_static_info)
            tg.start_soon(self._monitor_disk_usage)
            nc = await NodeConfig.gather()
            if nc is not None:
                await self.info_sender.send(nc)

    def shutdown(self) -> None:
        """Cancel all running monitor tasks."""
        self._tg.cancel_tasks()

    async def _monitor_static_info(self) -> None:
        """Periodically gather and send StaticNodeInformation."""
        if self.static_info_poll_interval is None:
            return
        while True:
            try:
                with fail_after(30):
                    await self.info_sender.send(await StaticNodeInformation.gather())
            except Exception as e:
                logger.warning(f"Error gathering static node info: {e}")
            await anyio.sleep(self.static_info_poll_interval)

    async def _monitor_misc(self) -> None:
        """Periodically gather and send MiscData (friendly name)."""
        if self.misc_poll_interval is None:
            return
        while True:
            try:
                with fail_after(10):
                    await self.info_sender.send(await MiscData.gather())
            except Exception as e:
                logger.warning(f"Error gathering misc data: {e}")
            await anyio.sleep(self.misc_poll_interval)

    async def _monitor_system_profiler_thunderbolt_data(self) -> None:
        """Poll system_profiler for Thunderbolt identifiers and connections."""
        if self.system_profiler_interval is None:
            return
        while True:
            try:
                with fail_after(30):
                    iface_map = await _gather_iface_map()
                    if iface_map is None:
                        raise ValueError("Failed to gather interface map")
                    data = await ThunderboltConnectivity.gather()
                    assert data is not None
                    # Entries without usable data yield None and are dropped.
                    idents = [
                        it for i in data if (it := i.ident(iface_map)) is not None
                    ]
                    await self.info_sender.send(
                        MacThunderboltIdentifiers(idents=idents)
                    )
                    conns = [it for i in data if (it := i.conn()) is not None]
                    await self.info_sender.send(MacThunderboltConnections(conns=conns))
            except Exception as e:
                logger.warning(f"Error gathering Thunderbolt data: {e}")
            await anyio.sleep(self.system_profiler_interval)

    async def _monitor_memory_usage(self) -> None:
        """Poll psutil-based memory usage; OVERRIDE_MEMORY_MB caps reported RAM."""
        override_memory_env = os.getenv("OVERRIDE_MEMORY_MB")
        override_memory: int | None = (
            Memory.from_mb(int(override_memory_env)).in_bytes
            if override_memory_env
            else None
        )
        if self.memory_poll_rate is None:
            return
        while True:
            try:
                await self.info_sender.send(
                    MemoryUsage.from_psutil(override_memory=override_memory)
                )
            except Exception as e:
                logger.warning(f"Error gathering memory usage: {e}")
            await anyio.sleep(self.memory_poll_rate)

    async def _watch_system_info(self) -> None:
        """Periodically publish the current network interface list."""
        if self.interface_watcher_interval is None:
            return
        while True:
            try:
                with fail_after(10):
                    nics = await get_network_interfaces()
                    await self.info_sender.send(NodeNetworkInterfaces(ifaces=nics))
            except Exception as e:
                logger.warning(f"Error gathering network interfaces: {e}")
            await anyio.sleep(self.interface_watcher_interval)

    async def _monitor_thunderbolt_bridge_status(self) -> None:
        """Periodically publish Thunderbolt Bridge service status."""
        if self.thunderbolt_bridge_poll_interval is None:
            return
        while True:
            try:
                with fail_after(30):
                    curr = await ThunderboltBridgeInfo.gather()
                    if curr is not None:
                        await self.info_sender.send(curr)
            except Exception as e:
                logger.warning(f"Error gathering Thunderbolt Bridge status: {e}")
            await anyio.sleep(self.thunderbolt_bridge_poll_interval)

    async def _monitor_rdma_ctl_status(self) -> None:
        """Periodically publish rdma_ctl status (RdmaCtlStatus self-limits time)."""
        if self.rdma_ctl_poll_interval is None:
            return
        while True:
            try:
                curr = await RdmaCtlStatus.gather()
                if curr is not None:
                    await self.info_sender.send(curr)
            except Exception as e:
                logger.warning(f"Error gathering RDMA ctl status: {e}")
            await anyio.sleep(self.rdma_ctl_poll_interval)

    async def _monitor_disk_usage(self) -> None:
        """Periodically publish disk usage of the models directory."""
        if self.disk_poll_interval is None:
            return
        while True:
            try:
                with fail_after(5):
                    await self.info_sender.send(await NodeDiskUsage.gather())
            except Exception as e:
                logger.warning(f"Error gathering disk usage: {e}")
            await anyio.sleep(self.disk_poll_interval)

    async def _monitor_macmon(self, macmon_path: str) -> None:
        """Stream metrics from a long-running `macmon pipe` subprocess.

        Restarts the subprocess (after a sleep) whenever it exits or errors.
        """
        if self.macmon_interval is None:
            return
        # macmon pipe --interval [interval in ms]
        while True:
            try:
                async with await open_process(
                    [
                        macmon_path,
                        "pipe",
                        "--interval",
                        str(self.macmon_interval * 1000),
                    ]
                ) as p:
                    if not p.stdout:
                        logger.critical("MacMon closed stdout")
                        return
                    # Each line of output is one JSON metrics sample.
                    async for text in TextReceiveStream(
                        BufferedByteReceiveStream(p.stdout)
                    ):
                        await self.info_sender.send(MacmonMetrics.from_raw_json(text))
            except CalledProcessError as e:
                stderr_msg = "no stderr"
                stderr_output = cast(bytes | str | None, e.stderr)
                if stderr_output is not None:
                    stderr_msg = (
                        stderr_output.decode()
                        if isinstance(stderr_output, bytes)
                        else str(stderr_output)
                    )
                logger.warning(
                    f"MacMon failed with return code {e.returncode}: {stderr_msg}"
                )
            except Exception as e:
                logger.warning(f"Error in macmon monitor: {e}")
            await anyio.sleep(self.macmon_interval)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/info_gatherer/info_gatherer.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/utils/info_gatherer/macmon.py | from typing import Self
from pydantic import BaseModel
from exo.shared.types.profiling import MemoryUsage, SystemPerformanceProfile
from exo.utils.pydantic_ext import TaggedModel
class _TempMetrics(BaseModel, extra="ignore"):
    """Temperature-related metrics returned by macmon."""

    cpu_temp_avg: float  # average CPU temperature (presumably °C — TODO confirm)
    gpu_temp_avg: float  # average GPU temperature
class _MemoryMetrics(BaseModel, extra="ignore"):
    """Memory-related metrics returned by macmon (values in bytes — see
    MacmonMetrics.from_raw, which feeds them to MemoryUsage.from_bytes)."""

    ram_total: int
    ram_usage: int
    swap_total: int
    swap_usage: int
class RawMacmonMetrics(BaseModel, extra="ignore"):
    """Complete set of metrics returned by macmon.

    Unknown fields are ignored for forward-compatibility.
    """

    timestamp: str  # ignored
    temp: _TempMetrics
    memory: _MemoryMetrics
    ecpu_usage: tuple[int, float]  # freq mhz, usage %
    pcpu_usage: tuple[int, float]  # freq mhz, usage %
    gpu_usage: tuple[int, float]  # freq mhz, usage %
    all_power: float
    ane_power: float
    cpu_power: float
    gpu_power: float
    gpu_ram_power: float
    ram_power: float
    sys_power: float
class MacmonMetrics(TaggedModel):
    """One macmon sample converted into exo's profiling types."""

    system_profile: SystemPerformanceProfile
    memory: MemoryUsage

    @classmethod
    def from_raw(cls, raw: RawMacmonMetrics) -> Self:
        """Map raw macmon fields onto SystemPerformanceProfile / MemoryUsage."""
        return cls(
            system_profile=SystemPerformanceProfile(
                # index 1 of the (freq MHz, usage %) tuples is the usage %
                gpu_usage=raw.gpu_usage[1],
                temp=raw.temp.gpu_temp_avg,
                sys_power=raw.sys_power,
                pcpu_usage=raw.pcpu_usage[1],
                ecpu_usage=raw.ecpu_usage[1],
            ),
            memory=MemoryUsage.from_bytes(
                ram_total=raw.memory.ram_total,
                ram_available=(raw.memory.ram_total - raw.memory.ram_usage),
                swap_total=raw.memory.swap_total,
                swap_available=(raw.memory.swap_total - raw.memory.swap_usage),
            ),
        )

    @classmethod
    def from_raw_json(cls, json: str) -> Self:
        """Parse a single macmon JSON output line into metrics."""
        return cls.from_raw(RawMacmonMetrics.model_validate_json(json))
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/info_gatherer/macmon.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/info_gatherer/tests/test_tb_parsing.py | import sys
import pytest
from exo.shared.types.thunderbolt import (
ThunderboltConnectivity,
)
from exo.utils.info_gatherer.info_gatherer import (
_gather_iface_map, # pyright: ignore[reportPrivateUsage]
)
@pytest.mark.anyio
@pytest.mark.skipif(
    sys.platform != "darwin", reason="Thunderbolt info can only be gathered on macos"
)
async def test_tb_parsing():
    """Smoke-test parsing real system_profiler output on the host machine.

    Only checks that gathering and per-entry parsing do not raise; results
    depend on the host's actual Thunderbolt topology.
    """
    data = await ThunderboltConnectivity.gather()
    ifaces = await _gather_iface_map()
    assert ifaces
    assert data
    for datum in data:
        datum.ident(ifaces)
        datum.conn()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/info_gatherer/tests/test_tb_parsing.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_auto_parallel.py | import json
import multiprocessing as mp
import os
import tempfile
from typing import Any
import mlx.core as mx
import mlx.nn as mlx_nn
import pytest
from exo.worker.engines.mlx.auto_parallel import (
CustomMlxLayer,
PipelineFirstLayer,
PipelineLastLayer,
patch_pipeline_model,
)
from exo.worker.tests.unittests.test_mlx.conftest import MockLayer
def run_pipeline_device(
    rank: int,
    world_size: int,
    hostfile_path: str,
    result_queue: Any,  # pyright: ignore[reportAny]
) -> None:
    """Child-process entry point: run one pipeline stage and report the result.

    Configures the MLX ring backend via env vars, wraps a doubling mock layer
    in the pipeline first/last wrappers, runs a forward pass through the
    patched model, and puts (rank, success, result-or-error-string) on
    `result_queue`.
    """
    import os

    os.environ["MLX_HOSTFILE"] = hostfile_path
    os.environ["MLX_RANK"] = str(rank)

    class MockLayerInner(mlx_nn.Module):
        # Minimal layer: doubles its input; custom_attr exercises delegation.
        def __init__(self) -> None:
            super().__init__()
            self.custom_attr = "test_value"

        def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
            return x * 2

    class MockModel(mlx_nn.Module):
        # Applies its layers sequentially.
        def __init__(self, layers: list[mlx_nn.Module]) -> None:
            super().__init__()
            self.layers = layers

        def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
            for layer in self.layers:
                x = layer(x, *args, **kwargs)  # pyright: ignore[reportUnknownVariableType]
            return x  # pyright: ignore[reportUnknownVariableType]

    try:
        group = mx.distributed.init(backend="ring", strict=True)
        mock = MockLayerInner()
        first = PipelineFirstLayer(mock, r=rank, group=group)
        composed = PipelineLastLayer(first, r=rank, s=world_size, group=group)
        # Wrap in a mock model, then wrap in PipelineParallelModel for all_gather
        inner_model = MockModel([composed])
        model = patch_pipeline_model(inner_model, group)
        x = mx.ones((1, 4))
        result = model(x)
        mx.eval(result)
        success = result.shape == x.shape
        result_queue.put((rank, success, result))  # pyright: ignore[reportAny]
    except Exception as e:
        # Report the failure to the parent instead of dying silently.
        result_queue.put((rank, False, str(e)))  # pyright: ignore[reportAny]
def test_single_wrapper_delegates_attributes() -> None:
    """MockLayer attributes remain visible through a single CustomMlxLayer wrap."""
    mock = MockLayer()
    wrapped = CustomMlxLayer(mock)
    assert wrapped.custom_attr == "test_value"  # type: ignore[attr-defined]
    assert wrapped.use_sliding is True  # type: ignore[attr-defined]
def test_composed_wrappers_delegate_attributes() -> None:
    """Attribute delegation survives stacking first- and last-layer wrappers."""
    mock = MockLayer()
    group = mx.distributed.init()
    first = PipelineFirstLayer(mock, r=0, group=group)
    composed = PipelineLastLayer(first, r=0, s=1, group=group)
    assert composed.custom_attr == "test_value"  # type: ignore[attr-defined]
    assert composed.use_sliding is True  # type: ignore[attr-defined]
def test_missing_attribute_raises() -> None:
    """Delegation still raises AttributeError for names the layer lacks."""
    mock = MockLayer()
    wrapped = CustomMlxLayer(mock)
    with pytest.raises(AttributeError):
        _ = wrapped.nonexistent_attr  # type: ignore[attr-defined]
def test_composed_call_works() -> None:
    """End-to-end: two spawned processes run the pipeline over a local ring.

    Each rank doubles the input once; after all_gather both ranks should see
    the final value 4.0 with the input's shape preserved.
    """
    ctx = mp.get_context("spawn")
    world_size = 2
    base_port = 29500
    hosts = [f"127.0.0.1:{base_port + i}" for i in range(world_size)]
    # MLX ring backend reads the host list from a JSON hostfile.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
        json.dump(hosts, f)
        hostfile_path = f.name
    try:
        result_queue: Any = ctx.Queue()
        processes: list[Any] = []
        for rank in range(world_size):
            p = ctx.Process(
                target=run_pipeline_device,
                args=(rank, world_size, hostfile_path, result_queue),
            )
            p.start()
            processes.append(p)
        for p in processes:  # pyright: ignore[reportAny]
            p.join(timeout=10)  # pyright: ignore[reportAny]
        # Collect per-rank outcomes; failures carry an error string.
        results: dict[int, Any] = {}
        errors: dict[int, str] = {}
        while not result_queue.empty():  # pyright: ignore[reportAny]
            rank, success, value = result_queue.get()  # pyright: ignore[reportAny]
            if success:
                results[rank] = value
            else:
                errors[rank] = value
        assert len(results) == world_size, (
            f"Expected {world_size} results, got {len(results)}. Errors: {errors}"
        )
        for rank in range(world_size):
            assert rank in results, (
                f"Device {rank} failed: {errors.get(rank, 'unknown')}"
            )
            result_array = results[rank]
            # Both devices see the final result (4.0) after all_gather
            assert (result_array == 4.0).all(), (
                f"Device {rank}: expected 4.0, got {result_array}"
            )
    finally:
        os.unlink(hostfile_path)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_auto_parallel.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_api_error_handling.py | # pyright: reportUnusedFunction=false, reportAny=false
from typing import Any
from fastapi import FastAPI, HTTPException
from fastapi.testclient import TestClient
def test_http_exception_handler_formats_openai_style() -> None:
    """Test that HTTPException is converted to OpenAI-style error format."""
    from exo.master.api import API

    app = FastAPI()
    # Setup exception handler on a bare API instance — __new__ skips
    # API.__init__ so only the handler wiring is exercised.
    api = object.__new__(API)
    api.app = app
    api._setup_exception_handlers()  # pyright: ignore[reportPrivateUsage]
    # Add test routes that raise HTTPException
    @app.get("/test-error")
    async def _test_error() -> None:
        raise HTTPException(status_code=500, detail="Test error message")
    @app.get("/test-not-found")
    async def _test_not_found() -> None:
        raise HTTPException(status_code=404, detail="Resource not found")
    client = TestClient(app)
    # Test 500 error: body must be {"error": {message, type, code}}
    response = client.get("/test-error")
    assert response.status_code == 500
    data: dict[str, Any] = response.json()
    assert "error" in data
    assert data["error"]["message"] == "Test error message"
    assert data["error"]["type"] == "Internal Server Error"
    assert data["error"]["code"] == 500
    # Test 404 error
    response = client.get("/test-not-found")
    assert response.status_code == 404
    data = response.json()
    assert "error" in data
    assert data["error"]["message"] == "Resource not found"
    assert data["error"]["type"] == "Not Found"
    assert data["error"]["code"] == 404
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_api_error_handling.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_tokenizers.py | """
Unit tests for tokenizer loading and functionality across all supported models.
This test downloads only tokenizer-related files (not full model weights) to verify
that tokenizers can be loaded and used correctly for encoding/decoding.
"""
import asyncio
import contextlib
from pathlib import Path
import pytest
from exo.download.download_utils import (
download_file_with_retry,
ensure_models_dir,
fetch_file_list_with_cache,
)
from exo.shared.models.model_cards import ModelCard, ModelId, get_model_cards
from exo.worker.engines.mlx.utils_mlx import (
get_eos_token_ids_for_model,
load_tokenizer_for_model_id,
)
# Files needed for tokenizer functionality; entries may contain an
# fnmatch-style "*" wildcard.
TOKENIZER_FILE_PATTERNS = [
    "tokenizer.json",
    "tokenizer_config.json",
    "special_tokens_map.json",
    "vocab.json",
    "vocab.txt",
    "merges.txt",
    "tiktoken.model",
    "added_tokens.json",
    "tokenizer.model",
    "tokenization_*.py",  # Custom tokenizer implementations
    "tool_declaration_ts.py",  # Dependency of tokenization_kimi.py
]


def is_tokenizer_file(filename: str) -> bool:
    """Check if a file is needed for tokenizer functionality.

    Matches `filename` against TOKENIZER_FILE_PATTERNS with the stdlib
    fnmatch (case-sensitive). Unlike the previous startswith/endswith
    check, fnmatch cannot spuriously accept a name shorter than the
    pattern's literal prefix + suffix combined.
    """
    from fnmatch import fnmatchcase

    return any(fnmatchcase(filename, pattern) for pattern in TOKENIZER_FILE_PATTERNS)
async def download_tokenizer_files(model_id: ModelId) -> Path:
    """Download only the tokenizer-related files for a model.

    Returns the local directory the files were written to; skips the test
    (pytest.skip) when the repo lists no tokenizer files at all. Individual
    files that 404 are silently skipped.
    """
    target_dir = await ensure_models_dir() / model_id.normalize()
    target_dir.mkdir(parents=True, exist_ok=True)
    file_list = await fetch_file_list_with_cache(model_id, "main", recursive=True)
    tokenizer_files = [f for f in file_list if is_tokenizer_file(f.path)]
    if not tokenizer_files:
        pytest.skip(f"No tokenizer files found for {model_id}")
    for file_entry in tokenizer_files:
        # Best-effort: a listed file may be missing from the revision.
        with contextlib.suppress(FileNotFoundError):
            await download_file_with_retry(
                model_id, "main", file_entry.path, target_dir
            )
    return target_dir
# Sample the model catalog: one card per family keeps the suite fast.
def get_test_models() -> list[ModelCard]:
    """Get a representative sample of models to test (one per family)."""
    by_family: dict[str, ModelCard] = {}
    for card in asyncio.run(get_model_cards()):
        # Family is the first two dash-separated tokens of the short id
        # (e.g. "llama-3.1" from "llama-3.1-8b").
        tokens = card.model_id.short().split("-")
        key = tokens[0] if len(tokens) < 2 else "-".join(tokens[:2])
        by_family.setdefault(key, card)
    return list(by_family.values())
# Sampled once at import time; the whole module is marked slow.
TEST_MODELS: list[ModelCard] = get_test_models()
pytestmark = pytest.mark.slow
@pytest.fixture(scope="module")
def event_loop():
"""Create event loop for async tests."""
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.mark.parametrize(
    "model_card",
    TEST_MODELS,
)
@pytest.mark.asyncio
async def test_tokenizer_encode_decode(model_card: ModelCard) -> None:
    """Test that tokenizer can encode and decode text correctly."""
    model_id = model_card.model_id
    # Download tokenizer files
    model_path = await download_tokenizer_files(model_id)
    # Verify required files exist — any one of these formats is sufficient
    has_tokenizer = (
        (model_path / "tokenizer.json").exists()
        or (model_path / "tokenizer_config.json").exists()
        or (model_path / "tiktoken.model").exists()
        or (model_path / "tokenizer.model").exists()
    )
    if not has_tokenizer:
        pytest.skip(f"Required tokenizer files not found for {model_id}")
    # Load tokenizer
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    # Test basic encoding
    test_text = "Hello, world!"
    encoded = tokenizer.encode(test_text)
    assert isinstance(encoded, list), f"encode() should return a list for {model_id}"
    assert len(encoded) > 0, f"encode() should return non-empty list for {model_id}"
    assert all(isinstance(t, int) for t in encoded), (
        f"All tokens should be integers for {model_id}"
    )
    # Test decoding — allow whitespace-normalization differences
    decoded = tokenizer.decode(encoded)
    assert isinstance(decoded, str), f"decode() should return a string for {model_id}"
    assert test_text in decoded or decoded.strip() == test_text.strip(), (
        f"decode(encode(x)) should preserve text for {model_id}: got {decoded!r}"
    )
    # Test with longer text
    long_text = "The quick brown fox jumps over the lazy dog. " * 10
    long_encoded = tokenizer.encode(long_text)
    assert len(long_encoded) > len(encoded), (
        f"Longer text should produce more tokens for {model_id}"
    )
    # Test empty string
    empty_encoded = tokenizer.encode("")
    assert isinstance(empty_encoded, list), (
        f"encode('') should return a list for {model_id}"
    )
    # Test special characters
    special_text = 'Hello!\n\tWorld? <test> & "quotes"'
    special_encoded = tokenizer.encode(special_text)
    assert len(special_encoded) > 0, f"Special chars should encode for {model_id}"
    # Test unicode
    unicode_text = "Hello 世界 🌍"
    unicode_encoded = tokenizer.encode(unicode_text)
    assert len(unicode_encoded) > 0, f"Unicode should encode for {model_id}"
@pytest.mark.parametrize(
    "model_card",
    TEST_MODELS,
)
@pytest.mark.asyncio
async def test_tokenizer_has_required_attributes(model_card: ModelCard) -> None:
    """Test that tokenizer has required attributes for inference."""
    model_id = model_card.model_id
    model_path = await download_tokenizer_files(model_id)
    # Any one of these tokenizer formats is sufficient
    has_tokenizer = (
        (model_path / "tokenizer.json").exists()
        or (model_path / "tokenizer_config.json").exists()
        or (model_path / "tiktoken.model").exists()
        or (model_path / "tokenizer.model").exists()
    )
    if not has_tokenizer:
        pytest.skip(f"Required tokenizer files not found for {model_id}")
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    eos_token_ids = get_eos_token_ids_for_model(model_id)
    # Check for vocabulary size — fall back to len(get_vocab()) when the
    # tokenizer exposes no vocab_size attribute
    empty_vocab: dict[str, int] = {}
    vocab_size: int = getattr(tokenizer, "vocab_size", None) or len(
        getattr(tokenizer, "get_vocab", lambda: empty_vocab)()
    )
    assert vocab_size > 0, f"Tokenizer should have vocab_size > 0 for {model_id}"
    # Check for EOS token (either from tokenizer or explicitly provided)
    has_eos = (
        eos_token_ids is not None
        or getattr(tokenizer, "eos_token_id", None) is not None
        or getattr(tokenizer, "eos_token", None) is not None
    )
    assert has_eos, f"Tokenizer should have EOS token for {model_id}"
@pytest.mark.parametrize(
    "model_card",
    TEST_MODELS,
)
@pytest.mark.asyncio
async def test_tokenizer_special_tokens(model_card: ModelCard) -> None:
    """Test that tokenizer can encode text containing special tokens.
    This is critical because the actual inference path uses prompts with
    special tokens from chat templates. If special tokens aren't handled
    correctly, encoding will fail.
    """
    model_id = model_card.model_id
    model_path = await download_tokenizer_files(model_id)
    candidate_files = (
        "tokenizer.json",
        "tokenizer_config.json",
        "tiktoken.model",
        "tokenizer.model",
    )
    assert any((model_path / name).exists() for name in candidate_files), (
        f"Required tokenizer files not found for {model_id}"
    )
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    # Collect special tokens from whichever source the tokenizer exposes.
    special_tokens: list[str] = []
    if hasattr(tokenizer, "all_special_tokens"):
        special_tokens.extend(tokenizer.all_special_tokens)
    elif hasattr(tokenizer, "_tokenizer") and hasattr(
        tokenizer._tokenizer,
        "all_special_tokens",
    ):
        special_tokens.extend(tokenizer._tokenizer.all_special_tokens)
    # Fold in the conventional named special-token attributes as well.
    named_attrs = (
        "bos_token",
        "eos_token",
        "pad_token",
        "unk_token",
        "sep_token",
        "cls_token",
    )
    for attr in named_attrs:
        token = getattr(tokenizer, attr, None)
        if token is None and hasattr(tokenizer, "_tokenizer"):
            token = getattr(tokenizer._tokenizer, attr, None)
        if token and isinstance(token, str) and token not in special_tokens:
            special_tokens.append(token)
    if special_tokens:
        # Intersperse real special tokens with plain text and round-trip it.
        sample = f"{special_tokens[0]}Hello world"
        if len(special_tokens) > 1:
            sample += f"{special_tokens[1]}"
        token_ids = tokenizer.encode(sample)
        assert isinstance(token_ids, list), (
            f"encode() with special tokens should return list for {model_id}"
        )
        assert len(token_ids) > 0, (
            f"encode() with special tokens should return non-empty list for {model_id}"
        )
        assert all(isinstance(t, int) for t in token_ids), (
            f"All tokens should be integers for {model_id}"
        )
        # Decoding the ids must yield a plain string.
        decoded = tokenizer.decode(token_ids)
        assert isinstance(decoded, str), f"decode() should return string for {model_id}"
    # Angle-bracket pseudo-tokens must encode without raising even when they
    # are not registered special tokens.
    angle_bracket_text = "<|test|>Hello<|end|>"
    token_ids = tokenizer.encode(angle_bracket_text)
    assert isinstance(token_ids, list), (
        f"encode() with angle brackets should return list for {model_id}"
    )
    assert len(token_ids) > 0, (
        f"encode() with angle brackets should be non-empty for {model_id}"
    )
# Specifically test Kimi tokenizer since it has special handling
@pytest.mark.asyncio
async def test_kimi_tokenizer_specifically():
    """Test Kimi tokenizer with its specific patches and quirks."""
    cards = await get_model_cards()
    kimi_models = [card for card in cards if "kimi" in card.model_id.lower()]
    if not kimi_models:
        pytest.skip("No Kimi models found in MODEL_CARDS")
    model_id = kimi_models[0].model_id
    model_path = await download_tokenizer_files(model_id)
    # Kimi ships a custom tokenizer implementation; skip when it is absent.
    if not (model_path / "tokenization_kimi.py").exists():
        pytest.skip("tokenization_kimi.py not found")
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    eos_token_ids = get_eos_token_ids_for_model(model_id)
    # Round-trip a plain string through the tokenizer.
    encoded = tokenizer.encode("Hello, world!")
    decoded = tokenizer.decode(encoded)
    assert len(encoded) > 0, "Kimi tokenizer should encode text"
    assert isinstance(decoded, str), "Kimi tokenizer should decode to string"
    # The patched encode must return a flat list of ints.
    assert all(isinstance(t, int) for t in encoded), "Tokens should be integers"
    # Encoding chat-template special tokens is critical: warmup inference
    # feeds prompts that contain them.
    special_token_text = "<|im_user|>user<|im_middle|>Hello<|im_end|><|im_assistant|>"
    special_encoded = tokenizer.encode(special_token_text)
    assert len(special_encoded) > 0, "Kimi tokenizer should handle special tokens"
    assert all(isinstance(t, int) for t in special_encoded), (
        "Special token encoding should return integers"
    )
    # Kimi pins a single fixed EOS id.
    assert eos_token_ids == [163586], "Kimi EOS token should be [163586]"
# Test GLM tokenizer since it also has special handling
@pytest.mark.asyncio
async def test_glm_tokenizer_specifically():
    """Test GLM tokenizer with its specific EOS tokens."""

    def mentions(card: ModelCard, needle: str):
        return needle in card.model_id.lower()

    # Pick a GLM model, excluding the "-5" and "4.7" variants.
    glm_model_cards = [
        card
        for card in await get_model_cards()
        if mentions(card, "glm")
        and not mentions(card, "-5")
        and not mentions(card, "4.7")
    ]
    if not glm_model_cards:
        pytest.skip("No GLM models found in MODEL_CARDS")
    model_id = glm_model_cards[0].model_id
    model_path = await download_tokenizer_files(model_id)
    config_present = (model_path / "tokenizer.json").exists() or (
        model_path / "tokenizer_config.json"
    ).exists()
    if not config_present:
        pytest.skip("GLM tokenizer files not found")
    tokenizer = load_tokenizer_for_model_id(model_id, model_path)
    eos_token_ids = get_eos_token_ids_for_model(model_id)
    # Round-trip a plain string.
    encoded = tokenizer.encode("Hello, world!")
    decoded = tokenizer.decode(encoded)
    assert len(encoded) > 0, "GLM tokenizer should encode text"
    assert isinstance(decoded, str), "GLM tokenizer should decode to string"
    # GLM declares three EOS ids.
    assert eos_token_ids == [
        151336,
        151329,
        151338,
    ], "GLM EOS tokens should be correct"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_mlx/test_tokenizers.py",
"license": "Apache License 2.0",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:tests/headless_runner.py | import socket
from typing import Literal
import anyio
from fastapi import FastAPI
from fastapi.responses import Response, StreamingResponse
from hypercorn import Config
from hypercorn.asyncio import serve # pyright: ignore[reportUnknownVariableType]
from loguru import logger
from pydantic import BaseModel
from exo.shared.constants import EXO_MODELS_DIR
from exo.shared.models.model_cards import ModelCard, ModelId
from exo.shared.types.chunks import TokenChunk
from exo.shared.types.commands import CommandId
from exo.shared.types.common import Host, NodeId
from exo.shared.types.events import ChunkGenerated, Event, RunnerStatusUpdated
from exo.shared.types.tasks import (
ConnectToGroup,
LoadModel,
Shutdown,
StartWarmup,
Task,
TextGeneration,
)
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.worker.instances import (
BoundInstance,
Instance,
InstanceId,
MlxJacclInstance,
MlxRingInstance,
)
from exo.shared.types.worker.runners import (
RunnerFailed,
RunnerId,
RunnerShutdown,
ShardAssignments,
)
from exo.shared.types.worker.shards import PipelineShardMetadata, TensorShardMetadata
from exo.utils.channels import channel, mp_channel
from exo.utils.info_gatherer.info_gatherer import GatheredInfo, InfoGatherer
from exo.worker.runner.bootstrap import entrypoint
class Tests(BaseModel):
    """Request body for /run_test: the device group, model and backend to exercise."""

    # list[hostname, ip addr]
    devs: list[list[str]]
    # Per-node device entries forwarded to the jaccl backend as jaccl_devices;
    # presumably InfiniBand-verbs device names — TODO confirm. May be None
    # when only the ring backend is requested.
    ibv_devs: list[list[str | None]] | None
    model_id: ModelId
    # Which backend(s) to test: "ring", "jaccl", or "both" in sequence.
    kind: Literal["ring", "jaccl", "both"]


# Shared instance id used by every instance/task this runner creates.
iid = InstanceId("im testing here")
async def main():
    """Serve the headless test-runner HTTP API until /kill is called."""
    logger.info("starting cool server majig")
    shutdown_event = anyio.Event()

    cfg = Config()
    cfg.bind = "0.0.0.0:52414"
    # nb: shared.logging needs updating if any of this changes
    cfg.accesslog = "-"
    cfg.errorlog = "-"

    app = FastAPI()
    app.post("/run_test")(run_test)
    app.post("/kill")(lambda: kill(shutdown_event))
    app.get("/tb_detection")(tb_detection)
    app.get("/models")(list_models)

    await serve(
        app,  # type: ignore
        cfg,
        shutdown_trigger=shutdown_event.wait,
    )
def kill(ev: anyio.Event) -> Response:
    """Trip the server's shutdown event and acknowledge with 204 No Content."""
    ev.set()
    return Response(status_code=204)
async def tb_detection():
    """Probe Thunderbolt data via InfoGatherer for up to 1s and return what arrived."""
    send, recv = channel[GatheredInfo]()
    ig = InfoGatherer(send)
    # Bound the probe to one second; the monitor would otherwise keep running.
    with anyio.move_on_after(1):
        await ig._monitor_system_profiler_thunderbolt_data()  # pyright: ignore[reportPrivateUsage]
    # Drain whatever the gatherer managed to send before the timeout.
    with recv:
        return recv.collect()
def list_models():
    """Yield each locally downloaded model id (deduplicated) under EXO_MODELS_DIR."""
    seen = set[str]()
    for path in EXO_MODELS_DIR.rglob("model-*.safetensors"):
        folder = path.parent.name
        # Cache folders encode "org/repo" as "org--repo"; skip anything else.
        if "--" not in folder:
            continue
        name = folder.replace("--", "/")
        if name in seen:
            continue
        seen.add(name)
        yield ModelId(name)
async def run_test(test: Tests):
    """HTTP handler: run the requested backend test(s) and stream their output.

    Matches this machine against an entry in test.devs by hostname prefix,
    builds one instance per requested backend, runs each through the full
    task lifecycle, and streams the generated text plus the terminal
    runner-status event.
    """
    weird_hn = socket.gethostname()
    # The OS hostname may be a prefix or superstring of the configured name,
    # so match in both directions.
    for dev in test.devs:
        if weird_hn.startswith(dev[0]) or dev[0].startswith(weird_hn):
            hn = dev[0]
            break
    else:
        raise ValueError(f"{weird_hn} not in {test.devs}")

    async def run():
        # Async generator producing the streamed response body line by line.
        logger.info(f"testing {test.model_id}")
        instances: list[Instance] = []
        if test.kind in ["ring", "both"]:
            i = await ring_instance(test, hn)
            if i is None:
                yield "no model found"
                return
            instances.append(i)
        if test.kind in ["jaccl", "both"]:
            i = await jaccl_instance(test)
            if i is None:
                yield "no model found"
                return
            instances.append(i)
        for instance in instances:
            recv = await execute_test(test, instance, hn)
            str_out = ""
            for item in recv:
                if isinstance(item, ChunkGenerated):
                    assert isinstance(item.chunk, TokenChunk)
                    str_out += item.chunk.text
                # A failed or shut-down runner ends this instance's run:
                # emit the accumulated text and the final status event.
                if isinstance(item, RunnerStatusUpdated) and isinstance(
                    item.runner_status, (RunnerFailed, RunnerShutdown)
                ):
                    yield str_out + "\n"
                    yield item.model_dump_json() + "\n"

    return StreamingResponse(run())
async def ring_instance(test: Tests, hn: str) -> Instance | None:
    """Build a pipeline-parallel ring instance description for the test devices.

    Only this node's ring neighbours get real addresses; every other slot is
    filled with a placeholder host (198.51.100.0 is the reserved TEST-NET-2
    documentation range, never routable).

    NOTE(review): the signature allows None but this body always returns an
    instance (or raises) — callers' None checks look defensive only.
    """
    hbn = [Host(ip="198.51.100.0", port=52417) for _ in test.devs]
    world_size = len(test.devs)
    for i in range(world_size):
        if test.devs[i][0] == hn:
            hn = test.devs[i][0]
            # Previous and next neighbours in the ring get their real IPs;
            # our own slot binds to all interfaces.
            hbn[(i - 1) % world_size] = Host(ip=test.devs[i - 1][1], port=52417)
            hbn[(i + 1) % world_size] = Host(ip=test.devs[i + 1][1], port=52417)
            hbn[i] = Host(ip="0.0.0.0", port=52417)
            break
    else:
        raise ValueError(f"{hn} not in {test.devs}")
    card = await ModelCard.load(test.model_id)
    instance = MlxRingInstance(
        instance_id=iid,
        ephemeral_port=52417,
        hosts_by_node={NodeId(hn): hbn},
        shard_assignments=ShardAssignments(
            model_id=test.model_id,
            node_to_runner={NodeId(host[0]): RunnerId(host[0]) for host in test.devs},
            runner_to_shard={
                # Contiguous, evenly sized layer ranges per rank, clamped to
                # the model's layer count (the tail rank may be short).
                RunnerId(test.devs[i][0]): PipelineShardMetadata(
                    model_card=card,
                    device_rank=i,
                    world_size=world_size,
                    start_layer=(card.n_layers // world_size) * i,
                    end_layer=min(
                        card.n_layers, (card.n_layers // world_size) * (i + 1)
                    ),
                    n_layers=min(card.n_layers, (card.n_layers // world_size) * (i + 1))
                    - (card.n_layers // world_size) * i,
                )
                for i in range(world_size)
            },
        ),
    )
    return instance
async def execute_test(test: Tests, instance: Instance, hn: str) -> list[Event]:
    """Drive one runner through the full task lifecycle for *instance*.

    Queues load → warmup → generation → shutdown tasks (plus a group-connect
    first when multi-node) and invokes the runner entrypoint inline.

    Returns an empty list for now — event collection is still TODO below.
    """
    world_size = len(test.devs)
    commands: list[Task] = [
        (LoadModel(instance_id=iid)),
        (StartWarmup(instance_id=iid)),
        (
            TextGeneration(
                task_params=TextGenerationTaskParams(
                    model=test.model_id,
                    instructions="You are a helpful assistant",
                    input=[
                        InputMessage(
                            role="user", content="What is the capital of France?"
                        )
                    ],
                ),
                command_id=CommandId("yo"),
                instance_id=iid,
            )
        ),
        (Shutdown(runner_id=RunnerId(hn), instance_id=iid)),
    ]
    # Multi-node groups must connect before any other task runs.
    if world_size > 1:
        commands.insert(0, ConnectToGroup(instance_id=iid))
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RunnerId(hn), bound_node_id=NodeId(hn)
    )
    ev_send, _ev_recv = mp_channel[Event]()
    task_send, task_recv = mp_channel[Task]()
    # Preload every task so the entrypoint can drain them synchronously.
    for command in commands:
        task_send.send(command)
    entrypoint(
        bound_instance,
        ev_send,
        task_recv,
        logger,
    )
    # TODO(evan): return ev_recv.collect()
    return []
async def jaccl_instance(test: Tests) -> MlxJacclInstance | None:
    """Build a tensor-parallel (jaccl) instance description for the test devices.

    Requires test.ibv_devs to be set. Unlike the ring layout, every rank
    holds the full layer range (tensor sharding, not pipeline sharding).
    """
    card = await ModelCard.load(test.model_id)
    world_size = len(test.devs)
    assert test.ibv_devs
    return MlxJacclInstance(
        instance_id=iid,
        jaccl_devices=test.ibv_devs,
        # rank 0 is always coordinator
        jaccl_coordinators={
            NodeId(host[0]): test.devs[0][1] + ":52417" for host in test.devs
        },
        shard_assignments=ShardAssignments(
            model_id=test.model_id,
            node_to_runner={NodeId(host[0]): RunnerId(host[0]) for host in test.devs},
            runner_to_shard={
                RunnerId(host[0]): TensorShardMetadata(
                    model_card=card,
                    device_rank=i,
                    world_size=world_size,
                    # Every rank spans all layers under tensor sharding.
                    start_layer=0,
                    end_layer=card.n_layers,
                    n_layers=card.n_layers,
                )
                for i, host in enumerate(test.devs)
            },
        ),
    )
# Script entry point: serve the test-runner API until /kill is requested.
if __name__ == "__main__":
    anyio.run(main)
| {
"repo_id": "exo-explore/exo",
"file_path": "tests/headless_runner.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:bench/exo_bench.py | # type: ignore
#!/usr/bin/env python3
"""Tool-calling eval for exo's OpenAI-compatible API.
Tests whether models correctly:
- Trigger tool calls when appropriate
- Return valid JSON arguments matching function schemas
- Handle multi-turn tool use (call -> result -> final answer)
- Avoid calling tools when unnecessary
Start exo with a model first, then run:
uv run python tool_call_eval.py --model <model-id>
uv run python tool_call_eval.py --model <model-id> --host 10.0.0.5 --port 52415
uv run python tool_call_eval.py --model <model-id> --repeat 3
uv run python tool_call_eval.py --model <model-id> --scenarios weather_simple calculator_multi_turn
"""
from __future__ import annotations
import argparse
import contextlib
import itertools
import json
import sys
import time
from collections.abc import Callable
from pathlib import Path
from statistics import mean
from typing import Any
from harness import (
ExoClient,
ExoHttpError,
add_common_instance_args,
instance_id_from_instance,
nodes_used_in_instance,
resolve_model_short_id,
run_planning_phase,
settle_and_fetch_placements,
wait_for_instance_gone,
wait_for_instance_ready,
)
from loguru import logger
from transformers import AutoTokenizer
# Monkey-patch for transformers 5.x compatibility
# Kimi's tokenization_kimi.py imports bytes_to_unicode from the old location
# which was moved in transformers 5.0.0rc2
try:
    import transformers.models.gpt2.tokenization_gpt2 as gpt2_tokenization
    from transformers.convert_slow_tokenizer import bytes_to_unicode

    # Only alias when missing so older transformers keep their own symbol.
    if not hasattr(gpt2_tokenization, "bytes_to_unicode"):
        gpt2_tokenization.bytes_to_unicode = bytes_to_unicode  # type: ignore[attr-defined]
except ImportError:
    pass  # transformers < 5.0 or bytes_to_unicode not available
def load_tokenizer_for_bench(model_id: str) -> Any:
    """
    Load tokenizer for benchmarking, with special handling for Kimi models.
    Kimi uses a custom TikTokenTokenizer that transformers 5.x can't load via AutoTokenizer.
    This function replicates the logic from utils_mlx.py for bench compatibility.
    """
    model_id_lower = model_id.lower()
    if "kimi-k2" in model_id_lower:
        import importlib.util
        import types

        from huggingface_hub import snapshot_download

        # Download/get the model path
        model_path = Path(
            snapshot_download(
                model_id,
                allow_patterns=["*.json", "*.py", "*.tiktoken", "*.model"],
            )
        )
        # Make the repo's own .py files importable (fallback path below).
        sys.path.insert(0, str(model_path))
        # Load tool_declaration_ts first (tokenization_kimi imports it with relative import)
        tool_decl_path = model_path / "tool_declaration_ts.py"
        if tool_decl_path.exists():
            spec = importlib.util.spec_from_file_location(
                "tool_declaration_ts", tool_decl_path
            )
            if spec and spec.loader:
                tool_decl_module = importlib.util.module_from_spec(spec)
                # Register before exec so the module can be found by name.
                sys.modules["tool_declaration_ts"] = tool_decl_module
                spec.loader.exec_module(tool_decl_module)
        # Load tokenization_kimi with patched source (convert relative to absolute import)
        tok_path = model_path / "tokenization_kimi.py"
        source = tok_path.read_text()
        source = source.replace("from .tool_declaration_ts", "from tool_declaration_ts")
        spec = importlib.util.spec_from_file_location("tokenization_kimi", tok_path)
        if spec:
            # Execute the patched source in a hand-built module object.
            tok_module = types.ModuleType("tokenization_kimi")
            tok_module.__file__ = str(tok_path)
            sys.modules["tokenization_kimi"] = tok_module
            exec(compile(source, tok_path, "exec"), tok_module.__dict__)  # noqa: S102
            TikTokenTokenizer = tok_module.TikTokenTokenizer  # noqa: N806
        else:
            from tokenization_kimi import TikTokenTokenizer  # type: ignore[import-not-found] # noqa: I001
        hf_tokenizer: Any = TikTokenTokenizer.from_pretrained(model_path)

        # Patch encode to use internal tiktoken model directly
        # transformers 5.x has a bug in the encode->pad path for slow tokenizers
        def _patched_encode(text: str, **kwargs: object) -> list[int]:
            # Pass allowed_special="all" to handle special tokens like <|im_user|>
            return list(hf_tokenizer.model.encode(text, allowed_special="all"))

        hf_tokenizer.encode = _patched_encode
        return hf_tokenizer
    # Default: use AutoTokenizer
    return AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
def format_peak_memory(b: float) -> str:
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.50KB'."""
    value = b
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if value < 1024.0:
            return f"{value:.2f}{unit}"
        value /= 1024.0
    raise ValueError("You're using petabytes of memory. Something went wrong...")
def parse_int_list(values: list[str]) -> list[int]:
    """Flatten CLI integer arguments, splitting comma-separated entries."""
    return [
        int(piece)
        for value in values
        for piece in (part.strip() for part in value.split(","))
        if piece
    ]
def run_one_completion(
    client: ExoClient, model_id: str, pp_hint: int, tg: int, prompt_sizer: PromptSizer
) -> tuple[dict[str, Any], int]:
    """Issue one non-streaming chat completion sized to ~pp_hint prompt tokens.

    Returns (result row with timing/preview/stats, actual prompt token count).
    """
    prompt, pp_tokens = prompt_sizer.build(pp_hint)
    payload: dict[str, Any] = {
        "model": model_id,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "max_tokens": tg,
    }
    start = time.perf_counter()
    out = client.post_bench_chat_completions(payload)
    elapsed = time.perf_counter() - start
    stats = out.get("generation_stats")
    # Build a short reply preview, tolerating None content (common for
    # thinking models) and missing choices.
    choices = out.get("choices") or [{}]
    message = choices[0].get("message", {}) if choices else {}
    reply_text = message.get("content") or ""
    preview = reply_text[:200] if reply_text else ""
    return {
        "elapsed_s": elapsed,
        "output_text_preview": preview,
        "stats": stats,
    }, pp_tokens
class PromptSizer:
    """Builds user-message content that tokenizes to an exact prompt-token target.

    Repeats a small text "atom" and binary-searches the repeat count until the
    fully chat-templated prompt hits the requested token count exactly.
    """

    def __init__(self, tokenizer: Any, atom: str = "a "):
        self.tokenizer = tokenizer
        # Unit of text repeated to grow the prompt.
        self.atom = atom
        self.count_fn = PromptSizer._make_counter(tokenizer)
        # Token overhead of the chat template with empty user content.
        self.base_tokens = self.count_fn("")

    @staticmethod
    def _make_counter(tokenizer: Any) -> Callable[[str], int]:
        """Return a counter of templated-prompt tokens for given user content."""

        def count_fn(user_content: str) -> int:
            messages = [{"role": "user", "content": user_content}]
            ids = tokenizer.apply_chat_template(
                messages, tokenize=True, add_generation_prompt=True
            )
            # Fix for transformers 5.x
            if hasattr(ids, "input_ids"):
                ids = ids.input_ids
            return int(len(ids))

        return count_fn

    def build(self, target_prompt_tokens: int) -> tuple[str, int]:
        """Return (content, token_count) whose templated prompt is exactly the target.

        Raises RuntimeError when the target is below the template overhead, or
        when no repeat count lands exactly on the target token count.
        """
        target = int(target_prompt_tokens)
        if target < self.base_tokens:
            raise RuntimeError(
                f"Target ({target}) is smaller than template overhead ({self.base_tokens})."
            )
        # Estimate tokens per atom using a sample
        sample_count = 100
        sample_content = self.atom * sample_count
        sample_tokens = self.count_fn(sample_content) - self.base_tokens
        tokens_per_atom = sample_tokens / sample_count
        # Estimate starting point
        needed_tokens = target - self.base_tokens
        estimated_atoms = int(needed_tokens / tokens_per_atom)
        # Binary search to find exact atom count
        # (lower-bound search: smallest count whose token total >= target)
        low, high = 0, estimated_atoms * 2 + 100
        while low < high:
            mid = (low + high) // 2
            tok = self.count_fn(self.atom * mid)
            if tok < target:
                low = mid + 1
            else:
                high = mid
        content = self.atom * low
        tok = self.count_fn(content)
        logger.info(f"{tok=}")
        if tok != target:
            raise RuntimeError(
                f"Overshot: got {tok} tokens (target {target}). "
                f"Pick a different atom (try ' a' or '\\n' or '0 ')."
            )
        return content, tok
def main() -> int:
    """Benchmark every selected placement across the requested pp/tg grid.

    Returns a process exit code: 0 on success, 1 when no placement matches,
    2 on invalid arguments.
    """
    ap = argparse.ArgumentParser(
        prog="exo-bench",
        description="Benchmark exo model throughput across placement previews.",
    )
    add_common_instance_args(ap)
    ap.add_argument(
        "--pp",
        nargs="+",
        required=True,
        help="Prompt-size hints (ints). Accepts commas.",
    )
    ap.add_argument(
        "--tg",
        nargs="+",
        required=True,
        help="Generation lengths (ints). Accepts commas.",
    )
    ap.add_argument(
        "--repeat", type=int, default=1, help="Repetitions per (pp,tg) pair."
    )
    ap.add_argument(
        "--warmup",
        type=int,
        default=0,
        help="Warmup runs per placement (uses first pp/tg).",
    )
    ap.add_argument(
        "--json-out",
        default="bench/results.json",
        help="Write raw per-run results JSON to this path.",
    )
    ap.add_argument("--stdout", action="store_true", help="Write results to stdout")
    ap.add_argument(
        "--dry-run", action="store_true", help="List selected placements and exit."
    )
    ap.add_argument(
        "--all-combinations",
        action="store_true",
        help="Force all pp×tg combinations (cartesian product) even when lists have equal length.",
    )
    args = ap.parse_args()
    # Validate the pp/tg grid and repeat count before touching the server.
    pp_list = parse_int_list(args.pp)
    tg_list = parse_int_list(args.tg)
    if not pp_list or not tg_list:
        logger.error("pp and tg lists must be non-empty")
        return 2
    if args.repeat <= 0:
        logger.error("--repeat must be >= 1")
        return 2
    # Log pairing mode
    use_combinations = args.all_combinations or len(pp_list) != len(tg_list)
    if use_combinations:
        logger.info(
            f"pp/tg mode: combinations (product) - {len(pp_list) * len(tg_list)} pairs"
        )
    else:
        logger.info(f"pp/tg mode: tandem (zip) - {len(pp_list)} pairs")
    client = ExoClient(args.host, args.port, timeout_s=args.timeout)
    short_id, full_model_id = resolve_model_short_id(client, args.model)
    tokenizer = load_tokenizer_for_bench(full_model_id)
    if tokenizer is None:
        raise RuntimeError("[exo-bench] tokenizer load failed")
    try:
        prompt_sizer = PromptSizer(tokenizer)
        logger.debug(f"[exo-bench] loaded tokenizer: {full_model_id} for prompt sizer")
    except Exception:
        logger.error("[exo-bench] tokenizer usable but prompt sizing failed")
        raise
    # Fetch candidate placements and order them deterministically.
    selected = settle_and_fetch_placements(
        client, full_model_id, args, settle_timeout=args.settle_timeout
    )
    if not selected:
        logger.error("No valid placements matched your filters.")
        return 1
    selected.sort(
        key=lambda p: (
            str(p.get("instance_meta", "")),
            str(p.get("sharding", "")),
            -nodes_used_in_instance(p["instance"]),
        ),
        reverse=True,
    )
    logger.debug(f"exo-bench model: short_id={short_id} full_id={full_model_id}")
    logger.info(f"placements: {len(selected)}")
    for p in selected:
        logger.info(
            f" - {p['sharding']} / {p['instance_meta']} / nodes={nodes_used_in_instance(p['instance'])}"
        )
    if args.dry_run:
        return 0
    settle_deadline = (
        time.monotonic() + args.settle_timeout if args.settle_timeout > 0 else None
    )
    # Ensure the model is downloaded (timed, so runs can report cold starts).
    logger.info("Planning phase: checking downloads...")
    download_duration_s = run_planning_phase(
        client,
        full_model_id,
        selected[0],
        args.danger_delete_downloads,
        args.timeout,
        settle_deadline,
    )
    if download_duration_s is not None:
        logger.info(f"Download: {download_duration_s:.1f}s (freshly downloaded)")
    else:
        logger.info("Download: model already cached")
    all_rows: list[dict[str, Any]] = []
    # One benchmark pass per placement: create instance, run grid, tear down.
    for preview in selected:
        instance = preview["instance"]
        instance_id = instance_id_from_instance(instance)
        sharding = str(preview["sharding"])
        instance_meta = str(preview["instance_meta"])
        n_nodes = nodes_used_in_instance(instance)
        logger.info("=" * 80)
        logger.info(
            f"PLACEMENT: {sharding} / {instance_meta} / nodes={n_nodes} / instance_id={instance_id}"
        )
        client.request_json("POST", "/instance", body={"instance": instance})
        try:
            wait_for_instance_ready(client, instance_id)
        except (RuntimeError, TimeoutError) as e:
            # Placement failed to come up: best-effort cleanup, move on.
            logger.error(f"Failed to initialize placement: {e}")
            with contextlib.suppress(ExoHttpError):
                client.request_json("DELETE", f"/instance/{instance_id}")
            continue
        time.sleep(1)
        try:
            for i in range(args.warmup):
                run_one_completion(
                    client, full_model_id, pp_list[0], tg_list[0], prompt_sizer
                )
                logger.debug(f" warmup {i + 1}/{args.warmup} done")
            # If pp and tg lists have same length, run in tandem (zip)
            # Otherwise (or if --all-combinations), run all combinations (cartesian product)
            if use_combinations:
                pp_tg_pairs = list(itertools.product(pp_list, tg_list))
            else:
                pp_tg_pairs = list(zip(pp_list, tg_list, strict=True))
            for pp, tg in pp_tg_pairs:
                runs: list[dict[str, Any]] = []
                for r in range(args.repeat):
                    time.sleep(3)
                    try:
                        row, actual_pp_tokens = run_one_completion(
                            client, full_model_id, pp, tg, prompt_sizer
                        )
                    except Exception as e:
                        # A single failed run shouldn't sink the whole pass.
                        logger.error(e)
                        continue
                    row.update(
                        {
                            "model_short_id": short_id,
                            "model_id": full_model_id,
                            "placement_sharding": sharding,
                            "placement_instance_meta": instance_meta,
                            "placement_nodes": n_nodes,
                            "instance_id": instance_id,
                            "pp_tokens": actual_pp_tokens,
                            "tg": tg,
                            "repeat_index": r,
                            **(
                                {"download_duration_s": download_duration_s}
                                if download_duration_s is not None
                                else {}
                            ),
                        }
                    )
                    runs.append(row)
                    all_rows.append(row)
                if runs:
                    # Summarize the repeats for this (pp, tg) pair.
                    prompt_tps = mean(x["stats"]["prompt_tps"] for x in runs)
                    gen_tps = mean(x["stats"]["generation_tps"] for x in runs)
                    ptok = mean(x["stats"]["prompt_tokens"] for x in runs)
                    gtok = mean(x["stats"]["generation_tokens"] for x in runs)
                    peak = mean(
                        x["stats"]["peak_memory_usage"]["inBytes"] for x in runs
                    )
                    logger.info(
                        f"prompt_tps={prompt_tps:.2f} gen_tps={gen_tps:.2f} "
                        f"prompt_tokens={ptok} gen_tokens={gtok} "
                        f"peak_memory={format_peak_memory(peak)}\n"
                    )
                time.sleep(2)
        finally:
            # Always tear the instance down; tolerate it already being gone.
            try:
                client.request_json("DELETE", f"/instance/{instance_id}")
            except ExoHttpError as e:
                if e.status != 404:
                    raise
            wait_for_instance_gone(client, instance_id)
            logger.debug(f"Deleted instance {instance_id}")
            time.sleep(5)
    if args.stdout:
        json.dump(all_rows, sys.stdout, indent=2, ensure_ascii=False)
    elif args.json_out:
        with open(args.json_out, "w", encoding="utf-8") as f:
            json.dump(all_rows, f, indent=2, ensure_ascii=False)
        logger.debug(f"\nWrote results JSON: {args.json_out}")
    return 0
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
| {
"repo_id": "exo-explore/exo",
"file_path": "bench/exo_bench.py",
"license": "Apache License 2.0",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_event_ordering.py | # Check tasks are complete before runner is ever ready.
import unittest.mock
from collections.abc import Iterable
from typing import Callable
import mlx.core as mx
import pytest
import exo.worker.runner.llm_inference.runner as mlx_runner
from exo.shared.types.chunks import TokenChunk
from exo.shared.types.events import (
ChunkGenerated,
Event,
RunnerStatusUpdated,
TaskAcknowledged,
TaskStatusUpdated,
)
from exo.shared.types.tasks import (
ConnectToGroup,
LoadModel,
Shutdown,
StartWarmup,
Task,
TaskId,
TaskStatus,
TextGeneration,
)
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.worker.runner_response import GenerationResponse
from exo.shared.types.worker.runners import (
RunnerConnected,
RunnerConnecting,
RunnerIdle,
RunnerLoaded,
RunnerLoading,
RunnerReady,
RunnerRunning,
RunnerShutdown,
RunnerShuttingDown,
RunnerWarmingUp,
)
from exo.utils.channels import mp_channel
from ...constants import (
CHAT_COMPLETION_TASK_ID,
COMMAND_1_ID,
INITIALIZATION_TASK_ID,
INSTANCE_1_ID,
LOAD_TASK_ID,
MODEL_A_ID,
NODE_A,
RUNNER_1_ID,
SHUTDOWN_TASK_ID,
WARMUP_TASK_ID,
)
from ..conftest import get_bound_mlx_ring_instance
def make_nothin[T, U, V](res: T) -> Callable[[], T]:
def nothin(*_1: U, **_2: V) -> T:
return res
return nothin
# Sink that accepts any arguments and returns None.
nothin = make_nothin(None)

# Canned tasks replayed through the runner in lifecycle order.
INIT_TASK = ConnectToGroup(
    task_id=INITIALIZATION_TASK_ID,
    instance_id=INSTANCE_1_ID,
)
LOAD_TASK = LoadModel(
    task_id=LOAD_TASK_ID,
    instance_id=INSTANCE_1_ID,
)
WARMUP_TASK = StartWarmup(
    task_id=WARMUP_TASK_ID,
    instance_id=INSTANCE_1_ID,
)
SHUTDOWN_TASK = Shutdown(
    task_id=SHUTDOWN_TASK_ID,
    instance_id=INSTANCE_1_ID,
    runner_id=RUNNER_1_ID,
)
# Short, deterministic (temperature 0) streaming generation request.
CHAT_PARAMS = TextGenerationTaskParams(
    model=MODEL_A_ID,
    input=[InputMessage(role="user", content="hello")],
    stream=True,
    max_output_tokens=4,
    temperature=0.0,
)
CHAT_TASK = TextGeneration(
    task_id=CHAT_COMPLETION_TASK_ID,
    command_id=COMMAND_1_ID,
    task_params=CHAT_PARAMS,
    instance_id=INSTANCE_1_ID,
)
def assert_events_equal(test_events: Iterable[Event], true_events: Iterable[Event]):
    """Compare event streams pairwise with event_id normalized out.

    Lengths must match exactly (zip strict=True raises otherwise).
    """
    for observed, expected in zip(test_events, true_events, strict=True):
        # Copy the expected id across so equality checks the payload only.
        observed.event_id = expected.event_id
        assert observed == expected, f"{observed} != {expected}"
@pytest.fixture
def patch_out_mlx(monkeypatch: pytest.MonkeyPatch):
    """Stub every mlx-touching hook in the runner so tests run without a model."""
    # initialize_mlx returns a mock group
    monkeypatch.setattr(mlx_runner, "initialize_mlx", make_nothin(MockGroup()))
    # Model load is faked: the (1, MockTokenizer) pair is never exercised
    # directly because generation is also stubbed below.
    monkeypatch.setattr(mlx_runner, "load_mlx_items", make_nothin((1, MockTokenizer)))
    monkeypatch.setattr(mlx_runner, "warmup_inference", make_nothin(1))
    monkeypatch.setattr(mlx_runner, "_check_for_debug_prompts", nothin)
    monkeypatch.setattr(mlx_runner, "mx_any", make_nothin(False))
    # Mock apply_chat_template since we're using a fake tokenizer (integer 1).
    # Returns a prompt without thinking tag so detect_thinking_prompt_suffix returns None.
    monkeypatch.setattr(mlx_runner, "apply_chat_template", make_nothin("test prompt"))
    monkeypatch.setattr(mlx_runner, "detect_thinking_prompt_suffix", make_nothin(False))

    def fake_generate(*_1: object, **_2: object):
        # Single-token stream that finishes immediately with "stop".
        yield GenerationResponse(token=0, text="hi", finish_reason="stop", usage=None)

    monkeypatch.setattr(mlx_runner, "mlx_generate", fake_generate)
# Use a fake event_sender to remove test flakiness.
class EventCollector:
    """In-memory event sink exposing the sender interface the runner expects."""

    def __init__(self) -> None:
        self.events: list[Event] = []

    def send(self, event: Event) -> None:
        # Record instead of transmitting.
        self.events.append(event)

    def close(self) -> None:
        # Channel-interface no-op.
        return None

    def join(self) -> None:
        # Channel-interface no-op.
        return None
class MockTokenizer:
    # Minimal tokenizer stub: no tool-calling or thinking support, so the
    # runner follows the plain text-generation code path.
    tool_parser = None
    tool_call_start = None
    tool_call_end = None
    has_tool_calling = False
    has_thinking = False
class MockGroup:
    """Stand-in distributed group describing a single-member world."""

    def rank(self) -> int:
        # This node is always rank 0.
        return 0

    def size(self) -> int:
        # World of one: no peers.
        return 1
def _run(tasks: Iterable[Task]):
    """Feed *tasks* through the real runner main loop (mlx stubbed); return events."""
    bound_instance = get_bound_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        runner_id=RUNNER_1_ID,
        node_id=NODE_A,
    )
    task_sender, task_receiver = mp_channel[Task]()
    _cancel_sender, cancel_receiver = mp_channel[TaskId]()
    event_sender = EventCollector()
    # Queue everything up front, then close the sender so the runner's
    # receive loop terminates after draining the queue.
    with task_sender:
        for t in tasks:
            task_sender.send(t)
    # worst monkeypatch known to man
    # this is some c++ nonsense
    task_receiver.close = nothin
    task_receiver.join = nothin
    # Stub all_gather so the single-process "ring" never blocks on peers.
    with unittest.mock.patch(
        "exo.worker.runner.llm_inference.runner.mx.distributed.all_gather",
        make_nothin(mx.array([1])),
    ):
        mlx_runner.main(
            bound_instance,
            event_sender,  # pyright: ignore[reportArgumentType]
            task_receiver,
            cancel_receiver,
        )
    return event_sender.events
def test_events_processed_in_correct_order(patch_out_mlx: pytest.MonkeyPatch):
    """Run the full task lifecycle and pin the exact event sequence it emits."""
    events = _run([INIT_TASK, LOAD_TASK, WARMUP_TASK, CHAT_TASK, SHUTDOWN_TASK])
    # The single token produced by the stubbed generator in patch_out_mlx.
    expected_chunk = ChunkGenerated(
        command_id=COMMAND_1_ID,
        chunk=TokenChunk(
            model=MODEL_A_ID,
            text="hi",
            token_id=0,
            finish_reason="stop",
            usage=None,
            stats=None,
        ),
    )
    # Each task follows Running -> Acknowledged -> Complete, bracketed by the
    # matching runner-status transitions.
    assert_events_equal(
        events,
        [
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerIdle()),
            TaskStatusUpdated(
                task_id=INITIALIZATION_TASK_ID, task_status=TaskStatus.Running
            ),
            RunnerStatusUpdated(
                runner_id=RUNNER_1_ID, runner_status=RunnerConnecting()
            ),
            TaskAcknowledged(task_id=INITIALIZATION_TASK_ID),
            TaskStatusUpdated(
                task_id=INITIALIZATION_TASK_ID, task_status=TaskStatus.Complete
            ),
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerConnected()),
            TaskStatusUpdated(task_id=LOAD_TASK_ID, task_status=TaskStatus.Running),
            RunnerStatusUpdated(
                runner_id=RUNNER_1_ID,
                runner_status=RunnerLoading(layers_loaded=0, total_layers=32),
            ),
            TaskAcknowledged(task_id=LOAD_TASK_ID),
            TaskStatusUpdated(task_id=LOAD_TASK_ID, task_status=TaskStatus.Complete),
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerLoaded()),
            TaskStatusUpdated(task_id=WARMUP_TASK_ID, task_status=TaskStatus.Running),
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerWarmingUp()),
            TaskAcknowledged(task_id=WARMUP_TASK_ID),
            TaskStatusUpdated(task_id=WARMUP_TASK_ID, task_status=TaskStatus.Complete),
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerReady()),
            TaskStatusUpdated(
                task_id=CHAT_COMPLETION_TASK_ID, task_status=TaskStatus.Running
            ),
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerRunning()),
            TaskAcknowledged(task_id=CHAT_COMPLETION_TASK_ID),
            expected_chunk,
            TaskStatusUpdated(
                task_id=CHAT_COMPLETION_TASK_ID, task_status=TaskStatus.Complete
            ),
            # CHAT COMPLETION TASK SHOULD COMPLETE BEFORE RUNNER READY
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerReady()),
            TaskStatusUpdated(task_id=SHUTDOWN_TASK_ID, task_status=TaskStatus.Running),
            RunnerStatusUpdated(
                runner_id=RUNNER_1_ID, runner_status=RunnerShuttingDown()
            ),
            TaskAcknowledged(task_id=SHUTDOWN_TASK_ID),
            TaskStatusUpdated(
                task_id=SHUTDOWN_TASK_ID, task_status=TaskStatus.Complete
            ),
            # SPECIAL EXCEPTION FOR RUNNER SHUTDOWN
            RunnerStatusUpdated(runner_id=RUNNER_1_ID, runner_status=RunnerShutdown()),
        ],
    )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_runner/test_event_ordering.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/tests/test_xdg_paths.py | """Tests for XDG Base Directory Specification compliance."""
import os
import sys
from pathlib import Path
from unittest import mock
def test_xdg_paths_on_linux():
    """Test that XDG paths are used on Linux when XDG env vars are set.

    EXO_HOME is stripped from the environment first: it takes precedence
    over the XDG variables (see test_legacy_exo_home_takes_precedence),
    so leaving an ambient EXO_HOME in place would make this test flaky
    on developer machines.
    """
    env = {k: v for k, v in os.environ.items() if k != "EXO_HOME"}
    env.update(
        {
            "XDG_CONFIG_HOME": "/tmp/test-config",
            "XDG_DATA_HOME": "/tmp/test-data",
            "XDG_CACHE_HOME": "/tmp/test-cache",
        }
    )

    with (
        mock.patch.dict(os.environ, env, clear=True),
        mock.patch.object(sys, "platform", "linux"),
    ):
        # Re-import so the module-level constants pick up the mocked values
        import importlib

        import exo.shared.constants as constants

        importlib.reload(constants)

        assert Path("/tmp/test-config/exo") == constants.EXO_CONFIG_HOME
        assert Path("/tmp/test-data/exo") == constants.EXO_DATA_HOME
        assert Path("/tmp/test-cache/exo") == constants.EXO_CACHE_HOME
def test_xdg_default_paths_on_linux():
    """Test that XDG default paths are used on Linux when env vars are not set."""
    # Strip every XDG_* variable plus the legacy EXO_HOME override.
    clean_env = {
        name: value
        for name, value in os.environ.items()
        if name != "EXO_HOME" and not name.startswith("XDG_")
    }

    with (
        mock.patch.dict(os.environ, clean_env, clear=True),
        mock.patch.object(sys, "platform", "linux"),
    ):
        # Reload so the module-level constants are recomputed under the mocks.
        import importlib

        import exo.shared.constants as constants

        importlib.reload(constants)

        base = Path.home()
        assert constants.EXO_CONFIG_HOME == base / ".config" / "exo"
        assert constants.EXO_DATA_HOME == base / ".local/share" / "exo"
        assert constants.EXO_CACHE_HOME == base / ".cache" / "exo"
def test_legacy_exo_home_takes_precedence():
    """Test that EXO_HOME environment variable takes precedence for backward compatibility."""
    overrides = {
        "EXO_HOME": ".custom-exo",
        "XDG_CONFIG_HOME": "/tmp/test-config",
    }

    with mock.patch.dict(os.environ, overrides, clear=False):
        # Reload so the module-level constants are recomputed under the mocks.
        import importlib

        import exo.shared.constants as constants

        importlib.reload(constants)

        # EXO_HOME should win over XDG_CONFIG_HOME for both directories.
        expected = Path.home() / ".custom-exo"
        assert constants.EXO_CONFIG_HOME == expected
        assert constants.EXO_DATA_HOME == expected
def test_macos_uses_traditional_paths():
    """Test that macOS uses traditional ~/.exo directory."""
    # Drop EXO_HOME so the platform default (not the override) is exercised.
    env_without_override = dict(os.environ)
    env_without_override.pop("EXO_HOME", None)

    with (
        mock.patch.dict(os.environ, env_without_override, clear=True),
        mock.patch.object(sys, "platform", "darwin"),
    ):
        # Reload so the module-level constants are recomputed under the mocks.
        import importlib

        import exo.shared.constants as constants

        importlib.reload(constants)

        legacy_dir = Path.home() / ".exo"
        assert constants.EXO_CONFIG_HOME == legacy_dir
        assert constants.EXO_DATA_HOME == legacy_dir
        assert constants.EXO_CACHE_HOME == legacy_dir
def test_node_id_in_config_dir():
    """Test that node ID keypair is in the config directory."""
    from exo.shared import constants

    assert constants.EXO_NODE_ID_KEYPAIR.parent == constants.EXO_CONFIG_HOME
def test_models_in_data_dir():
    """Test that models directory is in the data directory."""
    # Exercise the default location by hiding any EXO_MODELS_DIR override.
    env = dict(os.environ)
    env.pop("EXO_MODELS_DIR", None)

    with mock.patch.dict(os.environ, env, clear=True):
        # Reload so the module-level constants are recomputed under the mocks.
        import importlib

        import exo.shared.constants as constants

        importlib.reload(constants)

        assert constants.EXO_MODELS_DIR.parent == constants.EXO_DATA_HOME
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tests/test_xdg_paths.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:rust/exo_pyo3_bindings/tests/test_python.py | import asyncio
import pytest
from exo_pyo3_bindings import Keypair, NetworkingHandle, NoPeersSubscribedToTopicError
@pytest.mark.asyncio
async def test_sleep_on_multiple_items() -> None:
    """Smoke-test publishing while background receivers are running.

    Publishing with no subscribed peers is expected to raise
    NoPeersSubscribedToTopicError; the test checks that the error is
    catchable rather than crashing the handle.
    """
    print("PYTHON: starting handle")
    h = NetworkingHandle(Keypair.generate_ed25519())

    # Background receivers loop forever, so they must be cancelled before
    # the test returns or asyncio warns about destroyed pending tasks.
    ct = asyncio.create_task(_await_cons(h))
    mt = asyncio.create_task(_await_msg(h))
    try:
        # sleep for 4 ticks, publishing once per tick
        for _ in range(4):
            await asyncio.sleep(1)
            try:
                await h.gossipsub_publish("topic", b"somehting or other")
            except NoPeersSubscribedToTopicError as e:
                print("caught it", e)
    finally:
        ct.cancel()
        mt.cancel()
async def _await_cons(h: NetworkingHandle):
    """Print every connection update received on the handle, forever."""
    while True:
        update = await h.connection_update_recv()
        print(f"PYTHON: connection update: {update}")
async def _await_msg(h: NetworkingHandle):
    """Print every gossipsub message received on the handle, forever."""
    while True:
        message = await h.gossipsub_recv()
        print(f"PYTHON: message: {message}")
| {
"repo_id": "exo-explore/exo",
"file_path": "rust/exo_pyo3_bindings/tests/test_python.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.