sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
exo-explore/exo:src/exo/__main__.py | from __future__ import annotations
import sys
from collections.abc import Sequence
from multiprocessing import freeze_support
from typing import Final
from exo.main import main
INLINE_CODE_FLAG: Final[str] = "-c"
def _maybe_run_inline_code(argv: Sequence[str]) -> bool:
"""
Reproduce the bare minimum of Python's `-c` flag so multiprocessing
helper processes (for example the resource tracker) can execute.
"""
try:
flag_index = argv.index(INLINE_CODE_FLAG)
except ValueError:
return False
code_index = flag_index + 1
if code_index >= len(argv):
return False
inline_code = argv[code_index]
sys.argv = ["-c", *argv[code_index + 1 :]]
namespace: dict[str, object] = {"__name__": "__main__"}
exec(inline_code, namespace, namespace)
return True
if __name__ == "__main__":
    # multiprocessing helpers re-invoke this entry point with `-c <code>`;
    # execute that inline code and exit instead of starting exo itself.
    if _maybe_run_inline_code(sys.argv):
        sys.exit(0)
    # Support for frozen executables that re-execute the entry point when
    # spawning worker processes.
    freeze_support()
    main()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/__main__.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/main.py | import argparse
import multiprocessing as mp
import os
import resource
import signal
from dataclasses import dataclass, field
from typing import Self
import anyio
from loguru import logger
from pydantic import PositiveInt
import exo.routing.topics as topics
from exo.download.coordinator import DownloadCoordinator
from exo.download.impl_shard_downloader import exo_shard_downloader
from exo.master.api import API # TODO: should API be in master?
from exo.master.main import Master
from exo.routing.event_router import EventRouter
from exo.routing.router import Router, get_node_id_keypair
from exo.shared.constants import EXO_LOG
from exo.shared.election import Election, ElectionResult
from exo.shared.logging import logger_cleanup, logger_setup
from exo.shared.types.common import NodeId, SessionId
from exo.utils.channels import Receiver, channel
from exo.utils.pydantic_ext import CamelCaseModel
from exo.utils.task_group import TaskGroup
from exo.worker.main import Worker
@dataclass
class Node:
    """Bundle of every long-lived service one exo process runs.

    Router/EventRouter and the Election always exist; download coordinator,
    worker, master and API are optional depending on CLI flags and on
    election outcomes handled in `_elect_loop`.
    """

    router: Router
    event_router: EventRouter
    download_coordinator: DownloadCoordinator | None
    worker: Worker | None
    election: Election  # Every node participates in election, as we do want a node to become master even if it isn't a master candidate if no master candidates are present.
    election_result_receiver: Receiver[ElectionResult]
    master: Master | None
    api: API | None
    node_id: NodeId
    offline: bool
    _tg: TaskGroup = field(init=False, default_factory=TaskGroup)

    @classmethod
    async def create(cls, args: "Args") -> Self:
        """Construct a Node and all of its sub-services from parsed CLI args."""
        keypair = get_node_id_keypair()
        node_id = NodeId(keypair.to_node_id())
        # Initial session: the node assumes itself master with clock 0 until
        # an election result says otherwise.
        session_id = SessionId(master_node_id=node_id, election_clock=0)
        router = Router.create(keypair)
        await router.register_topic(topics.GLOBAL_EVENTS)
        await router.register_topic(topics.LOCAL_EVENTS)
        await router.register_topic(topics.COMMANDS)
        await router.register_topic(topics.ELECTION_MESSAGES)
        await router.register_topic(topics.CONNECTION_MESSAGES)
        await router.register_topic(topics.DOWNLOAD_COMMANDS)
        event_router = EventRouter(
            session_id,
            command_sender=router.sender(topics.COMMANDS),
            external_outbound=router.sender(topics.LOCAL_EVENTS),
            external_inbound=router.receiver(topics.GLOBAL_EVENTS),
        )
        logger.info(f"Starting node {node_id}")
        # Create DownloadCoordinator (unless --no-downloads)
        if not args.no_downloads:
            download_coordinator = DownloadCoordinator(
                node_id,
                exo_shard_downloader(offline=args.offline),
                event_sender=event_router.sender(),
                download_command_receiver=router.receiver(topics.DOWNLOAD_COMMANDS),
                offline=args.offline,
            )
        else:
            download_coordinator = None
        if args.spawn_api:
            api = API(
                node_id,
                port=args.api_port,
                event_receiver=event_router.receiver(),
                command_sender=router.sender(topics.COMMANDS),
                download_command_sender=router.sender(topics.DOWNLOAD_COMMANDS),
                election_receiver=router.receiver(topics.ELECTION_MESSAGES),
            )
        else:
            api = None
        if not args.no_worker:
            worker = Worker(
                node_id,
                event_receiver=event_router.receiver(),
                event_sender=event_router.sender(),
                command_sender=router.sender(topics.COMMANDS),
                download_command_sender=router.sender(topics.DOWNLOAD_COMMANDS),
            )
        else:
            worker = None
        # We start every node with a master
        master = Master(
            node_id,
            session_id,
            event_sender=event_router.sender(),
            global_event_sender=router.sender(topics.GLOBAL_EVENTS),
            local_event_receiver=router.receiver(topics.LOCAL_EVENTS),
            command_receiver=router.receiver(topics.COMMANDS),
            download_command_sender=router.sender(topics.DOWNLOAD_COMMANDS),
        )
        er_send, er_recv = channel[ElectionResult]()
        election = Election(
            node_id,
            # If someone manages to assemble 1 MILLION devices into an exo cluster then. well done. good job champ.
            seniority=1_000_000 if args.force_master else 0,
            # nb: this DOES feedback right now. i have thoughts on how to address this,
            # but ultimately it seems not worth the complexity
            election_message_sender=router.sender(topics.ELECTION_MESSAGES),
            election_message_receiver=router.receiver(topics.ELECTION_MESSAGES),
            connection_message_receiver=router.receiver(topics.CONNECTION_MESSAGES),
            command_receiver=router.receiver(topics.COMMANDS),
            election_result_sender=er_send,
        )
        return cls(
            router,
            event_router,
            download_coordinator,
            worker,
            election,
            er_recv,
            master,
            api,
            node_id,
            args.offline,
        )

    async def run(self):
        """Start every configured service and block until shutdown."""
        async with self._tg as tg:
            # First SIGINT/SIGTERM cancels all tasks; a second call to
            # shutdown() hard-exits (see shutdown()).
            signal.signal(signal.SIGINT, lambda _, __: self.shutdown())
            signal.signal(signal.SIGTERM, lambda _, __: self.shutdown())
            tg.start_soon(self.router.run)
            tg.start_soon(self.event_router.run)
            tg.start_soon(self.election.run)
            if self.download_coordinator:
                tg.start_soon(self.download_coordinator.run)
            if self.worker:
                tg.start_soon(self.worker.run)
            if self.master:
                tg.start_soon(self.master.run)
            if self.api:
                tg.start_soon(self.api.run)
            tg.start_soon(self._elect_loop)

    def shutdown(self):
        """Cancel all running tasks; exit the process on a repeated call."""
        # if this is our second call to shutdown, just sys.exit
        if self._tg.cancel_called():
            import sys

            sys.exit(1)
        self._tg.cancel_tasks()

    async def _elect_loop(self):
        """Consume election results and swap session-scoped services."""
        with self.election_result_receiver as results:
            async for result in results:
                # This function continues to have a lot of very specific entangled logic
                # At least it's somewhat contained
                # I don't like this duplication, but it's manageable for now.
                # TODO: This function needs refactoring generally
                # Ok:
                # On new master:
                # - Elect master locally if necessary
                # - Shutdown and re-create the worker
                # - Shut down and re-create the API
                if result.is_new_master:
                    await anyio.sleep(0)
                    # The event router is bound to a session; replace it.
                    self.event_router.shutdown()
                    self.event_router = EventRouter(
                        result.session_id,
                        self.router.sender(topics.COMMANDS),
                        self.router.receiver(topics.GLOBAL_EVENTS),
                        self.router.sender(topics.LOCAL_EVENTS),
                    )
                    self._tg.start_soon(self.event_router.run)
                    if (
                        result.session_id.master_node_id == self.node_id
                        and self.master is not None
                    ):
                        logger.info("Node elected Master")
                    elif (
                        result.session_id.master_node_id == self.node_id
                        and self.master is None
                    ):
                        logger.info("Node elected Master - promoting self")
                        self.master = Master(
                            self.node_id,
                            result.session_id,
                            event_sender=self.event_router.sender(),
                            global_event_sender=self.router.sender(topics.GLOBAL_EVENTS),
                            local_event_receiver=self.router.receiver(topics.LOCAL_EVENTS),
                            command_receiver=self.router.receiver(topics.COMMANDS),
                            download_command_sender=self.router.sender(
                                topics.DOWNLOAD_COMMANDS
                            ),
                        )
                        self._tg.start_soon(self.master.run)
                    elif (
                        result.session_id.master_node_id != self.node_id
                        and self.master is not None
                    ):
                        logger.info(
                            f"Node {result.session_id.master_node_id} elected master - demoting self"
                        )
                        await self.master.shutdown()
                        self.master = None
                    else:
                        logger.info(
                            f"Node {result.session_id.master_node_id} elected master"
                        )
                if result.is_new_master:
                    # Re-create session-bound services against the fresh
                    # event router (only those that already existed).
                    if self.download_coordinator:
                        self.download_coordinator.shutdown()
                        self.download_coordinator = DownloadCoordinator(
                            self.node_id,
                            exo_shard_downloader(offline=self.offline),
                            event_sender=self.event_router.sender(),
                            download_command_receiver=self.router.receiver(
                                topics.DOWNLOAD_COMMANDS
                            ),
                            offline=self.offline,
                        )
                        self._tg.start_soon(self.download_coordinator.run)
                    if self.worker:
                        self.worker.shutdown()
                        # TODO: add profiling etc to resource monitor
                        self.worker = Worker(
                            self.node_id,
                            event_receiver=self.event_router.receiver(),
                            event_sender=self.event_router.sender(),
                            command_sender=self.router.sender(topics.COMMANDS),
                            download_command_sender=self.router.sender(
                                topics.DOWNLOAD_COMMANDS
                            ),
                        )
                        self._tg.start_soon(self.worker.run)
                    if self.api:
                        self.api.reset(result.won_clock, self.event_router.receiver())
                else:
                    if self.api:
                        self.api.unpause(result.won_clock)
def main():
    """Process entry point: prepare the environment, then run a Node.

    Raises the open-file limit, forces the multiprocessing spawn start
    method, configures logging/env overrides from CLI args, and drives the
    node's lifecycle via anyio until it exits or fails.
    """
    args = Args.parse()
    # Raise the RLIMIT_NOFILE soft limit toward 65535 (sockets, shard
    # files), never exceeding the hard limit the OS grants.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    target = min(max(soft, 65535), hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))
    # All multiprocessing children use the spawn start method.
    mp.set_start_method("spawn", force=True)
    # TODO: Refactor the current verbosity system
    logger_setup(EXO_LOG, args.verbosity)
    logger.info("Starting EXO")
    logger.info(f"EXO_LIBP2P_NAMESPACE: {os.getenv('EXO_LIBP2P_NAMESPACE')}")
    if args.offline:
        logger.info("Running in OFFLINE mode — no internet checks, local models only")
    # Set FAST_SYNCH override env var for runner subprocesses
    if args.fast_synch is True:
        os.environ["EXO_FAST_SYNCH"] = "on"
        logger.info("FAST_SYNCH forced ON")
    elif args.fast_synch is False:
        os.environ["EXO_FAST_SYNCH"] = "off"
        logger.info("FAST_SYNCH forced OFF")
    node = anyio.run(Node.create, args)
    try:
        anyio.run(node.run)
    except BaseException as exception:
        # Log with full traceback, then re-raise so the exit code reflects
        # the failure.
        logger.opt(exception=exception).critical(
            "EXO terminated due to unhandled exception"
        )
        raise
    finally:
        logger.info("EXO Shutdown complete")
        logger_cleanup()
class Args(CamelCaseModel):
    """Command-line options, validated through the pydantic model."""

    verbosity: int = 0
    force_master: bool = False
    spawn_api: bool = False
    api_port: PositiveInt = 52415
    # NOTE(review): no CLI flag sets tb_only below; parse() always leaves it
    # at this default — confirm whether the option was removed intentionally.
    tb_only: bool = False
    no_worker: bool = False
    no_downloads: bool = False
    offline: bool = os.getenv("EXO_OFFLINE", "false").lower() == "true"
    fast_synch: bool | None = None  # None = auto, True = force on, False = force off

    @classmethod
    def parse(cls) -> Self:
        """Parse sys.argv into a validated Args instance."""
        parser = argparse.ArgumentParser(prog="EXO")
        default_verbosity = 0
        # -q pins verbosity to -1; -v/-vv/... counts upward from 0.
        parser.add_argument(
            "-q",
            "--quiet",
            action="store_const",
            const=-1,
            dest="verbosity",
            default=default_verbosity,
        )
        parser.add_argument(
            "-v",
            "--verbose",
            action="count",
            dest="verbosity",
            default=default_verbosity,
        )
        parser.add_argument(
            "-m",
            "--force-master",
            action="store_true",
            dest="force_master",
        )
        # store_false: argparse defaults spawn_api to True, and --no-api
        # clears it (overriding the pydantic field default above).
        parser.add_argument(
            "--no-api",
            action="store_false",
            dest="spawn_api",
        )
        parser.add_argument(
            "--api-port",
            type=int,
            dest="api_port",
            default=52415,
        )
        parser.add_argument(
            "--no-worker",
            action="store_true",
        )
        parser.add_argument(
            "--no-downloads",
            action="store_true",
            help="Disable the download coordinator (node won't download models)",
        )
        parser.add_argument(
            "--offline",
            action="store_true",
            default=os.getenv("EXO_OFFLINE", "false").lower() == "true",
            help="Run in offline/air-gapped mode: skip internet checks, use only pre-staged local models",
        )
        # --fast-synch / --no-fast-synch are mutually exclusive; absent both,
        # fast_synch stays None (auto).
        fast_synch_group = parser.add_mutually_exclusive_group()
        fast_synch_group.add_argument(
            "--fast-synch",
            action="store_true",
            dest="fast_synch",
            default=None,
            help="Force MLX FAST_SYNCH on (for JACCL backend)",
        )
        fast_synch_group.add_argument(
            "--no-fast-synch",
            action="store_false",
            dest="fast_synch",
            help="Force MLX FAST_SYNCH off",
        )
        args = parser.parse_args()
        return cls(**vars(args))  # pyright: ignore[reportAny] - We are intentionally validating here, we can't do it statically
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/main.py",
"license": "Apache License 2.0",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/api.py | import base64
import contextlib
import json
import random
import time
from collections.abc import AsyncGenerator, Awaitable, Callable, Iterator
from datetime import datetime, timezone
from http import HTTPStatus
from pathlib import Path
from typing import Annotated, Literal, cast
from uuid import uuid4
import anyio
from anyio import BrokenResourceError
from fastapi import FastAPI, File, Form, HTTPException, Query, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from hypercorn.asyncio import serve # pyright: ignore[reportUnknownVariableType]
from hypercorn.config import Config
from hypercorn.typing import ASGIFramework
from loguru import logger
from exo.master.adapters.chat_completions import (
chat_request_to_text_generation,
collect_chat_response,
generate_chat_stream,
)
from exo.master.adapters.claude import (
claude_request_to_text_generation,
collect_claude_response,
generate_claude_stream,
)
from exo.master.adapters.ollama import (
collect_ollama_chat_response,
collect_ollama_generate_response,
generate_ollama_chat_stream,
generate_ollama_generate_stream,
ollama_generate_request_to_text_generation,
ollama_request_to_text_generation,
)
from exo.master.adapters.responses import (
collect_responses_response,
generate_responses_stream,
responses_request_to_text_generation,
)
from exo.master.event_log import DiskEventLog
from exo.master.image_store import ImageStore
from exo.master.placement import place_instance as get_instance_placements
from exo.shared.apply import apply
from exo.shared.constants import (
DASHBOARD_DIR,
EXO_CACHE_HOME,
EXO_EVENT_LOG_DIR,
EXO_IMAGE_CACHE_DIR,
EXO_MAX_CHUNK_SIZE,
EXO_TRACING_CACHE_DIR,
)
from exo.shared.election import ElectionMessage
from exo.shared.logging import InterceptLogger
from exo.shared.models.model_cards import (
ModelCard,
ModelId,
delete_custom_card,
get_model_cards,
is_custom_card,
)
from exo.shared.tracing import TraceEvent, compute_stats, export_trace, load_trace_file
from exo.shared.types.api import (
AddCustomModelParams,
AdvancedImageParams,
BenchChatCompletionRequest,
BenchChatCompletionResponse,
BenchImageGenerationResponse,
BenchImageGenerationTaskParams,
ChatCompletionChoice,
ChatCompletionMessage,
ChatCompletionRequest,
ChatCompletionResponse,
CreateInstanceParams,
CreateInstanceResponse,
DeleteDownloadResponse,
DeleteInstanceResponse,
DeleteTracesRequest,
DeleteTracesResponse,
ErrorInfo,
ErrorResponse,
FinishReason,
GenerationStats,
HuggingFaceSearchResult,
ImageData,
ImageEditsTaskParams,
ImageGenerationResponse,
ImageGenerationStats,
ImageGenerationTaskParams,
ImageListItem,
ImageListResponse,
ImageSize,
ModelList,
ModelListModel,
PlaceInstanceParams,
PlacementPreview,
PlacementPreviewResponse,
StartDownloadParams,
StartDownloadResponse,
ToolCall,
TraceCategoryStats,
TraceEventResponse,
TraceListItem,
TraceListResponse,
TraceRankStats,
TraceResponse,
TraceStatsResponse,
normalize_image_size,
)
from exo.shared.types.chunks import (
ErrorChunk,
ImageChunk,
InputImageChunk,
PrefillProgressChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.claude_api import (
ClaudeMessagesRequest,
ClaudeMessagesResponse,
)
from exo.shared.types.commands import (
Command,
CreateInstance,
DeleteDownload,
DeleteInstance,
DownloadCommand,
ForwarderCommand,
ForwarderDownloadCommand,
ImageEdits,
ImageGeneration,
PlaceInstance,
SendInputChunk,
StartDownload,
TaskCancelled,
TaskFinished,
TextGeneration,
)
from exo.shared.types.common import CommandId, Id, NodeId, SystemId
from exo.shared.types.events import (
ChunkGenerated,
Event,
IndexedEvent,
TracesMerged,
)
from exo.shared.types.memory import Memory
from exo.shared.types.ollama_api import (
OllamaChatRequest,
OllamaChatResponse,
OllamaGenerateRequest,
OllamaGenerateResponse,
OllamaModelDetails,
OllamaModelTag,
OllamaPsModel,
OllamaPsResponse,
OllamaShowRequest,
OllamaShowResponse,
OllamaTagsResponse,
)
from exo.shared.types.openai_responses import (
ResponsesRequest,
ResponsesResponse,
)
from exo.shared.types.state import State
from exo.shared.types.worker.downloads import DownloadCompleted
from exo.shared.types.worker.instances import Instance, InstanceId, InstanceMeta
from exo.shared.types.worker.shards import Sharding
from exo.utils.banner import print_startup_banner
from exo.utils.channels import Receiver, Sender, channel
from exo.utils.task_group import TaskGroup
# Directory where the API persists its own copy of the event stream.
_API_EVENT_LOG_DIR = EXO_EVENT_LOG_DIR / "api"
# Marker file whose existence records that first-run onboarding finished.
ONBOARDING_COMPLETE_FILE = EXO_CACHE_HOME / "onboarding_complete"
def _format_to_content_type(image_format: Literal["png", "jpeg", "webp"] | None) -> str:
return f"image/{image_format or 'png'}"
def _ensure_seed(params: AdvancedImageParams | None) -> AdvancedImageParams:
    """Return params that are guaranteed to carry a seed.

    A fixed seed keeps image generation consistent across distributed nodes;
    one is drawn at random only when the caller did not supply any.
    """
    if params is not None and params.seed is not None:
        return params
    fresh_seed = random.randint(0, 2**32 - 1)
    if params is None:
        return AdvancedImageParams(seed=fresh_seed)
    return params.model_copy(update={"seed": fresh_seed})
class API:
def __init__(
    self,
    node_id: NodeId,
    *,
    port: int,
    event_receiver: Receiver[IndexedEvent],
    command_sender: Sender[ForwarderCommand],
    download_command_sender: Sender[ForwarderDownloadCommand],
    # This lets us pause the API if an election is running
    election_receiver: Receiver[ElectionMessage],
) -> None:
    """Assemble the FastAPI app: middleware, CORS, routes, dashboard mount,
    and the in-memory session state and per-command streaming queues.
    """
    self.state = State()
    self._event_log = DiskEventLog(_API_EVENT_LOG_DIR)
    self._system_id = SystemId()
    self.command_sender = command_sender
    self.download_command_sender = download_command_sender
    self.event_receiver = event_receiver
    self.election_receiver = election_receiver
    self.node_id: NodeId = node_id
    self.last_completed_election: int = 0
    self.port = port
    # paused/paused_ev gate request handling while an election runs.
    self.paused: bool = False
    self.paused_ev: anyio.Event = anyio.Event()
    self.app = FastAPI()

    @self.app.middleware("http")
    async def _log_requests(  # pyright: ignore[reportUnusedFunction]
        request: Request,
        call_next: Callable[[Request], Awaitable[StreamingResponse]],
    ) -> StreamingResponse:
        # Debug-log every incoming request before dispatching it.
        logger.debug(f"API request: {request.method} {request.url.path}")
        return await call_next(request)

    self._setup_exception_handlers()
    self._setup_cors()
    self._setup_routes()
    # Serve the dashboard static bundle at the root path.
    self.app.mount(
        "/",
        StaticFiles(
            directory=DASHBOARD_DIR,
            html=True,
        ),
        name="dashboard",
    )
    # Per-command fan-out queues for streamed generation results.
    self._text_generation_queues: dict[
        CommandId,
        Sender[TokenChunk | ErrorChunk | ToolCallChunk | PrefillProgressChunk],
    ] = {}
    self._image_generation_queues: dict[
        CommandId, Sender[ImageChunk | ErrorChunk]
    ] = {}
    self._image_store = ImageStore(EXO_IMAGE_CACHE_DIR)
    self._tg: TaskGroup = TaskGroup()
def reset(self, result_clock: int, event_receiver: Receiver[IndexedEvent]):
    """Discard all session state and adopt a fresh event stream.

    Called when a new master is elected: the previous event log, state and
    pending generation queues belonged to the old session.
    """
    logger.info("Resetting API State")
    self._event_log.close()
    self._event_log = DiskEventLog(_API_EVENT_LOG_DIR)
    self.state = State()
    self._system_id = SystemId()
    self._text_generation_queues = {}
    self._image_generation_queues = {}
    self.unpause(result_clock)
    self.event_receiver.close()
    self.event_receiver = event_receiver
    self._tg.start_soon(self._apply_state)
def unpause(self, result_clock: int):
    """Resume request handling once an election has completed.

    Records the clock of the finished election, clears the paused flag,
    wakes everything waiting on the current pause event, and arms a fresh
    event for the next election cycle.
    """
    logger.info("Unpausing API")
    self.last_completed_election = result_clock
    self.paused = False
    finished_event = self.paused_ev
    self.paused_ev = anyio.Event()
    finished_event.set()
def _setup_exception_handlers(self) -> None:
    # Route FastAPI HTTPExceptions through our JSON error envelope.
    self.app.exception_handler(HTTPException)(self.http_exception_handler)
async def http_exception_handler(
    self, _: Request, exc: HTTPException
) -> JSONResponse:
    """Render an HTTPException as a JSON error envelope (message/type/code)."""
    status = exc.status_code
    payload = ErrorResponse(
        error=ErrorInfo(
            message=exc.detail,
            type=HTTPStatus(status).phrase,
            code=status,
        )
    )
    return JSONResponse(payload.model_dump(), status_code=status)
def _setup_cors(self) -> None:
    # Fully permissive CORS: the dashboard and third-party clients may call
    # the API from any origin.
    self.app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
def _setup_routes(self) -> None:
    """Register every HTTP route on the FastAPI app."""
    self.app.get("/node_id")(lambda: self.node_id)
    # Instance lifecycle / placement
    self.app.post("/instance")(self.create_instance)
    self.app.post("/place_instance")(self.place_instance)
    self.app.get("/instance/placement")(self.get_placement)
    self.app.get("/instance/previews")(self.get_placement_previews)
    self.app.get("/instance/{instance_id}")(self.get_instance)
    self.app.delete("/instance/{instance_id}")(self.delete_instance)
    # Model catalogue
    self.app.get("/models")(self.get_models)
    self.app.get("/v1/models")(self.get_models)
    self.app.post("/models/add")(self.add_custom_model)
    self.app.delete("/models/custom/{model_id:path}")(self.delete_custom_model)
    self.app.get("/models/search")(self.search_models)
    # OpenAI-compatible chat/images plus bench variants
    self.app.post("/v1/chat/completions", response_model=None)(
        self.chat_completions
    )
    self.app.post("/bench/chat/completions")(self.bench_chat_completions)
    self.app.post("/v1/images/generations", response_model=None)(
        self.image_generations
    )
    self.app.post("/bench/images/generations")(self.bench_image_generations)
    self.app.post("/v1/images/edits", response_model=None)(self.image_edits)
    self.app.post("/bench/images/edits")(self.bench_image_edits)
    self.app.get("/images")(self.list_images)
    self.app.get("/images/{image_id}")(self.get_image)
    self.app.post("/v1/messages", response_model=None)(self.claude_messages)
    self.app.post("/v1/responses", response_model=None)(self.openai_responses)
    # Ollama API
    self.app.head("/ollama/")(self.ollama_version)
    self.app.head("/ollama/api/version")(self.ollama_version)
    self.app.post("/ollama/api/chat", response_model=None)(self.ollama_chat)
    self.app.post("/ollama/api/api/chat", response_model=None)(self.ollama_chat)
    self.app.post("/ollama/api/v1/chat", response_model=None)(self.ollama_chat)
    self.app.post("/ollama/api/generate", response_model=None)(self.ollama_generate)
    self.app.get("/ollama/api/tags")(self.ollama_tags)
    self.app.get("/ollama/api/api/tags")(self.ollama_tags)
    self.app.get("/ollama/api/v1/tags")(self.ollama_tags)
    self.app.post("/ollama/api/show")(self.ollama_show)
    self.app.get("/ollama/api/ps")(self.ollama_ps)
    self.app.get("/ollama/api/version")(self.ollama_version)
    # State / events / downloads / traces / onboarding
    self.app.get("/state")(lambda: self.state)
    self.app.get("/events")(self.stream_events)
    self.app.post("/download/start")(self.start_download)
    self.app.delete("/download/{node_id}/{model_id:path}")(self.delete_download)
    self.app.get("/v1/traces")(self.list_traces)
    self.app.post("/v1/traces/delete")(self.delete_traces)
    self.app.get("/v1/traces/{task_id}")(self.get_trace)
    self.app.get("/v1/traces/{task_id}/stats")(self.get_trace_stats)
    self.app.get("/v1/traces/{task_id}/raw")(self.get_trace_raw)
    self.app.get("/onboarding")(self.get_onboarding)
    self.app.post("/onboarding")(self.complete_onboarding)
async def place_instance(self, payload: PlaceInstanceParams):
    """Ask the master to compute a placement and create the instance."""
    command = PlaceInstance(
        model_card=await ModelCard.load(payload.model_id),
        sharding=payload.sharding,
        instance_meta=payload.instance_meta,
        min_nodes=payload.min_nodes,
    )
    await self._send(command)
    # The command is processed asynchronously; only receipt is acknowledged.
    return CreateInstanceResponse(
        message="Command received.",
        command_id=command.command_id,
        model_card=command.model_card,
    )
async def create_instance(
    self, payload: CreateInstanceParams
) -> CreateInstanceResponse:
    """Create a pre-placed instance after a cluster-wide memory check.

    Raises:
        HTTPException: 400 when the model's storage size exceeds the memory
            currently available across the cluster.
    """
    instance = payload.instance
    model_card = await ModelCard.load(instance.shard_assignments.model_id)
    required_memory = model_card.storage_size
    available_memory = self._calculate_total_available_memory()
    if required_memory > available_memory:
        raise HTTPException(
            status_code=400,
            detail=f"Insufficient memory to create instance. Required: {required_memory.in_gb:.1f}GB, Available: {available_memory.in_gb:.1f}GB",
        )
    command = CreateInstance(
        instance=instance,
    )
    await self._send(command)
    return CreateInstanceResponse(
        message="Command received.",
        command_id=command.command_id,
        model_card=model_card,
    )
async def get_placement(
    self,
    model_id: ModelId,
    sharding: Sharding = Sharding.Pipeline,
    instance_meta: InstanceMeta = InstanceMeta.MlxRing,
    min_nodes: int = 1,
) -> Instance:
    """Dry-run placement: return the instance that WOULD be created.

    Raises:
        HTTPException: 400 when the placement engine rejects the request;
            500 when the result does not contain exactly one new instance.
    """
    model_card = await ModelCard.load(model_id)
    try:
        placements = get_instance_placements(
            PlaceInstance(
                model_card=model_card,
                sharding=sharding,
                instance_meta=instance_meta,
                min_nodes=min_nodes,
            ),
            node_memory=self.state.node_memory,
            node_network=self.state.node_network,
            topology=self.state.topology,
            current_instances=self.state.instances,
        )
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    # The engine returns existing plus new instances; isolate the new one.
    current_ids = set(self.state.instances.keys())
    new_ids = [
        instance_id for instance_id in placements if instance_id not in current_ids
    ]
    if len(new_ids) != 1:
        raise HTTPException(
            status_code=500,
            detail="Expected exactly one new instance from placement",
        )
    return placements[new_ids[0]]
async def get_placement_previews(
    self,
    model_id: ModelId,
    node_ids: Annotated[list[NodeId] | None, Query()] = None,
) -> PlacementPreviewResponse:
    """Preview every viable placement configuration for a model.

    Tries each (sharding, instance_meta, min_nodes) combination and records
    either the resulting instance or the failure reason, deduplicated per
    resulting node count (failures use node count 0).

    Raises:
        HTTPException: 400 when the model card cannot be loaded.
    """
    seen: set[tuple[ModelId, Sharding, InstanceMeta, int]] = set()
    previews: list[PlacementPreview] = []
    required_nodes = set(node_ids) if node_ids else None
    # No known nodes yet: nothing can be placed.
    if len(list(self.state.topology.list_nodes())) == 0:
        return PlacementPreviewResponse(previews=[])
    try:
        model_card = await ModelCard.load(model_id)
    except Exception as exc:
        raise HTTPException(
            status_code=400, detail=f"Failed to load model card: {exc}"
        ) from exc
    # Every sharding x backend x node-count combination up to cluster size.
    instance_combinations: list[tuple[Sharding, InstanceMeta, int]] = []
    for sharding in (Sharding.Pipeline, Sharding.Tensor):
        for instance_meta in (InstanceMeta.MlxRing, InstanceMeta.MlxJaccl):
            instance_combinations.extend(
                [
                    (sharding, instance_meta, i)
                    for i in range(
                        1, len(list(self.state.topology.list_nodes())) + 1
                    )
                ]
            )
    # TODO: PDD
    # instance_combinations.append((Sharding.PrefillDecodeDisaggregation, InstanceMeta.MlxRing, 1))
    for sharding, instance_meta, min_nodes in instance_combinations:
        try:
            placements = get_instance_placements(
                PlaceInstance(
                    model_card=model_card,
                    sharding=sharding,
                    instance_meta=instance_meta,
                    min_nodes=min_nodes,
                ),
                node_memory=self.state.node_memory,
                node_network=self.state.node_network,
                topology=self.state.topology,
                current_instances=self.state.instances,
                required_nodes=required_nodes,
            )
        except ValueError as exc:
            # Infeasible combination: record the error once.
            if (model_card.model_id, sharding, instance_meta, 0) not in seen:
                previews.append(
                    PlacementPreview(
                        model_id=model_card.model_id,
                        sharding=sharding,
                        instance_meta=instance_meta,
                        instance=None,
                        error=str(exc),
                    )
                )
                seen.add((model_card.model_id, sharding, instance_meta, 0))
            continue
        current_ids = set(self.state.instances.keys())
        new_instances = [
            instance
            for instance_id, instance in placements.items()
            if instance_id not in current_ids
        ]
        if len(new_instances) != 1:
            if (model_card.model_id, sharding, instance_meta, 0) not in seen:
                previews.append(
                    PlacementPreview(
                        model_id=model_card.model_id,
                        sharding=sharding,
                        instance_meta=instance_meta,
                        instance=None,
                        error="Expected exactly one new instance from placement",
                    )
                )
                seen.add((model_card.model_id, sharding, instance_meta, 0))
            continue
        instance = new_instances[0]
        shard_assignments = instance.shard_assignments
        placement_node_ids = list(shard_assignments.node_to_runner.keys())
        # Estimate per-node memory impact: split storage size evenly, with
        # the remainder spread one byte at a time over sorted node ids.
        memory_delta_by_node: dict[str, int] = {}
        if placement_node_ids:
            total_bytes = model_card.storage_size.in_bytes
            per_node = total_bytes // len(placement_node_ids)
            remainder = total_bytes % len(placement_node_ids)
            for index, node_id in enumerate(sorted(placement_node_ids, key=str)):
                extra = 1 if index < remainder else 0
                memory_delta_by_node[str(node_id)] = per_node + extra
        if (
            model_card.model_id,
            sharding,
            instance_meta,
            len(placement_node_ids),
        ) not in seen:
            previews.append(
                PlacementPreview(
                    model_id=model_card.model_id,
                    sharding=sharding,
                    instance_meta=instance_meta,
                    instance=instance,
                    memory_delta_by_node=memory_delta_by_node or None,
                    error=None,
                )
            )
            seen.add(
                (
                    model_card.model_id,
                    sharding,
                    instance_meta,
                    len(placement_node_ids),
                )
            )
    return PlacementPreviewResponse(previews=previews)
def get_instance(self, instance_id: InstanceId) -> Instance:
    """Return the instance with the given id, or fail with 404."""
    instance = self.state.instances.get(instance_id)
    if instance is None:
        raise HTTPException(status_code=404, detail="Instance not found")
    return instance
async def delete_instance(self, instance_id: InstanceId) -> DeleteInstanceResponse:
    """Request deletion of an existing instance.

    Raises:
        HTTPException: 404 when the instance id is unknown.
    """
    if instance_id not in self.state.instances:
        raise HTTPException(status_code=404, detail="Instance not found")
    command = DeleteInstance(
        instance_id=instance_id,
    )
    await self._send(command)
    return DeleteInstanceResponse(
        message="Command received.",
        command_id=command.command_id,
        instance_id=instance_id,
    )
async def _token_chunk_stream(
    self, command_id: CommandId
) -> AsyncGenerator[
    TokenChunk | ErrorChunk | ToolCallChunk | PrefillProgressChunk, None
]:
    """Yield chunks for a given command until completion.

    This is the internal low-level stream used by all API adapters.

    Registers a fresh queue under command_id, relays chunks until one
    carries a finish_reason, and always sends TaskFinished (plus, on
    cancellation, TaskCancelled) before unregistering the queue.
    """
    try:
        self._text_generation_queues[command_id], recv = channel[
            TokenChunk | ErrorChunk | ToolCallChunk | PrefillProgressChunk
        ]()
        with recv as token_chunks:
            async for chunk in token_chunks:
                yield chunk
                # Prefill progress updates never terminate the stream.
                if isinstance(chunk, PrefillProgressChunk):
                    continue
                if chunk.finish_reason is not None:
                    break
    except anyio.get_cancelled_exc_class():
        # Request was cancelled (e.g. client disconnect): tell the cluster
        # to stop generating. Shielded so the send survives cancellation.
        command = TaskCancelled(cancelled_command_id=command_id)
        with anyio.CancelScope(shield=True):
            await self.command_sender.send(
                ForwarderCommand(origin=self._system_id, command=command)
            )
        raise
    finally:
        await self._send(TaskFinished(finished_command_id=command_id))
        if command_id in self._text_generation_queues:
            del self._text_generation_queues[command_id]
async def _collect_text_generation_with_stats(
    self, command_id: CommandId
) -> BenchChatCompletionResponse:
    """Drain a generation stream and fold it into a single bench response.

    Raises:
        HTTPException: 500 when the stream reports an error chunk.
    """
    text_parts: list[str] = []
    tool_calls: list[ToolCall] = []
    model: ModelId | None = None
    finish_reason: FinishReason | None = None
    stats: GenerationStats | None = None
    async for chunk in self._token_chunk_stream(command_id):
        if isinstance(chunk, PrefillProgressChunk):
            continue
        if chunk.finish_reason == "error":
            raise HTTPException(
                status_code=500,
                detail=chunk.error_message or "Internal server error",
            )
        # The model id is taken from the first non-progress chunk.
        if model is None:
            model = chunk.model
        if isinstance(chunk, TokenChunk):
            text_parts.append(chunk.text)
        if isinstance(chunk, ToolCallChunk):
            tool_calls.extend(
                ToolCall(
                    id=str(uuid4()),
                    index=i,
                    function=tool,
                )
                for i, tool in enumerate(chunk.tool_calls)
            )
        # Keep the freshest stats a chunk carried.
        stats = chunk.stats or stats
        if chunk.finish_reason is not None:
            finish_reason = chunk.finish_reason
    combined_text = "".join(text_parts)
    # At least one non-progress chunk must have arrived.
    assert model is not None
    resp = BenchChatCompletionResponse(
        id=command_id,
        created=int(time.time()),
        model=model,
        choices=[
            ChatCompletionChoice(
                index=0,
                message=ChatCompletionMessage(
                    role="assistant",
                    content=combined_text,
                    tool_calls=tool_calls if tool_calls else None,
                ),
                finish_reason=finish_reason,
            )
        ],
        generation_stats=stats,
    )
    return resp
async def _trigger_notify_user_to_download_model(self, model_id: ModelId) -> None:
    """Placeholder hook: should eventually surface a download prompt for model_id."""
    warning = (
        "TODO: we should send a notification to the user to download the model"
    )
    logger.warning(warning)
async def chat_completions(
    self, payload: ChatCompletionRequest
) -> ChatCompletionResponse | StreamingResponse:
    """OpenAI Chat Completions API - adapter.

    Converts the request into an internal TextGeneration command and either
    streams SSE deltas (payload.stream) or collects one JSON body.

    Raises:
        HTTPException: 404 when no instance serves the requested model.
    """
    task_params = chat_request_to_text_generation(payload)
    resolved_model = await self._resolve_and_validate_text_model(
        ModelId(task_params.model)
    )
    task_params = task_params.model_copy(update={"model": resolved_model})
    command = TextGeneration(task_params=task_params)
    await self._send(command)
    if payload.stream:
        return StreamingResponse(
            generate_chat_stream(
                command.command_id,
                self._token_chunk_stream(command.command_id),
            ),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "close",
                "X-Accel-Buffering": "no",
            },
        )
    else:
        # Non-streaming: still consumed as a stream internally, collected
        # into a single JSON response body.
        return StreamingResponse(
            collect_chat_response(
                command.command_id,
                self._token_chunk_stream(command.command_id),
            ),
            media_type="application/json",
        )
async def bench_chat_completions(
    self, payload: BenchChatCompletionRequest
) -> BenchChatCompletionResponse:
    """Non-streaming chat completion that also reports generation statistics."""
    params = chat_request_to_text_generation(payload)
    resolved = await self._resolve_and_validate_text_model(ModelId(params.model))
    # Benchmarks never stream and are flagged so stats get collected.
    params = params.model_copy(
        update={"model": resolved, "stream": False, "bench": True}
    )
    command = TextGeneration(task_params=params)
    await self._send(command)
    return await self._collect_text_generation_with_stats(command.command_id)
    async def _resolve_and_validate_text_model(self, model_id: ModelId) -> ModelId:
        """Validate a text model exists and return the resolved model ID.

        Raises HTTPException 404 if no instance is found for the model.
        """
        # Unlike the image path, no ModelCard alias resolution happens here:
        # an instance must already serve this exact model id.
        if not any(
            instance.shard_assignments.model_id == model_id
            for instance in self.state.instances.values()
        ):
            await self._trigger_notify_user_to_download_model(model_id)
            raise HTTPException(
                status_code=404,
                detail=f"No instance found for model {model_id}",
            )
        return model_id
    async def _validate_image_model(self, model: ModelId) -> ModelId:
        """Validate model exists and return resolved model ID.

        Raises HTTPException 404 if no instance is found for the model.
        """
        # Resolve the requested id through its model card first, then require
        # a running instance for the resolved id.
        model_card = await ModelCard.load(model)
        resolved_model = model_card.model_id
        if not any(
            instance.shard_assignments.model_id == resolved_model
            for instance in self.state.instances.values()
        ):
            await self._trigger_notify_user_to_download_model(resolved_model)
            raise HTTPException(
                status_code=404, detail=f"No instance found for model {resolved_model}"
            )
        return resolved_model
def stream_events(self) -> StreamingResponse:
def _generate_json_array(events: Iterator[Event]) -> Iterator[str]:
yield "["
first = True
for event in events:
if not first:
yield ","
first = False
yield event.model_dump_json()
yield "]"
return StreamingResponse(
_generate_json_array(self._event_log.read_all()),
media_type="application/json",
)
    async def get_image(self, image_id: str) -> FileResponse:
        """Serve a previously stored generated image by id.

        Raises HTTPException 404 if the id is unknown or the image has expired.
        """
        stored = self._image_store.get(Id(image_id))
        if stored is None:
            raise HTTPException(status_code=404, detail="Image not found or expired")
        return FileResponse(path=stored.file_path, media_type=stored.content_type)
    async def list_images(self, request: Request) -> ImageListResponse:
        """List all stored images."""
        stored_images = self._image_store.list_images()
        return ImageListResponse(
            data=[
                ImageListItem(
                    image_id=img.image_id,
                    # Absolute URL, derived from the caller's Host header.
                    url=self._build_image_url(request, img.image_id),
                    content_type=img.content_type,
                    expires_at=img.expires_at,
                )
                for img in stored_images
            ]
        )
def _build_image_url(self, request: Request, image_id: Id) -> str:
host = request.headers.get("host", f"localhost:{self.port}")
scheme = "https" if request.url.scheme == "https" else "http"
return f"{scheme}://{host}/v1/images/{image_id}"
    async def image_generations(
        self, request: Request, payload: ImageGenerationTaskParams
    ) -> ImageGenerationResponse | StreamingResponse:
        """Handle image generation requests.

        When stream=True and partial_images > 0, returns a StreamingResponse
        with SSE-formatted events for partial and final images.
        Raises HTTPException 404 (via model validation) if the model has no
        running instance.
        """
        # Resolve the model and make sure a seed is set so generation is
        # reproducible even when the client did not pick one.
        payload = payload.model_copy(
            update={
                "model": await self._validate_image_model(ModelId(payload.model)),
                "advanced_params": _ensure_seed(payload.advanced_params),
            }
        )
        command = ImageGeneration(
            task_params=payload,
        )
        await self._send(command)
        # Check if streaming is requested
        if payload.stream and payload.partial_images and payload.partial_images > 0:
            return StreamingResponse(
                self._generate_image_stream(
                    request=request,
                    command_id=command.command_id,
                    num_images=payload.n or 1,
                    response_format=payload.response_format or "b64_json",
                ),
                media_type="text/event-stream",
            )
        # Non-streaming: collect all image chunks
        return await self._collect_image_generation(
            request=request,
            command_id=command.command_id,
            num_images=payload.n or 1,
            response_format=payload.response_format or "b64_json",
        )
    async def _generate_image_stream(
        self,
        request: Request,
        command_id: CommandId,
        num_images: int,
        response_format: str,
    ) -> AsyncGenerator[str, None]:
        """Generate SSE stream of partial and final images.

        Reassembles base64 image data that arrives split across multiple
        ImageChunk messages, emitting one SSE event per completed partial or
        final image and '[DONE]' once num_images final images were sent.
        On cancellation, a shielded TaskCancelled is forwarded upstream.
        """
        # Track chunks: {(image_index, is_partial): {chunk_index: data}}
        image_chunks: dict[tuple[int, bool], dict[int, str]] = {}
        image_total_chunks: dict[tuple[int, bool], int] = {}
        image_metadata: dict[tuple[int, bool], tuple[int | None, int | None]] = {}
        images_complete = 0
        try:
            # Register a queue so _apply_state routes this command's chunks here.
            self._image_generation_queues[command_id], recv = channel[
                ImageChunk | ErrorChunk
            ]()
            with recv as chunks:
                async for chunk in chunks:
                    if chunk.finish_reason == "error":
                        # Errors terminate the whole stream immediately.
                        error_response = ErrorResponse(
                            error=ErrorInfo(
                                message=chunk.error_message or "Internal server error",
                                type="InternalServerError",
                                code=500,
                            )
                        )
                        yield f"data: {error_response.model_dump_json()}\n\n"
                        yield "data: [DONE]\n\n"
                        return
                    key = (chunk.image_index, chunk.is_partial)
                    if key not in image_chunks:
                        # First chunk for this image: record totals/metadata.
                        image_chunks[key] = {}
                        image_total_chunks[key] = chunk.total_chunks
                        image_metadata[key] = (
                            chunk.partial_index,
                            chunk.total_partials,
                        )
                    image_chunks[key][chunk.chunk_index] = chunk.data
                    # Check if this image is complete
                    if len(image_chunks[key]) == image_total_chunks[key]:
                        # Chunk indices are assumed contiguous 0..total_chunks-1.
                        full_data = "".join(
                            image_chunks[key][i] for i in range(len(image_chunks[key]))
                        )
                        partial_idx, total_partials = image_metadata[key]
                        if chunk.is_partial:
                            # Yield partial image event (always use b64_json for partials)
                            event_data = {
                                "type": "partial",
                                "image_index": chunk.image_index,
                                "partial_index": partial_idx,
                                "total_partials": total_partials,
                                "format": str(chunk.format),
                                "data": {
                                    "b64_json": full_data
                                    if response_format == "b64_json"
                                    else None,
                                },
                            }
                            yield f"data: {json.dumps(event_data)}\n\n"
                        else:
                            # Final image
                            if response_format == "url":
                                # Persist the image and hand back a URL instead
                                # of inlining the base64 payload.
                                image_bytes = base64.b64decode(full_data)
                                content_type = _format_to_content_type(chunk.format)
                                stored = self._image_store.store(
                                    image_bytes, content_type
                                )
                                url = self._build_image_url(request, stored.image_id)
                                event_data = {
                                    "type": "final",
                                    "image_index": chunk.image_index,
                                    "format": str(chunk.format),
                                    "data": {"url": url},
                                }
                            else:
                                event_data = {
                                    "type": "final",
                                    "image_index": chunk.image_index,
                                    "format": str(chunk.format),
                                    "data": {"b64_json": full_data},
                                }
                            yield f"data: {json.dumps(event_data)}\n\n"
                            images_complete += 1
                            if images_complete >= num_images:
                                yield "data: [DONE]\n\n"
                                break
                        # Clean up completed image chunks
                        del image_chunks[key]
                        del image_total_chunks[key]
                        del image_metadata[key]
        except anyio.get_cancelled_exc_class():
            # Client disconnected (or task cancelled): tell the cluster to stop
            # generating. Shielded so the send survives the cancellation.
            command = TaskCancelled(cancelled_command_id=command_id)
            with anyio.CancelScope(shield=True):
                await self.command_sender.send(
                    ForwarderCommand(origin=self._system_id, command=command)
                )
            raise
        finally:
            await self._send(TaskFinished(finished_command_id=command_id))
            if command_id in self._image_generation_queues:
                del self._image_generation_queues[command_id]
    async def _collect_image_chunks(
        self,
        request: Request | None,
        command_id: CommandId,
        num_images: int,
        response_format: str,
        capture_stats: bool = False,
    ) -> tuple[list[ImageData], ImageGenerationStats | None]:
        """Collect image chunks and optionally capture stats.

        Blocks until num_images final images have been fully reassembled,
        then converts each into ImageData (URL or inline base64). Raises
        HTTPException 500 when an error chunk arrives. On cancellation, a
        shielded TaskCancelled is forwarded upstream.
        """
        # Track chunks per image: {image_index: {chunk_index: data}}
        # Only track non-partial (final) images
        image_chunks: dict[int, dict[int, str]] = {}
        image_total_chunks: dict[int, int] = {}
        image_formats: dict[int, Literal["png", "jpeg", "webp"] | None] = {}
        images_complete = 0
        stats: ImageGenerationStats | None = None
        try:
            # Register a queue so _apply_state routes this command's chunks here.
            self._image_generation_queues[command_id], recv = channel[
                ImageChunk | ErrorChunk
            ]()
            # NOTE(review): if the receiver closes before num_images complete,
            # this outer loop re-enters `with recv` on an exhausted channel —
            # confirm that cannot spin; it presumably raises or stays blocked.
            while images_complete < num_images:
                with recv as chunks:
                    async for chunk in chunks:
                        if chunk.finish_reason == "error":
                            raise HTTPException(
                                status_code=500,
                                detail=chunk.error_message or "Internal server error",
                            )
                        if chunk.is_partial:
                            # Partials are only meaningful in streaming mode.
                            continue
                        if chunk.image_index not in image_chunks:
                            image_chunks[chunk.image_index] = {}
                            image_total_chunks[chunk.image_index] = chunk.total_chunks
                            image_formats[chunk.image_index] = chunk.format
                        image_chunks[chunk.image_index][chunk.chunk_index] = chunk.data
                        if capture_stats and chunk.stats is not None:
                            stats = chunk.stats
                        if (
                            len(image_chunks[chunk.image_index])
                            == image_total_chunks[chunk.image_index]
                        ):
                            images_complete += 1
                            if images_complete >= num_images:
                                break
            images: list[ImageData] = []
            for image_idx in range(num_images):
                chunks_dict = image_chunks[image_idx]
                # Chunk indices are assumed contiguous 0..total_chunks-1.
                full_data = "".join(chunks_dict[i] for i in range(len(chunks_dict)))
                if response_format == "url" and request is not None:
                    image_bytes = base64.b64decode(full_data)
                    content_type = _format_to_content_type(image_formats.get(image_idx))
                    stored = self._image_store.store(image_bytes, content_type)
                    url = self._build_image_url(request, stored.image_id)
                    images.append(ImageData(b64_json=None, url=url))
                else:
                    images.append(
                        ImageData(
                            b64_json=full_data
                            if response_format == "b64_json"
                            else None,
                            url=None,
                        )
                    )
            return (images, stats if capture_stats else None)
        except anyio.get_cancelled_exc_class():
            # Shielded so the cancel notification survives our own cancellation.
            command = TaskCancelled(cancelled_command_id=command_id)
            with anyio.CancelScope(shield=True):
                await self.command_sender.send(
                    ForwarderCommand(origin=self._system_id, command=command)
                )
            raise
        finally:
            await self._send(TaskFinished(finished_command_id=command_id))
            if command_id in self._image_generation_queues:
                del self._image_generation_queues[command_id]
    async def _collect_image_generation(
        self,
        request: Request,
        command_id: CommandId,
        num_images: int,
        response_format: str,
    ) -> ImageGenerationResponse:
        """Collect all image chunks (non-streaming) and return a single response."""
        # Stats are discarded for the plain (non-bench) endpoint.
        images, _ = await self._collect_image_chunks(
            request, command_id, num_images, response_format, capture_stats=False
        )
        return ImageGenerationResponse(data=images)
    async def _collect_image_generation_with_stats(
        self,
        request: Request | None,
        command_id: CommandId,
        num_images: int,
        response_format: str,
    ) -> BenchImageGenerationResponse:
        """Collect all image chunks and include generation stats (bench endpoints)."""
        images, stats = await self._collect_image_chunks(
            request, command_id, num_images, response_format, capture_stats=True
        )
        return BenchImageGenerationResponse(data=images, generation_stats=stats)
    async def bench_image_generations(
        self, request: Request, payload: BenchImageGenerationTaskParams
    ) -> BenchImageGenerationResponse:
        """Run a non-streaming image generation and include generation stats."""
        # Benchmarks force unstreamed output with no partial images, and pin a
        # seed for reproducibility when the client did not set one.
        payload = payload.model_copy(
            update={
                "model": await self._validate_image_model(ModelId(payload.model)),
                "stream": False,
                "partial_images": 0,
                "advanced_params": _ensure_seed(payload.advanced_params),
            }
        )
        command = ImageGeneration(
            task_params=payload,
        )
        await self._send(command)
        return await self._collect_image_generation_with_stats(
            request=request,
            command_id=command.command_id,
            num_images=payload.n or 1,
            response_format=payload.response_format or "b64_json",
        )
    async def _send_image_edits_command(
        self,
        image: UploadFile,
        prompt: str,
        model: ModelId,
        n: int,
        size: ImageSize,
        response_format: Literal["url", "b64_json"],
        input_fidelity: Literal["low", "high"],
        stream: bool,
        partial_images: int,
        bench: bool,
        quality: Literal["high", "medium", "low"],
        output_format: Literal["png", "jpeg", "webp"],
        advanced_params: AdvancedImageParams | None,
    ) -> ImageEdits:
        """Prepare and send an image edits command with chunked image upload.

        The uploaded image is base64-encoded and split into EXO_MAX_CHUNK_SIZE
        pieces sent as SendInputChunk commands before the ImageEdits command
        itself. Returns the sent ImageEdits command (for its command_id).
        """
        resolved_model = await self._validate_image_model(model)
        advanced_params = _ensure_seed(advanced_params)
        image_content = await image.read()
        image_data = base64.b64encode(image_content).decode("utf-8")
        # Map the two-level fidelity knob onto an img2img strength value.
        image_strength = 0.7 if input_fidelity == "high" else 0.3
        data_chunks = [
            image_data[i : i + EXO_MAX_CHUNK_SIZE]
            for i in range(0, len(image_data), EXO_MAX_CHUNK_SIZE)
        ]
        total_chunks = len(data_chunks)
        # image_data is sent separately as chunks, so the command carries an
        # empty payload plus the expected chunk count.
        command = ImageEdits(
            task_params=ImageEditsTaskParams(
                image_data="",
                total_input_chunks=total_chunks,
                prompt=prompt,
                model=resolved_model,
                n=n,
                size=size,
                response_format=response_format,
                image_strength=image_strength,
                stream=stream,
                partial_images=partial_images,
                bench=bench,
                quality=quality,
                output_format=output_format,
                advanced_params=advanced_params,
            ),
        )
        # NOTE(review): len(image_data) counts base64 characters, not raw bytes.
        logger.info(
            f"Sending input image: {len(image_data)} bytes in {total_chunks} chunks"
        )
        for chunk_index, chunk_data in enumerate(data_chunks):
            await self._send(
                SendInputChunk(
                    chunk=InputImageChunk(
                        model=resolved_model,
                        command_id=command.command_id,
                        data=chunk_data,
                        chunk_index=chunk_index,
                        total_chunks=total_chunks,
                    )
                )
            )
        await self._send(command)
        return command
    async def image_edits(
        self,
        request: Request,
        image: UploadFile = File(...),  # noqa: B008
        prompt: str = Form(...),
        model: str = Form(...),
        n: int = Form(1),
        size: str | None = Form(None),
        response_format: Literal["url", "b64_json"] = Form("b64_json"),
        input_fidelity: Literal["low", "high"] = Form("low"),
        stream: str = Form("false"),
        partial_images: str = Form("0"),
        quality: Literal["high", "medium", "low"] = Form("medium"),
        output_format: Literal["png", "jpeg", "webp"] = Form("png"),
        advanced_params: str | None = Form(None),
    ) -> ImageGenerationResponse | StreamingResponse:
        """Handle image editing requests (img2img).

        Multipart form fields arrive as strings, so stream/partial_images and
        advanced_params are parsed manually before dispatching.
        """
        # Parse string form values to proper types
        stream_bool = stream.lower() in ("true", "1", "yes")
        # Non-numeric (including negative) values fall back to 0.
        partial_images_int = int(partial_images) if partial_images.isdigit() else 0
        parsed_advanced_params: AdvancedImageParams | None = None
        if advanced_params:
            # Best-effort parse: malformed advanced_params are silently ignored.
            with contextlib.suppress(Exception):
                parsed_advanced_params = AdvancedImageParams.model_validate_json(
                    advanced_params
                )
        command = await self._send_image_edits_command(
            image=image,
            prompt=prompt,
            model=ModelId(model),
            n=n,
            size=normalize_image_size(size),
            response_format=response_format,
            input_fidelity=input_fidelity,
            stream=stream_bool,
            partial_images=partial_images_int,
            bench=False,
            quality=quality,
            output_format=output_format,
            advanced_params=parsed_advanced_params,
        )
        if stream_bool and partial_images_int > 0:
            return StreamingResponse(
                self._generate_image_stream(
                    request=request,
                    command_id=command.command_id,
                    num_images=n,
                    response_format=response_format,
                ),
                media_type="text/event-stream",
            )
        return await self._collect_image_generation(
            request=request,
            command_id=command.command_id,
            num_images=n,
            response_format=response_format,
        )
    async def bench_image_edits(
        self,
        request: Request,
        image: UploadFile = File(...),  # noqa: B008
        prompt: str = Form(...),
        model: str = Form(...),
        n: int = Form(1),
        size: str | None = Form(None),
        response_format: Literal["url", "b64_json"] = Form("b64_json"),
        input_fidelity: Literal["low", "high"] = Form("low"),
        quality: Literal["high", "medium", "low"] = Form("medium"),
        output_format: Literal["png", "jpeg", "webp"] = Form("png"),
        advanced_params: str | None = Form(None),
    ) -> BenchImageGenerationResponse:
        """Handle benchmark image editing requests with generation stats."""
        parsed_advanced_params: AdvancedImageParams | None = None
        if advanced_params:
            # Best-effort parse: malformed advanced_params are silently ignored.
            with contextlib.suppress(Exception):
                parsed_advanced_params = AdvancedImageParams.model_validate_json(
                    advanced_params
                )
        # Benchmarks always run unstreamed, without partial images.
        command = await self._send_image_edits_command(
            image=image,
            prompt=prompt,
            model=ModelId(model),
            n=n,
            size=normalize_image_size(size),
            response_format=response_format,
            input_fidelity=input_fidelity,
            stream=False,
            partial_images=0,
            bench=True,
            quality=quality,
            output_format=output_format,
            advanced_params=parsed_advanced_params,
        )
        return await self._collect_image_generation_with_stats(
            request=request,
            command_id=command.command_id,
            num_images=n,
            response_format=response_format,
        )
    async def claude_messages(
        self, payload: ClaudeMessagesRequest
    ) -> ClaudeMessagesResponse | StreamingResponse:
        """Claude Messages API - adapter.

        Converts the Claude-style request into an internal TextGeneration
        command and returns either an SSE stream or a single JSON body.
        """
        task_params = claude_request_to_text_generation(payload)
        resolved_model = await self._resolve_and_validate_text_model(
            ModelId(task_params.model)
        )
        task_params = task_params.model_copy(update={"model": resolved_model})
        command = TextGeneration(task_params=task_params)
        await self._send(command)
        if payload.stream:
            return StreamingResponse(
                generate_claude_stream(
                    command.command_id,
                    payload.model,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "close",
                    # Disable proxy buffering so SSE events flush immediately.
                    "X-Accel-Buffering": "no",
                },
            )
        else:
            return StreamingResponse(
                collect_claude_response(
                    command.command_id,
                    payload.model,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/json",
            )
    async def openai_responses(
        self, payload: ResponsesRequest
    ) -> ResponsesResponse | StreamingResponse:
        """OpenAI Responses API.

        Converts the request into an internal TextGeneration command and
        returns either an SSE stream or a single JSON body.
        """
        task_params = responses_request_to_text_generation(payload)
        resolved_model = await self._resolve_and_validate_text_model(task_params.model)
        task_params = task_params.model_copy(update={"model": resolved_model})
        command = TextGeneration(task_params=task_params)
        await self._send(command)
        if payload.stream:
            return StreamingResponse(
                generate_responses_stream(
                    command.command_id,
                    payload.model,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "close",
                    # Disable proxy buffering so SSE events flush immediately.
                    "X-Accel-Buffering": "no",
                },
            )
        else:
            return StreamingResponse(
                collect_responses_response(
                    command.command_id,
                    payload.model,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/json",
            )
    async def _ollama_root(self) -> JSONResponse:
        """Respond to HEAD / from Ollama CLI connectivity checks."""
        # The Ollama CLI expects this exact body to consider the server alive.
        return JSONResponse(content="Ollama is running")
    async def ollama_chat(
        self, request: Request
    ) -> OllamaChatResponse | StreamingResponse:
        """Ollama Chat API — accepts JSON regardless of Content-Type."""
        # Parse the raw body manually: Ollama clients do not always send an
        # application/json Content-Type header.
        body = await request.body()
        payload = OllamaChatRequest.model_validate_json(body)
        task_params = ollama_request_to_text_generation(payload)
        resolved_model = await self._resolve_and_validate_text_model(
            ModelId(task_params.model)
        )
        task_params = task_params.model_copy(update={"model": resolved_model})
        command = TextGeneration(task_params=task_params)
        await self._send(command)
        if payload.stream:
            # Ollama streams newline-delimited JSON, not SSE.
            return StreamingResponse(
                generate_ollama_chat_stream(
                    command.command_id,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/x-ndjson",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "close",
                    "X-Accel-Buffering": "no",
                },
            )
        else:
            return StreamingResponse(
                collect_ollama_chat_response(
                    command.command_id,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/json",
            )
    async def ollama_generate(
        self, request: Request
    ) -> OllamaGenerateResponse | StreamingResponse:
        """Ollama Generate API — accepts JSON regardless of Content-Type."""
        # Parse the raw body manually: Ollama clients do not always send an
        # application/json Content-Type header.
        body = await request.body()
        payload = OllamaGenerateRequest.model_validate_json(body)
        task_params = ollama_generate_request_to_text_generation(payload)
        resolved_model = await self._resolve_and_validate_text_model(
            ModelId(task_params.model)
        )
        task_params = task_params.model_copy(update={"model": resolved_model})
        command = TextGeneration(task_params=task_params)
        await self._send(command)
        if payload.stream:
            # Ollama streams newline-delimited JSON, not SSE.
            return StreamingResponse(
                generate_ollama_generate_stream(
                    command.command_id,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/x-ndjson",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "close",
                    "X-Accel-Buffering": "no",
                },
            )
        else:
            return StreamingResponse(
                collect_ollama_generate_response(
                    command.command_id,
                    self._token_chunk_stream(command.command_id),
                ),
                media_type="application/json",
            )
async def ollama_tags(self) -> OllamaTagsResponse:
"""Returns list of models in Ollama tags format. We return the downloaded ones only."""
def none_if_empty(value: str) -> str | None:
return value or None
downloaded_model_ids: set[str] = set()
for node_downloads in self.state.downloads.values():
for dl in node_downloads:
if isinstance(dl, DownloadCompleted):
downloaded_model_ids.add(dl.shard_metadata.model_card.model_id)
cards = [
c for c in await get_model_cards() if c.model_id in downloaded_model_ids
]
now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
return OllamaTagsResponse(
models=[
OllamaModelTag(
name=str(card.model_id),
model=str(card.model_id),
modified_at=now,
size=card.storage_size.in_bytes,
digest="sha256:000000000000",
details=OllamaModelDetails(
family=none_if_empty(card.family),
quantization_level=none_if_empty(card.quantization),
),
)
for card in cards
]
)
    async def ollama_show(self, request: Request) -> OllamaShowResponse:
        """Returns model information in Ollama show format.

        Raises HTTPException 400 when neither name nor model is provided,
        404 when the model card cannot be loaded.
        """
        body = await request.body()
        payload = OllamaShowRequest.model_validate_json(body)
        # Ollama clients may send either "name" or "model".
        model_name = payload.name or payload.model
        if not model_name:
            raise HTTPException(status_code=400, detail="name or model is required")
        try:
            card = await ModelCard.load(ModelId(model_name))
        except Exception as exc:
            raise HTTPException(
                status_code=404, detail=f"Model not found: {model_name}"
            ) from exc
        return OllamaShowResponse(
            modelfile=f"FROM {card.model_id}",
            template="{{ .Prompt }}",
            details=OllamaModelDetails(
                family=card.family or None,
                quantization_level=card.quantization or None,
            ),
        )
async def ollama_ps(self) -> OllamaPsResponse:
"""Returns list of running models (active instances)."""
models: list[OllamaPsModel] = []
seen: set[str] = set()
for instance in self.state.instances.values():
model_id = str(instance.shard_assignments.model_id)
if model_id in seen:
continue
seen.add(model_id)
models.append(
OllamaPsModel(
name=model_id,
model=model_id,
size=0,
)
)
return OllamaPsResponse(models=models)
async def ollama_version(self) -> dict[str, str]:
"""Returns version information for Ollama API compatibility."""
return {"version": "exo v1.0"}
    def _calculate_total_available_memory(self) -> Memory:
        """Calculate total available memory across all nodes in bytes."""
        total_available = Memory()
        # Sum the advertised available RAM of every node in the current state.
        for memory in self.state.node_memory.values():
            total_available += memory.ram_available
        return total_available
    async def get_models(self, status: str | None = Query(default=None)) -> ModelList:
        """Returns list of available models, optionally filtered by being downloaded."""
        cards = await get_model_cards()
        if status == "downloaded":
            # Keep only models fully downloaded on at least one node.
            downloaded_model_ids: set[str] = set()
            for node_downloads in self.state.downloads.values():
                for dl in node_downloads:
                    if isinstance(dl, DownloadCompleted):
                        downloaded_model_ids.add(dl.shard_metadata.model_card.model_id)
            cards = [c for c in cards if c.model_id in downloaded_model_ids]
        return ModelList(
            data=[
                ModelListModel(
                    id=card.model_id,
                    hugging_face_id=card.model_id,
                    name=card.model_id.short(),
                    description="",
                    tags=[],
                    storage_size_megabytes=card.storage_size.in_mb,
                    supports_tensor=card.supports_tensor,
                    tasks=[task.value for task in card.tasks],
                    is_custom=is_custom_card(card.model_id),
                    family=card.family,
                    quantization=card.quantization,
                    base_model=card.base_model,
                    capabilities=card.capabilities,
                )
                for card in cards
            ]
        )
async def add_custom_model(self, payload: AddCustomModelParams) -> ModelListModel:
"""Fetch a model from HuggingFace and save as a custom model card."""
try:
card = await ModelCard.fetch_from_hf(payload.model_id)
except Exception as exc:
raise HTTPException(
status_code=400, detail=f"Failed to fetch model: {exc}"
) from exc
return ModelListModel(
id=card.model_id,
hugging_face_id=card.model_id,
name=card.model_id.short(),
description="",
tags=[],
storage_size_megabytes=int(card.storage_size.in_mb),
supports_tensor=card.supports_tensor,
tasks=[task.value for task in card.tasks],
is_custom=True,
)
    async def delete_custom_model(self, model_id: ModelId) -> JSONResponse:
        """Delete a user-added custom model card.

        Raises HTTPException 404 when no custom card exists for the id.
        """
        deleted = await delete_custom_card(model_id)
        if not deleted:
            raise HTTPException(status_code=404, detail="Custom model card not found")
        return JSONResponse(
            {"message": "Model card deleted", "model_id": str(model_id)}
        )
    async def search_models(
        self, query: str = "", limit: int = 20
    ) -> list[HuggingFaceSearchResult]:
        """Search HuggingFace Hub for mlx-community models."""
        # Imported lazily so the API can start without huggingface_hub loaded.
        from huggingface_hub import list_models
        # NOTE(review): list_models does blocking network I/O inside an async
        # handler — consider offloading it to a worker thread.
        results = list_models(
            search=query or None,
            author="mlx-community",
            sort="downloads",
            limit=limit,
        )
        return [
            HuggingFaceSearchResult(
                id=m.id,
                author=m.author or "",
                downloads=m.downloads or 0,
                likes=m.likes or 0,
                last_modified=str(m.last_modified or ""),
                tags=list(m.tags or []),
            )
            for m in results
        ]
    async def run(self):
        """Run the API's background loops and HTTP server until cancelled."""
        shutdown_ev = anyio.Event()
        try:
            async with self._tg as tg:
                logger.info("Starting API")
                tg.start_soon(self._apply_state)
                tg.start_soon(self._pause_on_new_election)
                tg.start_soon(self._cleanup_expired_images)
                print_startup_banner(self.port)
                tg.start_soon(self.run_api, shutdown_ev)
                try:
                    await anyio.sleep_forever()
                finally:
                    # Shielded so the shutdown trigger fires even while this
                    # task itself is being cancelled.
                    with anyio.CancelScope(shield=True):
                        shutdown_ev.set()
        finally:
            # Always release log/channel resources, even on error/cancellation.
            self._event_log.close()
            self.command_sender.close()
            self.event_receiver.close()
    async def run_api(self, ev: anyio.Event):
        """Serve the ASGI app on 0.0.0.0:{self.port} until `ev` is set."""
        cfg = Config()
        cfg.bind = [f"0.0.0.0:{self.port}"]
        # nb: shared.logging needs updating if any of this changes
        cfg.accesslog = None
        cfg.errorlog = "-"
        cfg.logger_class = InterceptLogger
        # Shielded: the server is allowed to finish its graceful shutdown even
        # when the enclosing task group is cancelling.
        with anyio.CancelScope(shield=True):
            await serve(
                cast(ASGIFramework, self.app),
                cfg,
                shutdown_trigger=ev.wait,
            )
    async def _apply_state(self):
        """Consume forwarded events: log them, fold into state, fan chunks out."""
        with self.event_receiver as events:
            async for i_event in events:
                self._event_log.append(i_event.event)
                self.state = apply(self.state, i_event)
                event = i_event.event
                if isinstance(event, ChunkGenerated):
                    # Route image chunks to the waiting image queue, if any.
                    if queue := self._image_generation_queues.get(
                        event.command_id, None
                    ):
                        assert isinstance(event.chunk, ImageChunk)
                        try:
                            await queue.send(event.chunk)
                        except BrokenResourceError:
                            # Consumer went away; drop its queue.
                            self._image_generation_queues.pop(event.command_id, None)
                    # Route token chunks to the waiting text queue, if any.
                    if queue := self._text_generation_queues.get(
                        event.command_id, None
                    ):
                        assert not isinstance(event.chunk, ImageChunk)
                        try:
                            await queue.send(event.chunk)
                        except BrokenResourceError:
                            self._text_generation_queues.pop(event.command_id, None)
                if isinstance(event, TracesMerged):
                    self._save_merged_trace(event)
    def _save_merged_trace(self, event: TracesMerged) -> None:
        """Persist a merged trace event as trace_{task_id}.json in the tracing cache."""
        traces = [
            TraceEvent(
                name=t.name,
                start_us=t.start_us,
                duration_us=t.duration_us,
                rank=t.rank,
                category=t.category,
            )
            for t in event.traces
        ]
        output_path = EXO_TRACING_CACHE_DIR / f"trace_{event.task_id}.json"
        export_trace(traces, output_path)
        logger.debug(f"Saved merged trace to {output_path}")
    async def _pause_on_new_election(self):
        """Pause command forwarding whenever a newer election round begins."""
        with self.election_receiver as ems:
            async for message in ems:
                # NOTE(review): nothing here clears self.paused — presumably
                # another component resets it (and paused_ev) once the
                # election completes; confirm against the rest of the class.
                if message.clock > self.last_completed_election:
                    self.paused = True
    async def _cleanup_expired_images(self):
        """Periodically clean up expired images from the store."""
        cleanup_interval_seconds = 300  # 5 minutes
        # Runs forever; relies on task-group cancellation to stop.
        while True:
            await anyio.sleep(cleanup_interval_seconds)
            removed = self._image_store.cleanup_expired()
            if removed > 0:
                logger.debug(f"Cleaned up {removed} expired images")
    async def _send(self, command: Command):
        """Forward a command to the master, waiting while the API is paused.

        self.paused is set during elections (_pause_on_new_election); this
        loop re-checks the flag after each wakeup of paused_ev.
        """
        while self.paused:
            await self.paused_ev.wait()
        await self.command_sender.send(
            ForwarderCommand(origin=self._system_id, command=command)
        )
    async def _send_download(self, command: DownloadCommand):
        """Forward a download command; unlike _send, this is never pause-gated."""
        await self.download_command_sender.send(
            ForwarderDownloadCommand(origin=self._system_id, command=command)
        )
    async def start_download(
        self, payload: StartDownloadParams
    ) -> StartDownloadResponse:
        """Ask a specific node to start downloading a model shard."""
        command = StartDownload(
            target_node_id=payload.target_node_id,
            shard_metadata=payload.shard_metadata,
        )
        await self._send_download(command)
        # The command id lets clients correlate later download events.
        return StartDownloadResponse(command_id=command.command_id)
    async def delete_download(
        self, node_id: NodeId, model_id: ModelId
    ) -> DeleteDownloadResponse:
        """Ask a specific node to delete its downloaded copy of a model."""
        command = DeleteDownload(
            target_node_id=node_id,
            # NOTE(review): model_id is already a ModelId; the re-wrap looks
            # redundant but is kept in case path params arrive as plain str.
            model_id=ModelId(model_id),
        )
        await self._send_download(command)
        return DeleteDownloadResponse(command_id=command.command_id)
    @staticmethod
    def _get_trace_path(task_id: str) -> Path:
        """Map a task id to its trace file path inside the tracing cache dir.

        Raises HTTPException 400 when the id would escape the cache directory
        (e.g. path traversal via '..' or an absolute path).
        """
        trace_path = EXO_TRACING_CACHE_DIR / f"trace_{task_id}.json"
        if not trace_path.resolve().is_relative_to(EXO_TRACING_CACHE_DIR.resolve()):
            raise HTTPException(status_code=400, detail=f"Invalid task ID: {task_id}")
        return trace_path
async def list_traces(self) -> TraceListResponse:
traces: list[TraceListItem] = []
for trace_file in sorted(
EXO_TRACING_CACHE_DIR.glob("trace_*.json"),
key=lambda p: p.stat().st_mtime,
reverse=True,
):
# Extract task_id from filename (trace_{task_id}.json)
task_id = trace_file.stem.removeprefix("trace_")
stat = trace_file.stat()
created_at = datetime.fromtimestamp(
stat.st_mtime, tz=timezone.utc
).isoformat()
traces.append(
TraceListItem(
task_id=task_id,
created_at=created_at,
file_size=stat.st_size,
)
)
return TraceListResponse(traces=traces)
    async def get_trace(self, task_id: str) -> TraceResponse:
        """Return all trace events for one saved trace.

        Raises HTTPException 404 when no trace file exists for the task id
        (400 via _get_trace_path for invalid ids).
        """
        trace_path = self._get_trace_path(task_id)
        if not trace_path.exists():
            raise HTTPException(status_code=404, detail=f"Trace not found: {task_id}")
        trace_events = load_trace_file(trace_path)
        return TraceResponse(
            task_id=task_id,
            traces=[
                TraceEventResponse(
                    name=event.name,
                    start_us=event.start_us,
                    duration_us=event.duration_us,
                    rank=event.rank,
                    category=event.category,
                )
                for event in trace_events
            ],
        )
async def get_trace_stats(self, task_id: str) -> TraceStatsResponse:
trace_path = self._get_trace_path(task_id)
if not trace_path.exists():
raise HTTPException(status_code=404, detail=f"Trace not found: {task_id}")
trace_events = load_trace_file(trace_path)
stats = compute_stats(trace_events)
return TraceStatsResponse(
task_id=task_id,
total_wall_time_us=stats.total_wall_time_us,
by_category={
category: TraceCategoryStats(
total_us=cat_stats.total_us,
count=cat_stats.count,
min_us=cat_stats.min_us,
max_us=cat_stats.max_us,
avg_us=cat_stats.avg_us,
)
for category, cat_stats in stats.by_category.items()
},
by_rank={
rank: TraceRankStats(
by_category={
category: TraceCategoryStats(
total_us=cat_stats.total_us,
count=cat_stats.count,
min_us=cat_stats.min_us,
max_us=cat_stats.max_us,
avg_us=cat_stats.avg_us,
)
for category, cat_stats in rank_stats.items()
}
)
for rank, rank_stats in stats.by_rank.items()
},
)
    async def get_trace_raw(self, task_id: str) -> FileResponse:
        """Serve the raw trace JSON file as a download.

        Raises HTTPException 404 when no trace file exists for the task id
        (400 via _get_trace_path for invalid ids).
        """
        trace_path = self._get_trace_path(task_id)
        if not trace_path.exists():
            raise HTTPException(status_code=404, detail=f"Trace not found: {task_id}")
        return FileResponse(
            path=trace_path,
            media_type="application/json",
            filename=f"trace_{task_id}.json",
        )
async def delete_traces(self, request: DeleteTracesRequest) -> DeleteTracesResponse:
deleted: list[str] = []
not_found: list[str] = []
for task_id in request.task_ids:
trace_path = self._get_trace_path(task_id)
if trace_path.exists():
trace_path.unlink()
deleted.append(task_id)
else:
not_found.append(task_id)
return DeleteTracesResponse(deleted=deleted, not_found=not_found)
    async def get_onboarding(self) -> JSONResponse:
        """Report whether the user finished onboarding (marker file exists)."""
        return JSONResponse({"completed": ONBOARDING_COMPLETE_FILE.exists()})
    async def complete_onboarding(self) -> JSONResponse:
        """Mark onboarding as completed by writing the marker file."""
        ONBOARDING_COMPLETE_FILE.parent.mkdir(parents=True, exist_ok=True)
        ONBOARDING_COMPLETE_FILE.write_text("true")
        return JSONResponse({"completed": True})
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/api.py",
"license": "Apache License 2.0",
"lines": 1645,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/main.py | from datetime import datetime, timedelta, timezone
import anyio
from loguru import logger
from exo.master.event_log import DiskEventLog
from exo.master.placement import (
add_instance_to_placements,
cancel_unnecessary_downloads,
delete_instance,
get_transition_events,
place_instance,
)
from exo.shared.apply import apply
from exo.shared.constants import EXO_EVENT_LOG_DIR, EXO_TRACING_ENABLED
from exo.shared.types.commands import (
CreateInstance,
DeleteInstance,
ForwarderCommand,
ForwarderDownloadCommand,
ImageEdits,
ImageGeneration,
PlaceInstance,
RequestEventLog,
SendInputChunk,
TaskCancelled,
TaskFinished,
TestCommand,
TextGeneration,
)
from exo.shared.types.common import CommandId, NodeId, SessionId, SystemId
from exo.shared.types.events import (
Event,
GlobalForwarderEvent,
IndexedEvent,
InputChunkReceived,
InstanceDeleted,
LocalForwarderEvent,
NodeGatheredInfo,
NodeTimedOut,
TaskCreated,
TaskDeleted,
TaskStatusUpdated,
TraceEventData,
TracesCollected,
TracesMerged,
)
from exo.shared.types.state import State
from exo.shared.types.tasks import (
ImageEdits as ImageEditsTask,
)
from exo.shared.types.tasks import (
ImageGeneration as ImageGenerationTask,
)
from exo.shared.types.tasks import (
TaskId,
TaskStatus,
)
from exo.shared.types.tasks import (
TextGeneration as TextGenerationTask,
)
from exo.shared.types.worker.instances import InstanceId
from exo.utils.channels import Receiver, Sender
from exo.utils.event_buffer import MultiSourceBuffer
from exo.utils.task_group import TaskGroup
class Master:
    """Session coordinator: turns commands into events and owns canonical state.

    Commands arrive from forwarders and are translated into events by
    `_command_processor`. Every event then flows through `_event_processor`,
    which orders it, applies it to `self.state`, appends it to the durable
    event log, and rebroadcasts it globally. `_plan` performs periodic
    housekeeping that is not (yet) modelled as commands.
    """

    def __init__(
        self,
        node_id: NodeId,
        session_id: SessionId,
        *,
        command_receiver: Receiver[ForwarderCommand],
        event_sender: Sender[Event],
        local_event_receiver: Receiver[LocalForwarderEvent],
        global_event_sender: Sender[GlobalForwarderEvent],
        download_command_sender: Sender[ForwarderDownloadCommand],
    ):
        self.node_id = node_id
        self.session_id = session_id
        # Canonical state, derived by applying the ordered event log.
        self.state = State()
        self._tg: TaskGroup = TaskGroup()
        # Maps a command to the task it spawned, so TaskCancelled/TaskFinished
        # commands can be routed back to the right task.
        self.command_task_mapping: dict[CommandId, TaskId] = {}
        self.command_receiver = command_receiver
        self.local_event_receiver = local_event_receiver
        self.global_event_sender = global_event_sender
        self.download_command_sender = download_command_sender
        self.event_sender = event_sender
        self._system_id = SystemId()
        # Re-orders the per-origin event streams into one consumable sequence.
        self._multi_buffer = MultiSourceBuffer[SystemId, Event]()
        # Append-only on-disk log; its len() doubles as the next global index.
        self._event_log = DiskEventLog(EXO_EVENT_LOG_DIR / "master")
        # Tracing bookkeeping: per-task traces keyed by device rank, and the
        # set of ranks we must hear from before merging (tracing mode only).
        self._pending_traces: dict[TaskId, dict[int, list[TraceEventData]]] = {}
        self._expected_ranks: dict[TaskId, set[int]] = {}

    async def run(self):
        """Run the three master loops until cancelled, then close resources."""
        logger.info("Starting Master")
        try:
            async with self._tg as tg:
                tg.start_soon(self._event_processor)
                tg.start_soon(self._command_processor)
                tg.start_soon(self._plan)
        finally:
            # Always release the log and channels, even on cancellation.
            self._event_log.close()
            self.global_event_sender.close()
            self.local_event_receiver.close()
            self.command_receiver.close()

    async def shutdown(self):
        """Cancel the loops started by `run`; cleanup happens in `run`'s finally."""
        logger.info("Stopping Master")
        self._tg.cancel_tasks()

    async def _command_processor(self) -> None:
        """Translate each incoming forwarder command into zero or more events."""
        with self.command_receiver as commands:
            async for forwarder_command in commands:
                try:
                    logger.info(f"Executing command: {forwarder_command.command}")
                    generated_events: list[Event] = []
                    command = forwarder_command.command
                    # instance_id -> number of tasks currently on it; used by
                    # the generation arms to pick the least-loaded instance.
                    instance_task_counts: dict[InstanceId, int] = {}
                    match command:
                        case TestCommand():
                            pass
                        # NOTE(review): the three generation arms below repeat
                        # the same least-loaded-instance selection; candidate
                        # for extraction into a shared helper.
                        case TextGeneration():
                            for instance in self.state.instances.values():
                                if (
                                    instance.shard_assignments.model_id
                                    == command.task_params.model
                                ):
                                    task_count = sum(
                                        1
                                        for task in self.state.tasks.values()
                                        if task.instance_id == instance.instance_id
                                    )
                                    instance_task_counts[instance.instance_id] = (
                                        task_count
                                    )
                            if not instance_task_counts:
                                raise ValueError(
                                    f"No instance found for model {command.task_params.model}"
                                )
                            # Least-loaded instance first.
                            available_instance_ids = sorted(
                                instance_task_counts.keys(),
                                key=lambda instance_id: instance_task_counts[
                                    instance_id
                                ],
                            )
                            task_id = TaskId()
                            generated_events.append(
                                TaskCreated(
                                    task_id=task_id,
                                    task=TextGenerationTask(
                                        task_id=task_id,
                                        command_id=command.command_id,
                                        instance_id=available_instance_ids[0],
                                        task_status=TaskStatus.Pending,
                                        task_params=command.task_params,
                                    ),
                                )
                            )
                            self.command_task_mapping[command.command_id] = task_id
                        case ImageGeneration():
                            for instance in self.state.instances.values():
                                if (
                                    instance.shard_assignments.model_id
                                    == command.task_params.model
                                ):
                                    task_count = sum(
                                        1
                                        for task in self.state.tasks.values()
                                        if task.instance_id == instance.instance_id
                                    )
                                    instance_task_counts[instance.instance_id] = (
                                        task_count
                                    )
                            if not instance_task_counts:
                                raise ValueError(
                                    f"No instance found for model {command.task_params.model}"
                                )
                            available_instance_ids = sorted(
                                instance_task_counts.keys(),
                                key=lambda instance_id: instance_task_counts[
                                    instance_id
                                ],
                            )
                            task_id = TaskId()
                            selected_instance_id = available_instance_ids[0]
                            generated_events.append(
                                TaskCreated(
                                    task_id=task_id,
                                    task=ImageGenerationTask(
                                        task_id=task_id,
                                        command_id=command.command_id,
                                        instance_id=selected_instance_id,
                                        task_status=TaskStatus.Pending,
                                        task_params=command.task_params,
                                    ),
                                )
                            )
                            self.command_task_mapping[command.command_id] = task_id
                            # When tracing, remember which device ranks must
                            # report traces before we can merge them.
                            if EXO_TRACING_ENABLED:
                                selected_instance = self.state.instances.get(
                                    selected_instance_id
                                )
                                if selected_instance:
                                    ranks = set(
                                        shard.device_rank
                                        for shard in selected_instance.shard_assignments.runner_to_shard.values()
                                    )
                                    self._expected_ranks[task_id] = ranks
                        case ImageEdits():
                            for instance in self.state.instances.values():
                                if (
                                    instance.shard_assignments.model_id
                                    == command.task_params.model
                                ):
                                    task_count = sum(
                                        1
                                        for task in self.state.tasks.values()
                                        if task.instance_id == instance.instance_id
                                    )
                                    instance_task_counts[instance.instance_id] = (
                                        task_count
                                    )
                            if not instance_task_counts:
                                raise ValueError(
                                    f"No instance found for model {command.task_params.model}"
                                )
                            available_instance_ids = sorted(
                                instance_task_counts.keys(),
                                key=lambda instance_id: instance_task_counts[
                                    instance_id
                                ],
                            )
                            task_id = TaskId()
                            selected_instance_id = available_instance_ids[0]
                            generated_events.append(
                                TaskCreated(
                                    task_id=task_id,
                                    task=ImageEditsTask(
                                        task_id=task_id,
                                        command_id=command.command_id,
                                        instance_id=selected_instance_id,
                                        task_status=TaskStatus.Pending,
                                        task_params=command.task_params,
                                    ),
                                )
                            )
                            self.command_task_mapping[command.command_id] = task_id
                            if EXO_TRACING_ENABLED:
                                selected_instance = self.state.instances.get(
                                    selected_instance_id
                                )
                                if selected_instance:
                                    ranks = set(
                                        shard.device_rank
                                        for shard in selected_instance.shard_assignments.runner_to_shard.values()
                                    )
                                    self._expected_ranks[task_id] = ranks
                        case DeleteInstance():
                            placement = delete_instance(command, self.state.instances)
                            transition_events = get_transition_events(
                                self.state.instances, placement, self.state.tasks
                            )
                            # Abort downloads that no surviving instance needs.
                            for cmd in cancel_unnecessary_downloads(
                                placement, self.state.downloads
                            ):
                                await self.download_command_sender.send(
                                    ForwarderDownloadCommand(
                                        origin=self._system_id, command=cmd
                                    )
                                )
                            generated_events.extend(transition_events)
                        case PlaceInstance():
                            placement = place_instance(
                                command,
                                self.state.topology,
                                self.state.instances,
                                self.state.node_memory,
                                self.state.node_network,
                            )
                            transition_events = get_transition_events(
                                self.state.instances, placement, self.state.tasks
                            )
                            generated_events.extend(transition_events)
                        case CreateInstance():
                            placement = add_instance_to_placements(
                                command,
                                self.state.topology,
                                self.state.instances,
                            )
                            transition_events = get_transition_events(
                                self.state.instances, placement, self.state.tasks
                            )
                            generated_events.extend(transition_events)
                        case SendInputChunk(chunk=chunk):
                            generated_events.append(
                                InputChunkReceived(
                                    command_id=chunk.command_id,
                                    chunk=chunk,
                                )
                            )
                        case TaskCancelled():
                            # Unknown command ids are silently ignored.
                            if (
                                task_id := self.command_task_mapping.get(
                                    command.cancelled_command_id
                                )
                            ) is not None:
                                generated_events.append(
                                    TaskStatusUpdated(
                                        task_status=TaskStatus.Cancelled,
                                        task_id=task_id,
                                    )
                                )
                        case TaskFinished():
                            # NOTE(review): direct indexing raises KeyError for
                            # an unknown command id (unlike TaskCancelled above,
                            # which uses .get) -- confirm this is intended.
                            generated_events.append(
                                TaskDeleted(
                                    task_id=self.command_task_mapping[
                                        command.finished_command_id
                                    ]
                                )
                            )
                            self.command_task_mapping.pop(
                                command.finished_command_id, None
                            )
                        case RequestEventLog():
                            # We should just be able to send everything, since other buffers will ignore old messages
                            # rate limit to 1000 at a time
                            end = min(command.since_idx + 1000, len(self._event_log))
                            for i, event in enumerate(
                                self._event_log.read_range(command.since_idx, end),
                                start=command.since_idx,
                            ):
                                await self._send_event(IndexedEvent(idx=i, event=event))
                    for event in generated_events:
                        await self.event_sender.send(event)
                except ValueError as e:
                    # Validation failures are logged and the loop keeps serving.
                    logger.opt(exception=e).warning("Error in command processor")

    # These plan loops are the cracks showing in our event sourcing architecture - more things could be commands
    async def _plan(self) -> None:
        """Periodic housekeeping: tear down broken instances, expire nodes."""
        while True:
            # kill broken instances
            connected_node_ids = set(self.state.topology.list_nodes())
            for instance_id, instance in self.state.instances.items():
                for node_id in instance.shard_assignments.node_to_runner:
                    if node_id not in connected_node_ids:
                        await self.event_sender.send(
                            InstanceDeleted(instance_id=instance_id)
                        )
                        break
            # time out dead nodes
            for node_id, time in self.state.last_seen.items():
                now = datetime.now(tz=timezone.utc)
                if now - time > timedelta(seconds=30):
                    logger.info(f"Manually removing node {node_id} due to inactivity")
                    await self.event_sender.send(NodeTimedOut(node_id=node_id))
            await anyio.sleep(10)

    async def _event_processor(self) -> None:
        """Order, apply, persist, and rebroadcast events from this session."""
        with self.local_event_receiver as local_events:
            async for local_event in local_events:
                # Discard all events not from our session
                if local_event.session != self.session_id:
                    continue
                self._multi_buffer.ingest(
                    local_event.origin_idx,
                    local_event.event,
                    local_event.origin,
                )
                # drain() yields events only once they are in order per origin.
                for event in self._multi_buffer.drain():
                    if isinstance(event, TracesCollected):
                        # Trace events are aggregated, not logged/applied.
                        await self._handle_traces_collected(event)
                        continue
                    logger.debug(f"Master indexing event: {str(event)[:100]}")
                    indexed = IndexedEvent(event=event, idx=len(self._event_log))
                    self.state = apply(self.state, indexed)
                    event._master_time_stamp = datetime.now(tz=timezone.utc)  # pyright: ignore[reportPrivateUsage]
                    if isinstance(event, NodeGatheredInfo):
                        # Stamp with the master's clock for consistency.
                        event.when = str(datetime.now(tz=timezone.utc))
                    self._event_log.append(event)
                    await self._send_event(indexed)

    # This function is re-entrant, take care!
    async def _send_event(self, event: IndexedEvent):
        # Convenience method since this line is ugly
        await self.global_event_sender.send(
            GlobalForwarderEvent(
                origin=self.node_id,
                origin_idx=event.idx,
                session=self.session_id,
                event=event.event,
            )
        )

    async def _handle_traces_collected(self, event: TracesCollected) -> None:
        """Buffer one rank's traces; merge once all expected ranks reported."""
        task_id = event.task_id
        if task_id not in self._pending_traces:
            self._pending_traces[task_id] = {}
        self._pending_traces[task_id][event.rank] = event.traces
        if (
            task_id in self._expected_ranks
            and set(self._pending_traces[task_id].keys())
            >= self._expected_ranks[task_id]
        ):
            await self._merge_and_save_traces(task_id)

    async def _merge_and_save_traces(self, task_id: TaskId) -> None:
        """Concatenate all ranks' traces into one TracesMerged event."""
        all_trace_data: list[TraceEventData] = []
        for trace_data in self._pending_traces[task_id].values():
            all_trace_data.extend(trace_data)
        await self.event_sender.send(
            TracesMerged(task_id=task_id, traces=all_trace_data)
        )
        del self._pending_traces[task_id]
        if task_id in self._expected_ranks:
            del self._expected_ranks[task_id]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/main.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/placement.py | import random
from collections.abc import Mapping
from copy import deepcopy
from typing import Sequence
from exo.master.placement_utils import (
Cycle,
filter_cycles_by_memory,
get_mlx_jaccl_coordinators,
get_mlx_jaccl_devices_matrix,
get_mlx_ring_hosts_by_node,
get_shard_assignments,
get_smallest_cycles,
)
from exo.shared.models.model_cards import ModelId
from exo.shared.topology import Topology
from exo.shared.types.commands import (
CancelDownload,
CreateInstance,
DeleteInstance,
DownloadCommand,
PlaceInstance,
)
from exo.shared.types.common import NodeId
from exo.shared.types.events import (
Event,
InstanceCreated,
InstanceDeleted,
TaskStatusUpdated,
)
from exo.shared.types.memory import Memory
from exo.shared.types.profiling import MemoryUsage, NodeNetworkInfo
from exo.shared.types.tasks import Task, TaskId, TaskStatus
from exo.shared.types.worker.downloads import (
DownloadOngoing,
DownloadProgress,
)
from exo.shared.types.worker.instances import (
Instance,
InstanceId,
InstanceMeta,
MlxJacclInstance,
MlxRingInstance,
)
from exo.shared.types.worker.shards import Sharding
def random_ephemeral_port() -> int:
    """Pick a random high port; values at or below 52415 are shifted down by
    one so 52415 itself is never returned."""
    candidate = random.randint(49153, 65535)
    if candidate <= 52415:
        return candidate - 1
    return candidate
def add_instance_to_placements(
    command: CreateInstance,
    topology: Topology,
    current_instances: Mapping[InstanceId, Instance],
) -> Mapping[InstanceId, Instance]:
    """Return a copy of the placement map with the command's instance added."""
    # TODO: validate against topology
    updated = dict(current_instances)
    updated[command.instance.instance_id] = command.instance
    return updated
def place_instance(
    command: PlaceInstance,
    topology: Topology,
    current_instances: Mapping[InstanceId, Instance],
    node_memory: Mapping[NodeId, MemoryUsage],
    node_network: Mapping[NodeId, NodeNetworkInfo],
    required_nodes: set[NodeId] | None = None,
) -> dict[InstanceId, Instance]:
    """Choose a node cycle for the requested model and add a new instance.

    Candidate cycles are filtered by minimum size, required-node membership,
    total available memory, and sharding/backend constraints; among the
    smallest viable cycles the one with the most free RAM is selected.
    Returns a (deep) copy of `current_instances` with the new instance added.

    NOTE: may mutate `command.instance_meta`/`command.sharding` in place when
    forced onto a single node.

    Raises:
        ValueError: when no cycle satisfies the constraints.
    """
    cycles = topology.get_cycles()
    candidate_cycles = list(filter(lambda it: len(it) >= command.min_nodes, cycles))
    # Filter to cycles containing all required nodes (subset matching)
    if required_nodes:
        candidate_cycles = [
            cycle
            for cycle in candidate_cycles
            if required_nodes.issubset(cycle.node_ids)
        ]
    cycles_with_sufficient_memory = filter_cycles_by_memory(
        candidate_cycles, node_memory, command.model_card.storage_size
    )
    if len(cycles_with_sufficient_memory) == 0:
        raise ValueError("No cycles found with sufficient memory")
    if command.sharding == Sharding.Tensor:
        if not command.model_card.supports_tensor:
            raise ValueError(
                f"Requested Tensor sharding but this model does not support tensor parallelism: {command.model_card.model_id}"
            )
        # TODO: the condition here for tensor parallel is not correct, but it works good enough for now.
        # Tensor parallelism requires the hidden dimension to split evenly
        # across the participating nodes.
        cycles_with_sufficient_memory = [
            cycle
            for cycle in cycles_with_sufficient_memory
            if command.model_card.hidden_size % len(cycle) == 0
        ]
        if not cycles_with_sufficient_memory:
            raise ValueError(
                f"No tensor sharding found for model with hidden_size {command.model_card.hidden_size} candidate cycles"
            )
    # Model-specific carve-out for a known-unsupported combination.
    if command.sharding == Sharding.Pipeline and command.model_card.model_id == ModelId(
        "mlx-community/DeepSeek-V3.1-8bit"
    ):
        raise ValueError(
            "Pipeline parallelism is not supported for DeepSeek V3.1 (8-bit)"
        )
    # Prefer the fewest nodes that can serve the model.
    smallest_cycles = get_smallest_cycles(cycles_with_sufficient_memory)
    smallest_rdma_cycles = [
        cycle for cycle in smallest_cycles if topology.is_rdma_cycle(cycle)
    ]
    if command.instance_meta == InstanceMeta.MlxJaccl:
        # The jaccl backend only works over RDMA links.
        if not smallest_rdma_cycles:
            raise ValueError(
                "Requested RDMA (MlxJaccl) but no RDMA-connected cycles available"
            )
        smallest_cycles = smallest_rdma_cycles
    # Prefer cycles containing at least one leaf node, falling back to all
    # smallest cycles; tie-break by greatest total available RAM.
    cycles_with_leaf_nodes: list[Cycle] = [
        cycle
        for cycle in smallest_cycles
        if any(topology.node_is_leaf(node_id) for node_id in cycle)
    ]
    selected_cycle = max(
        cycles_with_leaf_nodes if cycles_with_leaf_nodes != [] else smallest_cycles,
        key=lambda cycle: sum(
            (node_memory[node_id].ram_available for node_id in cycle),
            start=Memory(),
        ),
    )
    # Single-node: force Pipeline/Ring (Tensor and Jaccl require multi-node)
    if len(selected_cycle) == 1:
        command.instance_meta = InstanceMeta.MlxRing
        command.sharding = Sharding.Pipeline
    shard_assignments = get_shard_assignments(
        command.model_card, selected_cycle, command.sharding, node_memory
    )
    cycle_digraph: Topology = topology.get_subgraph_from_nodes(selected_cycle.node_ids)
    instance_id = InstanceId()
    target_instances = dict(deepcopy(current_instances))
    match command.instance_meta:
        case InstanceMeta.MlxJaccl:
            # TODO(evan): shard assignments should contain information about ranks, this is ugly
            def get_device_rank(node_id: NodeId) -> int:
                # Look up the device rank recorded in this node's shard.
                runner_id = shard_assignments.node_to_runner[node_id]
                shard_metadata = shard_assignments.runner_to_shard.get(runner_id)
                assert shard_metadata is not None
                return shard_metadata.device_rank

            # Exactly one node carries rank 0; it acts as the coordinator.
            zero_node_ids = [
                node_id
                for node_id in selected_cycle.node_ids
                if get_device_rank(node_id) == 0
            ]
            assert len(zero_node_ids) == 1
            coordinator_node_id = zero_node_ids[0]
            mlx_jaccl_devices = get_mlx_jaccl_devices_matrix(
                [node_id for node_id in selected_cycle],
                cycle_digraph,
            )
            mlx_jaccl_coordinators = get_mlx_jaccl_coordinators(
                coordinator=coordinator_node_id,
                coordinator_port=random_ephemeral_port(),
                cycle_digraph=cycle_digraph,
                node_network=node_network,
            )
            target_instances[instance_id] = MlxJacclInstance(
                instance_id=instance_id,
                shard_assignments=shard_assignments,
                jaccl_devices=mlx_jaccl_devices,
                jaccl_coordinators=mlx_jaccl_coordinators,
            )
        case InstanceMeta.MlxRing:
            ephemeral_port = random_ephemeral_port()
            hosts_by_node = get_mlx_ring_hosts_by_node(
                selected_cycle=selected_cycle,
                cycle_digraph=cycle_digraph,
                ephemeral_port=ephemeral_port,
                node_network=node_network,
            )
            target_instances[instance_id] = MlxRingInstance(
                instance_id=instance_id,
                shard_assignments=shard_assignments,
                hosts_by_node=hosts_by_node,
                ephemeral_port=ephemeral_port,
            )
    return target_instances
def delete_instance(
    command: DeleteInstance,
    current_instances: Mapping[InstanceId, Instance],
) -> dict[InstanceId, Instance]:
    """Return a deep-copied placement map without the given instance.

    Raises:
        ValueError: if the instance id is not present.
    """
    if command.instance_id not in current_instances:
        raise ValueError(f"Instance {command.instance_id} not found")
    remaining = dict(deepcopy(current_instances))
    del remaining[command.instance_id]
    return remaining
def get_transition_events(
    current_instances: Mapping[InstanceId, Instance],
    target_instances: Mapping[InstanceId, Instance],
    tasks: Mapping[TaskId, Task],
) -> Sequence[Event]:
    """Diff current vs target placements into create/delete events.

    Tasks still pending or running on a removed instance are cancelled
    before the instance's deletion event is emitted.
    """
    events: list[Event] = []
    # Newly placed instances.
    for instance_id, instance in target_instances.items():
        if instance_id in current_instances:
            continue
        events.append(
            InstanceCreated(
                instance=instance,
            )
        )
    # Instances that disappear from the target placement.
    for instance_id in current_instances:
        if instance_id in target_instances:
            continue
        for task in tasks.values():
            if task.instance_id != instance_id:
                continue
            if task.task_status not in [TaskStatus.Pending, TaskStatus.Running]:
                continue
            events.append(
                TaskStatusUpdated(
                    task_status=TaskStatus.Cancelled,
                    task_id=task.task_id,
                )
            )
        events.append(
            InstanceDeleted(
                instance_id=instance_id,
            )
        )
    return events
def cancel_unnecessary_downloads(
    instances: Mapping[InstanceId, Instance],
    download_status: Mapping[NodeId, Sequence[DownloadProgress]],
) -> Sequence[DownloadCommand]:
    """Build cancel commands for in-flight downloads no instance still needs."""
    # (node, model) pairs that are actively downloading right now.
    in_flight: list[tuple[NodeId, ModelId]] = []
    for node_id, progresses in download_status.items():
        for progress in progresses:
            if isinstance(progress, (DownloadOngoing)):
                in_flight.append(
                    (node_id, progress.shard_metadata.model_card.model_id)
                )
    # (node, model) pairs required by the current instance placements.
    needed: set[tuple[NodeId, ModelId]] = set()
    for instance in instances.values():
        assignments = instance.shard_assignments
        for node_id, runner_id in assignments.node_to_runner.items():
            model_id = assignments.runner_to_shard[runner_id].model_card.model_id
            needed.add((node_id, model_id))
    commands: list[DownloadCommand] = []
    for node_id, model_id in in_flight:
        if (node_id, model_id) not in needed:
            commands.append(CancelDownload(target_node_id=node_id, model_id=model_id))
    return commands
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/placement.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/placement_utils.py | from collections.abc import Generator, Mapping
from loguru import logger
from exo.shared.models.model_cards import ModelCard
from exo.shared.topology import Topology
from exo.shared.types.common import Host, NodeId
from exo.shared.types.memory import Memory
from exo.shared.types.profiling import MemoryUsage, NodeNetworkInfo
from exo.shared.types.topology import Cycle, RDMAConnection, SocketConnection
from exo.shared.types.worker.runners import RunnerId, ShardAssignments
from exo.shared.types.worker.shards import (
CfgShardMetadata,
PipelineShardMetadata,
Sharding,
ShardMetadata,
TensorShardMetadata,
)
def filter_cycles_by_memory(
    cycles: list[Cycle],
    node_memory: Mapping[NodeId, MemoryUsage],
    required_memory: Memory,
) -> list[Cycle]:
    """Keep cycles whose nodes are all profiled and jointly have enough RAM."""
    kept: list[Cycle] = []
    for cycle in cycles:
        # Skip cycles containing nodes we have no memory data for.
        if any(node not in node_memory for node in cycle):
            continue
        available = sum(
            (node_memory[node_id].ram_available for node_id in cycle.node_ids),
            start=Memory(),
        )
        if available >= required_memory:
            kept.append(cycle)
    return kept
def get_smallest_cycles(
    cycles: list[Cycle],
) -> list[Cycle]:
    """Return every cycle tied for the minimum node count."""
    smallest = min(len(cycle) for cycle in cycles)
    return [cycle for cycle in cycles if len(cycle) == smallest]
def allocate_layers_proportionally(
    total_layers: int,
    memory_fractions: list[float],
) -> list[int]:
    """Split total_layers across nodes proportionally to memory_fractions.

    Uses the largest-remainder method, then guarantees each node at least
    one layer by borrowing from the largest allocation.

    Raises:
        ValueError: for an empty node list or fewer layers than nodes.
    """
    n = len(memory_fractions)
    if n == 0:
        raise ValueError("Cannot allocate layers to an empty node list")
    if total_layers < n:
        raise ValueError(
            f"Cannot distribute {total_layers} layers across {n} nodes "
            "(need at least 1 layer per node)"
        )
    # Floor each ideal share, then hand out the leftover layers to the
    # entries with the largest fractional parts (largest-remainder method).
    ideal = [fraction * total_layers for fraction in memory_fractions]
    allocation = [int(share) for share in ideal]
    leftovers = total_layers - sum(allocation)
    ranked = sorted(range(n), key=lambda idx: ideal[idx] - allocation[idx], reverse=True)
    for idx in ranked[:leftovers]:
        allocation[idx] += 1
    # Guarantee a minimum of one layer per node by taking from the largest.
    for idx in range(n):
        if allocation[idx] == 0:
            donor = max(range(n), key=lambda j: allocation[j])
            assert allocation[donor] > 1
            allocation[donor] -= 1
            allocation[idx] = 1
    return allocation
def _validate_cycle(cycle: Cycle) -> None:
    """Reject cycles that contain no nodes."""
    if cycle.node_ids:
        return
    raise ValueError("Cannot create shard assignments for empty node cycle")
def _compute_total_memory(
    node_ids: list[NodeId],
    node_memory: Mapping[NodeId, MemoryUsage],
) -> Memory:
    """Sum available RAM across node_ids, rejecting an all-zero total."""
    total = Memory()
    for node_id in node_ids:
        total = total + node_memory[node_id].ram_available
    if total.in_bytes == 0:
        raise ValueError("Cannot create shard assignments: total available memory is 0")
    return total
def _allocate_and_validate_layers(
    node_ids: list[NodeId],
    node_memory: Mapping[NodeId, MemoryUsage],
    total_memory: Memory,
    model_card: ModelCard,
) -> list[int]:
    """Allocate layers proportional to RAM, then verify each node's share fits.

    Raises:
        ValueError: when a node's proportional share exceeds its available RAM.
    """
    fractions = [
        node_memory[node_id].ram_available / total_memory for node_id in node_ids
    ]
    layer_allocations = allocate_layers_proportionally(
        total_layers=model_card.n_layers,
        memory_fractions=fractions,
    )
    total_storage = model_card.storage_size
    total_layers = model_card.n_layers
    for i, (node_id, node_layers) in enumerate(zip(node_ids, layer_allocations)):
        # Approximate each node's storage need by its share of layers.
        required_memory = (total_storage * node_layers) // total_layers
        available_memory = node_memory[node_id].ram_available
        if required_memory > available_memory:
            raise ValueError(
                f"Node {i} ({node_id}) has insufficient memory: "
                f"requires {required_memory.in_gb:.2f} GB for {node_layers} layers, "
                f"but only has {available_memory.in_gb:.2f} GB available"
            )
    return layer_allocations
def get_shard_assignments_for_pipeline_parallel(
    model_card: ModelCard,
    cycle: Cycle,
    node_memory: Mapping[NodeId, MemoryUsage],
) -> ShardAssignments:
    """Create shard assignments for pipeline parallel execution.

    Models that use CFG get the dual-pipeline layout whenever the cycle has
    an even node count of at least two; everything else runs one pipeline.
    """
    world_size = len(cycle)
    cfg_capable = model_card.uses_cfg and world_size >= 2 and world_size % 2 == 0
    if not cfg_capable:
        return _get_shard_assignments_for_pure_pipeline(model_card, cycle, node_memory)
    return _get_shard_assignments_for_cfg_parallel(model_card, cycle, node_memory)
def _get_shard_assignments_for_cfg_parallel(
    model_card: ModelCard,
    cycle: Cycle,
    node_memory: Mapping[NodeId, MemoryUsage],
) -> ShardAssignments:
    """Create shard assignments for CFG parallel execution.

    CFG parallel runs two independent pipelines. Group 0 processes the positive
    prompt, group 1 processes the negative prompt. The ring topology places
    group 1's ranks in reverse order so both "last stages" are neighbors for
    efficient CFG exchange.
    """
    _validate_cycle(cycle)
    world_size = len(cycle)
    cfg_world_size = 2
    pipeline_world_size = world_size // cfg_world_size
    # Allocate layers for one pipeline group (both groups run the same layers)
    # NOTE(review): the split is sized and validated against the FIRST half of
    # the cycle only; the second group reuses it and is implicitly assumed to
    # have comparable memory -- confirm.
    pipeline_node_ids = cycle.node_ids[:pipeline_world_size]
    pipeline_memory = _compute_total_memory(pipeline_node_ids, node_memory)
    layer_allocations = _allocate_and_validate_layers(
        pipeline_node_ids, node_memory, pipeline_memory, model_card
    )
    # Ring topology: group 0 ascending [0,1,2,...], group 1 descending [...,2,1,0]
    # This places both last stages as neighbors for CFG exchange.
    position_to_cfg_pipeline = [(0, r) for r in range(pipeline_world_size)] + [
        (1, r) for r in reversed(range(pipeline_world_size))
    ]
    runner_to_shard: dict[RunnerId, ShardMetadata] = {}
    node_to_runner: dict[NodeId, RunnerId] = {}
    for device_rank, node_id in enumerate(cycle.node_ids):
        # device_rank is the global position in the ring; cfg/pipeline ranks
        # locate the node within its group.
        cfg_rank, pipeline_rank = position_to_cfg_pipeline[device_rank]
        # Contiguous layer range for this pipeline stage.
        layers_before = sum(layer_allocations[:pipeline_rank])
        node_layers = layer_allocations[pipeline_rank]
        shard = CfgShardMetadata(
            model_card=model_card,
            device_rank=device_rank,
            world_size=world_size,
            start_layer=layers_before,
            end_layer=layers_before + node_layers,
            n_layers=model_card.n_layers,
            cfg_rank=cfg_rank,
            cfg_world_size=cfg_world_size,
            pipeline_rank=pipeline_rank,
            pipeline_world_size=pipeline_world_size,
        )
        runner_id = RunnerId()
        runner_to_shard[runner_id] = shard
        node_to_runner[node_id] = runner_id
    return ShardAssignments(
        model_id=model_card.model_id,
        runner_to_shard=runner_to_shard,
        node_to_runner=node_to_runner,
    )
def _get_shard_assignments_for_pure_pipeline(
    model_card: ModelCard,
    cycle: Cycle,
    node_memory: Mapping[NodeId, MemoryUsage],
) -> ShardAssignments:
    """Create shard assignments for pure pipeline execution."""
    _validate_cycle(cycle)
    total_memory = _compute_total_memory(cycle.node_ids, node_memory)
    layer_allocations = _allocate_and_validate_layers(
        cycle.node_ids, node_memory, total_memory, model_card
    )
    runner_to_shard: dict[RunnerId, ShardMetadata] = {}
    node_to_runner: dict[NodeId, RunnerId] = {}
    world_size = len(cycle)
    # Hand each stage a contiguous layer range via a running offset.
    next_start = 0
    for pipeline_rank, node_id in enumerate(cycle.node_ids):
        node_layers = layer_allocations[pipeline_rank]
        runner_id = RunnerId()
        runner_to_shard[runner_id] = PipelineShardMetadata(
            model_card=model_card,
            device_rank=pipeline_rank,
            world_size=world_size,
            start_layer=next_start,
            end_layer=next_start + node_layers,
            n_layers=model_card.n_layers,
        )
        node_to_runner[node_id] = runner_id
        next_start += node_layers
    return ShardAssignments(
        model_id=model_card.model_id,
        runner_to_shard=runner_to_shard,
        node_to_runner=node_to_runner,
    )
def get_shard_assignments_for_tensor_parallel(
    model_card: ModelCard,
    cycle: Cycle,
) -> ShardAssignments:
    """Create shard assignments for tensor parallel execution.

    Every node holds the full layer range [0, n_layers); parallelism comes
    from splitting work within layers, so each shard only records its rank
    in the world.
    """
    # Fix: added the missing `-> ShardAssignments` return annotation for
    # consistency with the sibling assignment builders, and return directly
    # instead of through a redundant temporary.
    total_layers = model_card.n_layers
    world_size = len(cycle)
    runner_to_shard: dict[RunnerId, ShardMetadata] = {}
    node_to_runner: dict[NodeId, RunnerId] = {}
    for i, node_id in enumerate(cycle):
        shard = TensorShardMetadata(
            model_card=model_card,
            device_rank=i,
            world_size=world_size,
            start_layer=0,
            end_layer=total_layers,
            n_layers=total_layers,
        )
        runner_id = RunnerId()
        runner_to_shard[runner_id] = shard
        node_to_runner[node_id] = runner_id
    return ShardAssignments(
        model_id=model_card.model_id,
        runner_to_shard=runner_to_shard,
        node_to_runner=node_to_runner,
    )
def get_shard_assignments(
    model_card: ModelCard,
    cycle: Cycle,
    sharding: Sharding,
    node_memory: Mapping[NodeId, MemoryUsage],
) -> ShardAssignments:
    """Dispatch to the assignment builder for the requested sharding strategy."""
    if sharding == Sharding.Pipeline:
        return get_shard_assignments_for_pipeline_parallel(
            model_card=model_card,
            cycle=cycle,
            node_memory=node_memory,
        )
    if sharding == Sharding.Tensor:
        return get_shard_assignments_for_tensor_parallel(
            model_card=model_card,
            cycle=cycle,
        )
def get_mlx_jaccl_devices_matrix(
    selected_cycle: list[NodeId],
    cycle_digraph: Topology,
) -> list[list[str | None]]:
    """Build connectivity matrix mapping device i to device j via RDMA interface names.

    The matrix element [i][j] contains the interface name on device i that connects
    to device j, or None if no connection exists or no interface name is found.
    Diagonal elements are always None.
    """
    num_nodes = len(selected_cycle)
    matrix: list[list[str | None]] = [[None] * num_nodes for _ in range(num_nodes)]
    for i, node_i in enumerate(selected_cycle):
        for j, node_j in enumerate(selected_cycle):
            if i == j:
                continue
            # Use the first RDMA connection found between the pair.
            found_rdma = False
            for conn in cycle_digraph.get_all_connections_between(node_i, node_j):
                if isinstance(conn, RDMAConnection):
                    matrix[i][j] = conn.source_rdma_iface
                    found_rdma = True
                    break
            if not found_rdma:
                raise ValueError(
                    "Current jaccl backend requires all-to-all RDMA connections"
                )
    return matrix
def _find_connection_ip(
    node_i: NodeId,
    node_j: NodeId,
    cycle_digraph: Topology,
) -> Generator[str, None, None]:
    """Yield every socket-connection IP through which node_i reaches node_j."""
    connections = cycle_digraph.get_all_connections_between(node_i, node_j)
    for connection in connections:
        if not isinstance(connection, SocketConnection):
            continue
        yield connection.sink_multiaddr.ip_address
def _find_ip_prioritised(
    node_id: NodeId,
    other_node_id: NodeId,
    cycle_digraph: Topology,
    node_network: Mapping[NodeId, NodeNetworkInfo],
    ring: bool,
) -> str | None:
    """Find an IP address between nodes with prioritization.

    Ring links prefer thunderbolt first (fastest best-effort guess); RDMA
    coordinator links prefer ethernet first. Returns None when the two nodes
    share no socket connection.
    """
    ips = list(_find_connection_ip(node_id, other_node_id, cycle_digraph))
    if not ips:
        return None
    # Peer's interface metadata, used to classify each candidate IP.
    other_network = node_network.get(other_node_id, NodeNetworkInfo())
    ip_to_type = {
        iface.ip_address: iface.interface_type for iface in other_network.interfaces
    }
    # Ring should prioritise fastest connection. As a best-effort, we prioritise TB.
    # TODO: Profile and get actual connection speeds.
    if ring:
        priority = {
            "thunderbolt": 0,
            "maybe_ethernet": 1,
            "ethernet": 2,
            "wifi": 3,
            "unknown": 4,
        }
    # RDMA prefers ethernet coordinator
    else:
        priority = {
            "ethernet": 0,
            "wifi": 1,
            "unknown": 2,
            "maybe_ethernet": 3,
            "thunderbolt": 4,
        }
    # Lowest priority value wins; IPs without metadata classify as "unknown",
    # and unexpected interface-type strings fall back to priority 2.
    return min(ips, key=lambda ip: priority.get(ip_to_type.get(ip, "unknown"), 2))
def get_mlx_ring_hosts_by_node(
    selected_cycle: Cycle,
    cycle_digraph: Topology,
    ephemeral_port: int,
    node_network: Mapping[NodeId, NodeNetworkInfo],
) -> dict[NodeId, list[Host]]:
    """Generate per-node host lists for MLX ring backend.

    Each node gets a list where:
    - Self position: Host(ip="0.0.0.0", port=ephemeral_port)
    - Left/right neighbors: actual connection IPs
    - Non-neighbors: Host(ip="198.51.100.1", port=0) placeholder (RFC 5737 TEST-NET-2)
    """
    world_size = len(selected_cycle)
    if world_size == 0:
        return {}
    hosts_by_node: dict[NodeId, list[Host]] = {}
    for rank, node_id in enumerate(selected_cycle):
        neighbour_ranks = {(rank - 1) % world_size, (rank + 1) % world_size}
        hosts: list[Host] = []
        for idx, other_node_id in enumerate(selected_cycle):
            if idx == rank:
                # This node listens on all interfaces.
                hosts.append(Host(ip="0.0.0.0", port=ephemeral_port))
            elif idx not in neighbour_ranks:
                # Placeholder IP from RFC 5737 TEST-NET-2
                hosts.append(Host(ip="198.51.100.1", port=0))
            else:
                connection_ip = _find_ip_prioritised(
                    node_id, other_node_id, cycle_digraph, node_network, ring=True
                )
                if connection_ip is None:
                    raise ValueError(
                        "MLX ring backend requires connectivity between neighbouring nodes"
                    )
                hosts.append(Host(ip=connection_ip, port=ephemeral_port))
        hosts_by_node[node_id] = hosts
    return hosts_by_node
def get_mlx_jaccl_coordinators(
    coordinator: NodeId,
    coordinator_port: int,
    cycle_digraph: Topology,
    node_network: Mapping[NodeId, NodeNetworkInfo],
) -> dict[NodeId, str]:
    """Get the coordinator addresses for MLX JACCL (rank 0 device).

    Select an IP address that each node can reach for the rank 0 node. Returns
    address in format "X.X.X.X:PORT" per node.
    """
    logger.debug(f"Selecting coordinator: {coordinator}")
    addresses: dict[NodeId, str] = {}
    for n in cycle_digraph.list_nodes():
        if n == coordinator:
            # The coordinator itself binds on all interfaces.
            ip = "0.0.0.0"
        else:
            ip = _find_ip_prioritised(
                n, coordinator, cycle_digraph, node_network, ring=False
            )
            if ip is None:
                raise ValueError(
                    "Current jaccl backend requires all participating devices to be able to communicate"
                )
        addresses[n] = f"{ip}:{coordinator_port}"
    return addresses
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/placement_utils.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/master/tests/test_master.py | from datetime import datetime, timezone
from typing import Sequence
import anyio
import pytest
from loguru import logger
from exo.master.main import Master
from exo.routing.router import get_node_id_keypair
from exo.shared.models.model_cards import ModelCard, ModelTask
from exo.shared.types.commands import (
CommandId,
ForwarderCommand,
ForwarderDownloadCommand,
PlaceInstance,
TextGeneration,
)
from exo.shared.types.common import ModelId, NodeId, SessionId, SystemId
from exo.shared.types.events import (
Event,
GlobalForwarderEvent,
IndexedEvent,
InstanceCreated,
LocalForwarderEvent,
NodeGatheredInfo,
TaskCreated,
)
from exo.shared.types.memory import Memory
from exo.shared.types.profiling import (
MemoryUsage,
)
from exo.shared.types.tasks import TaskStatus
from exo.shared.types.tasks import TextGeneration as TextGenerationTask
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.worker.instances import (
InstanceMeta,
MlxRingInstance,
ShardAssignments,
)
from exo.shared.types.worker.shards import PipelineShardMetadata, Sharding
from exo.utils.channels import channel
@pytest.mark.asyncio
async def test_master():
    """End-to-end smoke test of the Master event loop.

    Wires a Master to in-memory channels, loops its emitted events back in
    through a fake event router, then drives it with a NodeGatheredInfo
    event, a PlaceInstance command and a TextGeneration command, asserting
    that the three expected events (NodeGatheredInfo, InstanceCreated,
    TaskCreated) come out in order with consistent contents.
    """
    keypair = get_node_id_keypair()
    node_id = NodeId(keypair.to_node_id())
    session_id = SessionId(master_node_id=node_id, election_clock=0)
    # Channel pairs standing in for the real forwarder plumbing.
    ge_sender, global_event_receiver = channel[GlobalForwarderEvent]()
    command_sender, co_receiver = channel[ForwarderCommand]()
    local_event_sender, le_receiver = channel[LocalForwarderEvent]()
    fcds, _fcdr = channel[ForwarderDownloadCommand]()
    ev_send, ev_recv = channel[Event]()

    async def mock_event_router():
        # Stand-in for the real EventRouter: wraps each event the master
        # emits in a LocalForwarderEvent (with a monotonically increasing
        # origin index) and feeds it straight back to the master.
        idx = 0
        sid = SystemId()
        with ev_recv as master_events:
            async for event in master_events:
                await local_event_sender.send(
                    LocalForwarderEvent(
                        origin=sid,
                        origin_idx=idx,
                        session=session_id,
                        event=event,
                    )
                )
                idx += 1

    all_events: list[IndexedEvent] = []

    def _get_events() -> Sequence[IndexedEvent]:
        # Drain the global event channel, re-indexing events into a single
        # accumulated list so repeated calls see a growing history.
        orig_events = global_event_receiver.collect()
        for e in orig_events:
            all_events.append(
                IndexedEvent(
                    event=e.event,
                    idx=len(all_events),  # origin=e.origin,
                )
            )
        return all_events

    master = Master(
        node_id,
        session_id,
        event_sender=ev_send,
        global_event_sender=ge_sender,
        local_event_receiver=le_receiver,
        command_receiver=co_receiver,
        download_command_sender=fcds,
    )
    logger.info("run the master")
    async with anyio.create_task_group() as tg:
        tg.start_soon(master.run)
        tg.start_soon(mock_event_router)
        # inject a NodeGatheredInfo event
        logger.info("inject a NodeGatheredInfo event")
        await local_event_sender.send(
            LocalForwarderEvent(
                origin_idx=0,
                origin=SystemId("Worker"),
                session=session_id,
                event=(
                    NodeGatheredInfo(
                        when=str(datetime.now(tz=timezone.utc)),
                        node_id=node_id,
                        info=MemoryUsage(
                            ram_total=Memory.from_bytes(678948 * 1024),
                            ram_available=Memory.from_bytes(678948 * 1024),
                            swap_total=Memory.from_bytes(0),
                            swap_available=Memory.from_bytes(0),
                        ),
                    )
                ),
            )
        )
        # wait for initial topology event
        logger.info("wait for initial topology event")
        while len(list(master.state.topology.list_nodes())) == 0:
            await anyio.sleep(0.001)
        while len(master.state.node_memory) == 0:
            await anyio.sleep(0.001)
        logger.info("inject a CreateInstance Command")
        await command_sender.send(
            ForwarderCommand(
                origin=SystemId("API"),
                command=(
                    PlaceInstance(
                        command_id=CommandId(),
                        model_card=ModelCard(
                            model_id=ModelId("llama-3.2-1b"),
                            n_layers=16,
                            storage_size=Memory.from_bytes(678948),
                            hidden_size=7168,
                            supports_tensor=True,
                            tasks=[ModelTask.TextGeneration],
                        ),
                        sharding=Sharding.Pipeline,
                        instance_meta=InstanceMeta.MlxRing,
                        min_nodes=1,
                    )
                ),
            )
        )
        logger.info("wait for an instance")
        while len(master.state.instances.keys()) == 0:
            await anyio.sleep(0.001)
        logger.info("inject a TextGeneration Command")
        await command_sender.send(
            ForwarderCommand(
                origin=SystemId("API"),
                command=(
                    TextGeneration(
                        command_id=CommandId(),
                        task_params=TextGenerationTaskParams(
                            model=ModelId("llama-3.2-1b"),
                            input=[
                                InputMessage(role="user", content="Hello, how are you?")
                            ],
                        ),
                    )
                ),
            )
        )
        # Poll until the three expected events have been globally forwarded.
        while len(_get_events()) < 3:
            await anyio.sleep(0.01)
        events = _get_events()
        assert len(events) == 3
        assert events[0].idx == 0
        assert events[1].idx == 1
        assert events[2].idx == 2
        assert isinstance(events[0].event, NodeGatheredInfo)
        assert isinstance(events[1].event, InstanceCreated)
        created_instance = events[1].event.instance
        assert isinstance(created_instance, MlxRingInstance)
        runner_id = list(created_instance.shard_assignments.runner_to_shard.keys())[0]
        # Validate the shard assignments
        expected_shard_assignments = ShardAssignments(
            model_id=ModelId("llama-3.2-1b"),
            runner_to_shard={
                (runner_id): PipelineShardMetadata(
                    start_layer=0,
                    end_layer=16,
                    n_layers=16,
                    model_card=ModelCard(
                        model_id=ModelId("llama-3.2-1b"),
                        n_layers=16,
                        storage_size=Memory.from_bytes(678948),
                        hidden_size=7168,
                        supports_tensor=True,
                        tasks=[ModelTask.TextGeneration],
                    ),
                    device_rank=0,
                    world_size=1,
                )
            },
            node_to_runner={node_id: runner_id},
        )
        assert created_instance.shard_assignments == expected_shard_assignments
        # For single-node, hosts_by_node should have one entry with self-binding
        assert len(created_instance.hosts_by_node) == 1
        assert node_id in created_instance.hosts_by_node
        assert len(created_instance.hosts_by_node[node_id]) == 1
        assert created_instance.hosts_by_node[node_id][0].ip == "0.0.0.0"
        assert created_instance.ephemeral_port > 0
        assert isinstance(events[2].event, TaskCreated)
        assert events[2].event.task.task_status == TaskStatus.Pending
        assert isinstance(events[2].event.task, TextGenerationTask)
        assert events[2].event.task.task_params == TextGenerationTaskParams(
            model=ModelId("llama-3.2-1b"),
            input=[InputMessage(role="user", content="Hello, how are you?")],
        )
        # Closing the event channel stops mock_event_router; shutdown stops
        # master.run, letting the task group exit.
        ev_send.close()
        await master.shutdown()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_master.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_placement.py | import pytest
from exo.master.placement import (
get_transition_events,
place_instance,
)
from exo.master.tests.conftest import (
create_node_memory,
create_node_network,
create_rdma_connection,
create_socket_connection,
)
from exo.shared.models.model_cards import ModelCard, ModelId, ModelTask
from exo.shared.topology import Topology
from exo.shared.types.commands import PlaceInstance
from exo.shared.types.common import CommandId, NodeId
from exo.shared.types.events import InstanceCreated, InstanceDeleted, TaskStatusUpdated
from exo.shared.types.memory import Memory
from exo.shared.types.multiaddr import Multiaddr
from exo.shared.types.profiling import NetworkInterfaceInfo, NodeNetworkInfo
from exo.shared.types.tasks import TaskId, TaskStatus, TextGeneration
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.topology import Connection, SocketConnection
from exo.shared.types.worker.instances import (
Instance,
InstanceId,
InstanceMeta,
MlxJacclInstance,
MlxRingInstance,
)
from exo.shared.types.worker.runners import ShardAssignments
from exo.shared.types.worker.shards import Sharding
@pytest.fixture
def instance() -> Instance:
    """A minimal MlxRingInstance with empty shard assignments and hosts."""
    empty_assignments = ShardAssignments(
        model_id=ModelId("test-model"),
        runner_to_shard={},
        node_to_runner={},
    )
    return MlxRingInstance(
        instance_id=InstanceId(),
        shard_assignments=empty_assignments,
        hosts_by_node={},
        ephemeral_port=50000,
    )
@pytest.fixture
def model_card() -> ModelCard:
    """A small ten-layer model card reused across the placement tests."""
    card = ModelCard(
        model_id=ModelId("test-model"),
        storage_size=Memory.from_kb(1000),
        n_layers=10,
        hidden_size=30,
        supports_tensor=True,
        tasks=[ModelTask.TextGeneration],
    )
    return card
def place_instance_command(model_card: ModelCard) -> PlaceInstance:
    """Wrap *model_card* in a pipeline-sharded MlxRing PlaceInstance command."""
    command = PlaceInstance(
        command_id=CommandId(),
        model_card=model_card,
        sharding=Sharding.Pipeline,
        instance_meta=InstanceMeta.MlxRing,
        min_nodes=1,
    )
    return command
@pytest.mark.parametrize(
    "available_memory,total_layers,expected_layers",
    [
        ((500, 500, 1000), 12, (3, 3, 6)),
        ((500, 500, 500), 12, (4, 4, 4)),
        ((312, 468, 1092), 12, (2, 3, 7)),
    ],
)
def test_get_instance_placements_create_instance(
    available_memory: tuple[int, int, int],
    total_layers: int,
    expected_layers: tuple[int, int, int],
    model_card: ModelCard,
):
    """Layers are split across a fully connected 3-node cycle in proportion
    to each node's available memory, covering layers 0..total_layers with
    no gaps."""
    # arrange
    model_card.n_layers = total_layers
    model_card.storage_size = Memory.from_bytes(
        sum(available_memory)
    )  # make it exactly fit across all nodes
    topology = Topology()
    cic = place_instance_command(model_card)
    node_id_a = NodeId()
    node_id_b = NodeId()
    node_id_c = NodeId()
    # fully connected (directed) between the 3 nodes
    conn_a_b = Connection(
        source=node_id_a, sink=node_id_b, edge=create_socket_connection(1)
    )
    conn_b_c = Connection(
        source=node_id_b, sink=node_id_c, edge=create_socket_connection(2)
    )
    conn_c_a = Connection(
        source=node_id_c, sink=node_id_a, edge=create_socket_connection(3)
    )
    conn_c_b = Connection(
        source=node_id_c, sink=node_id_b, edge=create_socket_connection(4)
    )
    conn_a_c = Connection(
        source=node_id_a, sink=node_id_c, edge=create_socket_connection(5)
    )
    conn_b_a = Connection(
        source=node_id_b, sink=node_id_a, edge=create_socket_connection(6)
    )
    node_memory = {
        node_id_a: create_node_memory(available_memory[0]),
        node_id_b: create_node_memory(available_memory[1]),
        node_id_c: create_node_memory(available_memory[2]),
    }
    node_network = {
        node_id_a: create_node_network(),
        node_id_b: create_node_network(),
        node_id_c: create_node_network(),
    }
    topology.add_node(node_id_a)
    topology.add_node(node_id_b)
    topology.add_node(node_id_c)
    topology.add_connection(conn_a_b)
    topology.add_connection(conn_b_c)
    topology.add_connection(conn_c_a)
    topology.add_connection(conn_c_b)
    topology.add_connection(conn_a_c)
    topology.add_connection(conn_b_a)
    # act
    placements = place_instance(cic, topology, {}, node_memory, node_network)
    # assert
    assert len(placements) == 1
    instance_id = list(placements.keys())[0]
    instance = placements[instance_id]
    assert instance.shard_assignments.model_id == model_card.model_id
    runner_id_a = instance.shard_assignments.node_to_runner[node_id_a]
    runner_id_b = instance.shard_assignments.node_to_runner[node_id_b]
    runner_id_c = instance.shard_assignments.node_to_runner[node_id_c]
    shard_a = instance.shard_assignments.runner_to_shard[runner_id_a]
    shard_b = instance.shard_assignments.runner_to_shard[runner_id_b]
    shard_c = instance.shard_assignments.runner_to_shard[runner_id_c]
    # Per-node layer counts match the memory-proportional expectation.
    assert shard_a.end_layer - shard_a.start_layer == expected_layers[0]
    assert shard_b.end_layer - shard_b.start_layer == expected_layers[1]
    assert shard_c.end_layer - shard_c.start_layer == expected_layers[2]
    # Shards, ordered by start layer, must tile the full layer range.
    shards = [shard_a, shard_b, shard_c]
    shards_sorted = sorted(shards, key=lambda s: s.start_layer)
    assert shards_sorted[0].start_layer == 0
    assert shards_sorted[-1].end_layer == total_layers
def test_get_instance_placements_one_node_exact_fit() -> None:
    """A node whose free memory exactly equals the model's storage size
    receives the whole model as a single shard.

    Fix: the final assertion was accidentally duplicated (the
    ``runner_to_shard`` length was checked twice); the duplicate is removed.
    """
    topology = Topology()
    node_id = NodeId()
    topology.add_node(node_id)
    # 1000 KB free, matching storage_size=Memory.from_kb(1000) exactly.
    node_memory = {node_id: create_node_memory(1000 * 1024)}
    node_network = {node_id: create_node_network()}
    cic = place_instance_command(
        ModelCard(
            model_id=ModelId("test-model"),
            storage_size=Memory.from_kb(1000),
            n_layers=10,
            hidden_size=1000,
            supports_tensor=True,
            tasks=[ModelTask.TextGeneration],
        ),
    )
    placements = place_instance(cic, topology, {}, node_memory, node_network)
    assert len(placements) == 1
    instance_id = list(placements.keys())[0]
    instance = placements[instance_id]
    assert instance.shard_assignments.model_id == "test-model"
    assert len(instance.shard_assignments.node_to_runner) == 1
    assert len(instance.shard_assignments.runner_to_shard) == 1
def test_get_instance_placements_one_node_fits_with_extra_memory() -> None:
    """A node with slightly more memory than the model needs still gets the
    whole model as a single shard.

    Fix: the final assertion was accidentally duplicated (the
    ``runner_to_shard`` length was checked twice); the duplicate is removed.
    """
    topology = Topology()
    node_id = NodeId()
    topology.add_node(node_id)
    # 1001 KB free vs a 1000 KB model: fits with 1 KB to spare.
    node_memory = {node_id: create_node_memory(1001 * 1024)}
    node_network = {node_id: create_node_network()}
    cic = place_instance_command(
        ModelCard(
            model_id=ModelId("test-model"),
            storage_size=Memory.from_kb(1000),
            n_layers=10,
            hidden_size=1000,
            supports_tensor=True,
            tasks=[ModelTask.TextGeneration],
        ),
    )
    placements = place_instance(cic, topology, {}, node_memory, node_network)
    assert len(placements) == 1
    instance_id = list(placements.keys())[0]
    instance = placements[instance_id]
    assert instance.shard_assignments.model_id == "test-model"
    assert len(instance.shard_assignments.node_to_runner) == 1
    assert len(instance.shard_assignments.runner_to_shard) == 1
def test_get_instance_placements_one_node_not_fit() -> None:
    """Placement fails when the model is larger than the only node's memory."""
    lone_node = NodeId()
    topology = Topology()
    topology.add_node(lone_node)
    node_memory = {lone_node: create_node_memory(1000 * 1024)}
    node_network = {lone_node: create_node_network()}
    # 1001 KB model vs 1000 KB of free memory: one KB short.
    oversized_card = ModelCard(
        model_id=ModelId("test-model"),
        storage_size=Memory.from_kb(1001),
        n_layers=10,
        hidden_size=1000,
        supports_tensor=True,
        tasks=[ModelTask.TextGeneration],
    )
    command = place_instance_command(model_card=oversized_card)
    with pytest.raises(ValueError, match="No cycles found with sufficient memory"):
        place_instance(command, topology, {}, node_memory, node_network)
def test_get_transition_events_no_change(instance: Instance):
    """Identical current and target instance maps produce no events."""
    shared_id = InstanceId()
    unchanged = {shared_id: instance}

    events = get_transition_events(unchanged, dict(unchanged), {})

    assert len(events) == 0
def test_get_transition_events_create_instance(instance: Instance):
    """An instance present only in the target map yields InstanceCreated."""
    new_id = InstanceId()
    before: dict[InstanceId, Instance] = {}
    after: dict[InstanceId, Instance] = {new_id: instance}

    events = get_transition_events(before, after, {})

    assert len(events) == 1
    assert isinstance(events[0], InstanceCreated)
def test_get_transition_events_delete_instance(instance: Instance):
    """An instance present only in the current map yields InstanceDeleted."""
    doomed_id = InstanceId()
    before: dict[InstanceId, Instance] = {doomed_id: instance}
    after: dict[InstanceId, Instance] = {}

    events = get_transition_events(before, after, {})

    assert len(events) == 1
    deletion = events[0]
    assert isinstance(deletion, InstanceDeleted)
    assert deletion.instance_id == doomed_id
def test_placement_selects_leaf_nodes(
    model_card: ModelCard,
):
    """In a daisy-chain A-B-C-D, placement picks one adjacent 2-node pair
    (either the A/B end or the C/D end) rather than spanning the chain."""
    # arrange
    topology = Topology()
    model_card.storage_size = Memory.from_bytes(1000)
    node_id_a = NodeId()
    node_id_b = NodeId()
    node_id_c = NodeId()
    node_id_d = NodeId()
    # Any adjacent pair has >= 1000 bytes combined, so both ends qualify.
    node_memory = {
        node_id_a: create_node_memory(500),
        node_id_b: create_node_memory(600),
        node_id_c: create_node_memory(600),
        node_id_d: create_node_memory(500),
    }
    node_network = {
        node_id_a: create_node_network(),
        node_id_b: create_node_network(),
        node_id_c: create_node_network(),
        node_id_d: create_node_network(),
    }
    topology.add_node(node_id_a)
    topology.add_node(node_id_b)
    topology.add_node(node_id_c)
    topology.add_node(node_id_d)
    # Daisy chain topology (directed)
    topology.add_connection(
        Connection(source=node_id_a, sink=node_id_b, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=node_id_b, sink=node_id_a, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=node_id_b, sink=node_id_c, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=node_id_c, sink=node_id_b, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=node_id_c, sink=node_id_d, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=node_id_d, sink=node_id_c, edge=create_socket_connection(1))
    )
    cic = place_instance_command(model_card=model_card)
    # act
    placements = place_instance(cic, topology, {}, node_memory, node_network)
    # assert
    assert len(placements) == 1
    instance = list(placements.values())[0]
    assigned_nodes = set(instance.shard_assignments.node_to_runner.keys())
    # Either end pair is acceptable; the middle pair (B, C) is not expected.
    assert assigned_nodes == set((node_id_a, node_id_b)) or assigned_nodes == set(
        (
            node_id_c,
            node_id_d,
        )
    )
def test_tensor_rdma_backend_connectivity_matrix(
    model_card: ModelCard,
):
    """Tensor-sharded MlxJaccl placement over a 3-node mesh builds the RDMA
    device matrix from the RDMA edges and assigns coordinator addresses,
    with rank 0 listening on 0.0.0.0."""
    # arrange
    topology = Topology()
    model_card.n_layers = 12
    model_card.storage_size = Memory.from_bytes(1500)
    node_a = NodeId()
    node_b = NodeId()
    node_c = NodeId()
    # 500 bytes each: only all three nodes together can host the 1500-byte model.
    node_memory = {
        node_a: create_node_memory(500),
        node_b: create_node_memory(500),
        node_c: create_node_memory(500),
    }
    ethernet_interface = NetworkInterfaceInfo(
        name="en0",
        ip_address="10.0.0.1",
    )
    ethernet_conn = SocketConnection(
        sink_multiaddr=Multiaddr(address="/ip4/10.0.0.1/tcp/8000")
    )
    node_network = {
        node_a: NodeNetworkInfo(interfaces=[ethernet_interface]),
        node_b: NodeNetworkInfo(interfaces=[ethernet_interface]),
        node_c: NodeNetworkInfo(interfaces=[ethernet_interface]),
    }
    topology.add_node(node_a)
    topology.add_node(node_b)
    topology.add_node(node_c)
    # RDMA connections (directed)
    topology.add_connection(
        Connection(source=node_a, sink=node_b, edge=create_rdma_connection(3))
    )
    topology.add_connection(
        Connection(source=node_b, sink=node_a, edge=create_rdma_connection(3))
    )
    topology.add_connection(
        Connection(source=node_b, sink=node_c, edge=create_rdma_connection(4))
    )
    topology.add_connection(
        Connection(source=node_c, sink=node_b, edge=create_rdma_connection(4))
    )
    topology.add_connection(
        Connection(source=node_a, sink=node_c, edge=create_rdma_connection(5))
    )
    topology.add_connection(
        Connection(source=node_c, sink=node_a, edge=create_rdma_connection(5))
    )
    # Ethernet connections (directed)
    topology.add_connection(Connection(source=node_a, sink=node_b, edge=ethernet_conn))
    topology.add_connection(Connection(source=node_b, sink=node_c, edge=ethernet_conn))
    topology.add_connection(Connection(source=node_c, sink=node_a, edge=ethernet_conn))
    topology.add_connection(Connection(source=node_a, sink=node_c, edge=ethernet_conn))
    topology.add_connection(Connection(source=node_b, sink=node_a, edge=ethernet_conn))
    topology.add_connection(Connection(source=node_c, sink=node_b, edge=ethernet_conn))
    cic = PlaceInstance(
        sharding=Sharding.Tensor,
        instance_meta=InstanceMeta.MlxJaccl,
        command_id=CommandId(),
        model_card=model_card,
        min_nodes=1,
    )
    # act
    placements = place_instance(cic, topology, {}, node_memory, node_network)
    # assert
    assert len(placements) == 1
    instance_id = list(placements.keys())[0]
    instance = placements[instance_id]
    assert isinstance(instance, MlxJacclInstance)
    assert instance.jaccl_devices is not None
    assert instance.jaccl_coordinators is not None
    matrix = instance.jaccl_devices
    assert len(matrix) == 3
    # Diagonal (self-connections) must be empty.
    for i in range(3):
        assert matrix[i][i] is None
    assigned_nodes = list(instance.shard_assignments.node_to_runner.keys())
    node_to_idx = {node_id: idx for idx, node_id in enumerate(assigned_nodes)}
    idx_a = node_to_idx[node_a]
    idx_b = node_to_idx[node_b]
    idx_c = node_to_idx[node_c]
    # Off-diagonal entries name the RDMA device of the corresponding edge.
    assert matrix[idx_a][idx_b] == "rdma_en3"
    assert matrix[idx_b][idx_c] == "rdma_en4"
    assert matrix[idx_c][idx_a] == "rdma_en5"
    # Verify coordinators are set for all nodes
    assert len(instance.jaccl_coordinators) == 3
    for node_id in assigned_nodes:
        assert node_id in instance.jaccl_coordinators
        coordinator = instance.jaccl_coordinators[node_id]
        assert ":" in coordinator
        # Rank 0 node should use 0.0.0.0, others should use connection-specific IPs
        if node_id == assigned_nodes[0]:
            assert coordinator.startswith("0.0.0.0:")
        else:
            ip_part = coordinator.split(":")[0]
            assert len(ip_part.split(".")) == 4
def _make_task(
    instance_id: InstanceId,
    status: TaskStatus = TaskStatus.Running,
) -> TextGeneration:
    """Build a minimal TextGeneration task bound to *instance_id*."""
    params = TextGenerationTaskParams(
        model=ModelId("test-model"),
        input=[InputMessage(role="user", content="hello")],
    )
    return TextGeneration(
        task_id=TaskId(),
        task_status=status,
        instance_id=instance_id,
        command_id=CommandId(),
        task_params=params,
    )
def test_get_transition_events_delete_instance_cancels_running_tasks(
    instance: Instance,
):
    """Deleting an instance cancels its running task before the deletion event."""
    doomed_id = InstanceId()
    running = _make_task(doomed_id, TaskStatus.Running)

    events = get_transition_events(
        {doomed_id: instance}, {}, {running.task_id: running}
    )

    # Cancellation must precede the deletion event.
    assert len(events) == 2
    cancellation, deletion = events
    assert isinstance(cancellation, TaskStatusUpdated)
    assert cancellation.task_id == running.task_id
    assert cancellation.task_status == TaskStatus.Cancelled
    assert isinstance(deletion, InstanceDeleted)
    assert deletion.instance_id == doomed_id
def test_get_transition_events_delete_instance_cancels_pending_tasks(
    instance: Instance,
):
    """Deleting an instance also cancels tasks that were still pending."""
    doomed_id = InstanceId()
    pending = _make_task(doomed_id, TaskStatus.Pending)

    events = get_transition_events(
        {doomed_id: instance}, {}, {pending.task_id: pending}
    )

    assert len(events) == 2
    cancellation, deletion = events
    assert isinstance(cancellation, TaskStatusUpdated)
    assert cancellation.task_id == pending.task_id
    assert cancellation.task_status == TaskStatus.Cancelled
    assert isinstance(deletion, InstanceDeleted)
def test_get_transition_events_delete_instance_ignores_completed_tasks(
    instance: Instance,
):
    """Tasks already in a terminal state are not re-cancelled on deletion."""
    doomed_id = InstanceId()
    terminal_states = (
        TaskStatus.Complete,
        TaskStatus.Failed,
        TaskStatus.TimedOut,
        TaskStatus.Cancelled,
    )
    tasks = {}
    for state in terminal_states:
        task = _make_task(doomed_id, state)
        tasks[task.task_id] = task

    events = get_transition_events({doomed_id: instance}, {}, tasks)

    # Only the deletion itself — no cancellation events.
    assert len(events) == 1
    assert isinstance(events[0], InstanceDeleted)
def test_get_transition_events_delete_instance_cancels_only_matching_tasks(
    instance: Instance,
):
    """Deleting one of two instances cancels only that instance's tasks."""
    doomed_id = InstanceId()
    surviving_id = InstanceId()
    current: dict[InstanceId, Instance] = {
        doomed_id: instance,
        surviving_id: instance,
    }
    # Only the doomed instance is removed from the target state.
    target: dict[InstanceId, Instance] = {surviving_id: instance}
    doomed_task = _make_task(doomed_id, TaskStatus.Running)
    surviving_task = _make_task(surviving_id, TaskStatus.Running)
    tasks = {t.task_id: t for t in (doomed_task, surviving_task)}

    events = get_transition_events(current, target, tasks)

    cancellations = [e for e in events if isinstance(e, TaskStatusUpdated)]
    deletions = [e for e in events if isinstance(e, InstanceDeleted)]
    assert len(cancellations) == 1
    assert cancellations[0].task_id == doomed_task.task_id
    assert cancellations[0].task_status == TaskStatus.Cancelled
    assert len(deletions) == 1
    assert deletions[0].instance_id == doomed_id
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_placement.py",
"license": "Apache License 2.0",
"lines": 486,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_placement_utils.py | import pytest
from exo.master.placement_utils import (
allocate_layers_proportionally,
filter_cycles_by_memory,
get_mlx_jaccl_coordinators,
get_shard_assignments,
get_shard_assignments_for_pipeline_parallel,
get_smallest_cycles,
)
from exo.master.tests.conftest import (
create_node_memory,
create_socket_connection,
)
from exo.shared.models.model_cards import ModelCard, ModelId, ModelTask
from exo.shared.topology import Topology
from exo.shared.types.common import NodeId
from exo.shared.types.memory import Memory
from exo.shared.types.profiling import (
NetworkInterfaceInfo,
NodeNetworkInfo,
)
from exo.shared.types.topology import Connection, SocketConnection
from exo.shared.types.worker.shards import (
CfgShardMetadata,
PipelineShardMetadata,
Sharding,
)
def test_filter_cycles_by_memory():
    """A two-node cycle survives filtering when its memory meets the requirement."""
    first, second = NodeId(), NodeId()
    topology = Topology()
    topology.add_node(first)
    topology.add_node(second)
    topology.add_connection(
        Connection(source=first, sink=second, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=second, sink=first, edge=create_socket_connection(2))
    )
    node_memory = {nid: create_node_memory(1000 * 1024) for nid in (first, second)}
    # Ignore trivial single-node cycles.
    cycles = [c for c in topology.get_cycles() if len(c) != 1]
    assert len(cycles) == 1
    assert len(cycles[0]) == 2

    filtered = filter_cycles_by_memory(cycles, node_memory, Memory.from_bytes(1))

    assert len(filtered) == 1
    assert len(filtered[0]) == 2
    assert set(filtered[0]) == {first, second}
def test_filter_cycles_by_insufficient_memory():
    """No cycle survives when the requirement exceeds the combined memory."""
    first, second = NodeId(), NodeId()
    topology = Topology()
    topology.add_node(first)
    topology.add_node(second)
    topology.add_connection(
        Connection(source=first, sink=second, edge=create_socket_connection(1))
    )
    topology.add_connection(
        Connection(source=second, sink=first, edge=create_socket_connection(2))
    )
    # 1000 KB per node, 2000 KB combined — just below the 2001 KB requirement.
    node_memory = {nid: create_node_memory(1000 * 1024) for nid in (first, second)}

    filtered = filter_cycles_by_memory(
        topology.get_cycles(), node_memory, Memory.from_kb(2001)
    )

    assert len(filtered) == 0
def test_filter_multiple_cycles_by_memory():
    """Only the 3-node cycle has enough combined memory; the 2-node one is dropped."""
    node_a, node_b, node_c = NodeId(), NodeId(), NodeId()
    topology = Topology()
    for nid in (node_a, node_b, node_c):
        topology.add_node(nid)
    # A<->B forms a 2-cycle; A->C->B->A forms a 3-cycle.
    directed_edges = [
        (node_a, node_b, 1),
        (node_b, node_a, 2),
        (node_a, node_c, 3),
        (node_c, node_b, 4),
    ]
    for src, dst, idx in directed_edges:
        topology.add_connection(
            Connection(source=src, sink=dst, edge=create_socket_connection(idx))
        )
    node_memory = {
        node_a: create_node_memory(500 * 1024),
        node_b: create_node_memory(500 * 1024),
        node_c: create_node_memory(1000 * 1024),
    }

    filtered = filter_cycles_by_memory(
        topology.get_cycles(), node_memory, Memory.from_kb(1500)
    )

    assert len(filtered) == 1
    assert len(filtered[0]) == 3
    assert set(filtered[0]) == {node_a, node_b, node_c}
def test_get_smallest_cycles():
    """Of a 2-node and a 3-node cycle, only the smallest is kept."""
    node_a, node_b, node_c = NodeId(), NodeId(), NodeId()
    topology = Topology()
    for nid in (node_a, node_b, node_c):
        topology.add_node(nid)
    # A<->B gives a 2-cycle; A->C->B->A gives a 3-cycle.
    for idx, (src, dst) in enumerate(
        [(node_a, node_b), (node_b, node_a), (node_a, node_c), (node_c, node_b)],
        start=1,
    ):
        topology.add_connection(
            Connection(source=src, sink=dst, edge=create_socket_connection(idx))
        )
    cycles = [c for c in topology.get_cycles() if len(c) != 1]  # ignore singletons

    smallest = get_smallest_cycles(cycles)

    assert len(smallest) == 1
    assert len(smallest[0]) == 2
    assert set(smallest[0]) == {node_a, node_b}
@pytest.mark.parametrize(
    "available_memory,total_layers,expected_layers",
    [
        ((500, 500, 1000), 12, (3, 3, 6)),
        ((500, 500, 500), 12, (4, 4, 4)),
        ((312, 518, 1024), 12, (2, 3, 7)),
        # Edge case: one node has ~90% of memory - should not over-allocate.
        # Each node must have enough memory for at least 1 layer (50 KB = 1000/20).
        ((900, 50, 50), 20, (18, 1, 1)),
    ],
)
def test_get_shard_assignments(
    available_memory: tuple[int, int, int],
    total_layers: int,
    expected_layers: tuple[int, int, int],
):
    """get_shard_assignments splits the model's layers over the 3-node cycle
    proportionally to each node's available memory (values in KB)."""
    # arrange
    node_a_id = NodeId()
    node_b_id = NodeId()
    node_c_id = NodeId()
    # create connections (A -> B -> C -> A forms a 3-cycle, plus B -> A also exists)
    connection1 = Connection(
        source=node_a_id, sink=node_b_id, edge=create_socket_connection(1)
    )
    connection2 = Connection(
        source=node_b_id, sink=node_c_id, edge=create_socket_connection(2)
    )
    connection3 = Connection(
        source=node_c_id, sink=node_a_id, edge=create_socket_connection(3)
    )
    connection4 = Connection(
        source=node_b_id, sink=node_a_id, edge=create_socket_connection(4)
    )
    topology = Topology()
    topology.add_node(node_a_id)
    topology.add_node(node_b_id)
    topology.add_node(node_c_id)
    topology.add_connection(connection1)
    topology.add_connection(connection2)
    topology.add_connection(connection3)
    topology.add_connection(connection4)
    node_a_mem = create_node_memory(available_memory[0] * 1024)
    node_b_mem = create_node_memory(available_memory[1] * 1024)
    node_c_mem = create_node_memory(available_memory[2] * 1024)
    node_memory = {
        node_a_id: node_a_mem,
        node_b_id: node_b_mem,
        node_c_id: node_c_mem,
    }
    model_card = ModelCard(
        model_id=ModelId("test-model"),
        n_layers=total_layers,
        storage_size=Memory.from_kb(1000),
        hidden_size=1000,
        supports_tensor=True,
        tasks=[ModelTask.TextGeneration],
    )
    cycles = topology.get_cycles()
    # pick the 3-node cycle deterministically (cycle ordering can vary)
    selected_cycle = next(cycle for cycle in cycles if len(cycle) == 3)
    # act
    shard_assignments = get_shard_assignments(
        model_card, selected_cycle, Sharding.Pipeline, node_memory=node_memory
    )
    # assert
    runner_id_a = shard_assignments.node_to_runner[node_a_id]
    runner_id_b = shard_assignments.node_to_runner[node_b_id]
    runner_id_c = shard_assignments.node_to_runner[node_c_id]
    # Per-runner layer counts must match the proportional expectation.
    assert (
        shard_assignments.runner_to_shard[runner_id_a].end_layer
        - shard_assignments.runner_to_shard[runner_id_a].start_layer
        == expected_layers[0]
    )
    assert (
        shard_assignments.runner_to_shard[runner_id_b].end_layer
        - shard_assignments.runner_to_shard[runner_id_b].start_layer
        == expected_layers[1]
    )
    assert (
        shard_assignments.runner_to_shard[runner_id_c].end_layer
        - shard_assignments.runner_to_shard[runner_id_c].start_layer
        == expected_layers[2]
    )
def test_get_mlx_jaccl_coordinators():
    """Coordinator addresses for a 3-node cycle: rank 0 (node_a) listens on
    0.0.0.0, while the others use the sink IP of their edge to rank 0."""
    # arrange
    node_a_id = NodeId()
    node_b_id = NodeId()
    node_c_id = NodeId()
    # fully connected (directed) between the 3 nodes
    conn_a_b = Connection(
        source=node_a_id, sink=node_b_id, edge=create_socket_connection(1)
    )
    conn_b_a = Connection(
        source=node_b_id, sink=node_a_id, edge=create_socket_connection(2)
    )
    conn_b_c = Connection(
        source=node_b_id, sink=node_c_id, edge=create_socket_connection(3)
    )
    conn_c_b = Connection(
        source=node_c_id, sink=node_b_id, edge=create_socket_connection(4)
    )
    conn_c_a = Connection(
        source=node_c_id, sink=node_a_id, edge=create_socket_connection(5)
    )
    conn_a_c = Connection(
        source=node_a_id, sink=node_c_id, edge=create_socket_connection(6)
    )
    network_a = NodeNetworkInfo(
        interfaces=[
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.5"),
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.2"),
        ]
    )
    network_b = NodeNetworkInfo(
        interfaces=[
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.1"),
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.4"),
        ]
    )
    network_c = NodeNetworkInfo(
        interfaces=[
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.3"),
            NetworkInterfaceInfo(name="en0", ip_address="169.254.0.6"),
        ]
    )
    node_network = {
        node_a_id: network_a,
        node_b_id: network_b,
        node_c_id: network_c,
    }
    topology = Topology()
    topology.add_node(node_a_id)
    topology.add_node(node_b_id)
    topology.add_node(node_c_id)
    topology.add_connection(conn_a_b)
    topology.add_connection(conn_b_a)
    topology.add_connection(conn_b_c)
    topology.add_connection(conn_c_b)
    topology.add_connection(conn_c_a)
    topology.add_connection(conn_a_c)
    # act
    coordinators = get_mlx_jaccl_coordinators(
        node_a_id,
        coordinator_port=5000,
        cycle_digraph=topology,
        node_network=node_network,
    )
    # assert
    assert len(coordinators) == 3
    assert node_a_id in coordinators
    assert node_b_id in coordinators
    assert node_c_id in coordinators
    # All coordinators should have IP:PORT format
    for node_id, coordinator in coordinators.items():
        assert ":" in coordinator, (
            f"Coordinator for {node_id} should have ':' separator"
        )
    # Verify port is correct
    for node_id, coordinator in coordinators.items():
        assert coordinator.endswith(":5000"), (
            f"Coordinator for {node_id} should use port 5000"
        )
    # Rank 0 (node_a) treats this as the listen socket so should listen on all IPs
    assert coordinators[node_a_id].startswith("0.0.0.0:"), (
        "Rank 0 node should use 0.0.0.0 as coordinator listen address"
    )
    # Non-rank-0 nodes should use the specific IP from their connection to rank 0
    # node_b uses the IP from conn_b_a (node_b -> node_a)
    assert isinstance(conn_b_a.edge, SocketConnection)
    assert (
        coordinators[node_b_id] == f"{conn_b_a.edge.sink_multiaddr.ip_address}:5000"
    ), "node_b should use the IP from conn_b_a"
    # node_c uses the IP from conn_c_a (node_c -> node_a)
    assert isinstance(conn_c_a.edge, SocketConnection)
    assert coordinators[node_c_id] == (
        f"{conn_c_a.edge.sink_multiaddr.ip_address}:5000"
    ), "node_c should use the IP from conn_c_a"
class TestAllocateLayersProportionally:
    """Unit tests for allocate_layers_proportionally: rejection of invalid
    inputs and proportional splitting with a 1-layer-per-node floor."""

    def test_empty_node_list_raises(self):
        with pytest.raises(ValueError, match="empty node list"):
            allocate_layers_proportionally(total_layers=10, memory_fractions=[])

    def test_zero_layers_raises(self):
        with pytest.raises(ValueError, match="need at least 1 layer per node"):
            allocate_layers_proportionally(total_layers=0, memory_fractions=[0.5, 0.5])

    def test_negative_layers_raises(self):
        with pytest.raises(ValueError, match="need at least 1 layer per node"):
            allocate_layers_proportionally(total_layers=-1, memory_fractions=[0.5, 0.5])

    def test_fewer_layers_than_nodes_raises(self):
        with pytest.raises(ValueError, match="need at least 1 layer per node"):
            allocate_layers_proportionally(
                total_layers=2, memory_fractions=[0.33, 0.33, 0.34]
            )

    def test_equal_distribution(self):
        allocation = allocate_layers_proportionally(
            total_layers=12, memory_fractions=[0.25, 0.25, 0.25, 0.25]
        )
        assert allocation == [3, 3, 3, 3]
        assert sum(allocation) == 12

    def test_proportional_distribution(self):
        allocation = allocate_layers_proportionally(
            total_layers=12, memory_fractions=[0.25, 0.25, 0.50]
        )
        assert allocation == [3, 3, 6]
        assert sum(allocation) == 12

    def test_extreme_imbalance_ensures_minimum(self):
        allocation = allocate_layers_proportionally(
            total_layers=20, memory_fractions=[0.975, 0.0125, 0.0125]
        )
        assert min(allocation) >= 1
        assert sum(allocation) == 20
        # The tiny nodes are floored at a single layer each.
        assert allocation == [18, 1, 1]

    def test_single_node_gets_all_layers(self):
        assert allocate_layers_proportionally(
            total_layers=10, memory_fractions=[1.0]
        ) == [10]

    def test_minimum_viable_allocation(self):
        allocation = allocate_layers_proportionally(
            total_layers=3, memory_fractions=[0.33, 0.33, 0.34]
        )
        assert allocation == [1, 1, 1]
        assert sum(allocation) == 3
def test_get_shard_assignments_insufficient_memory_raises():
    """A node too small to hold even a single layer triggers a ValueError."""
    id_a = NodeId()
    id_b = NodeId()
    id_c = NodeId()
    topology = Topology()
    # Node C holds only 10 KB, but one layer costs 50 KB (1000 KB / 20 layers).
    memory_by_node = {
        id_a: create_node_memory(900 * 1024),
        id_b: create_node_memory(50 * 1024),
        id_c: create_node_memory(10 * 1024),  # insufficient for one layer
    }
    topology.add_node(id_a)
    topology.add_node(id_b)
    topology.add_node(id_c)
    ring_edges = [
        Connection(source=id_a, sink=id_b, edge=create_socket_connection(1)),
        Connection(source=id_b, sink=id_c, edge=create_socket_connection(2)),
        Connection(source=id_c, sink=id_a, edge=create_socket_connection(3)),
        Connection(source=id_b, sink=id_a, edge=create_socket_connection(3)),
    ]
    for edge in ring_edges:
        topology.add_connection(edge)
    model_card = ModelCard(
        model_id=ModelId("test-model"),
        n_layers=20,
        storage_size=Memory.from_kb(1000),
        hidden_size=1000,
        supports_tensor=True,
        tasks=[ModelTask.TextGeneration],
    )
    selected_cycle = topology.get_cycles()[0]
    with pytest.raises(ValueError, match="insufficient memory"):
        get_shard_assignments(
            model_card, selected_cycle, Sharding.Pipeline, memory_by_node
        )
class TestCfgParallelPlacement:
    """Placement tests for CFG models: pure CFG parallel on 2 nodes, hybrid
    CFG x pipeline on 4, sequential-CFG fallback on odd counts, and pure
    pipeline for non-CFG models."""

    def _create_ring_topology(self, node_ids: list[NodeId]) -> Topology:
        # Helper: connect the given nodes into a single directed ring.
        topology = Topology()
        for node_id in node_ids:
            topology.add_node(node_id)
        for i, node_id in enumerate(node_ids):
            next_node = node_ids[(i + 1) % len(node_ids)]
            conn = Connection(
                source=node_id,
                sink=next_node,
                edge=create_socket_connection(i + 1),
            )
            topology.add_connection(conn)
        return topology

    def test_two_nodes_cfg_model_uses_cfg_parallel(self):
        """Two nodes with CFG model should use CFG parallel (no pipeline)."""
        node_a = NodeId()
        node_b = NodeId()
        topology = self._create_ring_topology([node_a, node_b])
        cycles = [c for c in topology.get_cycles() if len(c) == 2]
        cycle = cycles[0]
        node_memory = {
            node_a: create_node_memory(1000 * 1024),
            node_b: create_node_memory(1000 * 1024),
        }
        model_card = ModelCard(
            model_id=ModelId("qwen-image-test"),
            n_layers=60,
            storage_size=Memory.from_kb(1000),
            hidden_size=1,
            supports_tensor=False,
            uses_cfg=True,
            tasks=[ModelTask.TextToImage],
        )
        assignments = get_shard_assignments_for_pipeline_parallel(
            model_card, cycle, node_memory
        )
        shards = list(assignments.runner_to_shard.values())
        assert len(shards) == 2
        # CFG models should get CfgShardMetadata
        for shard in shards:
            assert isinstance(shard, CfgShardMetadata)
            # Both nodes should have all layers (no pipeline split)
            assert shard.start_layer == 0
            assert shard.end_layer == 60
            assert shard.cfg_world_size == 2
            # Each node is the only stage in its pipeline group
            assert shard.pipeline_world_size == 1
            assert shard.pipeline_rank == 0
        cfg_ranks = sorted(
            s.cfg_rank for s in shards if isinstance(s, CfgShardMetadata)
        )
        assert cfg_ranks == [0, 1]

    def test_four_nodes_cfg_model_uses_hybrid(self):
        """Four nodes with CFG model should use 2 CFG groups x 2 pipeline stages."""
        nodes = [NodeId() for _ in range(4)]
        topology = self._create_ring_topology(nodes)
        cycles = [c for c in topology.get_cycles() if len(c) == 4]
        cycle = cycles[0]
        node_memory = {n: create_node_memory(1000 * 1024) for n in nodes}
        model_card = ModelCard(
            model_id=ModelId("qwen-image-test"),
            n_layers=60,
            storage_size=Memory.from_kb(1000),
            hidden_size=1,
            supports_tensor=False,
            uses_cfg=True,
            tasks=[ModelTask.TextToImage],
        )
        assignments = get_shard_assignments_for_pipeline_parallel(
            model_card, cycle, node_memory
        )
        shards = list(assignments.runner_to_shard.values())
        assert len(shards) == 4
        # CFG models should get CfgShardMetadata
        for shard in shards:
            assert isinstance(shard, CfgShardMetadata)
            assert shard.cfg_world_size == 2
            assert shard.pipeline_world_size == 2
            assert shard.pipeline_rank in [0, 1]
        # Check we have 2 nodes in each CFG group
        cfg_0_shards = [
            s for s in shards if isinstance(s, CfgShardMetadata) and s.cfg_rank == 0
        ]
        cfg_1_shards = [
            s for s in shards if isinstance(s, CfgShardMetadata) and s.cfg_rank == 1
        ]
        assert len(cfg_0_shards) == 2
        assert len(cfg_1_shards) == 2
        # Both CFG groups should have the same layer assignments
        cfg_0_layers = [(s.start_layer, s.end_layer) for s in cfg_0_shards]
        cfg_1_layers = [(s.start_layer, s.end_layer) for s in cfg_1_shards]
        assert sorted(cfg_0_layers) == sorted(cfg_1_layers)

    def test_three_nodes_cfg_model_uses_sequential_cfg(self):
        """Three nodes (odd) with CFG model should use sequential CFG (PipelineShardMetadata)."""
        nodes = [NodeId() for _ in range(3)]
        topology = self._create_ring_topology(nodes)
        cycles = [c for c in topology.get_cycles() if len(c) == 3]
        cycle = cycles[0]
        node_memory = {n: create_node_memory(1000 * 1024) for n in nodes}
        model_card = ModelCard(
            model_id=ModelId("qwen-image-test"),
            n_layers=60,
            storage_size=Memory.from_kb(1000),
            hidden_size=1,
            supports_tensor=False,
            uses_cfg=True,
            tasks=[ModelTask.TextToImage],
        )
        assignments = get_shard_assignments_for_pipeline_parallel(
            model_card, cycle, node_memory
        )
        shards = list(assignments.runner_to_shard.values())
        assert len(shards) == 3
        # Odd node count with CFG model falls back to PipelineShardMetadata (sequential CFG)
        for shard in shards:
            assert isinstance(shard, PipelineShardMetadata)

    def test_two_nodes_non_cfg_model_uses_pipeline(self):
        """Two nodes with non-CFG model should use pure pipeline (PipelineShardMetadata)."""
        node_a = NodeId()
        node_b = NodeId()
        topology = self._create_ring_topology([node_a, node_b])
        cycles = [c for c in topology.get_cycles() if len(c) == 2]
        cycle = cycles[0]
        node_memory = {
            node_a: create_node_memory(1000 * 1024),
            node_b: create_node_memory(1000 * 1024),
        }
        model_card = ModelCard(
            model_id=ModelId("flux-test"),
            n_layers=57,
            storage_size=Memory.from_kb(1000),
            hidden_size=1,
            supports_tensor=False,
            uses_cfg=False,  # Non-CFG model
            tasks=[ModelTask.TextToImage],
        )
        assignments = get_shard_assignments_for_pipeline_parallel(
            model_card, cycle, node_memory
        )
        shards = list(assignments.runner_to_shard.values())
        assert len(shards) == 2
        # Non-CFG models should get PipelineShardMetadata
        for shard in shards:
            assert isinstance(shard, PipelineShardMetadata)
        # Should have actual layer sharding (pipeline)
        layer_ranges = sorted(
            (s.start_layer, s.end_layer)
            for s in shards
            if isinstance(s, PipelineShardMetadata)
        )
        # First shard starts at 0, last shard ends at 57
        assert layer_ranges[0][0] == 0
        assert layer_ranges[-1][1] == 57
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_placement_utils.py",
"license": "Apache License 2.0",
"lines": 577,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/master/tests/test_topology.py | import pytest
from exo.shared.topology import Topology
from exo.shared.types.common import NodeId
from exo.shared.types.multiaddr import Multiaddr
from exo.shared.types.topology import Connection, SocketConnection
@pytest.fixture
def topology() -> Topology:
    """Provide a fresh, empty Topology for each test."""
    return Topology()
@pytest.fixture
def socket_connection() -> SocketConnection:
    """Provide a loopback SocketConnection usable as a topology edge."""
    return SocketConnection(
        sink_multiaddr=Multiaddr(address="/ip4/127.0.0.1/tcp/1235"),
    )
def test_add_node(topology: Topology):
    """A freshly added node with no edges is reported as a leaf."""
    new_node = NodeId()
    topology.add_node(new_node)
    assert topology.node_is_leaf(new_node)
def test_add_connection(topology: Topology, socket_connection: SocketConnection):
    """An added edge becomes listable while both endpoints remain leaves."""
    source = NodeId()
    sink = NodeId()
    link = Connection(source=source, sink=sink, edge=socket_connection)
    topology.add_node(source)
    topology.add_node(sink)
    topology.add_connection(link)
    listed = list(topology.list_connections())
    assert listed == [link]
    assert topology.node_is_leaf(source)
    assert topology.node_is_leaf(sink)
def test_remove_connection_still_connected(
    topology: Topology, socket_connection: SocketConnection
):
    """Removing the only edge between two nodes leaves no connections."""
    source = NodeId()
    sink = NodeId()
    link = Connection(source=source, sink=sink, edge=socket_connection)
    topology.add_node(source)
    topology.add_node(sink)
    topology.add_connection(link)
    topology.remove_connection(link)
    assert list(topology.get_all_connections_between(source, sink)) == []
def test_remove_node_still_connected(
    topology: Topology, socket_connection: SocketConnection
):
    """Removing a node also drops the edges that pointed at it."""
    source = NodeId()
    sink = NodeId()
    link = Connection(source=source, sink=sink, edge=socket_connection)
    topology.add_node(source)
    topology.add_node(sink)
    topology.add_connection(link)
    assert list(topology.out_edges(source)) == [link]
    topology.remove_node(sink)
    assert list(topology.out_edges(source)) == []
def test_list_nodes(topology: Topology, socket_connection: SocketConnection):
    """list_nodes returns every added node exactly once, as NodeId values."""
    # arrange
    node_a = NodeId()
    node_b = NodeId()
    conn = Connection(source=node_a, sink=node_b, edge=socket_connection)
    topology.add_node(node_a)
    topology.add_node(node_b)
    topology.add_connection(conn)
    assert list(topology.out_edges(node_a)) == [conn]
    # act
    nodes = list(topology.list_nodes())
    # assert
    assert len(nodes) == 2
    assert all(isinstance(node, NodeId) for node in nodes)
    # C401/C405: `set(node for node in nodes)` and `set([...])` were redundant
    # wrappers; set(...) of an iterable and a set literal are equivalent.
    assert set(nodes) == {node_a, node_b}
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/master/tests/test_topology.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/routing/connection_message.py | from exo_pyo3_bindings import PyFromSwarm
from exo.shared.types.common import NodeId
from exo.utils.pydantic_ext import CamelCaseModel
"""Serialisable types for Connection Updates/Messages"""
class ConnectionMessage(CamelCaseModel):
    """Local-bus notification that a peer connected or disconnected."""

    # Peer whose connection state changed.
    node_id: NodeId
    # True when the peer is now connected, False when the link dropped.
    connected: bool

    @classmethod
    def from_update(cls, update: PyFromSwarm.Connection) -> "ConnectionMessage":
        """Translate a swarm connection update into a ConnectionMessage."""
        return cls(node_id=NodeId(update.peer_id), connected=update.connected)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/routing/connection_message.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/routing/router.py | from copy import copy
from itertools import count
from math import inf
from os import PathLike
from pathlib import Path
from typing import cast
from anyio import (
BrokenResourceError,
ClosedResourceError,
move_on_after,
sleep_forever,
)
from exo_pyo3_bindings import (
AllQueuesFullError,
Keypair,
MessageTooLargeError,
NetworkingHandle,
NoPeersSubscribedToTopicError,
PyFromSwarm,
)
from filelock import FileLock
from loguru import logger
from exo.shared.constants import EXO_NODE_ID_KEYPAIR
from exo.utils.channels import Receiver, Sender, channel
from exo.utils.pydantic_ext import CamelCaseModel
from exo.utils.task_group import TaskGroup
from .connection_message import ConnectionMessage
from .topics import CONNECTION_MESSAGES, PublishPolicy, TypedTopic
# A significant current limitation of the TopicRouter is that it is not capable
# of preventing feedback, as it does not ask for a system id so cannot tell
# which message is coming/going to which system.
# This is currently only relevant for Election
class TopicRouter[T: CamelCaseModel]:
    """Per-topic fan-out: forwards items to local subscribers and, depending
    on the topic's PublishPolicy, serializes them onto the network."""

    def __init__(
        self,
        topic: TypedTopic[T],
        networking_sender: Sender[tuple[str, bytes]],
        max_buffer_size: float = inf,
    ):
        # NOTE(review): max_buffer_size is accepted but never read here — confirm intent.
        self.topic: TypedTopic[T] = topic
        # Local subscriber endpoints; dead ones are pruned during publish().
        self.senders: set[Sender[T]] = set()
        send, recv = channel[T]()
        self.receiver: Receiver[T] = recv
        self._sender: Sender[T] = send
        # Outbound network path carrying (topic string, serialized payload).
        self.networking_sender: Sender[tuple[str, bytes]] = networking_sender

    async def run(self):
        """Main loop: drain the local channel and route each item."""
        logger.debug(f"Topic Router {self.topic} ready to send")
        with self.receiver as items:
            async for item in items:
                # Check if we should send to network:
                # Minimal policy only publishes when nobody local is listening.
                if (
                    len(self.senders) == 0
                    and self.topic.publish_policy is PublishPolicy.Minimal
                ):
                    await self._send_out(item)
                    continue
                if self.topic.publish_policy is PublishPolicy.Always:
                    await self._send_out(item)
                # Then publish to all senders
                await self.publish(item)

    async def shutdown(self):
        """Close every local endpoint so the run() loop can terminate."""
        logger.debug(f"Shutting down Topic Router {self.topic}")
        # Close all the things!
        for sender in self.senders:
            sender.close()
        self._sender.close()
        self.receiver.close()

    async def publish(self, item: T):
        """
        Publish item T on this topic to all senders.

        NB: this sends to ALL receivers, potentially including receivers held by the object doing the sending.
        You should handle your own output if you hold a sender + receiver pair.
        """
        # Senders that turn out to be closed/broken are pruned afterwards.
        to_clear: set[Sender[T]] = set()
        for sender in copy(self.senders):
            try:
                await sender.send(item)
            except (ClosedResourceError, BrokenResourceError):
                to_clear.add(sender)
        self.senders -= to_clear

    async def publish_bytes(self, data: bytes):
        """Deserialize raw network bytes and publish them locally."""
        await self.publish(self.topic.deserialize(data))

    def new_sender(self) -> Sender[T]:
        """Return a fresh handle for feeding items into this router."""
        return self._sender.clone()

    async def _send_out(self, item: T):
        # Serialize and hand off to the networking layer.
        logger.trace(f"TopicRouter {self.topic.topic} sending {item}")
        await self.networking_sender.send(
            (str(self.topic.topic), self.topic.serialize(item))
        )
class Router:
    """Owns the networking handle and one TopicRouter per registered topic,
    bridging gossipsub traffic to and from local typed channels."""

    @classmethod
    def create(cls, identity: Keypair) -> "Router":
        """Build a Router backed by a fresh NetworkingHandle for `identity`."""
        return cls(handle=NetworkingHandle(identity))

    def __init__(self, handle: NetworkingHandle):
        self.topic_routers: dict[str, TopicRouter[CamelCaseModel]] = {}
        send, recv = channel[tuple[str, bytes]]()
        self.networking_receiver: Receiver[tuple[str, bytes]] = recv
        self._net: NetworkingHandle = handle
        # The first registered topic consumes this sender; later topics clone
        # a new one from the receiver instead.
        self._tmp_networking_sender: Sender[tuple[str, bytes]] | None = send
        # NOTE(review): _id_count appears unused within this class — confirm.
        self._id_count = count()
        self._tg: TaskGroup = TaskGroup()

    async def register_topic[T: CamelCaseModel](self, topic: TypedTopic[T]):
        """Create a TopicRouter for `topic`; subscribe immediately if run()
        is already active, otherwise subscription happens at startup."""
        send = self._tmp_networking_sender
        if send:
            self._tmp_networking_sender = None
        else:
            send = self.networking_receiver.clone_sender()
        router = TopicRouter[T](topic, send)
        self.topic_routers[topic.topic] = cast(TopicRouter[CamelCaseModel], router)
        if self._tg.is_running():
            await self._networking_subscribe(topic.topic)

    def sender[T: CamelCaseModel](self, topic: TypedTopic[T]) -> Sender[T]:
        """Return a sender feeding into the registered router for `topic`."""
        router = self.topic_routers.get(topic.topic, None)
        # There's gotta be a way to do this without THIS many asserts
        assert router is not None
        assert router.topic == topic
        sender = cast(TopicRouter[T], router).new_sender()
        return sender

    def receiver[T: CamelCaseModel](self, topic: TypedTopic[T]) -> Receiver[T]:
        """Return a new receiver attached to `topic`'s local fan-out."""
        router = self.topic_routers.get(topic.topic, None)
        # There's gotta be a way to do this without THIS many asserts
        assert router is not None
        assert router.topic == topic
        assert router.topic.model_type == topic.model_type
        send, recv = channel[T]()
        router.senders.add(cast(Sender[CamelCaseModel], send))
        return recv

    async def run(self):
        """Run every topic router plus the network send/receive loops until
        cancelled; unsubscribes from all topics on the way out."""
        logger.debug("Starting Router")
        try:
            async with self._tg as tg:
                for topic in self.topic_routers:
                    router = self.topic_routers[topic]
                    tg.start_soon(router.run)
                tg.start_soon(self._networking_recv)
                tg.start_soon(self._networking_publish)
                # subscribe to pending topics
                for topic in self.topic_routers:
                    await self._networking_subscribe(topic)
                # Router only shuts down if you cancel it.
                await sleep_forever()
        finally:
            # Best-effort cleanup: bounded to 1s and shielded from cancellation.
            with move_on_after(1, shield=True):
                for topic in self.topic_routers:
                    await self._networking_unsubscribe(str(topic))

    async def shutdown(self):
        """Cancel the router's task group, unwinding run()."""
        logger.debug("Shutting down Router")
        self._tg.cancel_tasks()

    async def _networking_subscribe(self, topic: str):
        await self._net.gossipsub_subscribe(topic)
        logger.info(f"Subscribed to {topic}")

    async def _networking_unsubscribe(self, topic: str):
        await self._net.gossipsub_unsubscribe(topic)
        logger.info(f"Unsubscribed from {topic}")

    async def _networking_recv(self):
        """Receive loop: dispatch swarm messages to their topic routers and
        surface connection updates on the CONNECTION_MESSAGES topic."""
        try:
            while True:
                from_swarm = await self._net.recv()
                logger.debug(from_swarm)
                match from_swarm:
                    case PyFromSwarm.Message(origin, topic, data):
                        logger.trace(
                            f"Received message on {topic} from {origin} with payload {data}"
                        )
                        if topic not in self.topic_routers:
                            logger.warning(
                                f"Received message on unknown or inactive topic {topic}"
                            )
                            continue
                        router = self.topic_routers[topic]
                        await router.publish_bytes(data)
                    case PyFromSwarm.Connection():
                        message = ConnectionMessage.from_update(from_swarm)
                        logger.trace(
                            f"Received message on connection_messages with payload {message}"
                        )
                        if CONNECTION_MESSAGES.topic in self.topic_routers:
                            router = self.topic_routers[CONNECTION_MESSAGES.topic]
                            assert router.topic.model_type == ConnectionMessage
                            router = cast(TopicRouter[ConnectionMessage], router)
                            await router.publish(message)
                    case _:
                        logger.critical(
                            "failed to exhaustively check FromSwarm messages - logic error"
                        )
        except Exception as exception:
            logger.opt(exception=exception).error(
                "Gossipsub receive loop terminated unexpectedly"
            )
            raise

    async def _networking_publish(self):
        """Publish loop: drain local outbound items into gossipsub, tolerating
        no-peers / full-queue / oversized-message errors."""
        with self.networking_receiver as networked_items:
            async for topic, data in networked_items:
                try:
                    logger.trace(f"Sending message on {topic} with payload {data}")
                    await self._net.gossipsub_publish(topic, data)
                except NoPeersSubscribedToTopicError:
                    # Nobody is listening; the message is simply dropped.
                    pass
                except AllQueuesFullError:
                    logger.warning(f"All peer queues full, dropping message on {topic}")
                except MessageTooLargeError:
                    logger.warning(
                        f"Message too large for gossipsub on {topic} ({len(data)} bytes), dropping"
                    )
def get_node_id_keypair(
    path: str | bytes | PathLike[str] | PathLike[bytes] = EXO_NODE_ID_KEYPAIR,
) -> Keypair:
    """
    Obtain the :class:`Keypair` associated with this node-ID.

    Obtain the :class:`PeerId` from it.

    Currently always generates a fresh ephemeral keypair; the on-disk
    persistence path is preserved in `_load_or_create_keypair` below.
    """
    # TODO(evan): switch back to `_load_or_create_keypair(path)` once we
    # figure out how to deal with duplicate node ids.
    # (Previously this code was left unreachable after the early return,
    # including a dead nested `lock_path` definition; it now lives in
    # reachable, testable module-private helpers.)
    return Keypair.generate()


def _keypair_lock_path(path: str | bytes | PathLike[str] | PathLike[bytes]) -> Path:
    """Sidecar lock-file path guarding the keypair file."""
    return Path(str(path) + ".lock")


def _load_or_create_keypair(
    path: str | bytes | PathLike[str] | PathLike[bytes],
) -> Keypair:
    """
    Load a persisted keypair from `path`, creating and persisting a new
    ed25519 keypair when the file is missing or unreadable.

    Operates under a cross-process file lock to avoid race conditions
    between concurrently starting processes.
    """
    with FileLock(_keypair_lock_path(path)):
        with open(path, "a+b") as f:  # opens in append-mode => starts at EOF
            # if non-zero EOF, then file exists => use to get node-ID
            if f.tell() != 0:
                f.seek(0)  # go to start & read protobuf-encoded bytes
                protobuf_encoded = f.read()
                try:  # if decoded successfully, save & return
                    return Keypair.from_bytes(protobuf_encoded)
                except ValueError as e:  # on runtime error, assume corrupt file
                    logger.warning(f"Encountered error when trying to get keypair: {e}")
        # if no valid credentials, create new ones and persist
        with open(path, "w+b") as f:
            keypair = Keypair.generate_ed25519()
            f.write(keypair.to_bytes())
            return keypair
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/routing/router.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/routing/tests/test_event_buffer.py | import pytest
from exo.shared.types.events import Event, TestEvent
from exo.utils.event_buffer import OrderedBuffer
def make_indexed_event(idx: int) -> tuple[int, Event]:
    """Build a fresh (index, TestEvent) pair for the given index."""
    event = TestEvent()
    return (idx, event)
@pytest.fixture
def buffer() -> OrderedBuffer[Event]:
    """Provide a fresh OrderedBuffer[Event] instance for each test."""
    return OrderedBuffer[Event]()
@pytest.mark.asyncio
async def test_initial_state(buffer: OrderedBuffer[Event]):
    """Tests that a new buffer is empty and starts releasing at index 0."""
    # (Docstring previously claimed the buffer "starts at index 1", which
    # contradicted the assertion below.)
    assert buffer.next_idx_to_release == 0
    assert not buffer.store
    assert buffer.drain() == []
@pytest.mark.asyncio
async def test_ingest_and_drain_sequential_events(buffer: OrderedBuffer[Event]):
    """Tests ingesting and draining a simple, ordered sequence of events."""
    events = [make_indexed_event(0), make_indexed_event(1), make_indexed_event(2)]
    # Idiom fix: a list comprehension was used purely for its side effects,
    # building a throwaway list of Nones; a plain loop states the intent.
    for ev in events:
        buffer.ingest(*ev)
    drained_events = buffer.drain_indexed()
    assert drained_events == events
    assert buffer.next_idx_to_release == 3
    assert not buffer.store
@pytest.mark.asyncio
async def test_ingest_out_of_order_events(buffer: OrderedBuffer[Event]):
    """Out-of-order events are buffered and drained back in index order."""
    expected = [make_indexed_event(i) for i in range(3)]
    first, second, third = expected
    # Feed the buffer in scrambled order.
    buffer.ingest(*third)
    buffer.ingest(*first)
    buffer.ingest(*second)
    assert buffer.drain_indexed() == expected
    assert buffer.next_idx_to_release == 3
@pytest.mark.asyncio
async def test_drain_with_gap_in_sequence(buffer: OrderedBuffer[Event]):
    """Draining halts at the first missing index."""
    first = make_indexed_event(0)
    third = make_indexed_event(2)
    buffer.ingest(*first)
    buffer.ingest(*third)
    # Only index 0 can be released; index 2 stays parked behind the gap.
    assert buffer.drain_indexed() == [first]
    assert buffer.next_idx_to_release == 1
    assert buffer.drain() == []
    assert 2 in buffer.store
@pytest.mark.asyncio
async def test_fill_gap_and_drain_remaining(buffer: OrderedBuffer[Event]):
    """Filling a gap releases everything queued up behind it."""
    buffer.ingest(*make_indexed_event(0))
    buffer.ingest(*make_indexed_event(2))
    buffer.drain()
    assert buffer.next_idx_to_release == 1
    # Supply the missing index 1; indices 1 and 2 should now drain together.
    buffer.ingest(*make_indexed_event(1))
    released = buffer.drain_indexed()
    assert [idx for idx, _ in released] == [1, 2]
    assert buffer.next_idx_to_release == 3
@pytest.mark.asyncio
async def test_ingest_drops_duplicate_indices(buffer: OrderedBuffer[Event]):
    """Ingesting a second event for an already-buffered index raises an
    AssertionError, and the originally ingested event is kept."""
    # (Docstring/comment previously said duplicates are silently "ignored",
    # which contradicted the pytest.raises below.)
    event2_first = make_indexed_event(1)
    event2_second = (1, TestEvent())
    buffer.ingest(*make_indexed_event(0))
    buffer.ingest(*event2_first)
    with pytest.raises(AssertionError):
        buffer.ingest(*event2_second)  # duplicate index is rejected via assert
    drained = buffer.drain_indexed()
    assert len(drained) == 2
    assert drained[1][1].event_id == event2_first[1].event_id
    assert drained[1][1].event_id != event2_second[1].event_id
@pytest.mark.asyncio
async def test_ingest_drops_stale_events(buffer: OrderedBuffer[Event]):
    """Events whose index was already released are silently discarded."""
    for idx in range(2):
        buffer.ingest(*make_indexed_event(idx))
    buffer.drain()
    assert buffer.next_idx_to_release == 2
    # Re-ingesting already-released indices must not re-buffer anything.
    buffer.ingest(*make_indexed_event(0))
    buffer.ingest(*make_indexed_event(1))
    assert not buffer.store
    assert buffer.drain() == []
@pytest.mark.asyncio
async def test_drain_and_ingest_with_new_sequence(buffer: OrderedBuffer[Event]):
    """The buffer keeps working for new events after being fully drained."""
    for idx in range(2):
        buffer.ingest(*make_indexed_event(idx))
    buffer.drain()
    assert buffer.next_idx_to_release == 2
    assert not buffer.store
    # Index 4 is ahead of the sequence; only index 2 is releasable now.
    buffer.ingest(*make_indexed_event(4))
    buffer.ingest(*make_indexed_event(2))
    released = buffer.drain_indexed()
    assert [idx for idx, _ in released] == [2]
    assert buffer.next_idx_to_release == 3
    assert 4 in buffer.store
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/routing/tests/test_event_buffer.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/routing/topics.py | from dataclasses import dataclass
from enum import Enum
from exo.routing.connection_message import ConnectionMessage
from exo.shared.election import ElectionMessage
from exo.shared.types.commands import ForwarderCommand, ForwarderDownloadCommand
from exo.shared.types.events import (
GlobalForwarderEvent,
LocalForwarderEvent,
)
from exo.utils.pydantic_ext import CamelCaseModel
class PublishPolicy(str, Enum):
    """Controls when a topic's messages are forwarded onto the network."""

    Never = "Never"
    """Never publish to the network - this is a local message"""
    Minimal = "Minimal"
    """Only publish when there is no local receiver for this type of message"""
    Always = "Always"
    """Always publish to the network"""
@dataclass  # (frozen=True)
class TypedTopic[T: CamelCaseModel]:
    """Associates a topic name with its payload model type and publish policy,
    providing JSON (de)serialization for that payload."""

    topic: str  # gossipsub topic string
    publish_policy: PublishPolicy
    model_type: type[
        T
    ]  # This can be worked around with evil type hacking, see https://stackoverflow.com/a/71720366 - I don't think it's necessary here.

    @staticmethod
    def serialize(t: T) -> bytes:
        """Encode a payload model as UTF-8 JSON bytes."""
        return t.model_dump_json().encode("utf-8")

    def deserialize(self, b: bytes) -> T:
        """Decode UTF-8 JSON bytes into this topic's payload model."""
        return self.model_type.model_validate_json(b.decode("utf-8"))
# Topic registry: one TypedTopic per gossip channel the router knows about.
GLOBAL_EVENTS = TypedTopic("global_events", PublishPolicy.Always, GlobalForwarderEvent)
LOCAL_EVENTS = TypedTopic("local_events", PublishPolicy.Always, LocalForwarderEvent)
COMMANDS = TypedTopic("commands", PublishPolicy.Always, ForwarderCommand)
ELECTION_MESSAGES = TypedTopic(
    "election_messages", PublishPolicy.Always, ElectionMessage
)
# Connection updates originate from the local swarm and never leave the node.
CONNECTION_MESSAGES = TypedTopic(
    "connection_messages", PublishPolicy.Never, ConnectionMessage
)
DOWNLOAD_COMMANDS = TypedTopic(
    "download_commands", PublishPolicy.Always, ForwarderDownloadCommand
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/routing/topics.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/apply.py | import copy
from collections.abc import Mapping, Sequence
from datetime import datetime
from loguru import logger
from exo.shared.types.common import NodeId
from exo.shared.types.events import (
ChunkGenerated,
Event,
IndexedEvent,
InputChunkReceived,
InstanceCreated,
InstanceDeleted,
NodeDownloadProgress,
NodeGatheredInfo,
NodeTimedOut,
RunnerDeleted,
RunnerStatusUpdated,
TaskAcknowledged,
TaskCreated,
TaskDeleted,
TaskFailed,
TaskStatusUpdated,
TestEvent,
TopologyEdgeCreated,
TopologyEdgeDeleted,
TracesCollected,
TracesMerged,
)
from exo.shared.types.profiling import (
NodeIdentity,
NodeNetworkInfo,
NodeRdmaCtlStatus,
NodeThunderboltInfo,
ThunderboltBridgeStatus,
)
from exo.shared.types.state import State
from exo.shared.types.tasks import Task, TaskId, TaskStatus
from exo.shared.types.topology import Connection, RDMAConnection
from exo.shared.types.worker.downloads import DownloadProgress
from exo.shared.types.worker.instances import Instance, InstanceId
from exo.shared.types.worker.runners import RunnerId, RunnerStatus
from exo.utils.info_gatherer.info_gatherer import (
MacmonMetrics,
MacThunderboltConnections,
MacThunderboltIdentifiers,
MemoryUsage,
MiscData,
NodeConfig,
NodeDiskUsage,
NodeNetworkInterfaces,
RdmaCtlStatus,
StaticNodeInformation,
ThunderboltBridgeInfo,
)
def event_apply(event: Event, state: State) -> State:
    """Apply an event to state.

    Pure dispatch over the Event union: each handler derives a new State
    (pass-through events return the incoming state unchanged). There is no
    wildcard case, so an unmatched event type would fall through and return
    None — new Event variants must be handled here.
    """
    match event:
        case (
            TestEvent()
            | ChunkGenerated()
            | TaskAcknowledged()
            | InputChunkReceived()
            | TracesCollected()
            | TracesMerged()
        ):  # Pass-through events that don't modify state
            return state
        case InstanceCreated():
            return apply_instance_created(event, state)
        case InstanceDeleted():
            return apply_instance_deleted(event, state)
        case NodeTimedOut():
            return apply_node_timed_out(event, state)
        case NodeDownloadProgress():
            return apply_node_download_progress(event, state)
        case NodeGatheredInfo():
            return apply_node_gathered_info(event, state)
        case RunnerDeleted():
            return apply_runner_deleted(event, state)
        case RunnerStatusUpdated():
            return apply_runner_status_updated(event, state)
        case TaskCreated():
            return apply_task_created(event, state)
        case TaskDeleted():
            return apply_task_deleted(event, state)
        case TaskFailed():
            return apply_task_failed(event, state)
        case TaskStatusUpdated():
            return apply_task_status_updated(event, state)
        case TopologyEdgeCreated():
            return apply_topology_edge_created(event, state)
        case TopologyEdgeDeleted():
            return apply_topology_edge_deleted(event, state)
def apply(state: State, event: IndexedEvent) -> State:
    """Apply an indexed event, enforcing strictly sequential event indices,
    and record the applied index on the returned State."""
    # Just to test that events are only applied in correct order
    if state.last_event_applied_idx != event.idx - 1:
        logger.warning(
            f"Expected event {state.last_event_applied_idx + 1} but received {event.idx}"
        )
    # NOTE(review): assert is stripped under `python -O`; an out-of-order
    # event would then be applied silently after the warning above.
    assert state.last_event_applied_idx == event.idx - 1
    new_state: State = event_apply(event.event, state)
    return new_state.model_copy(update={"last_event_applied_idx": event.idx})
def apply_node_download_progress(event: NodeDownloadProgress, state: State) -> State:
    """
    Merge a download-progress report into state.

    Progress entries are kept per node; within a node's list an entry is
    replaced when one with the same shard_metadata already exists, otherwise
    the new entry is appended.
    """
    progress = event.download_progress
    node_id = progress.node_id
    entries = list(state.downloads.get(node_id, ()))
    match_idx = next(
        (
            i
            for i, entry in enumerate(entries)
            if entry.shard_metadata == progress.shard_metadata
        ),
        None,
    )
    if match_idx is None:
        entries.append(progress)
    else:
        entries[match_idx] = progress
    updated: Mapping[NodeId, Sequence[DownloadProgress]] = {
        **state.downloads,
        node_id: entries,
    }
    return state.model_copy(update={"downloads": updated})
def apply_task_created(event: TaskCreated, state: State) -> State:
    """Register a newly created task in the state's task map."""
    tasks: dict[TaskId, Task] = dict(state.tasks)
    tasks[event.task_id] = event.task
    return state.model_copy(update={"tasks": tasks})
def apply_task_deleted(event: TaskDeleted, state: State) -> State:
    """Drop the task identified by the event; no-op when the id is unknown."""
    remaining: dict[TaskId, Task] = dict(state.tasks)
    remaining.pop(event.task_id, None)
    return state.model_copy(update={"tasks": remaining})
def apply_task_status_updated(event: TaskStatusUpdated, state: State) -> State:
    """Update a task's status; any non-Failed status clears stale error fields."""
    if event.task_id not in state.tasks:
        # maybe should raise
        return state
    update: dict[str, TaskStatus | None] = {
        "task_status": event.task_status,
    }
    # Transitioning to anything other than Failed wipes previous error details.
    if event.task_status != TaskStatus.Failed:
        update["error_type"] = None
        update["error_message"] = None
    updated_task = state.tasks[event.task_id].model_copy(update=update)
    new_tasks: Mapping[TaskId, Task] = {**state.tasks, event.task_id: updated_task}
    return state.model_copy(update={"tasks": new_tasks})
def apply_task_failed(event: TaskFailed, state: State) -> State:
    """Record failure details on an existing task; unknown ids are ignored."""
    task = state.tasks.get(event.task_id)
    if task is None:
        # maybe should raise
        return state
    failed_task = task.model_copy(
        update={
            "error_type": event.error_type,
            "error_message": event.error_message,
        }
    )
    tasks: dict[TaskId, Task] = dict(state.tasks)
    tasks[event.task_id] = failed_task
    return state.model_copy(update={"tasks": tasks})
def apply_instance_created(event: InstanceCreated, state: State) -> State:
    """Add the event's instance to the instance map, keyed by its own id."""
    created = event.instance
    instances: dict[InstanceId, Instance] = dict(state.instances)
    instances[created.instance_id] = created
    return state.model_copy(update={"instances": instances})
def apply_instance_deleted(event: InstanceDeleted, state: State) -> State:
    """Remove the instance named by the event; no-op when absent."""
    instances: dict[InstanceId, Instance] = dict(state.instances)
    instances.pop(event.instance_id, None)
    return state.model_copy(update={"instances": instances})
def apply_runner_status_updated(event: RunnerStatusUpdated, state: State) -> State:
    """Record the latest status reported for a runner."""
    merged = dict(state.runners) | {event.runner_id: event.runner_status}
    return state.model_copy(update={"runners": merged})
def apply_runner_deleted(event: RunnerDeleted, state: State) -> State:
    """Forget a runner; it must have reported status at least once before."""
    assert event.runner_id in state.runners, (
        "RunnerDeleted before any RunnerStatusUpdated events"
    )
    remaining = dict(state.runners)
    del remaining[event.runner_id]
    return state.model_copy(update={"runners": remaining})
def apply_node_timed_out(event: NodeTimedOut, state: State) -> State:
    """Purge every trace of a node that stopped responding.

    Removes the node from the topology and from all per-node mappings, and
    recomputes the Thunderbolt-bridge cycle list only when the departing node
    had bridging enabled (otherwise the existing cycles are carried over).
    """
    gone = event.node_id

    def _without(mapping):
        # Copy a per-node mapping without the departed node's entry.
        return {key: value for key, value in mapping.items() if key != gone}

    topology = copy.deepcopy(state.topology)
    topology.remove_node(gone)

    node_network = _without(state.node_network)
    node_thunderbolt_bridge = _without(state.node_thunderbolt_bridge)

    # Only recompute cycles if the leaving node had TB bridge enabled
    departed_bridge = state.node_thunderbolt_bridge.get(gone)
    bridge_was_enabled = departed_bridge is not None and departed_bridge.enabled
    if bridge_was_enabled:
        cycles = topology.get_thunderbolt_bridge_cycles(
            node_thunderbolt_bridge, node_network
        )
    else:
        cycles = [list(cycle) for cycle in state.thunderbolt_bridge_cycles]

    return state.model_copy(
        update={
            "downloads": _without(state.downloads),
            "topology": topology,
            "last_seen": _without(state.last_seen),
            "node_memory": _without(state.node_memory),
            "node_disk": _without(state.node_disk),
            "node_system": _without(state.node_system),
            "node_network": node_network,
            "node_thunderbolt": _without(state.node_thunderbolt),
            "node_thunderbolt_bridge": node_thunderbolt_bridge,
            "node_rdma_ctl": _without(state.node_rdma_ctl),
            "thunderbolt_bridge_cycles": cycles,
        }
    )
def apply_node_gathered_info(event: NodeGatheredInfo, state: State) -> State:
    """Fold a node's self-reported info into the state.

    Always refreshes the node's last-seen timestamp and topology membership,
    then dispatches on the concrete info payload type to update the matching
    per-node mapping.
    """
    topology = copy.deepcopy(state.topology)
    topology.add_node(event.node_id)
    info = event.info
    # Build update dict with only the mappings that change
    update: dict[str, object] = {
        "last_seen": {
            **state.last_seen,
            event.node_id: datetime.fromisoformat(event.when),
        },
        "topology": topology,
    }
    match info:
        case MacmonMetrics():
            # Full metrics sample: carries both a system profile and memory usage.
            update["node_system"] = {
                **state.node_system,
                event.node_id: info.system_profile,
            }
            update["node_memory"] = {**state.node_memory, event.node_id: info.memory}
        case MemoryUsage():
            update["node_memory"] = {**state.node_memory, event.node_id: info}
        case NodeDiskUsage():
            update["node_disk"] = {**state.node_disk, event.node_id: info.disk_usage}
        case NodeConfig():
            # Config payloads carry no state updates here.
            pass
        case MiscData():
            # Merge the friendly name into any previously known identity.
            current_identity = state.node_identities.get(event.node_id, NodeIdentity())
            new_identity = current_identity.model_copy(
                update={"friendly_name": info.friendly_name}
            )
            update["node_identities"] = {
                **state.node_identities,
                event.node_id: new_identity,
            }
        case StaticNodeInformation():
            # Hardware/OS facts; merged into the identity like MiscData above.
            current_identity = state.node_identities.get(event.node_id, NodeIdentity())
            new_identity = current_identity.model_copy(
                update={
                    "model_id": info.model,
                    "chip_id": info.chip,
                    "os_version": info.os_version,
                    "os_build_version": info.os_build_version,
                }
            )
            update["node_identities"] = {
                **state.node_identities,
                event.node_id: new_identity,
            }
        case NodeNetworkInterfaces():
            update["node_network"] = {
                **state.node_network,
                event.node_id: NodeNetworkInfo(interfaces=info.ifaces),
            }
        case MacThunderboltIdentifiers():
            update["node_thunderbolt"] = {
                **state.node_thunderbolt,
                event.node_id: NodeThunderboltInfo(interfaces=info.idents),
            }
        case MacThunderboltConnections():
            # Map each known Thunderbolt domain UUID to (owning node, RDMA iface)
            # so reported connections can be translated into RDMA topology edges.
            conn_map = {
                tb_ident.domain_uuid: (nid, tb_ident.rdma_interface)
                for nid in state.node_thunderbolt
                for tb_ident in state.node_thunderbolt[nid].interfaces
            }
            # Connections whose endpoints are unknown are silently skipped.
            as_rdma_conns = [
                Connection(
                    source=event.node_id,
                    sink=conn_map[tb_conn.sink_uuid][0],
                    edge=RDMAConnection(
                        source_rdma_iface=conn_map[tb_conn.source_uuid][1],
                        sink_rdma_iface=conn_map[tb_conn.sink_uuid][1],
                    ),
                )
                for tb_conn in info.conns
                if tb_conn.source_uuid in conn_map
                if tb_conn.sink_uuid in conn_map
            ]
            topology.replace_all_out_rdma_connections(event.node_id, as_rdma_conns)
        case ThunderboltBridgeInfo():
            new_tb_bridge: dict[NodeId, ThunderboltBridgeStatus] = {
                **state.node_thunderbolt_bridge,
                event.node_id: info.status,
            }
            update["node_thunderbolt_bridge"] = new_tb_bridge
            # Only recompute cycles if the enabled status changed
            old_status = state.node_thunderbolt_bridge.get(event.node_id)
            old_enabled = old_status.enabled if old_status else False
            new_enabled = info.status.enabled
            if old_enabled != new_enabled:
                update["thunderbolt_bridge_cycles"] = (
                    topology.get_thunderbolt_bridge_cycles(
                        new_tb_bridge, state.node_network
                    )
                )
        case RdmaCtlStatus():
            update["node_rdma_ctl"] = {
                **state.node_rdma_ctl,
                event.node_id: NodeRdmaCtlStatus(enabled=info.enabled),
            }
    return state.model_copy(update=update)
def apply_topology_edge_created(event: TopologyEdgeCreated, state: State) -> State:
    """Add a directed connection to a fresh copy of the topology."""
    updated_topology = copy.deepcopy(state.topology)
    updated_topology.add_connection(event.conn)
    return state.model_copy(update={"topology": updated_topology})
def apply_topology_edge_deleted(event: TopologyEdgeDeleted, state: State) -> State:
    """Remove a directed connection from a fresh copy of the topology."""
    updated_topology = copy.deepcopy(state.topology)
    updated_topology.remove_connection(event.conn)
    # TODO: Clean up removing the reverse connection
    return state.model_copy(update={"topology": updated_topology})
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/apply.py",
"license": "Apache License 2.0",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/constants.py | import os
import sys
from pathlib import Path
from exo.utils.dashboard_path import find_dashboard, find_resources
# Raw EXO_HOME override, read once at import time; None when unset.
_EXO_HOME_ENV = os.environ.get("EXO_HOME", None)
def _get_xdg_dir(env_var: str, fallback: str) -> Path:
    """Get XDG directory, prioritising EXO_HOME environment variable if it's set. On non-Linux platforms, default to ~/.exo."""
    if _EXO_HOME_ENV is not None:
        # NOTE: if EXO_HOME is absolute, pathlib's `/` discards Path.home()
        # and yields the absolute path itself; relative values land under home.
        return Path.home() / _EXO_HOME_ENV
    if sys.platform != "linux":
        return Path.home() / ".exo"
    xdg_value = os.environ.get(env_var, None)
    if xdg_value is not None:
        return Path(xdg_value) / "exo"
    return Path.home() / fallback / "exo"
# XDG-style base directories (collapse to ~/.exo off Linux, or under EXO_HOME).
EXO_CONFIG_HOME = _get_xdg_dir("XDG_CONFIG_HOME", ".config")
EXO_DATA_HOME = _get_xdg_dir("XDG_DATA_HOME", ".local/share")
EXO_CACHE_HOME = _get_xdg_dir("XDG_CACHE_HOME", ".cache")
# Models directory (data)
_EXO_MODELS_DIR_ENV = os.environ.get("EXO_MODELS_DIR", None)
EXO_MODELS_DIR = (
    EXO_DATA_HOME / "models"
    if _EXO_MODELS_DIR_ENV is None
    else Path.home() / _EXO_MODELS_DIR_ENV
)
# Read-only search path for pre-downloaded models (colon-separated directories)
_EXO_MODELS_PATH_ENV = os.environ.get("EXO_MODELS_PATH", None)
EXO_MODELS_PATH: tuple[Path, ...] | None = (
    tuple(Path(p).expanduser() for p in _EXO_MODELS_PATH_ENV.split(":") if p)
    if _EXO_MODELS_PATH_ENV is not None
    else None
)
# Bundled static resources and the web dashboard; overridable via env vars.
_RESOURCES_DIR_ENV = os.environ.get("EXO_RESOURCES_DIR", None)
RESOURCES_DIR = (
    find_resources() if _RESOURCES_DIR_ENV is None else Path.home() / _RESOURCES_DIR_ENV
)
_DASHBOARD_DIR_ENV = os.environ.get("EXO_DASHBOARD_DIR", None)
DASHBOARD_DIR = (
    find_dashboard() if _DASHBOARD_DIR_ENV is None else Path.home() / _DASHBOARD_DIR_ENV
)
# Log files (data/logs or cache)
EXO_LOG_DIR = EXO_CACHE_HOME / "exo_log"
EXO_LOG = EXO_LOG_DIR / "exo.log"
EXO_TEST_LOG = EXO_CACHE_HOME / "exo_test.log"
# Identity (config)
EXO_NODE_ID_KEYPAIR = EXO_CONFIG_HOME / "node_id.keypair"
EXO_CONFIG_FILE = EXO_CONFIG_HOME / "config.toml"
# libp2p topics for event forwarding
LIBP2P_LOCAL_EVENTS_TOPIC = "worker_events"
LIBP2P_GLOBAL_EVENTS_TOPIC = "global_events"
LIBP2P_ELECTION_MESSAGES_TOPIC = "election_message"
LIBP2P_COMMANDS_TOPIC = "commands"
# 512 KiB chunk size constant — consumer not visible here; verify at call sites.
EXO_MAX_CHUNK_SIZE = 512 * 1024
EXO_CUSTOM_MODEL_CARDS_DIR = EXO_DATA_HOME / "custom_model_cards"
EXO_EVENT_LOG_DIR = EXO_DATA_HOME / "event_log"
EXO_IMAGE_CACHE_DIR = EXO_CACHE_HOME / "images"
EXO_TRACING_CACHE_DIR = EXO_CACHE_HOME / "traces"
# Feature flags: only the exact (case-insensitive) string "true" enables them.
EXO_ENABLE_IMAGE_MODELS = (
    os.getenv("EXO_ENABLE_IMAGE_MODELS", "false").lower() == "true"
)
EXO_OFFLINE = os.getenv("EXO_OFFLINE", "false").lower() == "true"
EXO_TRACING_ENABLED = os.getenv("EXO_TRACING_ENABLED", "false").lower() == "true"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/constants.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/election.py | from typing import Self
import anyio
from anyio import (
CancelScope,
Event,
get_cancelled_exc_class,
)
from loguru import logger
from exo.routing.connection_message import ConnectionMessage
from exo.shared.types.commands import ForwarderCommand
from exo.shared.types.common import NodeId, SessionId
from exo.utils.channels import Receiver, Sender
from exo.utils.pydantic_ext import CamelCaseModel
from exo.utils.task_group import TaskGroup
DEFAULT_ELECTION_TIMEOUT = 3.0
class ElectionMessage(CamelCaseModel):
    """One node's bid in an election round."""

    clock: int
    seniority: int
    proposed_session: SessionId
    commands_seen: int
    # Could eventually include a list of neighbour nodes for centrality

    def __lt__(self, other: Self) -> bool:
        # Lexicographic ranking: clock, then seniority, then commands seen,
        # with the proposing master's node id as the final tie-breaker.
        mine = (
            self.clock,
            self.seniority,
            self.commands_seen,
            self.proposed_session.master_node_id,
        )
        theirs = (
            other.clock,
            other.seniority,
            other.commands_seen,
            other.proposed_session.master_node_id,
        )
        return mine < theirs
class ElectionResult(CamelCaseModel):
    """Outcome of one election round."""
    # The winning session (master node id + election clock).
    session_id: SessionId
    # The round clock at which this result was decided.
    won_clock: int
    # True when the winning session differs from the previously adopted one.
    is_new_master: bool
class Election:
    """Leader election driven by broadcast ElectionMessages.

    A logical round clock orders campaigns: peer messages with a higher clock
    and new peer connections each start a fresh campaign.  After a campaign
    window the highest-ranked bid (see ElectionMessage.__lt__) wins and an
    ElectionResult is published on the result sender.
    """
    def __init__(
        self,
        node_id: NodeId,
        *,
        election_message_receiver: Receiver[ElectionMessage],
        election_message_sender: Sender[ElectionMessage],
        election_result_sender: Sender[ElectionResult],
        connection_message_receiver: Receiver[ConnectionMessage],
        command_receiver: Receiver[ForwarderCommand],
        is_candidate: bool = True,
        seniority: int = 0,
    ):
        # If we aren't a candidate, simply don't increment seniority.
        # For reference: This node can be elected master if all nodes are not master candidates
        # Any master candidate will automatically win out over this node.
        self.seniority = seniority if is_candidate else -1
        self.clock = 0
        self.node_id = node_id
        self.commands_seen = 0
        # Every node spawns as master
        self.current_session: SessionId = SessionId(
            master_node_id=node_id, election_clock=0
        )
        # Senders/Receivers
        self._em_sender = election_message_sender
        self._em_receiver = election_message_receiver
        self._er_sender = election_result_sender
        self._cm_receiver = connection_message_receiver
        self._co_receiver = command_receiver
        # Campaign state
        self._candidates: list[ElectionMessage] = []
        self._campaign_cancel_scope: CancelScope | None = None
        self._campaign_done: Event | None = None
        self._tg = TaskGroup()
    async def run(self):
        """Run the receiver loops plus an immediate zero-timeout initial campaign."""
        logger.info("Starting Election")
        try:
            async with self._tg as tg:
                tg.start_soon(self._election_receiver)
                tg.start_soon(self._connection_receiver)
                tg.start_soon(self._command_counter)
                # And start an election immediately, that instantly resolves
                candidates: list[ElectionMessage] = []
                logger.debug("Starting initial campaign")
                self._candidates = candidates
                await self._campaign(candidates, campaign_timeout=0.0)
                logger.debug("Initial campaign finished")
        finally:
            # Cancel and wait for the last election to end
            if self._campaign_cancel_scope is not None:
                logger.debug("Cancelling campaign")
                self._campaign_cancel_scope.cancel()
            if self._campaign_done is not None:
                logger.debug("Waiting for campaign to finish")
                await self._campaign_done.wait()
            logger.debug("Campaign cancelled and finished")
            logger.info("Election shutdown")
    async def elect(self, em: ElectionMessage) -> None:
        """Adopt the winning bid's session and publish an ElectionResult."""
        logger.debug(f"Electing: {em}")
        is_new_master = em.proposed_session != self.current_session
        self.current_session = em.proposed_session
        logger.debug(f"Current session: {self.current_session}")
        await self._er_sender.send(
            ElectionResult(
                won_clock=em.clock,
                session_id=em.proposed_session,
                is_new_master=is_new_master,
            )
        )
    async def shutdown(self) -> None:
        """Cancel all receiver and campaign tasks."""
        self._tg.cancel_tasks()
    async def _election_receiver(self) -> None:
        # Consumes peer bids: a higher clock starts a new campaign, stale
        # clocks are dropped, and same-clock bids join the current round.
        with self._em_receiver as election_messages:
            async for message in election_messages:
                logger.debug(f"Election message received: {message}")
                if message.proposed_session.master_node_id == self.node_id:
                    logger.debug("Dropping message from ourselves")
                    # Drop messages from us (See exo.routing.router)
                    continue
                # If a new round is starting, we participate
                if message.clock > self.clock:
                    self.clock = message.clock
                    logger.debug(f"New clock: {self.clock}")
                    logger.debug("Starting new campaign")
                    candidates: list[ElectionMessage] = [message]
                    logger.debug(f"Candidates: {candidates}")
                    logger.debug(f"Current candidates: {self._candidates}")
                    self._candidates = candidates
                    logger.debug(f"New candidates: {self._candidates}")
                    logger.debug("Starting new campaign")
                    self._tg.start_soon(
                        self._campaign, candidates, DEFAULT_ELECTION_TIMEOUT
                    )
                    logger.debug("Campaign started")
                    continue
                # Dismiss old messages
                if message.clock < self.clock:
                    logger.debug(f"Dropping old message: {message}")
                    continue
                logger.debug(f"Election added candidate {message}")
                # Now we are processing this rounds messages - including the message that triggered this round.
                self._candidates.append(message)
    async def _connection_receiver(self) -> None:
        # A peer (dis)connection bumps the round clock and starts a campaign.
        with self._cm_receiver as connection_messages:
            async for first in connection_messages:
                # Delay after connection message for time to symmetrically setup
                await anyio.sleep(0.2)
                rest = connection_messages.collect()
                logger.debug(
                    f"Connection messages received: {first} followed by {rest}"
                )
                logger.debug(f"Current clock: {self.clock}")
                # These messages are strictly peer to peer
                self.clock += 1
                logger.debug(f"New clock: {self.clock}")
                candidates: list[ElectionMessage] = []
                self._candidates = candidates
                logger.debug("Starting new campaign")
                self._tg.start_soon(
                    self._campaign, candidates, DEFAULT_ELECTION_TIMEOUT
                )
                logger.debug("Campaign started")
                logger.debug("Connection message added")
    async def _command_counter(self) -> None:
        # Counts forwarder commands seen; the count is part of our bid ranking.
        with self._co_receiver as commands:
            async for _command in commands:
                self.commands_seen += 1
    async def _campaign(
        self, candidates: list[ElectionMessage], campaign_timeout: float
    ) -> None:
        """Run one campaign: broadcast our bid, wait, then elect the max bid.

        Cancels (and waits out) any campaign already in flight before starting.
        """
        clock = self.clock
        # Kill the old campaign
        if self._campaign_cancel_scope:
            logger.info("Cancelling other campaign")
            self._campaign_cancel_scope.cancel()
        if self._campaign_done:
            logger.info("Waiting for other campaign to finish")
            await self._campaign_done.wait()
        done = Event()
        self._campaign_done = done
        scope = CancelScope()
        self._campaign_cancel_scope = scope
        try:
            with scope:
                logger.debug(f"Election {clock} started")
                status = self._election_status(clock)
                candidates.append(status)
                await self._em_sender.send(status)
                logger.debug(f"Sleeping for {campaign_timeout} seconds")
                await anyio.sleep(campaign_timeout)
                # minor hack - rebroadcast status in case anyone has missed it.
                await self._em_sender.send(status)
                logger.debug("Woke up from sleep")
                # add an anyio checkpoint - anyio.lowlevel.checkpoint() or checkpoint_if_cancelled() is preferred, but wasn't typechecking last I checked
                await anyio.sleep(0)
                # Election finished!
                elected = max(candidates)
                logger.debug(f"Election queue {candidates}")
                logger.debug(f"Elected: {elected}")
                if (
                    self.node_id == elected.proposed_session.master_node_id
                    and self.seniority >= 0
                ):
                    logger.debug(
                        f"Node is a candidate and seniority is {self.seniority}"
                    )
                    self.seniority = max(self.seniority, len(candidates))
                    logger.debug(f"New seniority: {self.seniority}")
                else:
                    logger.debug(
                        f"Node is not a candidate or seniority is not {self.seniority}"
                    )
                logger.debug(
                    f"Election finished, new SessionId({elected.proposed_session}) with queue {candidates}"
                )
                logger.debug("Sending election result")
                await self.elect(elected)
                logger.debug("Election result sent")
        except get_cancelled_exc_class():
            logger.debug(f"Election {clock} cancelled")
        finally:
            logger.debug(f"Election {clock} finally")
            if self._campaign_cancel_scope is scope:
                self._campaign_cancel_scope = None
            logger.debug("Setting done event")
            done.set()
            logger.debug("Done event set")
    def _election_status(self, clock: int | None = None) -> ElectionMessage:
        """Build our current bid for round *clock* (defaults to self.clock)."""
        c = self.clock if clock is None else clock
        return ElectionMessage(
            proposed_session=(
                self.current_session
                if self.current_session.master_node_id == self.node_id
                else SessionId(master_node_id=self.node_id, election_clock=c)
            ),
            clock=c,
            seniority=self.seniority,
            commands_seen=self.commands_seen,
        )
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/election.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/logging.py | import logging
import sys
from collections.abc import Iterator
from pathlib import Path
import zstandard
from hypercorn import Config
from hypercorn.logging import Logger as HypercornLogger
from loguru import logger
_MAX_LOG_ARCHIVES = 5
def _zstd_compress(filepath: str) -> None:
    """Compress *filepath* to a sibling ``.zst`` file, then delete the original."""
    original = Path(filepath)
    compressed = original.with_suffix(original.suffix + ".zst")
    compressor = zstandard.ZstdCompressor()
    with open(original, "rb") as src, open(compressed, "wb") as dst:
        compressor.copy_stream(src, dst)
    original.unlink()
def _once_then_never() -> Iterator[bool]:
yield True
while True:
yield False
class InterceptLogger(HypercornLogger):
    """Hypercorn logger whose error channel is rerouted into loguru."""
    def __init__(self, config: Config):
        super().__init__(config)
        assert self.error_logger
        # Replace hypercorn's stdlib handlers so records flow through loguru.
        self.error_logger.handlers = [_InterceptHandler()]
class _InterceptHandler(logging.Handler):
    """stdlib ``logging`` handler that forwards every record to loguru."""
    def emit(self, record: logging.LogRecord):
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # Level name unknown to loguru: fall back to the numeric level.
            level = record.levelno
        # depth=3 makes loguru report the original call site, not this handler.
        logger.opt(depth=3, exception=record.exc_info).log(level, record.getMessage())
def logger_setup(log_file: Path | None, verbosity: int = 0):
    """Set up logging for this process - formatting, file handles, verbosity and output"""
    # Quieten chatty dependencies regardless of our own verbosity.
    logging.getLogger("exo_pyo3_bindings").setLevel(logging.WARNING)
    logging.getLogger("httpx").setLevel(logging.WARNING)
    logging.getLogger("httpcore").setLevel(logging.WARNING)
    logger.remove()
    # replace all stdlib loggers with _InterceptHandlers that log to loguru
    logging.basicConfig(handlers=[_InterceptHandler()], level=0)
    if verbosity == 0:
        # Default: concise INFO lines on the real stderr.
        logger.add(
            sys.__stderr__,  # type: ignore
            format="[ {time:hh:mm:ss.SSSSA} | <level>{level: <8}</level>] <level>{message}</level>",
            level="INFO",
            colorize=True,
            enqueue=True,
        )
    else:
        # Verbose: DEBUG level with source location in every line.
        logger.add(
            sys.__stderr__,  # type: ignore
            format="[ {time:HH:mm:ss.SSS} | <level>{level: <8}</level> | {name}:{function}:{line} ] <level>{message}</level>",
            level="DEBUG",
            colorize=True,
            enqueue=True,
        )
    if log_file:
        # Rotate exactly once (at startup) so each run starts a fresh file;
        # rotated archives are zstd-compressed, keeping _MAX_LOG_ARCHIVES.
        rotate_once = _once_then_never()
        logger.add(
            log_file,
            format="[ {time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {name}:{function}:{line} ] {message}",
            level="INFO",
            colorize=False,
            enqueue=True,
            rotation=lambda _, __: next(rotate_once),
            retention=_MAX_LOG_ARCHIVES,
            compression=_zstd_compress,
        )
def logger_cleanup():
    """Flush all queues before shutting down so any in-flight logs are written to disk"""
    # Sinks above use enqueue=True; complete() drains their message queues.
    logger.complete()
""" --- TODO: Capture MLX Log output:
import contextlib
import sys
from loguru import logger
class StreamToLogger:
def __init__(self, level="INFO"):
self._level = level
def write(self, buffer):
for line in buffer.rstrip().splitlines():
logger.opt(depth=1).log(self._level, line.rstrip())
def flush(self):
pass
logger.remove()
logger.add(sys.__stdout__)
stream = StreamToLogger()
with contextlib.redirect_stdout(stream):
print("Standard output is sent to added handlers.")
"""
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/logging.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/models/model_cards.py | from enum import Enum
from typing import Annotated, Any
import aiofiles
import aiofiles.os as aios
import tomlkit
from anyio import Path, open_file
from huggingface_hub import model_info
from loguru import logger
from pydantic import (
AliasChoices,
BaseModel,
Field,
PositiveInt,
ValidationError,
field_validator,
model_validator,
)
from tomlkit.exceptions import TOMLKitError
from exo.shared.constants import (
EXO_CUSTOM_MODEL_CARDS_DIR,
EXO_ENABLE_IMAGE_MODELS,
RESOURCES_DIR,
)
from exo.shared.types.common import ModelId
from exo.shared.types.memory import Memory
from exo.utils.pydantic_ext import CamelCaseModel
# kinda ugly...
# TODO: load search path from config.toml
# Directory for user-supplied cards; also the write target for fetched cards.
_custom_cards_dir = Path(str(EXO_CUSTOM_MODEL_CARDS_DIR))
# Directories scanned (in order) when refreshing the card cache; the first
# file seen for a given model id wins.
CARD_SEARCH_PATH = [
    Path(RESOURCES_DIR) / "inference_model_cards",
    Path(RESOURCES_DIR) / "image_model_cards",
    _custom_cards_dir,
]
# In-process cache of parsed cards, keyed by model id.
_card_cache: dict[ModelId, "ModelCard"] = {}
async def _refresh_card_cache():
    """Scan CARD_SEARCH_PATH for *.toml files and cache any new valid cards.

    Unparseable or invalid files are skipped; already-cached model ids keep
    their existing entry (earlier search-path directories take precedence).
    """
    for directory in CARD_SEARCH_PATH:
        async for candidate in directory.rglob("*.toml"):
            try:
                card = await ModelCard.load_from_path(candidate)
            except (ValidationError, TOMLKitError):
                continue
            _card_cache.setdefault(card.model_id, card)
def _is_image_card(card: "ModelCard") -> bool:
    """True when the card advertises any image-generation task."""
    image_tasks = (ModelTask.TextToImage, ModelTask.ImageToImage)
    return any(task in image_tasks for task in card.tasks)
async def get_model_cards() -> list["ModelCard"]:
    """Return all known cards, filtering out image models unless enabled."""
    if not _card_cache:
        await _refresh_card_cache()
    cards = list(_card_cache.values())
    if EXO_ENABLE_IMAGE_MODELS:
        return cards
    return [card for card in cards if not _is_image_card(card)]
class ModelTask(str, Enum):
    """Task categories a model card can advertise."""
    TextGeneration = "TextGeneration"
    TextToImage = "TextToImage"
    ImageToImage = "ImageToImage"
class ComponentInfo(CamelCaseModel):
    """Metadata for one component of a multi-component model."""
    component_name: str
    # Path of the component relative to the model — confirm against producers.
    component_path: str
    storage_size: Memory
    # Layer count; None for components without a layer structure.
    n_layers: PositiveInt | None = None
    # Whether this component can be split across devices.
    can_shard: bool
    safetensors_index_filename: str | None = None
class ModelCard(CamelCaseModel):
    """Static metadata describing a model: size, layer count, tasks, etc.

    Cards are loaded from TOML files on CARD_SEARCH_PATH, or synthesised
    from the Hugging Face Hub (and persisted to the custom cards directory)
    when no local card exists.
    """
    model_id: ModelId
    storage_size: Memory
    n_layers: PositiveInt
    hidden_size: PositiveInt
    supports_tensor: bool
    tasks: list[ModelTask]
    components: list[ComponentInfo] | None = None
    family: str = ""
    quantization: str = ""
    base_model: str = ""
    capabilities: list[str] = []
    uses_cfg: bool = False
    trust_remote_code: bool = True
    @field_validator("tasks", mode="before")
    @classmethod
    def _validate_tasks(cls, v: list[str | ModelTask]) -> list[ModelTask]:
        # Coerce plain strings (as read from TOML) into ModelTask members.
        return [item if isinstance(item, ModelTask) else ModelTask(item) for item in v]
    async def save(self, path: Path) -> None:
        """Serialize this card to *path* as TOML (None fields omitted)."""
        async with await open_file(path, "w") as f:
            py = self.model_dump(exclude_none=True)
            data = tomlkit.dumps(py)  # pyright: ignore[reportUnknownMemberType]
            await f.write(data)
    async def save_to_custom_dir(self) -> None:
        """Save this card into the user's custom cards directory."""
        await aios.makedirs(str(_custom_cards_dir), exist_ok=True)
        await self.save(_custom_cards_dir / (self.model_id.normalize() + ".toml"))
    @staticmethod
    async def load_from_path(path: Path) -> "ModelCard":
        """Parse and validate a card from a TOML file on disk."""
        async with await open_file(path, "r") as f:
            py = tomlkit.loads(await f.read())
            return ModelCard.model_validate(py)
    # Is it okay that model card.load defaults to network access if the card doesn't exist? do we want to be more explicit here?
    @staticmethod
    async def load(model_id: ModelId) -> "ModelCard":
        """Load a card from cache/disk, falling back to the Hugging Face Hub."""
        if model_id not in _card_cache:
            await _refresh_card_cache()
        if (mc := _card_cache.get(model_id)) is not None:
            return mc
        return await ModelCard.fetch_from_hf(model_id)
    @staticmethod
    async def fetch_from_hf(model_id: ModelId) -> "ModelCard":
        """Fetches storage size and number of layers for a Hugging Face model, returns Pydantic ModelMeta."""
        # TODO: failure if files do not exist
        config_data = await fetch_config_data(model_id)
        num_layers = config_data.layer_count
        mem_size_bytes = await fetch_safetensors_size(model_id)
        mc = ModelCard(
            model_id=ModelId(model_id),
            storage_size=mem_size_bytes,
            n_layers=num_layers,
            hidden_size=config_data.hidden_size or 0,
            supports_tensor=config_data.supports_tensor,
            tasks=[ModelTask.TextGeneration],
            trust_remote_code=False,
        )
        # Persist and cache so subsequent loads skip the network.
        await mc.save_to_custom_dir()
        _card_cache[model_id] = mc
        return mc
async def delete_custom_card(model_id: ModelId) -> bool:
    """Delete a user-added custom model card. Returns True if deleted."""
    target = _custom_cards_dir / (ModelId(model_id).normalize() + ".toml")
    if not await target.exists():
        return False
    await target.unlink()
    _card_cache.pop(model_id, None)
    return True
def is_custom_card(model_id: ModelId) -> bool:
    """Check if a model card exists in the custom cards directory."""
    import os

    filename = ModelId(model_id).normalize() + ".toml"
    card_path = Path(str(EXO_CUSTOM_MODEL_CARDS_DIR)) / filename
    return os.path.isfile(str(card_path))
class ConfigData(BaseModel):
    """The subset of a Hugging Face ``config.json`` that exo needs."""
    model_config = {"extra": "ignore"}  # Allow unknown fields
    architectures: list[str] | None = None
    hidden_size: Annotated[int, Field(ge=0)] | None = None
    # Model families name their layer count differently; accept any alias.
    layer_count: int = Field(
        validation_alias=AliasChoices(
            "num_hidden_layers",
            "num_layers",
            "n_layer",
            "n_layers",
            "num_decoder_layers",
            "decoder_layers",
        )
    )
    @property
    def supports_tensor(self) -> bool:
        # Allow-list of single-architecture configs eligible for tensor support.
        return self.architectures in [
            ["Glm4MoeLiteForCausalLM"],
            ["GlmMoeDsaForCausalLM"],
            ["DeepseekV32ForCausalLM"],
            ["DeepseekV3ForCausalLM"],
            ["Qwen3NextForCausalLM"],
            ["Qwen3MoeForCausalLM"],
            ["MiniMaxM2ForCausalLM"],
            ["LlamaForCausalLM"],
            ["GptOssForCausalLM"],
            ["Step3p5ForCausalLM"],
        ]
    @model_validator(mode="before")
    @classmethod
    def defer_to_text_config(cls, data: dict[str, Any]):
        """Hoist fields from a nested ``text_config`` (multimodal configs) to the top level."""
        text_config = data.get("text_config")
        if text_config is None:
            return data
        for field in [
            "architectures",
            "hidden_size",
            "num_hidden_layers",
            "num_layers",
            "n_layer",
            "n_layers",
            "num_decoder_layers",
            "decoder_layers",
        ]:
            if (val := text_config.get(field)) is not None:  # pyright: ignore[reportAny]
                data[field] = val
        return data
async def fetch_config_data(model_id: ModelId) -> ConfigData:
    """Downloads and parses config.json for a model."""
    # NOTE(review): local import — presumably avoids an import cycle with the
    # download package; confirm before hoisting to module level.
    from exo.download.download_utils import (
        download_file_with_retry,
        ensure_models_dir,
    )
    target_dir = (await ensure_models_dir()) / model_id.normalize()
    await aios.makedirs(target_dir, exist_ok=True)
    config_path = await download_file_with_retry(
        model_id,
        "main",
        "config.json",
        target_dir,
        # Progress callback only logs; nothing consumes it programmatically.
        lambda curr_bytes, total_bytes, is_renamed: logger.debug(
            f"Downloading config.json for {model_id}: {curr_bytes}/{total_bytes} ({is_renamed=})"
        ),
    )
    async with aiofiles.open(config_path, "r") as f:
        return ConfigData.model_validate_json(await f.read())
async def fetch_safetensors_size(model_id: ModelId) -> Memory:
    """Gets model size from safetensors index or falls back to HF API."""
    # NOTE(review): local imports — presumably avoid import cycles; confirm.
    from exo.download.download_utils import (
        download_file_with_retry,
        ensure_models_dir,
    )
    from exo.shared.types.worker.downloads import ModelSafetensorsIndex
    target_dir = (await ensure_models_dir()) / model_id.normalize()
    await aios.makedirs(target_dir, exist_ok=True)
    index_path = await download_file_with_retry(
        model_id,
        "main",
        "model.safetensors.index.json",
        target_dir,
        lambda curr_bytes, total_bytes, is_renamed: logger.debug(
            f"Downloading model.safetensors.index.json for {model_id}: {curr_bytes}/{total_bytes} ({is_renamed=})"
        ),
    )
    async with aiofiles.open(index_path, "r") as f:
        index_data = ModelSafetensorsIndex.model_validate_json(await f.read())
    metadata = index_data.metadata
    if metadata is not None:
        return Memory.from_bytes(metadata.total_size)
    # Index carried no size metadata: fall back to the Hugging Face API.
    info = model_info(model_id)
    if info.safetensors is None:
        raise ValueError(f"No safetensors info found for {model_id}")
    return Memory.from_bytes(info.safetensors.total)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/models/model_cards.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/tests/test_apply/test_apply_node_download.py | from exo.shared.apply import apply_node_download_progress
from exo.shared.tests.conftest import get_pipeline_shard_metadata
from exo.shared.types.common import NodeId
from exo.shared.types.events import NodeDownloadProgress
from exo.shared.types.memory import Memory
from exo.shared.types.state import State
from exo.shared.types.worker.downloads import DownloadCompleted
from exo.worker.tests.constants import MODEL_A_ID, MODEL_B_ID
def test_apply_node_download_progress():
    """A first progress event for a node creates that node's download list."""
    initial = State()
    shard = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0, world_size=2)
    progress = DownloadCompleted(
        node_id=NodeId("node-1"),
        shard_metadata=shard,
        total=Memory(),
    )
    result = apply_node_download_progress(
        NodeDownloadProgress(download_progress=progress), initial
    )
    assert result.downloads == {NodeId("node-1"): [progress]}
def test_apply_two_node_download_progress():
    """A second event for the same node appends to its existing list."""
    shard_a = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0, world_size=2)
    shard_b = get_pipeline_shard_metadata(MODEL_B_ID, device_rank=0, world_size=2)
    first = DownloadCompleted(
        node_id=NodeId("node-1"),
        shard_metadata=shard_a,
        total=Memory(),
    )
    second = DownloadCompleted(
        node_id=NodeId("node-1"),
        shard_metadata=shard_b,
        total=Memory(),
    )
    initial = State(downloads={NodeId("node-1"): [first]})
    result = apply_node_download_progress(
        NodeDownloadProgress(download_progress=second), initial
    )
    assert result.downloads == {NodeId("node-1"): [first, second]}
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tests/test_apply/test_apply_node_download.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/tests/test_election.py | import pytest
from anyio import create_task_group, fail_after, move_on_after
from exo.routing.connection_message import ConnectionMessage
from exo.shared.election import Election, ElectionMessage, ElectionResult
from exo.shared.types.commands import ForwarderCommand, TestCommand
from exo.shared.types.common import NodeId, SessionId, SystemId
from exo.utils.channels import channel
# ======= #
# Helpers #
# ======= #
def em(
    clock: int,
    seniority: int,
    node_id: str,
    commands_seen: int = 0,
    election_clock: int | None = None,
) -> ElectionMessage:
    """
    Helper to build ElectionMessages for a given proposer node.
    The new API carries a proposed SessionId (master_node_id + election_clock).
    By default we use the same value for election_clock as the 'clock' of the round.
    """
    session_clock = election_clock if election_clock is not None else clock
    session = SessionId(
        master_node_id=NodeId(node_id), election_clock=session_clock
    )
    return ElectionMessage(
        clock=clock,
        seniority=seniority,
        proposed_session=session,
        commands_seen=commands_seen,
    )
# ======================================= #
# TESTS #
# ======================================= #
@pytest.fixture(autouse=True)
def fast_election_timeout(monkeypatch: pytest.MonkeyPatch):
    """Shrink the election timeout so every test's campaign settles quickly."""
    monkeypatch.setattr("exo.shared.election.DEFAULT_ELECTION_TIMEOUT", 0.1)
@pytest.mark.anyio
async def test_single_round_broadcasts_and_updates_seniority_on_self_win() -> None:
    """
    Start a round by injecting an ElectionMessage with higher clock.
    With only our node effectively 'winning', we should broadcast once and update seniority.
    """
    # (fast_election_timeout fixture shrinks DEFAULT_ELECTION_TIMEOUT to 0.1s)
    # Outbound election messages from the Election (we'll observe these)
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    # Inbound election messages to the Election (we'll inject these)
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    # Election results produced by the Election (we'll observe these)
    er_tx, er_rx = channel[ElectionResult]()
    # Connection messages
    cm_tx, cm_rx = channel[ConnectionMessage]()
    # Commands
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("B"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Trigger new round at clock=1 (peer announces it)
            await em_in_tx.send(em(clock=1, seniority=0, node_id="A"))
            # Expect our broadcast back to the peer side for this round only
            while True:
                got = await em_out_rx.receive()
                if got.clock == 1 and got.proposed_session.master_node_id == NodeId(
                    "B"
                ):
                    break
            # Wait for the round to finish and produce an ElectionResult
            result = await er_rx.receive()
            assert result.session_id.master_node_id == NodeId("B")
            # We spawned as master; electing ourselves again is not "new master".
            assert result.is_new_master is False
            # Close inbound streams to end the receivers (and run())
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # We should have updated seniority to 2 (A + B).
    assert election.seniority == 2
@pytest.mark.anyio
async def test_peer_with_higher_seniority_wins_and_we_switch_master() -> None:
    """
    If a peer with clearly higher seniority participates in the round, they should win.
    We should broadcast our status exactly once for this round, then switch master.
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("ME"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Start round with peer's message (higher seniority)
            await em_in_tx.send(em(clock=1, seniority=10, node_id="PEER"))
            # We should still broadcast our status exactly once for this round
            while True:
                got = await em_out_rx.receive()
                if got.clock == 1:
                    # Our seniority defaults to 0; the peer's 10 should beat it.
                    assert got.seniority == 0
                    break
            # After the timeout, election result for clock=1 should report the peer as master
            # (Skip any earlier result from the boot campaign at clock=0 by filtering on election_clock)
            while True:
                result = await er_rx.receive()
                if result.session_id.election_clock == 1:
                    break
            assert result.session_id.master_node_id == NodeId("PEER")
            assert result.is_new_master is True
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # We lost → seniority unchanged
    assert election.seniority == 0
@pytest.mark.anyio
async def test_ignores_older_messages() -> None:
    """
    Messages with a lower clock than the current round are ignored by the receiver.
    Expect exactly one broadcast for the higher clock round.
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, _er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("ME"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Newer round arrives first -> triggers campaign at clock=2
            await em_in_tx.send(em(clock=2, seniority=0, node_id="A"))
            while True:
                first = await em_out_rx.receive()
                if first.clock == 2:
                    break
            # Older message (clock=1) must be ignored (no second broadcast),
            # even though its seniority (999) would otherwise win.
            await em_in_tx.send(em(clock=1, seniority=999, node_id="B"))
            got_second = False
            # Short grace window: absence of a broadcast within it is the pass condition.
            with move_on_after(0.05):
                _ = await em_out_rx.receive()
                got_second = True
            assert not got_second, "Should not receive a broadcast for an older round"
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # Not asserting on the result; focus is on ignore behavior.
@pytest.mark.anyio
async def test_two_rounds_emit_two_broadcasts_and_increment_clock() -> None:
    """
    Two successive rounds → two broadcasts. Second round triggered by a higher-clock message.
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, _er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("ME"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Round 1 at clock=1
            await em_in_tx.send(em(clock=1, seniority=0, node_id="X"))
            while True:
                m1 = await em_out_rx.receive()
                if m1.clock == 1:
                    break
            # Round 2 at clock=2
            await em_in_tx.send(em(clock=2, seniority=0, node_id="Y"))
            while True:
                m2 = await em_out_rx.receive()
                if m2.clock == 2:
                    break
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # Not asserting on who won; just that both rounds were broadcast.
@pytest.mark.anyio
async def test_promotion_new_seniority_counts_participants() -> None:
    """
    When we win against two peers in the same round, our seniority becomes
    max(existing, number_of_candidates). With existing=0: expect 3 (us + A + B).
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("ME"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Start round at clock=7 with two peer participants
            await em_in_tx.send(em(clock=7, seniority=0, node_id="A"))
            await em_in_tx.send(em(clock=7, seniority=0, node_id="B"))
            # We should see exactly one broadcast from us for this round
            while True:
                got = await em_out_rx.receive()
                if got.clock == 7 and got.proposed_session.master_node_id == NodeId(
                    "ME"
                ):
                    break
            # Wait for the election to finish so seniority updates
            _ = await er_rx.receive()
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # We + A + B = 3 → new seniority expected to be 3
    assert election.seniority == 3
@pytest.mark.anyio
async def test_connection_message_triggers_new_round_broadcast() -> None:
    """
    A connection message increments the clock and starts a new campaign.
    We should observe a broadcast at the incremented clock.
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, _er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    election = Election(
        node_id=NodeId("ME"),
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Send any connection message object; we close quickly to cancel before result creation
            await cm_tx.send(ConnectionMessage(node_id=NodeId(), connected=True))
            # Expect a broadcast for the new round at clock=1
            while True:
                got = await em_out_rx.receive()
                if got.clock == 1 and got.proposed_session.master_node_id == NodeId(
                    "ME"
                ):
                    break
            # Close promptly to avoid waiting for campaign completion
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
    # After cancellation (before election finishes), no seniority changes asserted here.
@pytest.mark.anyio
async def test_tie_breaker_prefers_node_with_more_commands_seen() -> None:
    """
    With equal seniority, the node that has seen more commands should win the election.
    We increase our local 'commands_seen' by sending TestCommand()s before triggering the round.
    """
    em_out_tx, em_out_rx = channel[ElectionMessage]()
    em_in_tx, em_in_rx = channel[ElectionMessage]()
    er_tx, er_rx = channel[ElectionResult]()
    cm_tx, cm_rx = channel[ConnectionMessage]()
    co_tx, co_rx = channel[ForwarderCommand]()
    me = NodeId("ME")
    election = Election(
        node_id=me,
        election_message_receiver=em_in_rx,
        election_message_sender=em_out_tx,
        election_result_sender=er_tx,
        connection_message_receiver=cm_rx,
        command_receiver=co_rx,
        is_candidate=True,
        seniority=0,
    )
    async with create_task_group() as tg:
        with fail_after(2):
            tg.start_soon(election.run)
            # Pump local commands so our commands_seen is high before the round starts
            for _ in range(50):
                await co_tx.send(
                    ForwarderCommand(origin=SystemId("SOMEONE"), command=TestCommand())
                )
            # Trigger a round at clock=1 with a peer of equal seniority but fewer commands
            await em_in_tx.send(
                em(clock=1, seniority=0, node_id="PEER", commands_seen=5)
            )
            # Observe our broadcast for this round (to ensure we've joined the round)
            while True:
                got = await em_out_rx.receive()
                if got.clock == 1 and got.proposed_session.master_node_id == me:
                    # We don't assert exact count, just that we've participated this round.
                    break
            # The elected result for clock=1 should be us due to higher commands_seen
            while True:
                result = await er_rx.receive()
                if result.session_id.master_node_id == me:
                    # May be the boot campaign (clock 0) or the tie-break round (clock 1).
                    assert result.session_id.election_clock in (0, 1)
                    break
            em_in_tx.close()
            cm_tx.close()
            co_tx.close()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tests/test_election.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/tests/test_node_id_persistence.py | import contextlib
import multiprocessing
import os
from multiprocessing import Event, Queue, Semaphore
from multiprocessing.process import BaseProcess
from multiprocessing.queues import Queue as QueueT
from multiprocessing.synchronize import Event as EventT
from multiprocessing.synchronize import Semaphore as SemaphoreT
from loguru import logger
from pytest import LogCaptureFixture, mark
from exo.routing.router import get_node_id_keypair
from exo.shared.constants import EXO_NODE_ID_KEYPAIR
NUM_CONCURRENT_PROCS = 10
def _get_keypair_concurrent_subprocess_task(
    sem: SemaphoreT, ev: EventT, queue: QueueT[bytes]
) -> None:
    """Child-process body: signal readiness, wait for the go event, then
    read the node-id keypair once and report its bytes back on *queue*."""
    # synchronise with parent process
    sem.release()
    # wait to be told to begin simultaneous read
    ev.wait()
    queue.put(get_node_id_keypair().to_bytes())
def _get_keypair_concurrent(num_procs: int) -> bytes:
    """
    Fork *num_procs* subprocesses that all read the node-id keypair at the
    same instant, assert they all observed identical bytes, and return them.

    Raises AssertionError if fewer results than processes arrive or any two
    processes read different keypairs.
    """
    assert num_procs > 0
    sem = Semaphore(0)
    ev = Event()
    queue: QueueT[bytes] = Queue(maxsize=num_procs)

    # make parent process wait for all subprocesses to start
    logger.info(f"PARENT: Starting {num_procs} subprocesses")
    ps: list[BaseProcess] = []
    for _ in range(num_procs):
        p = multiprocessing.get_context("fork").Process(
            target=_get_keypair_concurrent_subprocess_task, args=(sem, ev, queue)
        )
        ps.append(p)
        p.start()
    for _ in range(num_procs):
        sem.acquire()

    # start all the sub processes simultaneously
    logger.info("PARENT: Beginning read")
    ev.set()

    # wait until all subprocesses are done & read results
    for p in ps:
        p.join()

    # check that all subprocesses ended up reading the same keypair bytes
    logger.info("PARENT: Checking consistency")
    keypair: bytes | None = None
    qsize = 0  # cannot use Queue.qsize due to MacOS incompatibility :(
    while not queue.empty():
        qsize += 1
        temp_keypair = queue.get()
        if keypair is None:
            keypair = temp_keypair
        else:
            assert keypair == temp_keypair
    assert num_procs == qsize
    # num_procs > 0 and qsize == num_procs, so the loop assigned keypair at
    # least once; assert this explicitly instead of the former pyright ignore.
    assert keypair is not None
    return keypair
def _delete_if_exists(p: str | bytes | os.PathLike[str] | os.PathLike[bytes]):
with contextlib.suppress(OSError):
os.remove(p)
@mark.skip(reason="this functionality is currently disabled but may return in future")
def test_node_id_fetching(caplog: LogCaptureFixture):
    """Keypair persistence: repeated concurrent reads agree until the backing
    file is deleted, after which a fresh keypair is generated."""
    reps = 10
    # delete current file and write a new one
    _delete_if_exists(EXO_NODE_ID_KEYPAIR)
    kp = _get_keypair_concurrent(NUM_CONCURRENT_PROCS)
    with caplog.at_level(101):  # suppress logs (level above CRITICAL)
        # make sure that continuous fetches return the same value
        for _ in range(reps):
            assert kp == _get_keypair_concurrent(NUM_CONCURRENT_PROCS)
        # make sure that after deleting, we are not fetching the same value
        _delete_if_exists(EXO_NODE_ID_KEYPAIR)
        for _ in range(reps):
            assert kp != _get_keypair_concurrent(NUM_CONCURRENT_PROCS)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tests/test_node_id_persistence.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/tests/test_state_serialization.py | from exo.shared.types.common import NodeId
from exo.shared.types.multiaddr import Multiaddr
from exo.shared.types.state import State
from exo.shared.types.topology import Connection, SocketConnection
def test_state_serialization_roundtrip() -> None:
    """Verify that State → JSON → State round-trip preserves topology."""
    # Build a minimal two-node state joined by one socket connection.
    src = NodeId("node-a")
    dst = NodeId("node-b")
    state = State()
    state.topology.add_connection(
        Connection(
            source=src,
            sink=dst,
            edge=SocketConnection(
                sink_multiaddr=Multiaddr(address="/ip4/127.0.0.1/tcp/10001"),
            ),
        )
    )

    # Serialize to JSON and parse it back.
    serialized = state.model_dump_json()
    restored = State.model_validate_json(serialized)

    # Node list and connection key-set must survive the round-trip.
    before = state.topology.to_snapshot()
    after = restored.topology.to_snapshot()
    assert before.nodes == after.nodes
    assert set(before.connections) == set(after.connections)
    # Re-serializing the restored state must reproduce the identical JSON.
    assert restored.model_dump_json() == serialized
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/tests/test_state_serialization.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/shared/topology.py | import contextlib
from collections.abc import Mapping, Sequence
from dataclasses import dataclass, field
from typing import Iterable
import rustworkx as rx
from pydantic import BaseModel, ConfigDict
from exo.shared.types.common import NodeId
from exo.shared.types.profiling import (
InterfaceType,
NodeNetworkInfo,
ThunderboltBridgeStatus,
)
from exo.shared.types.topology import (
Connection,
Cycle,
RDMAConnection,
SocketConnection,
)
class TopologySnapshot(BaseModel):
    """Immutable, serializable view of a Topology: the node list plus a
    source → sink → edge-list mapping of every connection."""

    nodes: Sequence[NodeId]
    connections: Mapping[
        NodeId, Mapping[NodeId, Sequence[SocketConnection | RDMAConnection]]
    ]
    model_config = ConfigDict(frozen=True, extra="forbid")
@dataclass
class Topology:
    """Directed multigraph of nodes and their socket/RDMA connections,
    backed by a rustworkx PyDiGraph with a NodeId → vertex-index map."""

    _graph: rx.PyDiGraph[NodeId, SocketConnection | RDMAConnection] = field(
        init=False, default_factory=rx.PyDiGraph
    )
    # Maps each NodeId to its rustworkx vertex index in _graph.
    _vertex_indices: dict[NodeId, int] = field(init=False, default_factory=dict)

    def to_snapshot(self) -> TopologySnapshot:
        """Export the current nodes and connections as an immutable snapshot."""
        return TopologySnapshot(
            nodes=list(self.list_nodes()), connections=self.map_connections()
        )

    @classmethod
    def from_snapshot(cls, snapshot: TopologySnapshot) -> "Topology":
        """Rebuild a Topology from a snapshot, re-adding every node and edge."""
        topology = cls()
        for node_id in snapshot.nodes:
            # add_node is idempotent; suppress kept for safety against ValueError.
            with contextlib.suppress(ValueError):
                topology.add_node(node_id)
        for source in snapshot.connections:
            for sink in snapshot.connections[source]:
                for edge in snapshot.connections[source][sink]:
                    topology.add_connection(
                        Connection(source=source, sink=sink, edge=edge)
                    )
        return topology

    def add_node(self, node_id: NodeId) -> None:
        """Add a node if not already present (no-op otherwise)."""
        if node_id in self._vertex_indices:
            return
        rx_id = self._graph.add_node(node_id)
        self._vertex_indices[node_id] = rx_id

    def node_is_leaf(self, node_id: NodeId) -> bool:
        """True if the node exists and has at most one neighbour."""
        return (
            node_id in self._vertex_indices
            and len(self._graph.neighbors(self._vertex_indices[node_id])) <= 1
        )

    def neighbours(self, node_id: NodeId) -> list[NodeId]:
        """NodeIds adjacent to *node_id* (raises KeyError if node unknown)."""
        return [
            self._graph[rx_id]
            for rx_id in self._graph.neighbors(self._vertex_indices[node_id])
        ]

    def out_edges(self, node_id: NodeId) -> Iterable[Connection]:
        """Yield outgoing Connections of *node_id*; empty if node unknown."""
        if node_id not in self._vertex_indices:
            return []
        return (
            Connection(source=self._graph[source], sink=self._graph[sink], edge=edge)
            for source, sink, edge in self._graph.out_edges(
                self._vertex_indices[node_id]
            )
        )

    def contains_node(self, node_id: NodeId) -> bool:
        """True if *node_id* is part of this topology."""
        return node_id in self._vertex_indices

    def add_connection(self, conn: Connection) -> None:
        """Insert an edge, creating missing endpoints; duplicate edges are
        skipped (equality checked against existing edges between the pair)."""
        source, sink, edge = conn.source, conn.sink, conn.edge
        # del guards against accidentally using the unpacked Connection below.
        del conn
        if edge in self.get_all_connections_between(source, sink):
            return
        if source not in self._vertex_indices:
            self.add_node(source)
        if sink not in self._vertex_indices:
            self.add_node(sink)
        src_id = self._vertex_indices[source]
        sink_id = self._vertex_indices[sink]
        _ = self._graph.add_edge(src_id, sink_id, edge)

    def get_all_connections_between(
        self, source: NodeId, sink: NodeId
    ) -> Iterable[SocketConnection | RDMAConnection]:
        """All parallel edges from *source* to *sink*; empty if either is
        unknown or no edge exists."""
        if source not in self._vertex_indices:
            return []
        if sink not in self._vertex_indices:
            return []
        src_id = self._vertex_indices[source]
        sink_id = self._vertex_indices[sink]
        try:
            return self._graph.get_all_edge_data(src_id, sink_id)
        except rx.NoEdgeBetweenNodes:
            return []

    def list_nodes(self) -> Iterable[NodeId]:
        """All NodeIds currently in the graph."""
        return self._graph.nodes()

    def map_connections(
        self,
    ) -> Mapping[NodeId, Mapping[NodeId, Sequence[SocketConnection | RDMAConnection]]]:
        """Group every edge into a source → sink → [edges] nested mapping."""
        base: dict[NodeId, dict[NodeId, list[SocketConnection | RDMAConnection]]] = {}
        for src_id, sink_id, connection in self._graph.weighted_edge_list():
            source = self._graph[src_id]
            sink = self._graph[sink_id]
            if source not in base:
                base[source] = {}
            if sink not in base[source]:
                base[source][sink] = []
            base[source][sink].append(connection)
        return base

    def list_connections(
        self,
    ) -> Iterable[Connection]:
        """Yield every edge as a Connection (lazy generator)."""
        return (
            (
                Connection(
                    source=self._graph[src_id],
                    sink=self._graph[sink_id],
                    edge=connection,
                )
            )
            for src_id, sink_id, connection in self._graph.weighted_edge_list()
        )

    def remove_node(self, node_id: NodeId) -> None:
        """Remove a node and its incident edges (no-op if unknown)."""
        if node_id not in self._vertex_indices:
            return
        rx_idx = self._vertex_indices[node_id]
        self._graph.remove_node(rx_idx)
        del self._vertex_indices[node_id]

    def replace_all_out_rdma_connections(
        self, source: NodeId, new_connections: Sequence[Connection]
    ) -> None:
        """Drop every outgoing RDMA edge of *source*, then add *new_connections*."""
        # NOTE(review): removes edges while iterating out_edge_indices —
        # assumes rustworkx returns a materialized index list; confirm.
        for conn_idx in self._graph.out_edge_indices(self._vertex_indices[source]):
            if isinstance(self._graph.get_edge_data_by_index(conn_idx), RDMAConnection):
                self._graph.remove_edge_from_index(conn_idx)
        for conn in new_connections:
            self.add_connection(conn)

    def remove_connection(self, conn: Connection) -> None:
        """Remove each edge between conn.source/conn.sink equal to conn.edge."""
        if (
            conn.source not in self._vertex_indices
            or conn.sink not in self._vertex_indices
        ):
            return
        for conn_idx in self._graph.edge_indices_from_endpoints(
            self._vertex_indices[conn.source], self._vertex_indices[conn.sink]
        ):
            if self._graph.get_edge_data_by_index(conn_idx) == conn.edge:
                self._graph.remove_edge_from_index(conn_idx)

    def get_cycles(self) -> list[Cycle]:
        """Get simple cycles in the graph, including singleton cycles"""
        cycle_idxs = rx.simple_cycles(self._graph)
        cycles: list[Cycle] = []
        for cycle_idx in cycle_idxs:
            cycle = Cycle(node_ids=[self._graph[idx] for idx in cycle_idx])
            cycles.append(cycle)
        # Every node also counts as a trivial one-node cycle.
        for node_id in self.list_nodes():
            cycles.append(Cycle(node_ids=[node_id]))
        return cycles

    def get_rdma_cycles(self) -> list[Cycle]:
        """Simple cycles using only RDMA edges."""
        rdma_edges = [
            (u, v, conn)
            for u, v, conn in self._graph.weighted_edge_list()
            if isinstance(conn, RDMAConnection)
        ]
        rdma_graph: rx.PyDiGraph[NodeId, SocketConnection | RDMAConnection] = (
            rx.PyDiGraph()
        )
        rdma_graph.add_nodes_from(self._graph.nodes())
        # NOTE(review): u/v are vertex indices from self._graph; reusing them in
        # rdma_graph assumes self._graph's indices are contiguous from 0 (i.e.
        # no node was ever removed, leaving holes) — TODO confirm.
        for u, v, conn in rdma_edges:
            rdma_graph.add_edge(u, v, conn)
        cycle_idxs = rx.simple_cycles(rdma_graph)
        cycles: list[Cycle] = []
        for cycle_idx in cycle_idxs:
            cycle = Cycle(node_ids=[rdma_graph[idx] for idx in cycle_idx])
            cycles.append(cycle)
        return cycles

    def get_subgraph_from_nodes(self, node_ids: list[NodeId]) -> "Topology":
        """New Topology restricted to *node_ids* and the edges among them."""
        topology = Topology()
        for node_id in node_ids:
            topology.add_node(node_id)
        for connection in self.list_connections():
            if connection.source in node_ids and connection.sink in node_ids:
                topology.add_connection(connection)
        return topology

    def is_rdma_cycle(self, cycle: Cycle) -> bool:
        """True if every intra-cycle adjacency has at least one RDMA edge."""
        node_idxs = [node for node in cycle]
        rx_idxs = [self._vertex_indices[idx] for idx in node_idxs]
        for rid in rx_idxs:
            for neighbor_rid in self._graph.neighbors(rid):
                # Only adjacencies within the cycle matter.
                if neighbor_rid not in rx_idxs:
                    continue
                has_rdma = False
                for edge in self._graph.get_all_edge_data(rid, neighbor_rid):
                    if isinstance(edge, RDMAConnection):
                        has_rdma = True
                        break
                if not has_rdma:
                    return False
        return True

    def get_thunderbolt_bridge_cycles(
        self,
        node_tb_bridge_status: Mapping[NodeId, ThunderboltBridgeStatus],
        node_network: Mapping[NodeId, NodeNetworkInfo],
    ) -> list[list[NodeId]]:
        """
        Find cycles in the Thunderbolt topology where all nodes have TB bridge enabled.
        Only returns cycles with >=2 nodes (2+ machines in a loop), as
        1 node doesn't cause the broadcast storm problem.
        """
        enabled_nodes = {
            node_id
            for node_id, status in node_tb_bridge_status.items()
            if status.enabled
        }
        if len(enabled_nodes) < 2:
            return []
        thunderbolt_ips = _get_ips_with_interface_type(
            enabled_nodes, node_network, "thunderbolt"
        )
        # Build subgraph with only TB bridge enabled nodes and thunderbolt connections
        graph: rx.PyDiGraph[NodeId, SocketConnection | RDMAConnection] = rx.PyDiGraph()
        node_to_idx: dict[NodeId, int] = {}
        for node_id in enabled_nodes:
            if node_id in self._vertex_indices:
                node_to_idx[node_id] = graph.add_node(node_id)
        for u, v, conn in self._graph.weighted_edge_list():
            source_id, sink_id = self._graph[u], self._graph[v]
            if source_id not in node_to_idx or sink_id not in node_to_idx:
                continue
            # Include connection if it's over a thunderbolt interface
            if (
                isinstance(conn, SocketConnection)
                and conn.sink_multiaddr.ip_address in thunderbolt_ips
            ):
                graph.add_edge(node_to_idx[source_id], node_to_idx[sink_id], conn)
            # RDMA links are assumed to run over Thunderbolt as well.
            if isinstance(conn, RDMAConnection):
                graph.add_edge(node_to_idx[source_id], node_to_idx[sink_id], conn)
        return [
            [graph[idx] for idx in cycle]
            for cycle in rx.simple_cycles(graph)
            if len(cycle) >= 2
        ]
def _get_ips_with_interface_type(
    node_ids: set[NodeId],
    node_network: Mapping[NodeId, NodeNetworkInfo],
    interface_type: InterfaceType,
) -> set[str]:
    """Get all IP addresses on interfaces of the specified type for the given nodes."""
    # Flatten every matching interface across all requested nodes into one set;
    # nodes missing from node_network contribute an empty default.
    return {
        iface.ip_address
        for node_id in node_ids
        for iface in node_network.get(node_id, NodeNetworkInfo()).interfaces
        if iface.interface_type == interface_type
    }
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/topology.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/types/api.py | import time
from collections.abc import Generator
from typing import Annotated, Any, Literal, get_args
from uuid import uuid4
from pydantic import BaseModel, Field, field_validator
from exo.shared.models.model_cards import ModelCard, ModelId
from exo.shared.types.common import CommandId, NodeId
from exo.shared.types.memory import Memory
from exo.shared.types.worker.instances import Instance, InstanceId, InstanceMeta
from exo.shared.types.worker.shards import Sharding, ShardMetadata
from exo.utils.pydantic_ext import CamelCaseModel
# Closed set of reasons a completion may terminate; "error" is a local
# addition alongside the standard finish_reason values.
FinishReason = Literal[
    "stop", "length", "tool_calls", "content_filter", "function_call", "error"
]
class ErrorInfo(BaseModel):
    """Details of a single API error."""

    message: str
    type: str
    param: str | None = None
    code: int


class ErrorResponse(BaseModel):
    """Envelope wrapping an ErrorInfo in error responses."""

    error: ErrorInfo


class ModelListModel(BaseModel):
    """One entry in the model-listing response."""

    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "exo"
    # openwebui fields
    hugging_face_id: str = Field(default="")
    name: str = Field(default="")
    description: str = Field(default="")
    context_length: int = Field(default=0)
    tags: list[str] = Field(default=[])
    storage_size_megabytes: int = Field(default=0)
    supports_tensor: bool = Field(default=False)
    tasks: list[str] = Field(default=[])
    is_custom: bool = Field(default=False)
    family: str = Field(default="")
    quantization: str = Field(default="")
    base_model: str = Field(default="")
    capabilities: list[str] = Field(default_factory=list)


class ModelList(BaseModel):
    """Listing of available models (always `object == "list"`)."""

    object: Literal["list"] = "list"
    data: list[ModelListModel]
class ChatCompletionMessageText(BaseModel):
    """Text content part of a chat message."""

    type: Literal["text"] = "text"
    text: str


class ToolCallItem(BaseModel):
    """Function name and JSON-encoded arguments of one tool call."""

    id: str = Field(default_factory=lambda: str(uuid4()))
    name: str
    arguments: str


class ToolCall(BaseModel):
    """A tool invocation attached to an assistant message."""

    id: str
    index: int | None = None
    type: Literal["function"] = "function"
    function: ToolCallItem


class ChatCompletionMessage(BaseModel):
    """One chat message; content may be a string, a text part, a list of
    text parts, or None."""

    role: Literal["system", "user", "assistant", "developer", "tool", "function"]
    content: (
        str | ChatCompletionMessageText | list[ChatCompletionMessageText] | None
    ) = None
    reasoning_content: str | None = None
    name: str | None = None
    tool_calls: list[ToolCall] | None = None
    tool_call_id: str | None = None
    function_call: dict[str, Any] | None = None


class BenchChatCompletionMessage(ChatCompletionMessage):
    """Alias used by the benchmark endpoints; no extra fields."""

    pass
class TopLogprobItem(BaseModel):
    """A candidate token with its log probability."""

    token: str
    logprob: float
    bytes: list[int] | None = None


class LogprobsContentItem(BaseModel):
    """Logprob info for one emitted token, including its top alternatives."""

    token: str
    logprob: float
    bytes: list[int] | None = None
    top_logprobs: list[TopLogprobItem]


class Logprobs(BaseModel):
    """Per-token logprob data for a completion choice."""

    content: list[LogprobsContentItem] | None = None


class PromptTokensDetails(BaseModel):
    """Breakdown of prompt-side token counts."""

    cached_tokens: int = 0
    audio_tokens: int = 0


class CompletionTokensDetails(BaseModel):
    """Breakdown of completion-side token counts."""

    reasoning_tokens: int = 0
    audio_tokens: int = 0
    accepted_prediction_tokens: int = 0
    rejected_prediction_tokens: int = 0


class Usage(BaseModel):
    """Token accounting for a request."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    prompt_tokens_details: PromptTokensDetails
    completion_tokens_details: CompletionTokensDetails


class StreamingChoiceResponse(BaseModel):
    """One streamed delta chunk for a completion choice."""

    index: int
    delta: ChatCompletionMessage
    logprobs: Logprobs | None = None
    finish_reason: FinishReason | None = None
    usage: Usage | None = None
class ChatCompletionChoice(BaseModel):
    """One non-streaming completion choice."""

    index: int
    message: ChatCompletionMessage
    logprobs: Logprobs | None = None
    finish_reason: FinishReason | None = None


class ChatCompletionResponse(BaseModel):
    """Full chat-completion response; choices may be streaming or not."""

    id: str
    object: Literal["chat.completion"] = "chat.completion"
    created: int
    model: str
    choices: list[ChatCompletionChoice | StreamingChoiceResponse]
    usage: Usage | None = None
    service_tier: str | None = None


class GenerationStats(BaseModel):
    """Throughput and memory stats for a text generation run."""

    prompt_tps: float
    generation_tps: float
    prompt_tokens: int
    generation_tokens: int
    peak_memory_usage: Memory


class ImageGenerationStats(BaseModel):
    """Timing and memory stats for an image generation run."""

    seconds_per_step: float
    total_generation_time: float
    num_inference_steps: int
    num_images: int
    image_width: int
    image_height: int
    peak_memory_usage: Memory


class BenchChatCompletionResponse(ChatCompletionResponse):
    """Completion response augmented with generation statistics."""

    generation_stats: GenerationStats | None = None


class StreamOptions(BaseModel):
    """Options controlling streamed responses."""

    include_usage: bool = False
class ChatCompletionRequest(BaseModel):
    """Incoming chat-completion request; field names follow the common
    chat-completions wire format."""

    model: ModelId
    frequency_penalty: float | None = None
    messages: list[ChatCompletionMessage]
    logit_bias: dict[str, int] | None = None
    logprobs: bool | None = None
    top_logprobs: int | None = None
    max_tokens: int | None = None
    n: int | None = None
    presence_penalty: float | None = None
    response_format: dict[str, Any] | None = None
    seed: int | None = None
    stop: str | list[str] | None = None
    stream: bool = False
    stream_options: StreamOptions | None = None
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    tools: list[dict[str, Any]] | None = None
    enable_thinking: bool | None = None
    tool_choice: str | dict[str, Any] | None = None
    parallel_tool_calls: bool | None = None
    user: str | None = None


class BenchChatCompletionRequest(ChatCompletionRequest):
    """Benchmark variant of the request; no extra fields."""

    pass
class AddCustomModelParams(BaseModel):
    """Parameters for registering a custom model."""

    model_id: ModelId


class HuggingFaceSearchResult(BaseModel):
    """One model hit from a Hugging Face search."""

    id: str
    author: str = ""
    downloads: int = 0
    likes: int = 0
    last_modified: str = ""
    tags: list[str] = Field(default_factory=list)


class PlaceInstanceParams(BaseModel):
    """Parameters for asking the master to place a model instance."""

    model_id: ModelId
    sharding: Sharding = Sharding.Pipeline
    instance_meta: InstanceMeta = InstanceMeta.MlxRing
    min_nodes: int = 1


class CreateInstanceParams(BaseModel):
    """Parameters for creating a fully specified instance."""

    instance: Instance


class PlacementPreview(BaseModel):
    """Dry-run placement result for one sharding/meta combination."""

    model_id: ModelId
    sharding: Sharding
    instance_meta: InstanceMeta
    instance: Instance | None = None
    # Keys are NodeId strings, values are additional bytes that would be used on that node
    memory_delta_by_node: dict[str, int] | None = None
    error: str | None = None


class PlacementPreviewResponse(BaseModel):
    """Collection of placement previews."""

    previews: list[PlacementPreview]


class DeleteInstanceTaskParams(BaseModel):
    """Parameters identifying the instance to delete."""

    instance_id: str


class CreateInstanceResponse(BaseModel):
    """Acknowledgement for an instance-creation command."""

    message: str
    command_id: CommandId
    model_card: ModelCard


class DeleteInstanceResponse(BaseModel):
    """Acknowledgement for an instance-deletion command."""

    message: str
    command_id: CommandId
    instance_id: InstanceId
# Accepted output resolutions; "auto" lets the backend pick.
ImageSize = Literal[
    "auto",
    "512x512",
    "768x768",
    "1024x768",
    "768x1024",
    "1024x1024",
    "1024x1536",
    "1536x1024",
]


def normalize_image_size(v: object) -> ImageSize:
    """Shared validator for ImageSize fields: maps None → "auto" and rejects invalid values."""
    if v is None:
        return "auto"
    if v in get_args(ImageSize):
        return v  # pyright: ignore[reportReturnType]
    raise ValueError(f"Invalid size: {v!r}. Must be one of {get_args(ImageSize)}")
class AdvancedImageParams(BaseModel):
    """Optional tuning knobs for image generation; all fields are bounded."""

    seed: Annotated[int, Field(ge=0)] | None = None
    num_inference_steps: Annotated[int, Field(ge=1, le=100)] | None = None
    guidance: Annotated[float, Field(ge=1.0, le=20.0)] | None = None
    negative_prompt: str | None = None
    num_sync_steps: Annotated[int, Field(ge=1, le=100)] | None = None
class ImageGenerationTaskParams(BaseModel):
    """Parameters of an image-generation task; `size` is normalized so that
    None becomes "auto"."""

    prompt: str
    background: str | None = None
    model: str
    moderation: str | None = None
    n: int | None = 1
    output_compression: int | None = None
    output_format: Literal["png", "jpeg", "webp"] = "png"
    partial_images: int | None = 0
    quality: Literal["high", "medium", "low"] | None = "medium"
    response_format: Literal["url", "b64_json"] | None = "b64_json"
    size: ImageSize = "auto"
    stream: bool | None = False
    style: str | None = "vivid"
    user: str | None = None
    advanced_params: AdvancedImageParams | None = None
    # Internal flag for benchmark mode - set by API, preserved through serialization
    bench: bool = False

    @field_validator("size", mode="before")
    @classmethod
    def normalize_size(cls, v: object) -> ImageSize:
        # Runs before pydantic validation so None is coerced to "auto".
        return normalize_image_size(v)


class BenchImageGenerationTaskParams(ImageGenerationTaskParams):
    """Benchmark variant: bench defaults to True."""

    bench: bool = True
class ImageEditsTaskParams(BaseModel):
    """Internal task params for image-editing requests."""

    image_data: str = ""  # Base64-encoded image (empty when using chunked transfer)
    total_input_chunks: int = 0
    prompt: str
    model: str
    n: int | None = 1
    quality: Literal["high", "medium", "low"] | None = "medium"
    output_format: Literal["png", "jpeg", "webp"] = "png"
    response_format: Literal["url", "b64_json"] | None = "b64_json"
    size: ImageSize = "auto"
    image_strength: float | None = 0.7
    stream: bool = False
    partial_images: int | None = 0
    advanced_params: AdvancedImageParams | None = None
    bench: bool = False

    @field_validator("size", mode="before")
    @classmethod
    def normalize_size(cls, v: object) -> ImageSize:
        # Runs before pydantic validation so None is coerced to "auto".
        return normalize_image_size(v)

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        # Elide the (potentially huge) base64 payload from repr output.
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "image_data":
                yield name, f"<{len(self.image_data)} chars>"
            elif name is not None:
                yield name, value
class ImageData(BaseModel):
    """One generated image in an API response (URL or inline base64)."""

    b64_json: str | None = None
    url: str | None = None
    revised_prompt: str | None = None

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        """Redact the base64 payload from reprs/logs."""
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "b64_json" and self.b64_json is not None:
                yield name, f"<{len(self.b64_json)} chars>"
            elif name is not None:
                yield name, value


class ImageGenerationResponse(BaseModel):
    """OpenAI-compatible image generation response envelope."""

    created: int = Field(default_factory=lambda: int(time.time()))  # unix seconds
    data: list[ImageData]


class BenchImageGenerationResponse(ImageGenerationResponse):
    """Image generation response augmented with benchmark statistics."""

    generation_stats: ImageGenerationStats | None = None


class ImageListItem(BaseModel, frozen=True):
    """Metadata for one stored image exposed by the image-listing endpoint."""

    image_id: str
    url: str
    content_type: str
    expires_at: float  # unix timestamp after which the image expires


class ImageListResponse(BaseModel, frozen=True):
    """Response envelope for the image-listing endpoint."""

    data: list[ImageListItem]
class StartDownloadParams(CamelCaseModel):
    """Request body to start a shard download on a specific node."""

    target_node_id: NodeId
    shard_metadata: ShardMetadata


class StartDownloadResponse(CamelCaseModel):
    """Acknowledges a download request with the command id tracking it."""

    command_id: CommandId


class DeleteDownloadResponse(CamelCaseModel):
    """Acknowledges a download deletion with the command id tracking it."""

    command_id: CommandId


class TraceEventResponse(CamelCaseModel):
    """A single profiling trace span."""

    name: str
    start_us: int  # span start, microseconds
    duration_us: int  # span duration, microseconds
    rank: int  # rank that produced the span
    category: str


class TraceResponse(CamelCaseModel):
    """All trace spans collected for one task."""

    task_id: str
    traces: list[TraceEventResponse]


class TraceCategoryStats(CamelCaseModel):
    """Aggregate timing statistics for one trace category."""

    total_us: int
    count: int
    min_us: int
    max_us: int
    avg_us: float


class TraceRankStats(CamelCaseModel):
    """Per-rank trace statistics, keyed by category."""

    by_category: dict[str, TraceCategoryStats]


class TraceStatsResponse(CamelCaseModel):
    """Aggregated trace statistics for one task."""

    task_id: str
    total_wall_time_us: int
    by_category: dict[str, TraceCategoryStats]
    by_rank: dict[int, TraceRankStats]


class TraceListItem(CamelCaseModel):
    """Summary entry for one stored trace."""

    task_id: str
    created_at: str
    file_size: int  # bytes


class TraceListResponse(CamelCaseModel):
    """Response envelope listing stored traces."""

    traces: list[TraceListItem]


class DeleteTracesRequest(CamelCaseModel):
    """Request to delete stored traces by task id."""

    task_ids: list[str]


class DeleteTracesResponse(CamelCaseModel):
    """Reports which traces were deleted and which were not found."""

    deleted: list[str]
    not_found: list[str]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/api.py",
"license": "Apache License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/types/chunks.py | from collections.abc import Generator
from typing import Any, Literal
from exo.shared.models.model_cards import ModelId
from exo.shared.types.api import (
GenerationStats,
ImageGenerationStats,
TopLogprobItem,
Usage,
)
from exo.utils.pydantic_ext import TaggedModel
from .api import FinishReason
from .common import CommandId
from .worker.runner_response import ToolCallItem
class BaseChunk(TaggedModel):
    """Base class for all streamed generation chunks."""

    model: ModelId


class TokenChunk(BaseChunk):
    """One generated text token plus optional usage/stats/logprob metadata."""

    text: str
    token_id: int
    usage: Usage | None
    finish_reason: Literal["stop", "length", "content_filter"] | None = None
    stats: GenerationStats | None = None
    logprob: float | None = None
    top_logprobs: list[TopLogprobItem] | None = None
    is_thinking: bool = False  # token belongs to a thinking/reasoning span


class ErrorChunk(BaseChunk):
    """Terminal chunk reporting a generation error."""

    error_message: str
    finish_reason: Literal["error"] = "error"


class ToolCallChunk(BaseChunk):
    """Terminal chunk carrying the model's tool-call requests."""

    tool_calls: list[ToolCallItem]
    usage: Usage | None
    finish_reason: Literal["tool_calls"] = "tool_calls"
    stats: GenerationStats | None = None


class ImageChunk(BaseChunk):
    """One piece of a generated image.

    Large images are split across chunks (``chunk_index``/``total_chunks``);
    ``image_index`` selects the image when several are generated, and the
    ``partial_*`` fields describe streamed preview images.
    """

    data: str  # base64-encoded image fragment
    chunk_index: int
    total_chunks: int
    image_index: int
    is_partial: bool = False  # True for intermediate preview images
    partial_index: int | None = None
    total_partials: int | None = None
    stats: ImageGenerationStats | None = None
    format: Literal["png", "jpeg", "webp"] | None = None
    finish_reason: FinishReason | None = None
    error_message: str | None = None

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        """Redact the base64 payload from reprs/logs."""
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "data" and hasattr(value, "__len__"):  # pyright: ignore[reportAny]
                yield name, f"<{len(self.data)} chars>"
            elif name is not None:
                yield name, value


class InputImageChunk(BaseChunk):
    """One piece of a client-supplied input image (chunked upload)."""

    command_id: CommandId
    data: str  # base64-encoded image fragment
    chunk_index: int
    total_chunks: int

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        """Redact the base64 payload from reprs/logs."""
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "data" and hasattr(value, "__len__"):  # pyright: ignore[reportAny]
                yield name, f"<{len(self.data)} chars>"
            elif name is not None:
                yield name, value


class PrefillProgressChunk(BaseChunk):
    """Data class for prefill progress events during streaming."""

    processed_tokens: int
    total_tokens: int


# Union of every chunk type a generation stream can emit.
GenerationChunk = (
    TokenChunk | ImageChunk | ToolCallChunk | ErrorChunk | PrefillProgressChunk
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/chunks.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/commands.py | from pydantic import Field
from exo.shared.models.model_cards import ModelCard, ModelId
from exo.shared.types.api import (
ImageEditsTaskParams,
ImageGenerationTaskParams,
)
from exo.shared.types.chunks import InputImageChunk
from exo.shared.types.common import CommandId, NodeId, SystemId
from exo.shared.types.text_generation import TextGenerationTaskParams
from exo.shared.types.worker.instances import Instance, InstanceId, InstanceMeta
from exo.shared.types.worker.shards import Sharding, ShardMetadata
from exo.utils.pydantic_ext import CamelCaseModel, TaggedModel
class BaseCommand(TaggedModel):
    """Base class for all commands; assigns a fresh command id by default."""

    command_id: CommandId = Field(default_factory=CommandId)


class TestCommand(BaseCommand):
    """No-op command used in tests."""

    __test__ = False  # keep pytest from collecting this class


class TextGeneration(BaseCommand):
    """Request a text generation task."""

    task_params: TextGenerationTaskParams


class ImageGeneration(BaseCommand):
    """Request an image generation task."""

    task_params: ImageGenerationTaskParams


class ImageEdits(BaseCommand):
    """Request an image-editing task."""

    task_params: ImageEditsTaskParams


class PlaceInstance(BaseCommand):
    """Ask the master to place a model instance across nodes."""

    model_card: ModelCard
    sharding: Sharding
    instance_meta: InstanceMeta
    min_nodes: int


class CreateInstance(BaseCommand):
    """Create a fully specified instance."""

    instance: Instance


class DeleteInstance(BaseCommand):
    """Tear down an existing instance."""

    instance_id: InstanceId


class TaskCancelled(BaseCommand):
    """Signal that a previously issued command was cancelled."""

    cancelled_command_id: CommandId


class TaskFinished(BaseCommand):
    """Signal that a previously issued command completed."""

    finished_command_id: CommandId


class SendInputChunk(BaseCommand):
    """Command to send an input image chunk (converted to event by master)."""

    chunk: InputImageChunk


class RequestEventLog(BaseCommand):
    """Ask for a replay of the event log starting at the given index."""

    since_idx: int


class StartDownload(BaseCommand):
    """Start downloading a shard onto a target node."""

    target_node_id: NodeId
    shard_metadata: ShardMetadata


class DeleteDownload(BaseCommand):
    """Delete a downloaded model from a target node."""

    target_node_id: NodeId
    model_id: ModelId


class CancelDownload(BaseCommand):
    """Cancel an in-flight download on a target node."""

    target_node_id: NodeId
    model_id: ModelId


# Commands routed to the download coordinator.
DownloadCommand = StartDownload | DeleteDownload | CancelDownload

# Commands routed to the master.
Command = (
    TestCommand
    | RequestEventLog
    | TextGeneration
    | ImageGeneration
    | ImageEdits
    | PlaceInstance
    | CreateInstance
    | DeleteInstance
    | TaskCancelled
    | TaskFinished
    | SendInputChunk
)


class ForwarderCommand(CamelCaseModel):
    """A command wrapped with its originating system id for forwarding."""

    origin: SystemId
    command: Command


class ForwarderDownloadCommand(CamelCaseModel):
    """A download command wrapped with its originating system id for forwarding."""

    origin: SystemId
    command: DownloadCommand
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/commands.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/common.py | from typing import Self
from uuid import uuid4
from pydantic import GetCoreSchemaHandler, field_validator
from pydantic_core import core_schema
from exo.utils.pydantic_ext import CamelCaseModel
class Id(str):
    """String newtype; defaults to a fresh UUID4 when constructed without a value."""

    def __new__(cls, value: str | None = None) -> Self:
        # Empty string also triggers UUID generation (falsy), not just None.
        return super().__new__(cls, value or str(uuid4()))

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source: type, handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        # Just use a plain string schema
        return core_schema.no_info_after_validator_function(
            cls, core_schema.str_schema()
        )


class NodeId(Id):
    """Identifier for a node."""

    pass


class SystemId(Id):
    """Identifier for a system (process-level identity)."""

    pass


class ModelId(Id):
    """Identifier for a model, e.g. a repo id like ``org/name``."""

    def normalize(self) -> str:
        """Return the id with ``/`` replaced by ``--`` (filesystem-safe)."""
        return self.replace("/", "--")

    def short(self) -> str:
        """Return the final ``/``-separated component of the id."""
        return self.split("/")[-1]


class SessionId(CamelCaseModel):
    """Identifies a master election session."""

    master_node_id: NodeId
    election_clock: int


class CommandId(Id):
    """Identifier for a command."""

    pass


class Host(CamelCaseModel):
    """An ip:port pair."""

    ip: str
    port: int

    def __str__(self) -> str:
        return f"{self.ip}:{self.port}"

    @field_validator("port")
    @classmethod
    def check_port(cls, v: int) -> int:
        """Reject port numbers outside the valid 0-65535 range."""
        if not (0 <= v <= 65535):
            raise ValueError("Port must be between 0 and 65535")
        return v
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/common.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/events.py | from datetime import datetime
from typing import final
from pydantic import Field
from exo.shared.topology import Connection
from exo.shared.types.chunks import GenerationChunk, InputImageChunk
from exo.shared.types.common import CommandId, Id, NodeId, SessionId, SystemId
from exo.shared.types.tasks import Task, TaskId, TaskStatus
from exo.shared.types.worker.downloads import DownloadProgress
from exo.shared.types.worker.instances import Instance, InstanceId
from exo.shared.types.worker.runners import RunnerId, RunnerStatus
from exo.utils.info_gatherer.info_gatherer import GatheredInfo
from exo.utils.pydantic_ext import CamelCaseModel, FrozenModel, TaggedModel
class EventId(Id):
    """
    Newtype around `ID`
    """


class BaseEvent(TaggedModel):
    """Base class for all events; assigns a fresh event id by default."""

    event_id: EventId = Field(default_factory=EventId)
    # Internal, for debugging. Please don't rely on this field for anything!
    _master_time_stamp: None | datetime = None


class TestEvent(BaseEvent):
    """No-op event used in tests."""

    __test__ = False  # keep pytest from collecting this class


class TaskCreated(BaseEvent):
    """A task was created."""

    task_id: TaskId
    task: Task


class TaskAcknowledged(BaseEvent):
    """A worker acknowledged receipt of a task."""

    task_id: TaskId


class TaskDeleted(BaseEvent):
    """A task was removed."""

    task_id: TaskId


class TaskStatusUpdated(BaseEvent):
    """A task moved to a new status."""

    task_id: TaskId
    task_status: TaskStatus


class TaskFailed(BaseEvent):
    """A task failed with the given error type and message."""

    task_id: TaskId
    error_type: str
    error_message: str


class InstanceCreated(BaseEvent):
    """An instance was created."""

    instance: Instance

    def __eq__(self, other: object) -> bool:
        # NOTE(review): returns False (not NotImplemented) for other types,
        # and defining __eq__ without __hash__ makes this subclass
        # unhashable -- confirm it is never used as a dict/set key.
        if isinstance(other, InstanceCreated):
            return self.instance == other.instance and self.event_id == other.event_id
        return False


class InstanceDeleted(BaseEvent):
    """An instance was deleted."""

    instance_id: InstanceId


class RunnerStatusUpdated(BaseEvent):
    """A runner reported a new status."""

    runner_id: RunnerId
    runner_status: RunnerStatus


class RunnerDeleted(BaseEvent):
    """A runner was removed."""

    runner_id: RunnerId


class NodeTimedOut(BaseEvent):
    """A node stopped responding and was timed out."""

    node_id: NodeId


# TODO: bikeshed this name
class NodeGatheredInfo(BaseEvent):
    """A node published freshly gathered system information."""

    node_id: NodeId
    when: str  # this is a manually cast datetime overrode by the master when the event is indexed, rather than the local time on the device
    info: GatheredInfo


class NodeDownloadProgress(BaseEvent):
    """A node reported progress for a model download."""

    download_progress: DownloadProgress


class ChunkGenerated(BaseEvent):
    """A generation chunk was produced for the given command."""

    command_id: CommandId
    chunk: GenerationChunk


class InputChunkReceived(BaseEvent):
    """An input image chunk arrived for the given command."""

    command_id: CommandId
    chunk: InputImageChunk


class TopologyEdgeCreated(BaseEvent):
    """A new connection appeared in the topology."""

    conn: Connection


class TopologyEdgeDeleted(BaseEvent):
    """A connection disappeared from the topology."""

    conn: Connection


@final
class TraceEventData(FrozenModel):
    """A single profiling trace span."""

    name: str
    start_us: int
    duration_us: int
    rank: int
    category: str


@final
class TracesCollected(BaseEvent):
    """One rank's trace spans for a task."""

    task_id: TaskId
    rank: int
    traces: list[TraceEventData]


@final
class TracesMerged(BaseEvent):
    """All ranks' trace spans merged for a task."""

    task_id: TaskId
    traces: list[TraceEventData]


# Union of every event type flowing through the system.
Event = (
    TestEvent
    | TaskCreated
    | TaskStatusUpdated
    | TaskFailed
    | TaskDeleted
    | TaskAcknowledged
    | InstanceCreated
    | InstanceDeleted
    | RunnerStatusUpdated
    | RunnerDeleted
    | NodeTimedOut
    | NodeGatheredInfo
    | NodeDownloadProgress
    | ChunkGenerated
    | InputChunkReceived
    | TopologyEdgeCreated
    | TopologyEdgeDeleted
    | TracesCollected
    | TracesMerged
)


class IndexedEvent(CamelCaseModel):
    """An event indexed by the master, with a globally unique index"""

    idx: int = Field(ge=0)
    event: Event


class GlobalForwarderEvent(CamelCaseModel):
    """An event the forwarder will serialize and send over the network"""

    origin_idx: int = Field(ge=0)
    origin: NodeId
    session: SessionId
    event: Event


class LocalForwarderEvent(CamelCaseModel):
    """An event the forwarder will serialize and send over the network"""

    origin_idx: int = Field(ge=0)
    origin: SystemId
    session: SessionId
    event: Event
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/events.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/memory.py | from math import ceil
from typing import Self, overload
from exo.utils.pydantic_ext import FrozenModel
class Memory(FrozenModel):
    """An amount of memory, stored canonically as a byte count.

    All conversions use binary (1024-based) units: KiB, MiB, GiB.
    Arithmetic and comparisons work between Memory instances; multiplying
    or dividing by a scalar scales the quantity.
    """

    in_bytes: int = 0

    @classmethod
    def from_bytes(cls, val: int) -> Self:
        """Construct a new Memory object from a number of bytes"""
        return cls(in_bytes=val)

    @property
    def in_kb(self) -> int:
        """The approximate kilobytes this memory represents, rounded up."""
        return ceil(self.in_bytes / 1024)

    @in_kb.setter
    def in_kb(self, val: int):
        """Set this memory's value in kilobytes.

        NOTE(review): FrozenModel is presumably immutable, so assignment via
        this setter (and the others below) is expected to raise at runtime;
        they are kept for interface compatibility -- confirm before use.
        """
        self.in_bytes = val * 1024

    @classmethod
    def from_kb(cls, val: int) -> Self:
        """Construct a new Memory object from a number of kilobytes"""
        return cls(in_bytes=val * 1024)

    @classmethod
    def from_float_kb(cls, val: float) -> Self:
        """Construct a new Memory object from a number of kilobytes, rounding where appropriate"""
        return cls(in_bytes=round(val * 1024))

    @property
    def in_mb(self) -> int:
        """The approximate megabytes this memory represents, rounded to nearest MB."""
        return round(self.in_bytes / (1024**2))

    @in_mb.setter
    def in_mb(self, val: int):
        """Set the megabytes for this memory. See the NOTE on ``in_kb``."""
        self.in_bytes = val * (1024**2)

    @property
    def in_float_mb(self) -> float:
        """The megabytes this memory represents as a float."""
        return self.in_bytes / (1024**2)

    @in_float_mb.setter
    def in_float_mb(self, val: float):
        """Set the megabytes for this memory, rounded to the nearest byte. See the NOTE on ``in_kb``."""
        self.in_bytes = round(val * (1024**2))

    @classmethod
    def from_mb(cls, val: float) -> Self:
        """Construct a new Memory object from a number of megabytes"""
        return cls(in_bytes=round(val * (1024**2)))

    @classmethod
    def from_gb(cls, val: float) -> Self:
        """Construct a new Memory object from a number of gigabytes"""
        # (docstring fixed: previously said "megabytes")
        return cls(in_bytes=round(val * (1024**3)))

    @property
    def in_gb(self) -> float:
        """The approximate gigabytes this memory represents."""
        return self.in_bytes / (1024**3)

    def __add__(self, other: object) -> "Memory":
        if isinstance(other, Memory):
            return Memory.from_bytes(self.in_bytes + other.in_bytes)
        return NotImplemented

    def __radd__(self, other: object) -> "Memory":
        # Supports sum(iterable_of_memory), which starts from int 0.
        if other == 0:
            return self
        return NotImplemented

    def __sub__(self, other: object) -> "Memory":
        if isinstance(other, Memory):
            return Memory.from_bytes(self.in_bytes - other.in_bytes)
        return NotImplemented

    def __mul__(self, other: int | float):
        return Memory.from_bytes(round(self.in_bytes * other))

    def __rmul__(self, other: int | float):
        return self * other

    @overload
    def __truediv__(self, other: "Memory") -> float: ...
    @overload
    def __truediv__(self, other: int) -> "Memory": ...
    @overload
    def __truediv__(self, other: float) -> "Memory": ...
    def __truediv__(self, other: object) -> "Memory | float":
        # Memory / Memory is a dimensionless ratio; Memory / scalar scales.
        if isinstance(other, Memory):
            return self.in_bytes / other.in_bytes
        if isinstance(other, (int, float)):
            return Memory.from_bytes(round(self.in_bytes / other))
        return NotImplemented

    def __floordiv__(self, other: object) -> "Memory":
        if isinstance(other, (int, float)):
            return Memory.from_bytes(int(self.in_bytes // other))
        return NotImplemented

    def __lt__(self, other: object) -> bool:
        if isinstance(other, Memory):
            return self.in_bytes < other.in_bytes
        return NotImplemented

    def __le__(self, other: object) -> bool:
        if isinstance(other, Memory):
            return self.in_bytes <= other.in_bytes
        return NotImplemented

    def __gt__(self, other: object) -> bool:
        if isinstance(other, Memory):
            return self.in_bytes > other.in_bytes
        return NotImplemented

    def __ge__(self, other: object) -> bool:
        if isinstance(other, Memory):
            return self.in_bytes >= other.in_bytes
        return NotImplemented

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Memory):
            return self.in_bytes == other.in_bytes
        return NotImplemented

    # Defining __eq__ implicitly sets __hash__ to None; restore hashability
    # for this frozen value type, consistent with __eq__.
    def __hash__(self) -> int:
        return hash(self.in_bytes)

    def __repr__(self) -> str:
        return f"Memory.from_bytes({self.in_bytes})"

    def __str__(self) -> str:
        # Pick the largest unit whose magnitude is comfortably > 1.
        if self.in_gb > 2:
            val: float = self.in_gb
            unit = "GiB"
        elif self.in_mb > 2:
            val = self.in_mb
            unit = "MiB"
        elif self.in_kb > 3:
            val = self.in_kb
            unit = "KiB"
        else:
            val = self.in_bytes
            unit = "B"
        # Trim trailing zeros (and a dangling decimal point) from the number
        # only, then append the unit exactly once.  Previously the unit was
        # inside the rstrip'd string too, so it was emitted twice
        # (e.g. "5.00 GiB GiB") and the zeros were never trimmed.
        return f"{val:.2f}".rstrip("0").rstrip(".") + f" {unit}"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/memory.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/types/multiaddr.py | import re
from typing import ClassVar
from pydantic import BaseModel, ConfigDict, computed_field, field_validator
class Multiaddr(BaseModel):
    """A subset of libp2p-style multiaddrs, e.g. ``/ip4/1.2.3.4/tcp/4001``.

    Supports ip4, ip6 and dns/dns4/dns6 addresses with optional
    ``/tcp/<port>`` and ``/p2p/<peer-id>`` suffixes.
    """

    model_config = ConfigDict(frozen=True)

    address: str

    # Accepted address shapes; group 1 is the host part in each pattern.
    PATTERNS: ClassVar[list[str]] = [
        r"^/ip4/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/tcp/(\d{1,5}))?(/p2p/[A-Za-z0-9]+)?$",
        r"^/ip6/([0-9a-fA-F:]+)(/tcp/(\d{1,5}))?(/p2p/[A-Za-z0-9]+)?$",
        r"^/dns[46]?/([a-zA-Z0-9.-]+)(/tcp/(\d{1,5}))?(/p2p/[A-Za-z0-9]+)?$",
    ]

    @field_validator("address")
    @classmethod
    def validate_format(cls, v: str) -> str:
        """Reject strings that match none of the supported patterns."""
        if not any(re.match(pattern, v) for pattern in cls.PATTERNS):
            raise ValueError(
                f"Invalid multiaddr format: {v}. "
                "Expected format like /ip4/127.0.0.1/tcp/4001 or /dns/example.com/tcp/443"
            )
        return v

    @computed_field
    @property
    def address_type(self) -> str:
        """The protocol segment of the first matching pattern.

        NOTE(review): this returns the raw pattern text, so for DNS
        addresses it yields the literal string "dns[46]?" rather than
        "dns"/"dns4"/"dns6" -- confirm whether any caller depends on that
        before changing it.
        """
        for pattern in self.PATTERNS:
            if re.match(pattern, self.address):
                return pattern.split("/")[1]
        raise ValueError(f"Invalid multiaddr format: {self.address}")

    @property
    def ipv6_address(self) -> str:
        """The host part of an ip6 address; raises for other address types."""
        match = re.match(r"^/ip6/([0-9a-fA-F:]+)", self.address)
        if not match:
            raise ValueError(
                f"Invalid multiaddr format: {self.address}. Expected format like /ip6/::1/tcp/4001"
            )
        return match.group(1)

    @property
    def ipv4_address(self) -> str:
        """The host part of an ip4 address; raises for other address types."""
        match = re.match(r"^/ip4/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", self.address)
        if not match:
            raise ValueError(
                f"Invalid multiaddr format: {self.address}. Expected format like /ip4/127.0.0.1/tcp/4001"
            )
        return match.group(1)

    @computed_field
    @property
    def ip_address(self) -> str:
        # NOTE(review): for dns addresses this falls through to ipv6_address
        # and raises -- presumably dns multiaddrs never reach this property.
        return self.ipv4_address if self.address_type == "ip4" else self.ipv6_address

    @computed_field
    @property
    def port(self) -> int:
        """The tcp port; raises when no /tcp/ component is present."""
        match = re.search(r"/tcp/(\d{1,5})", self.address)
        if not match:
            raise ValueError(
                f"Invalid multiaddr format: {self.address}. Expected format like /ip4/127.0.0.1/tcp/4001"
            )
        return int(match.group(1))

    def __str__(self) -> str:
        return self.address
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/multiaddr.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/shared/types/profiling.py | import shutil
from collections.abc import Sequence
from pathlib import Path
from typing import Literal, Self
import psutil
from exo.shared.types.memory import Memory
from exo.shared.types.thunderbolt import ThunderboltIdentifier
from exo.utils.pydantic_ext import CamelCaseModel
class MemoryUsage(CamelCaseModel):
    """RAM and swap totals/availability for a node."""

    ram_total: Memory
    ram_available: Memory
    swap_total: Memory
    swap_available: Memory

    @classmethod
    def from_bytes(
        cls, *, ram_total: int, ram_available: int, swap_total: int, swap_available: int
    ) -> Self:
        """Build a MemoryUsage from raw byte counts."""
        return cls(
            ram_total=Memory.from_bytes(ram_total),
            ram_available=Memory.from_bytes(ram_available),
            swap_total=Memory.from_bytes(swap_total),
            swap_available=Memory.from_bytes(swap_available),
        )

    @classmethod
    def from_psutil(cls, *, override_memory: int | None) -> Self:
        """Snapshot current usage via psutil.

        override_memory, when given, replaces the reported available RAM
        (in bytes).
        """
        vm = psutil.virtual_memory()
        sm = psutil.swap_memory()
        return cls.from_bytes(
            ram_total=vm.total,
            ram_available=vm.available if override_memory is None else override_memory,
            swap_total=sm.total,
            swap_available=sm.free,
        )


class DiskUsage(CamelCaseModel):
    """Disk space usage for the models directory."""

    total: Memory
    available: Memory

    @classmethod
    def from_path(cls, path: Path) -> Self:
        """Get disk usage stats for the partition containing path."""
        total, _used, free = shutil.disk_usage(path)
        return cls(
            total=Memory.from_bytes(total),
            available=Memory.from_bytes(free),
        )


class SystemPerformanceProfile(CamelCaseModel):
    """Instantaneous performance counters for a node."""

    # TODO: flops_fp16: float
    gpu_usage: float = 0.0
    temp: float = 0.0  # temperature reading
    sys_power: float = 0.0  # system power draw
    pcpu_usage: float = 0.0  # performance-core CPU usage
    ecpu_usage: float = 0.0  # efficiency-core CPU usage


# Classification of a network interface.
InterfaceType = Literal["wifi", "ethernet", "maybe_ethernet", "thunderbolt", "unknown"]


class NetworkInterfaceInfo(CamelCaseModel):
    """One network interface on a node."""

    name: str
    ip_address: str
    interface_type: InterfaceType = "unknown"


class NodeIdentity(CamelCaseModel):
    """Static and slow-changing node identification data."""

    model_id: str = "Unknown"
    chip_id: str = "Unknown"
    friendly_name: str = "Unknown"
    os_version: str = "Unknown"
    os_build_version: str = "Unknown"


class NodeNetworkInfo(CamelCaseModel):
    """Network interface information for a node."""

    interfaces: Sequence[NetworkInterfaceInfo] = []


class NodeThunderboltInfo(CamelCaseModel):
    """Thunderbolt interface identifiers for a node."""

    interfaces: Sequence[ThunderboltIdentifier] = []


class NodeRdmaCtlStatus(CamelCaseModel):
    """Whether RDMA is enabled on this node (via rdma_ctl)."""

    enabled: bool


class ThunderboltBridgeStatus(CamelCaseModel):
    """Whether the Thunderbolt Bridge network service is enabled on this node."""

    enabled: bool
    exists: bool
    service_name: str | None = None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/profiling.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/state.py | from collections.abc import Mapping, Sequence
from datetime import datetime
from typing import Any, cast
from pydantic import ConfigDict, Field, field_serializer, field_validator
from pydantic.alias_generators import to_camel
from exo.shared.topology import Topology, TopologySnapshot
from exo.shared.types.common import NodeId
from exo.shared.types.profiling import (
DiskUsage,
MemoryUsage,
NodeIdentity,
NodeNetworkInfo,
NodeRdmaCtlStatus,
NodeThunderboltInfo,
SystemPerformanceProfile,
ThunderboltBridgeStatus,
)
from exo.shared.types.tasks import Task, TaskId
from exo.shared.types.worker.downloads import DownloadProgress
from exo.shared.types.worker.instances import Instance, InstanceId
from exo.shared.types.worker.runners import RunnerId, RunnerStatus
from exo.utils.pydantic_ext import CamelCaseModel
class State(CamelCaseModel):
    """Global system state.

    The :class:`Topology` instance is encoded/decoded via an immutable
    :class:`~shared.topology.TopologySnapshot` to ensure compatibility with
    standard JSON serialisation.
    """

    model_config = ConfigDict(
        alias_generator=to_camel,
        validate_by_name=True,
        extra="forbid",
        # I want to reenable this ASAP, but it's causing an issue with TaskStatus
        strict=True,
        arbitrary_types_allowed=True,
    )

    instances: Mapping[InstanceId, Instance] = {}
    runners: Mapping[RunnerId, RunnerStatus] = {}
    downloads: Mapping[NodeId, Sequence[DownloadProgress]] = {}
    tasks: Mapping[TaskId, Task] = {}
    last_seen: Mapping[NodeId, datetime] = {}
    topology: Topology = Field(default_factory=Topology)
    # Index of the last event folded into this state; -1 means none yet.
    last_event_applied_idx: int = Field(default=-1, ge=-1)
    # Granular node state mappings (update independently at different frequencies)
    node_identities: Mapping[NodeId, NodeIdentity] = {}
    node_memory: Mapping[NodeId, MemoryUsage] = {}
    node_disk: Mapping[NodeId, DiskUsage] = {}
    node_system: Mapping[NodeId, SystemPerformanceProfile] = {}
    node_network: Mapping[NodeId, NodeNetworkInfo] = {}
    node_thunderbolt: Mapping[NodeId, NodeThunderboltInfo] = {}
    node_thunderbolt_bridge: Mapping[NodeId, ThunderboltBridgeStatus] = {}
    node_rdma_ctl: Mapping[NodeId, NodeRdmaCtlStatus] = {}
    # Detected cycles where all nodes have Thunderbolt bridge enabled (>2 nodes)
    thunderbolt_bridge_cycles: Sequence[Sequence[NodeId]] = []

    @field_serializer("topology", mode="plain")
    def _encode_topology(self, value: Topology) -> TopologySnapshot:
        """Serialise the live Topology as an immutable snapshot."""
        return value.to_snapshot()

    @field_validator("topology", mode="before")
    @classmethod
    def _deserialize_topology(cls, value: object) -> Topology:  # noqa: D401 – Pydantic validator signature
        """Convert an incoming *value* into a :class:`Topology` instance.

        Accepts either an already constructed :class:`Topology` or a mapping
        representing :class:`~shared.topology.TopologySnapshot`.
        """
        if isinstance(value, Topology):
            return value
        if isinstance(value, Mapping):  # likely a snapshot-dict coming from JSON
            snapshot = TopologySnapshot(**cast(dict[str, Any], value))  # type: ignore[arg-type]
            return Topology.from_snapshot(snapshot)
        raise TypeError("Invalid representation for Topology field in State")
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/state.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/tasks.py | from enum import Enum
from pydantic import Field
from exo.shared.types.api import (
ImageEditsTaskParams,
ImageGenerationTaskParams,
)
from exo.shared.types.common import CommandId, Id
from exo.shared.types.text_generation import TextGenerationTaskParams
from exo.shared.types.worker.instances import BoundInstance, InstanceId
from exo.shared.types.worker.runners import RunnerId
from exo.shared.types.worker.shards import ShardMetadata
from exo.utils.pydantic_ext import TaggedModel
class TaskId(Id):
    """Identifier for a task."""

    pass


class TaskStatus(str, Enum):
    """Lifecycle states of a task."""

    Pending = "Pending"
    Running = "Running"
    Complete = "Complete"
    TimedOut = "TimedOut"
    Failed = "Failed"
    Cancelled = "Cancelled"


class BaseTask(TaggedModel):
    """Base class for all tasks; fresh id and Pending status by default."""

    task_id: TaskId = Field(default_factory=TaskId)
    task_status: TaskStatus = Field(default=TaskStatus.Pending)
    instance_id: InstanceId


class CreateRunner(BaseTask):  # emitted by Worker
    """Spawn a runner process for the bound instance."""

    bound_instance: BoundInstance


class DownloadModel(BaseTask):  # emitted by Worker
    """Download the model files for the given shard."""

    shard_metadata: ShardMetadata


class LoadModel(BaseTask):  # emitted by Worker
    """Load model weights into the runner."""

    pass


class ConnectToGroup(BaseTask):  # emitted by Worker
    """Join the distributed communication group."""

    pass


class StartWarmup(BaseTask):  # emitted by Worker
    """Run a warmup pass on the loaded model."""

    pass


class TextGeneration(BaseTask):  # emitted by Master
    """Run a text generation request."""

    command_id: CommandId
    task_params: TextGenerationTaskParams
    error_type: str | None = Field(default=None)
    error_message: str | None = Field(default=None)


class CancelTask(BaseTask):
    """Cancel another task on a specific runner."""

    cancelled_task_id: TaskId
    runner_id: RunnerId


class ImageGeneration(BaseTask):  # emitted by Master
    """Run an image generation request."""

    command_id: CommandId
    task_params: ImageGenerationTaskParams
    error_type: str | None = Field(default=None)
    error_message: str | None = Field(default=None)


class ImageEdits(BaseTask):  # emitted by Master
    """Run an image-editing request."""

    command_id: CommandId
    task_params: ImageEditsTaskParams
    error_type: str | None = Field(default=None)
    error_message: str | None = Field(default=None)


class Shutdown(BaseTask):  # emitted by Worker
    """Shut down the given runner."""

    runner_id: RunnerId


# Union of every task type.
Task = (
    CreateRunner
    | DownloadModel
    | ConnectToGroup
    | LoadModel
    | StartWarmup
    | TextGeneration
    | CancelTask
    | ImageGeneration
    | ImageEdits
    | Shutdown
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/tasks.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/topology.py | from collections.abc import Iterator
from dataclasses import dataclass
from exo.shared.types.common import NodeId
from exo.shared.types.multiaddr import Multiaddr
from exo.utils.pydantic_ext import FrozenModel
@dataclass(frozen=True)
class Cycle:
    """An ordered sequence of node ids forming a cycle in the topology."""

    node_ids: list[NodeId]

    def __len__(self) -> int:
        # Delegate to the underlying list.
        return len(self.node_ids)

    def __iter__(self) -> Iterator[NodeId]:
        # Yield nodes in cycle order.
        return iter(self.node_ids)
class RDMAConnection(FrozenModel):
    """A directed RDMA link identified by interface names on each end."""

    source_rdma_iface: str
    sink_rdma_iface: str


class SocketConnection(FrozenModel):
    """A directed TCP/IP link addressed by the sink's multiaddr."""

    sink_multiaddr: Multiaddr

    def __hash__(self):
        # Hash only the sink IP: connections to the same host hash alike even
        # when ports differ (equality still compares the full multiaddr, so
        # this is a deliberate collision, not an equivalence).
        return hash(self.sink_multiaddr.ip_address)


class Connection(FrozenModel):
    """A directed edge in the topology between two nodes."""

    source: NodeId
    sink: NodeId
    edge: RDMAConnection | SocketConnection
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/topology.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/worker/downloads.py | from datetime import timedelta
from typing import Literal
from pydantic import BaseModel, ConfigDict, Field, PositiveInt
from exo.shared.types.common import NodeId
from exo.shared.types.memory import Memory
from exo.shared.types.worker.shards import ShardMetadata
from exo.utils.pydantic_ext import CamelCaseModel, TaggedModel
class DownloadProgressData(CamelCaseModel):
    """Progress counters for a download, with a per-file breakdown."""

    total: Memory
    downloaded: Memory
    downloaded_this_session: Memory
    completed_files: int
    total_files: int
    speed: float  # NOTE(review): presumably bytes/sec -- confirm with producer
    eta_ms: int  # estimated time remaining, milliseconds
    files: dict[str, "DownloadProgressData"]  # keyed by file path


class BaseDownloadProgress(TaggedModel):
    """Base class for per-shard, per-node download progress reports."""

    node_id: NodeId
    shard_metadata: ShardMetadata
    model_directory: str = ""


class DownloadPending(BaseDownloadProgress):
    """Download queued but not yet started."""

    downloaded: Memory = Memory()
    total: Memory = Memory()


class DownloadCompleted(BaseDownloadProgress):
    """Download finished successfully."""

    total: Memory
    read_only: bool = False


class DownloadFailed(BaseDownloadProgress):
    """Download failed with an error message."""

    error_message: str


class DownloadOngoing(BaseDownloadProgress):
    """Download currently in progress."""

    download_progress: DownloadProgressData


# Union of all download progress states.
DownloadProgress = (
    DownloadPending | DownloadCompleted | DownloadFailed | DownloadOngoing
)


class ModelSafetensorsIndexMetadata(BaseModel):
    """Metadata section of a model.safetensors.index.json file."""

    total_size: PositiveInt


class ModelSafetensorsIndex(BaseModel):
    """Parsed model.safetensors.index.json: metadata plus weight-to-file map."""

    metadata: ModelSafetensorsIndexMetadata | None
    weight_map: dict[str, str]


class FileListEntry(BaseModel):
    """One entry from a repository file listing."""

    type: Literal["file", "directory"]
    path: str
    size: int | None = None  # bytes; None for directories


class RepoFileDownloadProgress(BaseModel):
    """Progress of a single file within a repository download."""

    repo_id: str
    repo_revision: str
    file_path: str
    downloaded: Memory
    downloaded_this_session: Memory
    total: Memory
    speed: float  # NOTE(review): presumably bytes/sec -- confirm with producer
    eta: timedelta
    status: Literal["not_started", "in_progress", "complete"]
    start_time: float  # timestamp when this file's download began
    model_config = ConfigDict(frozen=True)


class RepoDownloadProgress(BaseModel):
    """Aggregate progress of a repository (shard) download."""

    repo_id: str
    repo_revision: str
    shard: ShardMetadata
    completed_files: int
    total_files: int
    downloaded: Memory
    downloaded_this_session: Memory
    total: Memory
    overall_speed: float
    overall_eta: timedelta
    status: Literal["not_started", "in_progress", "complete"]
    file_progress: dict[str, RepoFileDownloadProgress] = Field(default_factory=dict)
    model_config = ConfigDict(frozen=True)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/worker/downloads.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/worker/instances.py | from enum import Enum
from pydantic import model_validator
from exo.shared.models.model_cards import ModelTask
from exo.shared.types.common import Host, Id, NodeId
from exo.shared.types.worker.runners import RunnerId, ShardAssignments, ShardMetadata
from exo.utils.pydantic_ext import CamelCaseModel, TaggedModel
class InstanceId(Id):
pass
class InstanceMeta(str, Enum):
MlxRing = "MlxRing"
MlxJaccl = "MlxJaccl"
class BaseInstance(TaggedModel):
    """Fields common to every instance variant."""

    instance_id: InstanceId
    shard_assignments: ShardAssignments

    def shard(self, runner_id: RunnerId) -> ShardMetadata | None:
        """Return the shard assigned to *runner_id*, or None when unassigned."""
        assignments = self.shard_assignments.runner_to_shard
        return assignments.get(runner_id)
class MlxRingInstance(BaseInstance):
    # Candidate hosts for each node participating in the ring.
    hosts_by_node: dict[NodeId, list[Host]]
    ephemeral_port: int


class MlxJacclInstance(BaseInstance):
    # Device identifiers per rank; None marks an empty slot — semantics
    # defined by the JACCL setup (verify against producer).
    jaccl_devices: list[list[str | None]]
    # Coordinator address per node.
    jaccl_coordinators: dict[NodeId, str]


# TODO: Single node instance
Instance = MlxRingInstance | MlxJacclInstance
class BoundInstance(CamelCaseModel):
    """An Instance paired with the runner/node it is bound to."""

    instance: Instance
    bound_runner_id: RunnerId
    bound_node_id: NodeId

    @property
    def bound_shard(self) -> ShardMetadata:
        # Safe to assert: validate_shard_exists guarantees the runner id
        # appears in the instance's shard assignments.
        shard = self.instance.shard(self.bound_runner_id)
        assert shard is not None
        return shard

    @property
    def is_image_model(self) -> bool:
        # True when the bound shard's model card lists an image-generation task.
        return (
            ModelTask.TextToImage in self.bound_shard.model_card.tasks
            or ModelTask.ImageToImage in self.bound_shard.model_card.tasks
        )

    @model_validator(mode="after")
    def validate_shard_exists(self) -> "BoundInstance":
        # Reject construction with a runner id that has no assigned shard.
        assert (
            self.bound_runner_id in self.instance.shard_assignments.runner_to_shard
        ), (
            "Bound Instance must be constructed with a runner_id that is in the instances assigned shards"
        )
        return self
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/worker/instances.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/worker/runner_response.py | from collections.abc import Generator
from typing import Any, Literal
from exo.shared.types.api import (
FinishReason,
GenerationStats,
ImageGenerationStats,
ToolCallItem,
TopLogprobItem,
Usage,
)
from exo.utils.pydantic_ext import TaggedModel
class BaseRunnerResponse(TaggedModel):
    # Root of the runner-response tagged union.
    pass


class TokenizedResponse(BaseRunnerResponse):
    # Emitted after tokenizing the prompt, before generation starts.
    prompt_tokens: int


class GenerationResponse(BaseRunnerResponse):
    """A single generated token plus optional logprob/stats metadata."""

    text: str
    token: int
    logprob: float | None = None
    top_logprobs: list[TopLogprobItem] | None = None
    finish_reason: FinishReason | None = None
    stats: GenerationStats | None = None
    usage: Usage | None
    is_thinking: bool = False


class ImageGenerationResponse(BaseRunnerResponse):
    """A completed generated image."""

    image_data: bytes
    format: Literal["png", "jpeg", "webp"] = "png"
    stats: ImageGenerationStats | None = None
    image_index: int = 0

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        # Summarize the image bytes in reprs instead of dumping the payload.
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "image_data":
                yield name, f"<{len(self.image_data)} bytes>"
            elif name is not None:
                yield name, value


class PartialImageResponse(BaseRunnerResponse):
    """An intermediate (progressive) image preview."""

    image_data: bytes
    format: Literal["png", "jpeg", "webp"] = "png"
    partial_index: int
    total_partials: int
    image_index: int = 0

    def __repr_args__(self) -> Generator[tuple[str, Any], None, None]:
        # Summarize the image bytes in reprs instead of dumping the payload.
        for name, value in super().__repr_args__():  # pyright: ignore[reportAny]
            if name == "image_data":
                yield name, f"<{len(self.image_data)} bytes>"
            elif name is not None:
                yield name, value


class ToolCallResponse(BaseRunnerResponse):
    # Tool calls produced by the model.
    tool_calls: list[ToolCallItem]
    usage: Usage | None
    stats: GenerationStats | None = None


class FinishedResponse(BaseRunnerResponse):
    # Marks the end of a response stream.
    pass


class PrefillProgressResponse(BaseRunnerResponse):
    # Progress update while processing the prompt (prefill phase).
    processed_tokens: int
    total_tokens: int
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/worker/runner_response.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/worker/runners.py | from collections.abc import Mapping
from pydantic import model_validator
from exo.shared.models.model_cards import ModelId
from exo.shared.types.common import Id, NodeId
from exo.shared.types.worker.shards import ShardMetadata
from exo.utils.pydantic_ext import CamelCaseModel, TaggedModel
class RunnerId(Id):
    # Distinct Id subtype for runners.
    pass


class RunnerError(Exception):
    # Raised for runner-level failures.
    pass


class BaseRunnerStatus(TaggedModel):
    def is_running(self) -> bool:
        # Convenience predicate so call sites avoid isinstance checks.
        return isinstance(self, RunnerRunning)


# Lifecycle states, roughly in the order a runner moves through them.
class RunnerIdle(BaseRunnerStatus):
    pass


class RunnerConnecting(BaseRunnerStatus):
    pass


class RunnerConnected(BaseRunnerStatus):
    pass


class RunnerLoading(BaseRunnerStatus):
    # Progress while loading model weights, layer by layer.
    layers_loaded: int = 0
    total_layers: int = 0


class RunnerLoaded(BaseRunnerStatus):
    pass


class RunnerWarmingUp(BaseRunnerStatus):
    pass


class RunnerReady(BaseRunnerStatus):
    pass


class RunnerRunning(BaseRunnerStatus):
    pass


class RunnerShuttingDown(BaseRunnerStatus):
    pass


class RunnerShutdown(BaseRunnerStatus):
    pass


class RunnerFailed(BaseRunnerStatus):
    error_message: str | None = None


# Tagged union over every runner lifecycle state.
RunnerStatus = (
    RunnerIdle
    | RunnerConnecting
    | RunnerConnected
    | RunnerLoading
    | RunnerLoaded
    | RunnerWarmingUp
    | RunnerReady
    | RunnerRunning
    | RunnerShuttingDown
    | RunnerShutdown
    | RunnerFailed
)
class ShardAssignments(CamelCaseModel):
    """Mapping of a model's shards onto runners, and of nodes onto runners."""

    model_id: ModelId
    runner_to_shard: Mapping[RunnerId, ShardMetadata]
    node_to_runner: Mapping[NodeId, RunnerId]

    @model_validator(mode="after")
    def validate_runners_exist(self) -> "ShardAssignments":
        # Every runner referenced by a node must have a shard assigned.
        for runner_id in self.node_to_runner.values():
            if runner_id not in self.runner_to_shard:
                raise ValueError(
                    f"Runner {runner_id} in node_to_runner does not exist in runner_to_shard"
                )
        return self
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/worker/runners.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/shared/types/worker/shards.py | from enum import Enum
from typing import TypeAlias, final
from pydantic import Field
from exo.shared.models.model_cards import ModelCard
from exo.utils.pydantic_ext import TaggedModel
class Sharding(str, Enum):
    # Parallelism strategy used to split a model across devices.
    Tensor = "Tensor"
    Pipeline = "Pipeline"


class BaseShardMetadata(TaggedModel):
    """
    Defines a specific shard of the model that is ready to be run on a device.
    Replaces previous `Shard` object.
    """

    model_card: ModelCard
    device_rank: int
    world_size: int
    # Error handling; equivalent to monkey-patch, but we can't monkey-patch runner.py
    # This is kinda annoying because it allocates memory in the ShardMetadata object. Can be rethought after Shanghai.
    immediate_exception: bool = False
    should_timeout: float | None = None
    start_layer: int = Field(ge=0)
    end_layer: int = Field(ge=0)
    n_layers: int = Field(ge=0)

    @property
    def is_first_layer(self) -> bool:
        # Shard contains the model's first layer.
        return self.start_layer == 0

    @property
    def is_last_layer(self) -> bool:
        # Shard reaches the model's final layer (end_layer is exclusive).
        return self.end_layer == self.n_layers

    def __hash__(self) -> int:
        # Hash only the identity-defining fields, not the error-injection knobs.
        return hash(
            (
                self.model_card.model_id,
                self.start_layer,
                self.end_layer,
                self.n_layers,
                self.device_rank,
                self.world_size,
            )
        )


@final
class PipelineShardMetadata(BaseShardMetadata):
    """
    Pipeline parallelism shard meta.
    Layers are represented as a half-open interval [start_layer, end_layer),
    where start_layer is inclusive and end_layer is exclusive.
    """


@final
class CfgShardMetadata(BaseShardMetadata):
    """Shard metadata for CFG-parallel image generation models."""

    cfg_rank: int  # 0 = positive branch, 1 = negative branch
    cfg_world_size: int = 2
    # Pipeline-relative coordinates (computed at placement time)
    pipeline_rank: int  # rank within the pipeline group (0, 1, 2, ...)
    pipeline_world_size: int  # number of nodes per pipeline group


@final
class TensorShardMetadata(BaseShardMetadata):
    # Tensor-parallel shard; no extra fields beyond the base.
    pass


ShardMetadata: TypeAlias = (
    PipelineShardMetadata | CfgShardMetadata | TensorShardMetadata
)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/shared/types/worker/shards.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/banner.py | import logging
import os
import sys
import webbrowser
from exo.shared.constants import EXO_CONFIG_HOME
logger = logging.getLogger(__name__)

# Marker file whose existence records that the dashboard was opened once.
_FIRST_RUN_MARKER = EXO_CONFIG_HOME / ".dashboard_opened"


def _is_first_run() -> bool:
    # First run <=> the marker file has not been written yet.
    return not _FIRST_RUN_MARKER.exists()


def _mark_first_run_done() -> None:
    # Create parent directories as needed; touch is idempotent.
    _FIRST_RUN_MARKER.parent.mkdir(parents=True, exist_ok=True)
    _FIRST_RUN_MARKER.touch()
def print_startup_banner(port: int) -> None:
    """Print the EXO startup banner to stderr and, on the very first run,
    auto-open the dashboard URL in the default browser.
    """
    dashboard_url = f"http://localhost:{port}"
    first_run = _is_first_run()
    banner = f"""
╔═══════════════════════════════════════════════════════════════════════╗
║ ║
║ ███████╗██╗ ██╗ ██████╗ ║
║ ██╔════╝╚██╗██╔╝██╔═══██╗ ║
║ █████╗ ╚███╔╝ ██║ ██║ ║
║ ██╔══╝ ██╔██╗ ██║ ██║ ║
║ ███████╗██╔╝ ██╗╚██████╔╝ ║
║ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ║
║ ║
║ Distributed AI Inference Cluster ║
║ ║
╚═══════════════════════════════════════════════════════════════════════╝
╔═══════════════════════════════════════════════════════════════════════╗
║ ║
║ 🌐 Dashboard & API Ready ║
║ ║
║ {dashboard_url}{" " * (69 - len(dashboard_url))}║
║ ║
║ Click the URL above to open the dashboard in your browser ║
║ ║
╚═══════════════════════════════════════════════════════════════════════╝
"""
    # stderr so the banner never pollutes piped stdout output.
    print(banner, file=sys.stderr)
    if first_run:
        # Skip browser open when running inside the native macOS app —
        # FirstLaunchPopout.swift handles the auto-open with a countdown.
        if not os.environ.get("EXO_RUNTIME_DIR"):
            try:
                webbrowser.open(dashboard_url)
                logger.info("First run detected — opening dashboard in browser")
            except Exception:
                # Best effort: a missing/failed browser must not break startup.
                logger.debug("Could not auto-open browser", exc_info=True)
        _mark_first_run_done()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/banner.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
exo-explore/exo:src/exo/utils/channels.py | import contextlib
import multiprocessing as mp
from dataclasses import dataclass, field
from math import inf
from multiprocessing.synchronize import Event
from queue import Empty, Full
from types import TracebackType
from typing import Any, Self
from anyio import (
CapacityLimiter,
ClosedResourceError,
EndOfStream,
WouldBlock,
to_thread,
)
from anyio.streams.memory import (
MemoryObjectReceiveStream as AnyioReceiver,
)
from anyio.streams.memory import (
MemoryObjectSendStream as AnyioSender,
)
from anyio.streams.memory import (
MemoryObjectStreamState as AnyioState,
)
class Sender[T](AnyioSender[T]):
    """In-process send side; extends the anyio stream with cross-cloning."""

    def clone(self) -> "Sender[T]":
        # New sender sharing the same stream state.
        if self._closed:
            raise ClosedResourceError
        return Sender(_state=self._state)

    def clone_receiver(self) -> "Receiver[T]":
        """Constructs a Receiver using a Senders shared state - similar to calling Receiver.clone() without needing the receiver"""
        if self._closed:
            raise ClosedResourceError
        return Receiver(_state=self._state)


class Receiver[T](AnyioReceiver[T]):
    """In-process receive side with batch-draining helpers."""

    def clone(self) -> "Receiver[T]":
        if self._closed:
            raise ClosedResourceError
        return Receiver(_state=self._state)

    def clone_sender(self) -> Sender[T]:
        """Constructs a Sender using a Receivers shared state - similar to calling Sender.clone() without needing the sender"""
        if self._closed:
            raise ClosedResourceError
        return Sender(_state=self._state)

    def collect(self) -> list[T]:
        """Collect all currently available items from this receiver"""
        out: list[T] = []
        while True:
            try:
                item = self.receive_nowait()
                out.append(item)
            except WouldBlock:
                # Buffer drained.
                break
        return out

    async def receive_at_least(self, n: int) -> list[T]:
        # Block for the first item, then drain greedily; repeat until n items.
        out: list[T] = []
        out.append(await self.receive())
        out.extend(self.collect())
        while len(out) < n:
            out.append(await self.receive())
            out.extend(self.collect())
        return out

    def __enter__(self) -> Self:
        return self
class _MpEndOfStream:
    # Sentinel pushed onto the queue to signal end-of-stream to the reader.
    pass


class MpState[T]:
    """Shared state (queue + closed flag) behind an interprocess channel pair."""

    def __init__(self, max_buffer_size: float):
        if max_buffer_size == inf:
            # mp.Queue uses 0 to mean "unbounded".
            max_buffer_size = 0
        assert isinstance(max_buffer_size, int), (
            "State should only ever be constructed with an integer or math.inf size."
        )
        self.max_buffer_size: float = max_buffer_size
        self.buffer: mp.Queue[T | _MpEndOfStream] = mp.Queue(max_buffer_size)
        self.closed: Event = mp.Event()

    def __getstate__(self):
        # __orig_class__ (set when instantiated as MpState[X]) can break
        # pickling across processes; drop it from the pickled state.
        d = self.__dict__.copy()
        d.pop("__orig_class__", None)
        return d
@dataclass(eq=False)
class MpSender[T]:
    """
    An interprocess channel, mimicing the Anyio structure.
    It should be noted that none of the clone methods are implemented for simplicity, for now.
    """

    _state: MpState[T] = field()

    def send_nowait(self, item: T) -> None:
        """Enqueue without blocking; raises WouldBlock when the buffer is full."""
        if self._state.closed.is_set():
            raise ClosedResourceError
        try:
            self._state.buffer.put(item, block=False)
        except Full:
            raise WouldBlock from None
        except ValueError as e:
            # mp.Queue raises ValueError once closed; closed flag should make
            # this unreachable.
            print("Unreachable code path - let me know!")
            raise ClosedResourceError from e

    def send(self, item: T) -> None:
        """Enqueue, blocking while the buffer is full."""
        if self._state.closed.is_set():
            raise ClosedResourceError
        try:
            self.send_nowait(item)
        except WouldBlock:
            # put anyway, blocking
            self._state.buffer.put(item, block=True)

    async def send_async(self, item: T) -> None:
        # Run the blocking send on a worker thread so the event loop stays free.
        await to_thread.run_sync(
            self.send, item, limiter=CapacityLimiter(1), abandon_on_cancel=True
        )

    def close(self) -> None:
        """Mark the channel closed and wake the reader with an end-of-stream sentinel."""
        if not self._state.closed.is_set():
            self._state.closed.set()
            with contextlib.suppress(Exception):
                self._state.buffer.put_nowait(_MpEndOfStream())
            self._state.buffer.close()

    # == unique to Mp channels ==
    def join(self) -> None:
        """Ensure any queued messages are resolved before continuing"""
        assert self._state.closed.is_set(), (
            "Mp channels must be closed before being joined"
        )
        self._state.buffer.join_thread()

    # == context manager support ==
    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __getstate__(self) -> dict[str, Any]:
        # Drop __orig_class__ (set by generic instantiation) so the sender
        # pickles cleanly when handed to a child process.
        d = self.__dict__.copy()
        d.pop("__orig_class__", None)
        return d
@dataclass(eq=False)
class MpReceiver[T]:
    """
    An interprocess channel, mimicing the Anyio structure.
    It should be noted that none of the clone methods are implemented for simplicity, for now.
    """

    _state: MpState[T] = field()

    def receive_nowait(self) -> T:
        """Dequeue without blocking; raises WouldBlock when the buffer is empty."""
        if self._state.closed.is_set():
            raise ClosedResourceError
        try:
            item = self._state.buffer.get(block=False)
            if isinstance(item, _MpEndOfStream):
                # Sender closed: close our side and propagate end-of-stream.
                self.close()
                raise EndOfStream
            return item
        except Empty:
            raise WouldBlock from None
        except ValueError as e:
            # mp.Queue raises ValueError once closed; closed flag should make
            # this unreachable.
            print("Unreachable code path - let me know!")
            raise ClosedResourceError from e

    def receive(self) -> T:
        """Dequeue, blocking until an item (or end-of-stream) arrives."""
        try:
            return self.receive_nowait()
        except WouldBlock:
            try:
                item = self._state.buffer.get()
            except (TypeError, OSError):
                # Queue pipe can get closed while we are blocked on get().
                # The underlying connection._handle becomes None, causing
                # TypeError in read(handle, remaining).
                raise ClosedResourceError from None
            if isinstance(item, _MpEndOfStream):
                self.close()
                raise EndOfStream from None
            return item

    async def receive_async(self) -> T:
        # Run the blocking receive on a worker thread so the loop stays free.
        return await to_thread.run_sync(
            self.receive, limiter=CapacityLimiter(1), abandon_on_cancel=True
        )

    def close(self) -> None:
        """Mark the channel closed and unblock the peer with an end-of-stream sentinel."""
        if not self._state.closed.is_set():
            self._state.closed.set()
            with contextlib.suppress(Exception):
                self._state.buffer.put_nowait(_MpEndOfStream())
            self._state.buffer.close()

    # == unique to Mp channels ==
    def join(self) -> None:
        """Block until all enqueued messages are drained off our side of the buffer"""
        assert self._state.closed.is_set(), (
            "Mp channels must be closed before being joined"
        )
        self._state.buffer.join_thread()

    # == iterator support ==
    def __iter__(self) -> Self:
        return self

    def __next__(self) -> T:
        try:
            return self.receive()
        except EndOfStream:
            raise StopIteration from None

    # == async iterator support ==
    def __aiter__(self) -> Self:
        return self

    async def __anext__(self) -> T:
        try:
            return await self.receive_async()
        except EndOfStream:
            raise StopAsyncIteration from None

    # == context manager support ==
    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def collect(self) -> list[T]:
        """Collect all currently available items from this receiver"""
        out: list[T] = []
        while True:
            try:
                item = self.receive_nowait()
                out.append(item)
            except WouldBlock:
                break
        return out

    def receive_at_least(self, n: int) -> list[T]:
        """Block for at least *n* items, draining greedily between blocking reads."""
        out: list[T] = []
        out.append(self.receive())
        out.extend(self.collect())
        while len(out) < n:
            out.append(self.receive())
            out.extend(self.collect())
        return out

    def __getstate__(self):
        # Drop __orig_class__ (set by generic instantiation) so the receiver
        # pickles cleanly when handed to a child process.
        d = self.__dict__.copy()
        d.pop("__orig_class__", None)
        return d
class channel[T]:  # noqa: N801
    """Create a pair of asynchronous channels for communicating within the same process"""

    # Class used purely as a factory: "instantiating" it returns the pair.
    def __new__(cls, max_buffer_size: float = inf) -> tuple[Sender[T], Receiver[T]]:
        if max_buffer_size != inf and not isinstance(max_buffer_size, int):
            raise ValueError("max_buffer_size must be either an integer or math.inf")
        state = AnyioState[T](max_buffer_size)
        return Sender(_state=state), Receiver(_state=state)
class mp_channel[T]: # noqa: N801
"""Create a pair of synchronous channels for interprocess communication"""
# max buffer size uses math.inf to represent an unbounded queue, and 0 to represent a yet unimplemented "unbuffered" queue.
def __new__(cls, max_buffer_size: float = inf) -> tuple[MpSender[T], MpReceiver[T]]:
if (
max_buffer_size == 0
or max_buffer_size != inf
and not isinstance(max_buffer_size, int)
):
raise ValueError(
"max_buffer_size must be either an integer or math.inf. 0-sized buffers are not supported by multiprocessing"
)
state = MpState[T](max_buffer_size)
return MpSender(_state=state), MpReceiver(_state=state)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/channels.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/utils/dashboard_path.py | import sys
from pathlib import Path
from typing import cast
def find_resources() -> Path:
    """Locate the `resources` directory (repo checkout or frozen bundle)."""
    found = _find_resources_in_repo()
    if found is None:
        found = _find_resources_in_bundle()
    if found is None:
        raise FileNotFoundError(
            "Unable to locate resources. Did you clone the repo properly?"
        )
    return found
def _find_resources_in_repo() -> Path | None:
current_module = Path(__file__).resolve()
for parent in current_module.parents:
build = parent / "resources"
if build.is_dir():
return build
return None
def _find_resources_in_bundle() -> Path | None:
frozen_root = cast(str | None, getattr(sys, "_MEIPASS", None))
if frozen_root is None:
return None
candidate = Path(frozen_root) / "resources"
if candidate.is_dir():
return candidate
return None
def find_dashboard() -> Path:
    """Locate the built dashboard assets, raising when they are missing."""
    build = _find_dashboard_in_repo()
    if not build:
        build = _find_dashboard_in_bundle()
    if not build:
        raise FileNotFoundError(
            "Unable to locate dashboard assets - you probably forgot to run `cd dashboard && npm install && npm run build && cd ..`"
        )
    return build
def _find_dashboard_in_repo() -> Path | None:
current_module = Path(__file__).resolve()
for parent in current_module.parents:
build = parent / "dashboard" / "build"
if build.is_dir() and (build / "index.html").exists():
return build
return None
def _find_dashboard_in_bundle() -> Path | None:
frozen_root = cast(str | None, getattr(sys, "_MEIPASS", None))
if frozen_root is None:
return None
candidate = Path(frozen_root) / "dashboard"
if candidate.is_dir():
return candidate
return None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/dashboard_path.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/event_buffer.py | from loguru import logger
class OrderedBuffer[T]:
"""
A buffer that resequences events to ensure their ordering is preserved.
Currently this buffer doesn't raise any errors if an event is lost
This buffer is NOT thread safe, and is designed to only be polled from one
source at a time.
"""
def __init__(self):
self.store: dict[int, T] = {}
self.next_idx_to_release: int = 0
def ingest(self, idx: int, t: T):
"""Ingest a sequence into the buffer"""
logger.trace(f"Ingested event {t}")
if idx < self.next_idx_to_release:
return
if idx in self.store:
assert self.store[idx] == t, (
"Received different messages with identical indices, probable race condition"
)
return
self.store[idx] = t
def drain(self) -> list[T]:
"""Drain all available events from the buffer"""
ret: list[T] = []
while self.next_idx_to_release in self.store:
idx = self.next_idx_to_release
event = self.store.pop(idx)
ret.append(event)
self.next_idx_to_release += 1
logger.trace(f"Releasing event {ret}")
return ret
def drain_indexed(self) -> list[tuple[int, T]]:
"""Drain all available events from the buffer"""
ret: list[tuple[int, T]] = []
while self.next_idx_to_release in self.store:
idx = self.next_idx_to_release
event = self.store.pop(idx)
ret.append((idx, event))
self.next_idx_to_release += 1
logger.trace(f"Releasing event {ret}")
return ret
class MultiSourceBuffer[SourceId, T]:
"""
A buffer that resequences events to ensure their ordering is preserved.
Tracks events with multiple sources
"""
def __init__(self):
self.stores: dict[SourceId, OrderedBuffer[T]] = {}
def ingest(self, idx: int, t: T, source: SourceId):
if source not in self.stores:
self.stores[source] = OrderedBuffer()
buffer = self.stores[source]
buffer.ingest(idx, t)
def drain(self) -> list[T]:
ret: list[T] = []
for store in self.stores.values():
ret.extend(store.drain())
return ret
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/event_buffer.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/fs.py | import contextlib
import os
import pathlib
import tempfile
from typing import LiteralString
type StrPath = str | os.PathLike[str]
type BytesPath = bytes | os.PathLike[bytes]
type StrOrBytesPath = str | bytes | os.PathLike[str] | os.PathLike[bytes]
def delete_if_exists(filename: StrOrBytesPath) -> None:
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
def ensure_parent_directory_exists(filename: StrPath) -> None:
"""
Ensure the directory containing the file exists (create it if necessary).
"""
pathlib.Path(filename).parent.mkdir(parents=True, exist_ok=True)
def ensure_directory_exists(dirname: StrPath) -> None:
"""
Ensure the directory exists (create it if necessary).
"""
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
def make_temp_path(name: LiteralString) -> str:
return os.path.join(tempfile.mkdtemp(), name)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/fs.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
class _PhantomData[*T]:
    """
    Internal machinery of the phantom data - it stores nothing.
    """

    # Instances carry type information only; no attributes, no behavior.


type PhantomData[*T] = _PhantomData[*T] | None
"""
Allows you to use generics in functions without storing anything of that generic type.
Just use `None` and you'll be fine
"""
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/phantom.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
exo-explore/exo:src/exo/utils/pydantic_ext.py | # pyright: reportAny=false, reportUnknownArgumentType=false, reportUnknownVariableType=false
from typing import Any, Self
from pydantic import BaseModel, ConfigDict, model_serializer, model_validator
from pydantic.alias_generators import to_camel
from pydantic_core.core_schema import (
SerializerFunctionWrapHandler,
ValidatorFunctionWrapHandler,
)
class CamelCaseModel(BaseModel):
    """
    A model whose fields are aliased to camel-case from snake-case.
    """

    model_config = ConfigDict(
        alias_generator=to_camel,  # serialize/validate "foo_bar" as "fooBar"
        validate_by_name=True,  # still accept the snake_case python names
        extra="forbid",  # unknown keys are validation errors
        strict=True,  # no implicit type coercion
    )
class FrozenModel(BaseModel):
    """Camel-case aliased, strict model that is immutable after construction."""

    model_config = ConfigDict(
        alias_generator=to_camel,
        validate_by_name=True,
        extra="forbid",
        strict=True,
        frozen=True,  # only difference from CamelCaseModel
    )
class TaggedModel(CamelCaseModel):
    """
    Externally tagged model: serializes as {"ClassName": {...fields...}} and
    validates either the tagged form or the bare field dict.
    """

    @model_serializer(mode="wrap")
    def _serialize(self, handler: SerializerFunctionWrapHandler):
        # Wrap the default serialization under the concrete class name.
        inner = handler(self)
        return {self.__class__.__name__: inner}

    @model_validator(mode="wrap")
    @classmethod
    def _validate(cls, v: Any, handler: ValidatorFunctionWrapHandler) -> Self:
        # Unwrap {"ClassName": {...}} before normal validation; anything else
        # (already-unwrapped dicts, model instances) passes straight through.
        if isinstance(v, dict) and len(v) == 1 and cls.__name__ in v:
            return handler(v[cls.__name__])
        return handler(v)

    def __str__(self) -> str:
        return f"{self.__class__.__name__}({super().__str__()})"
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/pydantic_ext.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/reactive.py | """
Utilities for reactive variables
"""
from typing import Protocol
class OnChange[T](Protocol):
def __call__(self, old_value: T, new_value: T) -> None: ...
class Reactive[T]:
def __init__(self, initial_value: T, on_change: OnChange[T]):
self._value = initial_value
self._on_change = on_change
@property
def value(self):
return self._value
@value.setter
def value(self, new_value: T):
old_value = self._value
self._value = new_value
# don't notify when not changed
if old_value == new_value:
return
# notify of changes
self._on_change(old_value=old_value, new_value=new_value)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/reactive.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/utils/tests/test_tagged.py | import anyio
import pytest
from pydantic import BaseModel, TypeAdapter, ValidationError
from exo.utils.pydantic_ext import TaggedModel
def test_plain_union_prefers_first_member_when_shapes_are_identical():
    """Document the baseline pydantic behavior that TaggedModel exists to fix."""

    class Foo1(BaseModel):
        x: int

    class Foo2(BaseModel):
        x: int

    # Base Pydantic behavior: ambiguous dict goes to the first union member
    ta = TypeAdapter[Foo1 | Foo2](Foo1 | Foo2)
    out = ta.validate_python({"x": 1})
    assert isinstance(out, Foo1), (
        "Base Pydantic should pick the first union member for identical shapes"
    )
def test_tagged_union_serializes_and_deserializes_two_identical_shapes_correctly():
    """Tags disambiguate union members whose field shapes are identical."""

    class Foo1(TaggedModel):
        x: int

    class Foo2(TaggedModel):
        x: int

    t1 = Foo1(x=1)
    assert t1.model_dump() == {"Foo1": {"x": 1}}
    t2 = Foo2(x=2)
    assert t2.model_dump() == {"Foo2": {"x": 2}}
    # ---- deserialize (TypeAdapter -> model_validator(before)) ----
    ta = TypeAdapter[Foo1 | Foo2](Foo1 | Foo2)
    out1 = ta.validate_python({"Foo1": {"x": 10}})
    assert isinstance(out1, Foo1) and out1.x == 10
    out2 = ta.validate_python({"Foo2": {"x": 20}})
    assert isinstance(out2, Foo2) and out2.x == 20


def test_tagged_union_rejects_unknown_tag():
    """An unrecognized tag must fail validation rather than guess a member."""

    class Foo1(TaggedModel):
        x: int

    class Foo2(TaggedModel):
        x: int

    ta = TypeAdapter[Foo1 | Foo2](Foo1 | Foo2)
    with pytest.raises(ValidationError):
        ta.validate_python({"NotARealTag": {"x": 0}})
def test_two_tagged_classes_with_different_shapes_are_independent_and_not_cross_deserializable():
    """A payload tagged for one union never validates against another union."""

    class A1(TaggedModel):
        x: int

    class A2(TaggedModel):
        name: str

    class B1(TaggedModel):
        name: str

    class B2(TaggedModel):
        active: bool

    a_payload = A1(x=123).model_dump()
    b_payload = B1(name="neo").model_dump()
    assert a_payload == {"A1": {"x": 123}}
    assert b_payload == {"B1": {"name": "neo"}}
    ta_a = TypeAdapter[A1 | A2](A1 | A2)
    ta_b = TypeAdapter[B1 | B2](B1 | B2)
    with pytest.raises(ValidationError):
        ta_a.validate_python(b_payload)
    with pytest.raises(ValidationError):
        ta_b.validate_python(a_payload)
# Shared fixtures for the nesting tests below.
class Inner(TaggedModel):
    x: int


class Outer(TaggedModel):
    inner: Inner


class Wrapper(TaggedModel):
    outer: Outer
    label: str


class Container(TaggedModel):
    items: list[Inner]
    nested: Wrapper
def test_single_level_tagging():
    """A tagged model round-trips through its own dump/validate."""
    inner = Inner(x=10)
    dumped = inner.model_dump()
    assert dumped == {"Inner": {"x": 10}}
    restored = Inner.model_validate(dumped)
    assert isinstance(restored, Inner)
    assert restored.x == 10


def test_nested_externally_tagged_union_serializes_recursively():
    """Nested tagged fields are tagged at every level of the dump."""
    outer = Outer(inner=Inner(x=42))
    dumped = outer.model_dump()
    assert dumped == {"Outer": {"inner": {"Inner": {"x": 42}}}}
    restored = Outer.model_validate(dumped)
    assert isinstance(restored.inner, Inner)
    assert restored.inner.x == 42


def test_two_level_nested_tagging():
    """Two-level round-trip with a different payload value."""
    outer = Outer(inner=Inner(x=123))
    dumped = outer.model_dump()
    assert dumped == {"Outer": {"inner": {"Inner": {"x": 123}}}}
    restored = Outer.model_validate(dumped)
    assert isinstance(restored.inner, Inner)
    assert restored.inner.x == 123


def test_three_level_nested_tagging():
    """Tags stay correct three levels deep."""
    wrapper = Wrapper(label="deep", outer=Outer(inner=Inner(x=7)))
    dumped = wrapper.model_dump()
    # 3-level structure, each with exactly one tag
    assert dumped == {
        "Wrapper": {
            "label": "deep",
            "outer": {"Outer": {"inner": {"Inner": {"x": 7}}}},
        }
    }
    restored = Wrapper.model_validate(dumped)
    assert isinstance(restored.outer.inner, Inner)
    assert restored.outer.inner.x == 7
    assert restored.label == "deep"


def test_lists_and_mixed_nested_structures():
    """Tagging applies inside lists and mixed containers too."""
    container = Container(
        items=[Inner(x=1), Inner(x=2)],
        nested=Wrapper(label="mix", outer=Outer(inner=Inner(x=9))),
    )
    dumped = container.model_dump()
    assert dumped == {
        "Container": {
            "items": [
                {"Inner": {"x": 1}},
                {"Inner": {"x": 2}},
            ],
            "nested": {
                "Wrapper": {
                    "label": "mix",
                    "outer": {"Outer": {"inner": {"Inner": {"x": 9}}}},
                }
            },
        }
    }
    restored = Container.model_validate(dumped)
    assert isinstance(restored.nested.outer.inner, Inner)
    assert [i.x for i in restored.items] == [1, 2]


def test_no_double_tagging_on_repeated_calls():
    """Ensure multiple model_dump calls don't stack tags."""
    inner = Inner(x=11)
    dumped1 = inner.model_dump()
    dumped2 = inner.model_dump()
    assert dumped1 == dumped2 == {"Inner": {"x": 11}}
    outer = Outer(inner=inner)
    d1 = outer.model_dump()
    d2 = outer.model_dump()
    assert d1 == d2 == {"Outer": {"inner": {"Inner": {"x": 11}}}}
# Three-level tagged-union fixtures: every level has three structurally
# identical members — worst case if validation tried branches depth-first.
class L3A(TaggedModel):
    x: int


class L3B(TaggedModel):
    x: int


class L3C(TaggedModel):
    x: int


L3 = L3A | L3B | L3C


class L2A(TaggedModel):
    child: L3


class L2B(TaggedModel):
    child: L3


class L2C(TaggedModel):
    child: L3


L2 = L2A | L2B | L2C


class L1A(TaggedModel):
    child: L2


class L1B(TaggedModel):
    child: L2


class L1C(TaggedModel):
    child: L2


L1 = L1A | L1B | L1C
@pytest.mark.anyio
async def test_tagged_union_is_fast():
    """Validation of a deep tagged union must not blow up combinatorially."""
    # payload along the "C" path (worst case for DFS if branches are tried A->B->C)
    payload = {"L1C": {"child": {"L2C": {"child": {"L3C": {"x": 123}}}}}}
    with anyio.fail_after(0.1):
        out = TypeAdapter(L1).validate_python(payload)  # type: ignore
    # Sanity check the result
    assert out.__class__.__name__ == "L1C"  # type: ignore
    assert out.child.__class__.__name__ == "L2C"  # type: ignore
    assert out.child.child.__class__.__name__ == "L3C"  # type: ignore
    assert out.child.child.x == 123  # type: ignore
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/utils/tests/test_tagged.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/engines/mlx/auto_parallel.py | import os
import threading
from abc import ABC, abstractmethod
from collections.abc import Callable
from functools import partial
from inspect import signature
from typing import TYPE_CHECKING, Any, Protocol, cast
import mlx.core as mx
import mlx.nn as nn
from mlx.nn.layers.distributed import (
shard_inplace,
shard_linear,
sum_gradients,
)
from mlx_lm.models.base import (
scaled_dot_product_attention, # pyright: ignore[reportUnknownVariableType]
)
from mlx_lm.models.deepseek_v3 import DeepseekV3MLP
from mlx_lm.models.deepseek_v3 import Model as DeepseekV3Model
from mlx_lm.models.deepseek_v32 import DeepseekV32MLP
from mlx_lm.models.deepseek_v32 import Model as DeepseekV32Model
from mlx_lm.models.glm4_moe import Model as Glm4MoeModel
from mlx_lm.models.glm4_moe import MoE
from mlx_lm.models.glm4_moe_lite import Glm4MoeLiteDecoderLayer, Glm4MoeLiteMLP
from mlx_lm.models.glm4_moe_lite import Model as GLM4MoeLiteModel
from mlx_lm.models.gpt_oss import GptOssMoeModel
from mlx_lm.models.gpt_oss import Model as GptOssModel
from mlx_lm.models.kimi_k25 import Model as KimiK25Model
from mlx_lm.models.llama import Model as LlamaModel
from mlx_lm.models.minimax import MiniMaxAttention
from mlx_lm.models.minimax import Model as MiniMaxModel
from mlx_lm.models.ministral3 import Model as Ministral3Model
from mlx_lm.models.qwen3_moe import Model as Qwen3MoeModel
from mlx_lm.models.qwen3_moe import Qwen3MoeDecoderLayer, Qwen3MoeSparseMoeBlock
from mlx_lm.models.qwen3_next import Model as Qwen3NextModel
from mlx_lm.models.qwen3_next import Qwen3NextDecoderLayer, Qwen3NextSparseMoeBlock
from mlx_lm.models.step3p5 import Model as Step35Model
from mlx_lm.models.step3p5 import Step3p5MLP as Step35MLP
from mlx_lm.models.step3p5 import Step3p5Model as Step35InnerModel
from exo.shared.logging import logger
from exo.shared.types.worker.shards import PipelineShardMetadata
if TYPE_CHECKING:
from mlx_lm.models.cache import Cache
# Callback invoked by the watchdog just before the process is terminated.
TimeoutCallback = Callable[[], None]
LayerLoadedCallback = Callable[[int, int], None]  # (layers_loaded, total_layers)

# Prefill sends deferred by PipelineLastLayer while queue_sends is enabled;
# dispatched by flush_prefill_sends() or discarded by clear_prefill_sends().
_pending_prefill_sends: list[tuple[mx.array, int, mx.distributed.Group]] = []
def flush_prefill_sends() -> None:
    """Dispatch every queued prefill send asynchronously, then empty the queue."""
    for tensor, dst_rank, grp in _pending_prefill_sends:
        mx.async_eval(mx.distributed.send(tensor, dst_rank, group=grp))
    _pending_prefill_sends.clear()
def clear_prefill_sends() -> None:
    """Drop all queued prefill sends without dispatching them (e.g. on cancellation)."""
    del _pending_prefill_sends[:]
def eval_with_timeout(
    mlx_item: Any,  # pyright: ignore[reportAny]
    timeout_seconds: float = 60.0,
    on_timeout: TimeoutCallback | None = None,
) -> None:
    """Evaluate an MLX item, hard-killing the process if it takes too long.

    A daemon watchdog thread waits for completion; once the deadline passes it
    calls `on_timeout` (so the runner can emit a failure event first) and then
    terminates the process via os._exit(1).
    """
    finished = threading.Event()

    def _watchdog() -> None:
        if finished.wait(timeout=timeout_seconds):
            return
        logger.error(
            f"mlx_item evaluation timed out after {timeout_seconds:.0f}s. "
            "This may indicate an issue with FAST_SYNCH and tensor parallel sharding. "
            "Terminating process."
        )
        if on_timeout is not None:
            on_timeout()
        os._exit(1)

    threading.Thread(target=_watchdog, daemon=True).start()
    try:
        mx.eval(mlx_item)  # pyright: ignore[reportAny]
    finally:
        finished.set()
class _LayerCallable(Protocol):
    """Structural type that any compatible layer must satisfy.

    We require a single positional input of type ``mx.array`` and an
    ``mx.array`` output, while permitting arbitrary *args / **kwargs so this
    protocol matches the vast majority of `mlx.nn.Module` subclasses.
    """

    def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array: ...
class CustomMlxLayer(nn.Module):
    """Base class for replacing an MLX layer with a custom implementation.

    The wrapper behaves as a transparent proxy: attributes not found on the
    wrapper itself fall through to the wrapped layer.
    """

    def __init__(self, original_layer: _LayerCallable):
        super().__init__()
        # Store the wrapped layer with a raw dict write, bypassing
        # nn.Module.__setattr__ — presumably so the original layer is not
        # registered as a child module of this wrapper; confirm against MLX
        # Module semantics.
        dict.__setitem__(self, "_original_layer", original_layer)  # pyright: ignore[reportUnknownMemberType]

    @property
    def original_layer(self) -> _LayerCallable:
        # Read back via plain dict indexing to mirror the raw write above.
        return cast(_LayerCallable, self["_original_layer"])

    # Calls __getattr__ for any attributes not found on nn.Module (e.g. use_sliding)
    if not TYPE_CHECKING:

        def __getattr__(self, name):
            try:
                return super().__getattr__(name)
            except AttributeError:
                # Fall through to the wrapped layer so the wrapper is a
                # transparent proxy for attributes it does not define.
                original_layer = cast(_LayerCallable, self["_original_layer"])
                return getattr(original_layer, name)
class PipelineFirstLayer(CustomMlxLayer):
    """First layer of a pipeline stage: receives activations from the previous rank.

    Rank 0 feeds its own input straight through without receiving.
    """

    def __init__(
        self,
        original_layer: _LayerCallable,
        r: int,
        group: mx.distributed.Group,
    ):
        super().__init__(original_layer)
        # r: this device's pipeline rank within `group`.
        self.r: int = r
        self.group = group
        # Toggled by set_pipeline_prefill; not read in this class's __call__,
        # kept for symmetry with PipelineLastLayer.
        self.is_prefill: bool = False

    def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
        if self.r != 0:
            # We want to avoid GPU timeout errors by evalling the distributed operation
            # so that it stays on CPU, which does not have a timeout.
            mx.eval(x)
            x = mx.distributed.recv_like(x, (self.r - 1), group=self.group)
            mx.eval(x)
        return self.original_layer(x, *args, **kwargs)
class PipelineLastLayer(CustomMlxLayer):
    """Last layer of a pipeline stage: forwards activations to the next rank.

    Outside prefill, the output is all-gathered so every rank returns the same
    activations.
    """

    def __init__(
        self,
        original_layer: _LayerCallable,
        r: int,
        s: int,
        group: mx.distributed.Group,
    ):
        super().__init__(original_layer)
        # r: this device's pipeline rank; s: pipeline world size.
        self.r: int = r
        self.s: int = s
        self.group = group
        # Used to recover the `cache` argument whether passed positionally or
        # by keyword.
        self.original_layer_signature = signature(self.original_layer.__call__)
        self.is_prefill: bool = False
        # When True, sends are queued in _pending_prefill_sends instead of
        # being issued inline (see flush_prefill_sends / clear_prefill_sends).
        self.queue_sends: bool = False

    def __call__(self, x: mx.array, *args: object, **kwargs: object) -> mx.array:
        cache = self.original_layer_signature.bind_partial(
            x, *args, **kwargs
        ).arguments.get("cache", None)
        output: mx.array = self.original_layer(x, *args, **kwargs)
        # Eval layer output to materialize it before send — this splits the graph
        # so the send is isolated and the receiving rank's recv can complete.
        mx.eval(output)
        if self.r != self.s - 1:
            if self.queue_sends:
                _pending_prefill_sends.append(
                    (output, (self.r + 1) % self.s, self.group)
                )
            else:
                output = mx.distributed.send(
                    output, (self.r + 1) % self.s, group=self.group
                )
                if cache is not None:
                    # CacheList (used by MLA models like DeepSeekV32, GLM MoE DSA)
                    # doesn't have .keys directly; access via first sub-cache.
                    _cache = cache[0] if hasattr(cache, "caches") else cache  # type: ignore
                    # Tie the cache to the send so evaluating the cache also
                    # forces the send to run.
                    _cache.keys = mx.depends(_cache.keys, output)  # type: ignore
                mx.eval(output)
                if cache is not None:
                    mx.eval(_cache.keys)  # type: ignore
        if not self.is_prefill:
            # Collective: every rank participates; keep only this batch's rows.
            output = mx.distributed.all_gather(output, group=self.group)[
                -output.shape[0] :
            ]
            mx.eval(output)
        return output
def set_pipeline_prefill(model: nn.Module, is_prefill: bool) -> None:
    """Toggle prefill mode on every pipeline boundary layer of the model."""
    boundary_types = (PipelineFirstLayer, PipelineLastLayer)
    for candidate in model.layers:  # type: ignore
        if isinstance(candidate, boundary_types):
            candidate.is_prefill = is_prefill
def set_pipeline_queue_sends(model: nn.Module, queue_sends: bool) -> None:
    """Enable or disable deferred prefill sends on every PipelineLastLayer."""
    for candidate in model.layers:  # type: ignore
        if not isinstance(candidate, PipelineLastLayer):
            continue
        candidate.queue_sends = queue_sends
def get_inner_model(model: nn.Module) -> nn.Module:
    """Return the inner transformer module of a wrapped model.

    Checks, in order: ``model.model``, ``model.transformer``, and the
    multimodal-style ``model.language_model.model`` nesting.

    Raises:
        ValueError: if none of the known inner-module attributes exist.
    """
    inner = getattr(model, "model", None)
    if isinstance(inner, nn.Module):
        return inner
    inner = getattr(model, "transformer", None)
    if isinstance(inner, nn.Module):
        return inner
    inner = getattr(model, "language_model", None)
    if isinstance(inner, nn.Module):
        inner_inner = getattr(inner, "model", None)
        if isinstance(inner_inner, nn.Module):
            return inner_inner
    # Message previously claimed only 'model'/'transformer'; include the
    # 'language_model.model' path actually supported above.
    raise ValueError(
        "Model must have a 'model', 'transformer', or 'language_model.model' attribute"
    )
def get_layers(inner_model_instance: nn.Module) -> list[_LayerCallable]:
    """Return the decoder layer list, stored as either `.layers` or `.h`."""
    for attr_name in ("layers", "h"):
        if hasattr(inner_model_instance, attr_name):
            return cast(list[_LayerCallable], getattr(inner_model_instance, attr_name))
    raise ValueError("Model must have either a 'layers' or 'h' attribute")
def pipeline_auto_parallel(
    model: nn.Module,
    group: mx.distributed.Group,
    model_shard_meta: PipelineShardMetadata,
    on_layer_loaded: LayerLoadedCallback | None,
) -> nn.Module:
    """
    Automatically parallelize a model across multiple devices.

    Keeps only this shard's [start_layer, end_layer) slice of the decoder
    layers, wraps the boundary layers with pipeline recv/send logic, and
    patches the model's __call__ so distributed ops get evaluated.

    Args:
        model: The model to parallelize (must have a 'layers' or 'h' property)
        group: Distributed group used for pipeline communication
        model_shard_meta: The metadata for the model shard
        on_layer_loaded: Optional progress callback (layers_loaded, total_layers)

    Returns:
        The parallelized model
    """
    inner_model_instance: nn.Module = get_inner_model(model)
    layers = get_layers(inner_model_instance)
    start_layer, end_layer = model_shard_meta.start_layer, model_shard_meta.end_layer
    device_rank, world_size = model_shard_meta.device_rank, model_shard_meta.world_size
    layers = layers[start_layer:end_layer]
    total = len(layers)
    # Materialize each retained layer's weights up front, reporting progress.
    for i, layer in enumerate(layers):
        mx.eval(layer)  # type: ignore
        if on_layer_loaded is not None:
            on_layer_loaded(i, total)
    # Boundary wrappers: first layer receives from the previous rank, last
    # layer sends to the next rank (and all-gathers outside prefill).
    layers[0] = PipelineFirstLayer(layers[0], device_rank, group=group)
    layers[-1] = PipelineLastLayer(
        layers[-1],
        device_rank,
        world_size,
        group=group,
    )
    if isinstance(inner_model_instance, GptOssMoeModel):
        # Trim the per-layer attention-type table to this shard's slice.
        inner_model_instance.layer_types = inner_model_instance.layer_types[  # type: ignore
            start_layer:end_layer
        ]
        # We can assume the model has at least one layer thanks to placement.
        # If a layer type doesn't exist, we can set it to 0.
        inner_model_instance.swa_idx = (
            0
            if "sliding_attention" not in inner_model_instance.layer_types  # type: ignore
            else inner_model_instance.layer_types.index(  # type: ignore
                "sliding_attention"
            )
        )
        inner_model_instance.ga_idx = (
            0
            if "full_attention" not in inner_model_instance.layer_types  # type: ignore
            else inner_model_instance.layer_types.index(  # type: ignore
                "full_attention"
            )
        )
    if isinstance(inner_model_instance, Step35InnerModel):
        inner_model_instance.num_layers = len(layers)
        sliding_layers = [
            i for i, layer in enumerate(layers) if getattr(layer, "is_sliding", False)
        ]
        # NOTE(review): the getattr defaults differ (False here vs True below),
        # so a layer without `is_sliding` counts as neither sliding nor full —
        # confirm this is intended.
        full_layers = [
            i
            for i, layer in enumerate(layers)
            if not getattr(layer, "is_sliding", True)
        ]
        inner_model_instance._swa_idx = 0 if not sliding_layers else sliding_layers[0]
        inner_model_instance._full_idx = 0 if not full_layers else full_layers[0]
    _set_layers(model, layers)
    assert isinstance(layers, list), (
        "Expected a list of layers after auto-parallel initialisation"
    )
    return patch_pipeline_model(model, group)
def patch_pipeline_model[T](model: T, group: mx.distributed.Group) -> T:
# Patch __call__ on the model's class
cls = model.__class__
original_call = cls.__call__ # type :ignore
call_signature = signature(original_call) # type :ignore
def patched_call(
self: T,
*args: object,
**kwargs: object,
) -> mx.array:
logits: mx.array = original_call(self, *args, **kwargs) # type: ignore
cache = call_signature.bind_partial(self, *args, **kwargs).arguments.get(
"cache", None
)
# Add dependency to last cache entry to ensure distributed ops are evaluated
if cache is not None:
last = cache[-1] # type: ignore
dep_cache = last[0] if hasattr(last, "caches") else last # type: ignore
dep_cache.keys = mx.depends(dep_cache.keys, logits) # type: ignore
return logits
cls.__call__ = patched_call
return model
def patch_tensor_model[T](model: T) -> T:
"""Patch model's __call__ to ensure distributed ops sync during inference."""
cls = model.__class__
original_call = cls.__call__
call_signature = signature(original_call)
def patched_call(
self: T,
*args: object,
**kwargs: object,
) -> mx.array:
logits: mx.array = original_call(self, *args, **kwargs) # pyright: ignore[reportAny]
cache = call_signature.bind_partial(self, *args, **kwargs).arguments.get(
"cache", None
)
# Add dependency to last cache entry to ensure distributed ops are evaluated
if cache is not None and len(cache) > 0: # pyright: ignore[reportAny]
last = cache[-1] # pyright: ignore[reportAny]
dep_cache = last[0] if hasattr(last, "caches") else last # pyright: ignore[reportAny]
dep_cache.keys = mx.depends(dep_cache.keys, logits) # pyright: ignore[reportAny,reportUnknownMemberType]
return logits
cls.__call__ = patched_call
return model
def tensor_auto_parallel(
    model: nn.Module,
    group: mx.distributed.Group,
    timeout_seconds: float,
    on_timeout: TimeoutCallback | None,
    on_layer_loaded: LayerLoadedCallback | None,
) -> nn.Module:
    """Shard `model` tensor-parallel across `group` and patch its __call__.

    Builds the four sharding helpers shared by every strategy, selects the
    strategy matching the model architecture, applies it, and finally patches
    the model so distributed ops sync during inference.

    Raises:
        ValueError: if no sharding strategy exists for the model type.
    """
    all_to_sharded_linear = partial(
        shard_linear,
        sharding="all-to-sharded",
        group=group,
    )
    sharded_to_all_linear = partial(
        shard_linear,
        sharding="sharded-to-all",
        group=group,
    )
    segments: int = 1

    def _all_to_sharded(path: str, weight: mx.array):
        # Biases are split along their only axis; other weights along axis
        # ndim - 2.
        if path.endswith("bias"):
            logger.info(f"Sharding bias for {path} - all to sharded")
            return weight.ndim - 1, segments
        return max(weight.ndim - 2, 0), segments

    all_to_sharded_linear_in_place = partial(
        shard_inplace,
        sharding=_all_to_sharded,  # type: ignore
        group=group,
    )
    n = group.size()

    def _sharded_to_all(path: str, weight: mx.array):
        # Bias is divided by the group size — presumably so the later all-sum
        # restores the full value; confirm against shard_inplace semantics.
        if path.endswith("bias"):
            logger.info(f"Sharding bias for {path} - sharded to all")
            weight /= n
            return None
        return -1, segments

    sharded_to_all_linear_in_place = partial(
        shard_inplace,
        sharding=_sharded_to_all,  # type: ignore
        group=group,
    )
    # Ordered dispatch: the first matching isinstance wins, mirroring the
    # precedence of the previous if/elif chain.
    dispatch: list[tuple[tuple[type, ...], type[TensorParallelShardingStrategy]]] = [
        ((LlamaModel, Ministral3Model), LlamaShardingStrategy),
        ((DeepseekV3Model, DeepseekV32Model, KimiK25Model), DeepSeekShardingStrategy),
        ((MiniMaxModel,), MiniMaxShardingStrategy),
        ((GLM4MoeLiteModel,), GLM4MoeLiteShardingStrategy),
        ((Glm4MoeModel,), Glm4MoeShardingStrategy),
        ((Qwen3MoeModel, Qwen3NextModel), QwenShardingStrategy),
        ((GptOssModel,), GptOssShardingStrategy),
        ((Step35Model,), Step35ShardingStrategy),
    ]
    for model_types, strategy_cls in dispatch:
        if isinstance(model, model_types):
            tensor_parallel_sharding_strategy = strategy_cls(
                group,
                all_to_sharded_linear,
                sharded_to_all_linear,
                all_to_sharded_linear_in_place,
                sharded_to_all_linear_in_place,
            )
            break
    else:
        raise ValueError(f"Unsupported model type: {type(model)}")
    model = tensor_parallel_sharding_strategy.shard_model(
        model, timeout_seconds, on_timeout, on_layer_loaded
    )
    return patch_tensor_model(model)
class TensorParallelShardingStrategy(ABC):
    """Base class for per-architecture tensor-parallel sharding recipes.

    Subclasses implement `shard_model`, rewriting the model's layers in place
    so each rank in `group` holds one shard of the weights.
    """

    def __init__(
        self,
        group: mx.distributed.Group,
        all_to_sharded_linear: Callable[..., nn.Linear],
        sharded_to_all_linear: Callable[..., nn.Linear],
        all_to_sharded_linear_in_place: Callable[..., None],
        sharded_to_all_linear_in_place: Callable[..., None],
    ):
        # Pre-bound sharding helpers (built in tensor_auto_parallel) shared by
        # all strategies.
        self.all_to_sharded_linear = all_to_sharded_linear
        self.sharded_to_all_linear = sharded_to_all_linear
        self.all_to_sharded_linear_in_place = all_to_sharded_linear_in_place
        self.sharded_to_all_linear_in_place = sharded_to_all_linear_in_place
        self.group = group
        # Number of ranks; head counts and shard sizes are divided by this.
        self.N = group.size()

    @abstractmethod
    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module: ...
class LlamaShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for Llama-style dense attention + MLP models."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(LlamaModel, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force load weights before sharding to avoid FAST_SYNCH deadlock
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            # Shard q/k/v projections; o_proj reduces the shards back to all
            # ranks.
            layer.self_attn.q_proj = self.all_to_sharded_linear(layer.self_attn.q_proj)
            layer.self_attn.k_proj = self.all_to_sharded_linear(layer.self_attn.k_proj)
            layer.self_attn.v_proj = self.all_to_sharded_linear(layer.self_attn.v_proj)
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            # Each rank now serves 1/N of the attention heads.
            layer.self_attn.n_heads //= self.N
            if layer.self_attn.n_kv_heads is not None:
                layer.self_attn.n_kv_heads //= self.N
            layer.mlp.gate_proj = self.all_to_sharded_linear(layer.mlp.gate_proj)
            layer.mlp.down_proj = self.sharded_to_all_linear(layer.mlp.down_proj)
            layer.mlp.up_proj = self.all_to_sharded_linear(layer.mlp.up_proj)
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
def _set_layers(model: nn.Module, layers: list[_LayerCallable]) -> None:
    """Install the (possibly truncated) layer list back onto the model.

    Also fixes up per-architecture layer-count bookkeeping that some models
    keep alongside the layer list.

    Raises:
        ValueError: if the inner model exposes neither `.layers` nor `.h`.
    """
    inner_model_instance = get_inner_model(model)
    if hasattr(inner_model_instance, "layers"):
        inner_model_instance.layers = layers
        # Update DeepSeek V3 specific parameters when layers are shrunk
        if isinstance(
            model, (DeepseekV3Model, DeepseekV32Model, Glm4MoeModel, KimiK25Model)
        ) and hasattr(inner_model_instance, "num_layers"):
            logger.info(
                f"Setting num_layers to {len(layers)} for model {model.model.__class__.__name__}"
            )
            inner_model_instance.start_idx = 0
            inner_model_instance.end_idx = len(layers)
            inner_model_instance.num_layers = len(layers)
        elif isinstance(model, Qwen3MoeModel):
            logger.info(
                f"Setting num_hidden_layers to {len(layers)} for model {model.model.__class__.__name__}"
            )
            inner_model_instance.num_hidden_layers = len(layers)
    elif hasattr(inner_model_instance, "h"):
        inner_model_instance.h = layers
    else:
        raise ValueError("Model must have either a 'layers' or 'h' attribute")
class DeepSeekShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for DeepSeek-style MLA attention + MoE models."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(DeepseekV3Model, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            # Shard the self attention. With query LoRA (q_lora_rank set) the
            # query projection is split into a/b matrices; shard whichever
            # variant this config uses.
            if layer.self_attn.q_lora_rank is None:
                layer.self_attn.q_proj = self.all_to_sharded_linear(
                    layer.self_attn.q_proj
                )
            else:
                layer.self_attn.q_b_proj = self.all_to_sharded_linear(
                    layer.self_attn.q_b_proj
                )
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            layer.self_attn.num_heads //= self.N
            # Logic from upstream mlx
            num_heads = layer.self_attn.num_heads
            sh = self.group.rank() * num_heads
            eh = sh + num_heads

            def shard_heads(w: mx.array, sh: int = sh, eh: int = eh) -> mx.array:
                # Defaults bind this iteration's values (avoids the
                # late-binding closure pitfall).
                return w[sh:eh]

            layer.self_attn.embed_q.apply(shard_heads)
            layer.self_attn.unembed_out.apply(shard_heads)
            # Shard the MLP
            if isinstance(layer.mlp, (DeepseekV3MLP, DeepseekV32MLP)):
                layer.mlp.gate_proj = self.all_to_sharded_linear(layer.mlp.gate_proj)
                layer.mlp.down_proj = self.sharded_to_all_linear(layer.mlp.down_proj)
                layer.mlp.up_proj = self.all_to_sharded_linear(layer.mlp.up_proj)
            # Shard the MoE.
            else:
                if getattr(layer.mlp, "shared_experts", None) is not None:
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.gate_proj
                    )
                    self.sharded_to_all_linear_in_place(
                        layer.mlp.shared_experts.down_proj
                    )
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.up_proj
                    )
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.gate_proj)
                self.sharded_to_all_linear_in_place(layer.mlp.switch_mlp.down_proj)
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.up_proj)
                layer.mlp = ShardedMoE(layer.mlp)  # type: ignore
                layer.mlp.sharding_group = self.group
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class ShardedMoE(CustomMlxLayer):
    """Wraps any MoE layer with distributed sum_gradients / all_sum."""

    def __init__(self, layer: _LayerCallable):
        super().__init__(layer)
        # Set by the sharding strategies after construction; when None the
        # wrapper is a plain pass-through.
        self.sharding_group: mx.distributed.Group | None = None

    def __call__(self, x: mx.array) -> mx.array:
        grp = self.sharding_group
        if grp is None:
            return self.original_layer.__call__(x)
        local = self.original_layer.__call__(sum_gradients(grp)(x))
        return mx.distributed.all_sum(local, group=grp)
class GLM4MoeLiteShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for GLM-4 MoE Lite (MLA attention + MoE)."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(GLM4MoeLiteModel, model)
        total = len(model.layers)  # type: ignore
        for i, layer in enumerate(model.layers):  # type: ignore
            layer = cast(Glm4MoeLiteDecoderLayer, layer)
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(
                layer.parameters(),
                timeout_seconds / total,
                on_timeout,
            )
            # With query LoRA (q_lora_rank set) the query projection is split
            # into a/b matrices; shard whichever variant this config uses.
            if layer.self_attn.q_lora_rank is None:  # type: ignore
                layer.self_attn.q_proj = self.all_to_sharded_linear(
                    layer.self_attn.q_proj
                )
            else:
                layer.self_attn.q_b_proj = self.all_to_sharded_linear(
                    layer.self_attn.q_b_proj
                )
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            layer.self_attn.num_heads //= self.N
            # Logic from upstream mlx
            num_heads = layer.self_attn.num_heads
            sh = self.group.rank() * num_heads
            eh = sh + num_heads

            def shard_heads(w: mx.array, sh: int = sh, eh: int = eh) -> mx.array:
                # Defaults bind this iteration's values (avoids the
                # late-binding closure pitfall).
                return w[sh:eh]

            layer.self_attn.embed_q.apply(shard_heads)
            layer.self_attn.unembed_out.apply(shard_heads)
            if isinstance(layer.mlp, Glm4MoeLiteMLP):
                layer.mlp.gate_proj = self.all_to_sharded_linear(layer.mlp.gate_proj)
                layer.mlp.down_proj = self.sharded_to_all_linear(layer.mlp.down_proj)
                layer.mlp.up_proj = self.all_to_sharded_linear(layer.mlp.up_proj)
            else:
                if getattr(layer.mlp, "shared_experts", None) is not None:
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.gate_proj
                    )
                    self.sharded_to_all_linear_in_place(
                        layer.mlp.shared_experts.down_proj
                    )
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.up_proj
                    )
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.gate_proj)
                self.sharded_to_all_linear_in_place(layer.mlp.switch_mlp.down_proj)
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.up_proj)
                layer.mlp = ShardedMoE(layer.mlp)  # type: ignore
                layer.mlp.sharding_group = self.group  # type: ignore
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class WrappedMiniMaxAttention(CustomMlxLayer):
    """MiniMax attention wrapper that runs QK-norm over the full head dimension.

    Under tensor parallelism each rank holds only a slice of the heads; when
    QK normalization is enabled, q/k activations are all-gathered, normalized
    over the full width, then re-split so each rank keeps its own shard.
    """

    def __init__(self, layer: _LayerCallable, group: mx.distributed.Group):
        super().__init__(layer)
        self.group = group

    def __call__(
        self,
        x: mx.array,
        mask: mx.array | None = None,
        cache: "Cache | None" = None,
    ) -> mx.array:
        batch_dim, seq_dim, _ = x.shape
        self._original_layer = cast(MiniMaxAttention, self.original_layer)  # type: ignore
        queries: mx.array = self._original_layer.q_proj(x)
        keys: mx.array = self._original_layer.k_proj(x)
        values: mx.array = self._original_layer.v_proj(x)
        # NOTE(review): use_qk_norm resolves via CustomMlxLayer.__getattr__ to
        # the wrapped layer when not set on the wrapper — confirm.
        if getattr(self, "use_qk_norm", False):
            q_dim = queries.shape[-1]
            k_dim = keys.shape[-1]
            n = self.group.size()
            qk = mx.concatenate(
                [queries, keys], axis=-1
            )  # (batch_dim, seq_dim, q_dim + k_dim)
            qk = mx.distributed.all_gather(
                qk, group=self.group
            )  # (n*batch_dim, seq_dim, q_dim + k_dim)
            qk = qk.reshape(n, batch_dim, seq_dim, q_dim + k_dim).transpose(1, 2, 0, 3)
            queries = qk[..., :q_dim].reshape(
                batch_dim, seq_dim, -1
            )  # (batch_dim, seq_dim, n * q_dim)
            keys = qk[..., q_dim:].reshape(
                batch_dim, seq_dim, -1
            )  # (batch_dim, seq_dim, n * k_dim)
            queries = self._original_layer.q_norm(queries)
            keys = self._original_layer.k_norm(keys)
            # Split back and take this rank's portion
            queries = mx.split(queries, n, axis=-1)[self.group.rank()]
            keys = mx.split(keys, n, axis=-1)[self.group.rank()]
        queries = queries.reshape(
            batch_dim, seq_dim, self._original_layer.num_attention_heads, -1
        ).transpose(0, 2, 1, 3)
        keys = keys.reshape(
            batch_dim, seq_dim, self._original_layer.num_key_value_heads, -1
        ).transpose(0, 2, 1, 3)
        values = values.reshape(
            batch_dim, seq_dim, self._original_layer.num_key_value_heads, -1
        ).transpose(0, 2, 1, 3)
        if cache is not None:
            # Rotate at the cache's current offset, then append to the cache.
            queries = self._original_layer.rope(queries, offset=cache.offset)
            keys = self._original_layer.rope(keys, offset=cache.offset)
            keys, values = cache.update_and_fetch(keys, values)
        else:
            queries = self._original_layer.rope(queries)
            keys = self._original_layer.rope(keys)
        output = scaled_dot_product_attention(
            queries,
            keys,
            values,
            cache=cache,
            scale=self._original_layer.scale,  # type: ignore
            mask=mask,
        )
        output = output.transpose(0, 2, 1, 3).reshape(batch_dim, seq_dim, -1)
        return self._original_layer.o_proj(output)
class MiniMaxShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for MiniMax attention + MoE models."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(MiniMaxModel, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            # Shard the self attention
            layer.self_attn.q_proj = self.all_to_sharded_linear(layer.self_attn.q_proj)
            layer.self_attn.k_proj = self.all_to_sharded_linear(layer.self_attn.k_proj)
            layer.self_attn.v_proj = self.all_to_sharded_linear(layer.self_attn.v_proj)
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            layer.self_attn.num_attention_heads //= self.N
            layer.self_attn.num_key_value_heads //= self.N
            # Wrap so QK-norm (if enabled) runs over the full head dimension.
            layer.self_attn = WrappedMiniMaxAttention(layer.self_attn, self.group)  # pyright: ignore[reportAttributeAccessIssue,reportArgumentType]
            # Shard the MoE.
            self.all_to_sharded_linear_in_place(
                layer.block_sparse_moe.switch_mlp.gate_proj
            )
            self.sharded_to_all_linear_in_place(
                layer.block_sparse_moe.switch_mlp.down_proj
            )
            self.all_to_sharded_linear_in_place(
                layer.block_sparse_moe.switch_mlp.up_proj
            )
            layer.block_sparse_moe = ShardedMoE(layer.block_sparse_moe)  # pyright: ignore[reportAttributeAccessIssue, reportArgumentType]
            layer.block_sparse_moe.sharding_group = self.group  # pyright: ignore[reportAttributeAccessIssue]
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class QwenShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for Qwen3-MoE and Qwen3-Next models.

    Qwen3-Next layers may use linear attention, which requires slicing the
    depthwise conv1d channels and per-head state tensors in addition to the
    usual projection sharding.
    """

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(Qwen3MoeModel | Qwen3NextModel, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            # Shard the self attention
            if isinstance(layer, Qwen3MoeDecoderLayer):
                layer.self_attn.q_proj = self.all_to_sharded_linear(
                    layer.self_attn.q_proj
                )
                layer.self_attn.k_proj = self.all_to_sharded_linear(
                    layer.self_attn.k_proj
                )
                layer.self_attn.v_proj = self.all_to_sharded_linear(
                    layer.self_attn.v_proj
                )
                layer.self_attn.o_proj = self.sharded_to_all_linear(
                    layer.self_attn.o_proj
                )
                layer.self_attn.n_heads //= self.N
                layer.self_attn.n_kv_heads //= self.N
            else:
                assert isinstance(layer, Qwen3NextDecoderLayer)
                if hasattr(layer, "linear_attn"):
                    linear_attn = layer.linear_attn
                    linear_attn.in_proj_qkvz = self.all_to_sharded_linear(
                        linear_attn.in_proj_qkvz
                    )
                    linear_attn.in_proj_ba = self.all_to_sharded_linear(
                        linear_attn.in_proj_ba
                    )
                    linear_attn.out_proj = self.sharded_to_all_linear(
                        linear_attn.out_proj
                    )
                    # Shard conv1d: depthwise conv with non-contiguous channel slicing.
                    # Channel layout is [q(key_dim), k(key_dim), v(value_dim)].
                    # Each rank takes its head-slice from each of the three sections.
                    rank = self.group.rank()
                    key_dim = linear_attn.key_dim
                    value_dim = linear_attn.value_dim
                    key_dim_shard = key_dim // self.N
                    value_dim_shard = value_dim // self.N
                    q_idx = mx.arange(rank * key_dim_shard, (rank + 1) * key_dim_shard)
                    k_idx = mx.arange(
                        key_dim + rank * key_dim_shard,
                        key_dim + (rank + 1) * key_dim_shard,
                    )
                    v_idx = mx.arange(
                        2 * key_dim + rank * value_dim_shard,
                        2 * key_dim + (rank + 1) * value_dim_shard,
                    )
                    conv_indices = mx.concatenate([q_idx, k_idx, v_idx])
                    linear_attn.conv1d.weight = linear_attn.conv1d.weight[conv_indices]
                    # Depthwise conv: groups must equal the new channel count.
                    new_conv_dim = key_dim_shard * 2 + value_dim_shard
                    linear_attn.conv1d.groups = new_conv_dim
                    # Per-value-head state tensors get this rank's head slice.
                    num_v_shard = linear_attn.num_v_heads // self.N
                    v_start = rank * num_v_shard
                    v_end = v_start + num_v_shard
                    linear_attn.A_log = linear_attn.A_log[v_start:v_end]
                    linear_attn.dt_bias = linear_attn.dt_bias[v_start:v_end]
                    # Recompute derived dimensions from the divided head counts.
                    linear_attn.num_k_heads //= self.N
                    linear_attn.num_v_heads //= self.N
                    linear_attn.key_dim = (
                        linear_attn.head_k_dim * linear_attn.num_k_heads
                    )
                    linear_attn.value_dim = (
                        linear_attn.head_v_dim * linear_attn.num_v_heads
                    )
                    linear_attn.conv_dim = (
                        linear_attn.key_dim * 2 + linear_attn.value_dim
                    )
                else:
                    layer.self_attn.q_proj = self.all_to_sharded_linear(
                        layer.self_attn.q_proj
                    )
                    layer.self_attn.k_proj = self.all_to_sharded_linear(
                        layer.self_attn.k_proj
                    )
                    layer.self_attn.v_proj = self.all_to_sharded_linear(
                        layer.self_attn.v_proj
                    )
                    layer.self_attn.o_proj = self.sharded_to_all_linear(
                        layer.self_attn.o_proj
                    )
                    layer.self_attn.num_attention_heads //= self.N
                    layer.self_attn.num_key_value_heads //= self.N
            # Shard the MoE.
            if isinstance(layer.mlp, (Qwen3MoeSparseMoeBlock, Qwen3NextSparseMoeBlock)):
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.gate_proj)
                self.sharded_to_all_linear_in_place(layer.mlp.switch_mlp.down_proj)
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.up_proj)
                if isinstance(layer.mlp, Qwen3NextSparseMoeBlock):
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_expert.gate_proj
                    )
                    self.sharded_to_all_linear_in_place(
                        layer.mlp.shared_expert.down_proj
                    )
                    self.all_to_sharded_linear_in_place(layer.mlp.shared_expert.up_proj)
                layer.mlp = ShardedMoE(layer.mlp)  # pyright: ignore[reportAttributeAccessIssue, reportArgumentType]
                layer.mlp.sharding_group = self.group
            # Shard the MLP
            else:
                layer.mlp.gate_proj = self.all_to_sharded_linear(layer.mlp.gate_proj)
                layer.mlp.down_proj = self.sharded_to_all_linear(layer.mlp.down_proj)
                layer.mlp.up_proj = self.all_to_sharded_linear(layer.mlp.up_proj)
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class Glm4MoeShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for GLM-4 MoE models."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(Glm4MoeModel, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            layer.self_attn.q_proj = self.all_to_sharded_linear(layer.self_attn.q_proj)
            layer.self_attn.k_proj = self.all_to_sharded_linear(layer.self_attn.k_proj)
            layer.self_attn.v_proj = self.all_to_sharded_linear(layer.self_attn.v_proj)
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            layer.self_attn.n_heads //= self.N
            layer.self_attn.n_kv_heads //= self.N
            # MoE layers shard experts in place; dense layers shard the MLP
            # projections directly.
            if isinstance(layer.mlp, MoE):
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.gate_proj)
                self.sharded_to_all_linear_in_place(layer.mlp.switch_mlp.down_proj)
                self.all_to_sharded_linear_in_place(layer.mlp.switch_mlp.up_proj)
                if getattr(layer.mlp, "shared_experts", None) is not None:
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.gate_proj
                    )
                    self.sharded_to_all_linear_in_place(
                        layer.mlp.shared_experts.down_proj
                    )
                    self.all_to_sharded_linear_in_place(
                        layer.mlp.shared_experts.up_proj
                    )
                layer.mlp = ShardedMoE(layer.mlp)  # pyright: ignore[reportAttributeAccessIssue, reportArgumentType]
                layer.mlp.sharding_group = self.group
            else:
                layer.mlp.gate_proj = self.all_to_sharded_linear(layer.mlp.gate_proj)
                layer.mlp.down_proj = self.sharded_to_all_linear(layer.mlp.down_proj)
                layer.mlp.up_proj = self.all_to_sharded_linear(layer.mlp.up_proj)
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class GptOssShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for GPT-OSS MoE models."""

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        model = cast(GptOssMoeModel, model)
        total = len(model.layers)
        for i, layer in enumerate(model.layers):
            # Force-load weights before sharding to avoid FAST_SYNCH deadlock.
            eval_with_timeout(layer.parameters(), timeout_seconds / total, on_timeout)
            layer.self_attn.q_proj = self.all_to_sharded_linear(layer.self_attn.q_proj)
            layer.self_attn.k_proj = self.all_to_sharded_linear(layer.self_attn.k_proj)
            layer.self_attn.v_proj = self.all_to_sharded_linear(layer.self_attn.v_proj)
            layer.self_attn.o_proj = self.sharded_to_all_linear(layer.self_attn.o_proj)
            layer.self_attn.num_attention_heads //= self.N
            layer.self_attn.num_key_value_heads //= self.N
            # Recompute GQA grouping from the post-division head counts.
            layer.self_attn.num_key_value_groups = (
                layer.self_attn.num_attention_heads
                // layer.self_attn.num_key_value_heads
            )
            # Keep only this rank's slice of the attention sinks — presumably
            # one entry per head, matching the divided head count; confirm.
            layer.self_attn.sinks = layer.self_attn.sinks[
                layer.self_attn.num_attention_heads
                * self.group.rank() : layer.self_attn.num_attention_heads
                * (self.group.rank() + 1)
            ]
            self.all_to_sharded_linear_in_place(layer.mlp.experts.gate_proj)
            self.sharded_to_all_linear_in_place(layer.mlp.experts.down_proj)
            self.all_to_sharded_linear_in_place(layer.mlp.experts.up_proj)
            layer.mlp = ShardedMoE(layer.mlp)  # type: ignore
            layer.mlp.sharding_group = self.group  # pyright: ignore[reportAttributeAccessIssue]
            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(i, total)
        return model
class Step35ShardingStrategy(TensorParallelShardingStrategy):
    """Tensor-parallel sharding for Step-3.5 models.

    Splits each layer's attention projections (and optional head-wise
    attention gate) across the group, and shards either the dense MLP or
    the MoE shared/switch experts depending on the layer's MLP type.
    """

    def shard_model(
        self,
        model: nn.Module,
        timeout_seconds: float,
        on_timeout: TimeoutCallback | None,
        on_layer_loaded: LayerLoadedCallback | None,
    ) -> nn.Module:
        step_model = cast(Step35Model, model)
        layer_count = len(step_model.layers)
        per_layer_budget = timeout_seconds / layer_count
        for index, layer in enumerate(step_model.layers):
            # Materialize weights for this layer before mutating them.
            eval_with_timeout(layer.parameters(), per_layer_budget, on_timeout)

            attn = layer.self_attn
            attn.q_proj = self.all_to_sharded_linear(attn.q_proj)
            attn.k_proj = self.all_to_sharded_linear(attn.k_proj)
            attn.v_proj = self.all_to_sharded_linear(attn.v_proj)
            attn.o_proj = self.sharded_to_all_linear(attn.o_proj)
            # Each rank serves 1/N of the (kv) heads after sharding.
            attn.num_heads //= self.N
            attn.num_kv_heads //= self.N
            if getattr(attn, "use_head_wise_attn_gate", False):
                # The per-head gate projection must be split the same way.
                attn.g_proj = self.all_to_sharded_linear(attn.g_proj)

            mlp = layer.mlp
            if isinstance(mlp, Step35MLP):
                # Dense MLP layer: shard the three projections directly.
                mlp.gate_proj = self.all_to_sharded_linear(mlp.gate_proj)
                mlp.up_proj = self.all_to_sharded_linear(mlp.up_proj)
                mlp.down_proj = self.sharded_to_all_linear(mlp.down_proj)
            else:
                # MoE layer: shard shared expert and switch experts in place.
                mlp.sharding_group = self.group
                self.all_to_sharded_linear_in_place(mlp.share_expert.gate_proj)
                self.all_to_sharded_linear_in_place(mlp.share_expert.up_proj)
                self.sharded_to_all_linear_in_place(mlp.share_expert.down_proj)
                self.all_to_sharded_linear_in_place(mlp.switch_mlp.gate_proj)
                self.all_to_sharded_linear_in_place(mlp.switch_mlp.up_proj)
                self.sharded_to_all_linear_in_place(mlp.switch_mlp.down_proj)

            mx.eval(layer)
            if on_layer_loaded is not None:
                on_layer_loaded(index, layer_count)
        return step_model
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/auto_parallel.py",
"license": "Apache License 2.0",
"lines": 954,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/mlx/cache.py | import os
from copy import deepcopy
import mlx.core as mx
import psutil
from mlx_lm.models.cache import (
ArraysCache,
CacheList,
KVCache,
QuantizedKVCache,
RotatingKVCache,
)
from mlx_lm.tokenizer_utils import TokenizerWrapper
from exo.shared.types.memory import Memory
from exo.shared.types.mlx import KVCacheType, Model
from exo.worker.engines.mlx.constants import CACHE_GROUP_SIZE, KV_CACHE_BITS
from exo.worker.runner.bootstrap import logger
# Fraction of device memory above which LRU eviction kicks in.
# Smaller machines need more aggressive eviction.
def _default_memory_threshold() -> float:
    """Pick the LRU-eviction threshold from total system RAM.

    Larger machines can tolerate a higher fill fraction before eviction;
    small machines evict earlier to avoid memory pressure.
    """
    total_gb = Memory.from_bytes(psutil.virtual_memory().total).in_gb
    # (minimum total GB, threshold) pairs, checked from largest to smallest.
    for min_gb, threshold in ((128, 0.85), (64, 0.80), (32, 0.75)):
        if total_gb >= min_gb:
            return threshold
    return 0.70
# Resolved once at import time. The EXO_MEMORY_THRESHOLD env var (a float,
# fraction of device memory) overrides the RAM-based default above.
_MEMORY_THRESHOLD = float(
    os.environ.get("EXO_MEMORY_THRESHOLD", _default_memory_threshold())
)
class CacheSnapshot:
    """Snapshot of states at a known token position."""

    def __init__(
        self, states: list[RotatingKVCache | ArraysCache | None], token_count: int
    ):
        # Per-entry deep copies of the stateful (SSM/rotating) caches;
        # None for positions holding plain KV caches.
        self.states = states
        # Token position at which these states were captured.
        self.token_count = token_count
def snapshot_ssm_states(cache: KVCacheType) -> CacheSnapshot:
    """Capture the current stateful (SSM / rotating KV) entries of `cache`.

    Plain KV entries are recorded as None; stateful entries are deep-copied
    so the snapshot survives further generation.
    """
    states: list[ArraysCache | RotatingKVCache | None] = [
        deepcopy(entry) if isinstance(entry, (ArraysCache, RotatingKVCache)) else None
        for entry in cache
    ]
    return CacheSnapshot(states=states, token_count=cache_length(cache))
def _find_nearest_snapshot(
snapshots: list[CacheSnapshot],
target_token_count: int,
) -> CacheSnapshot | None:
best: CacheSnapshot | None = None
for snap in snapshots:
if snap.token_count <= target_token_count and (
best is None or snap.token_count > best.token_count
):
best = snap
return best
def has_non_kv_caches(cache: KVCacheType) -> bool:
    """Check if a cache contains any ArraysCache (SSM) entries."""
    for entry in cache:
        if isinstance(entry, (ArraysCache, RotatingKVCache)):
            return True
    return False
class KVPrefixCache:
    """LRU cache of (prompt, KV cache) pairs for prompt-prefix reuse.

    Entries are matched by longest common token prefix. For models with
    stateful caches (SSM / rotating KV), per-position snapshots are kept so
    the cache can be rewound to a position at or before the match point.
    Eviction is memory-pressure driven (see ``_MEMORY_THRESHOLD``) and, when
    a distributed group is present, uses the *max* pressure across ranks.
    """

    def __init__(self, group: mx.distributed.Group | None):
        # Parallel lists: index i describes one cached entry.
        self.prompts: list[mx.array] = []  # mx array of tokens (ints)
        self.caches: list[KVCacheType] = []
        self._snapshots: list[list[CacheSnapshot] | None] = []
        self._last_used: list[int] = []  # monotonic counter of last access per entry
        self._access_counter: int = 0
        self._group = group

    def clear(self):
        """Clear all cached prompts and caches."""
        self.prompts.clear()
        self.caches.clear()
        self._snapshots.clear()
        self._last_used.clear()

    def add_kv_cache(
        self,
        prompt_tokens: mx.array,
        cache: KVCacheType,
        ssm_snapshots: list[CacheSnapshot] | None = None,
    ):
        """Add a new cache entry. Evicts LRU entries if memory is high."""
        self._evict_if_needed()
        self.prompts.append(prompt_tokens)
        # Deep copy so the caller's live cache can keep mutating.
        self.caches.append(deepcopy(cache))
        self._snapshots.append(ssm_snapshots)
        self._access_counter += 1
        self._last_used.append(self._access_counter)
        logger.info(f"KV cache added: {len(prompt_tokens)} tokens")

    def update_kv_cache(
        self,
        index: int,
        prompt_tokens: mx.array,
        cache: KVCacheType,
        snapshots: list[CacheSnapshot] | None,
        restore_pos: int,
    ):
        """Update an existing cache entry in-place.

        Old snapshots taken at or before `restore_pos` (the point the new
        generation resumed from) are still valid and are merged with the
        new generation's snapshots.
        """
        old_snapshots = self._snapshots[index]
        merged: list[CacheSnapshot] = []
        if old_snapshots:
            merged = [s for s in old_snapshots if s.token_count <= restore_pos]
        if snapshots:
            merged.extend(snapshots)
        self.prompts[index] = prompt_tokens
        self.caches[index] = deepcopy(cache)
        self._snapshots[index] = merged or None
        self._access_counter += 1
        self._last_used[index] = self._access_counter
        logger.info(f"KV cache updated (index {index}): {len(prompt_tokens)} tokens")

    def _get_snapshot(
        self, entry_index: int, target_token_count: int
    ) -> tuple[int, CacheSnapshot | None]:
        """Resolve the restore position for an entry.

        Returns (restore_position, snapshot). For pure-KV entries no snapshot
        is needed and the target position is returned directly; for stateful
        entries, the nearest snapshot at or before the target is used, or
        position 0 with no snapshot when none qualifies.
        """
        if not has_non_kv_caches(self.caches[entry_index]):
            return target_token_count, None
        snapshots = self._snapshots[entry_index]
        if not snapshots:
            return 0, None
        snap = _find_nearest_snapshot(snapshots, target_token_count)
        if snap is not None:
            return snap.token_count, snap
        return 0, None

    def get_kv_cache(
        self,
        model: Model,
        prompt_tokens: mx.array,
    ) -> tuple[KVCacheType, mx.array, int | None]:
        """Get KV cache for prompt, returning remaining tokens to prefill.

        Returns:
            Tuple of (cache, remaining_tokens, matched_index) where:
            - cache: KV cache to use for generation
            - remaining_tokens: tokens that still need prefilling
            - matched_index: index of the matched entry (None if no match)

        For models with SSM layers (which are ArraysCache in mlx), the cache is trimmed to the
        nearest SSM snapshot position at or before the match point for correctness.
        Same for rotating KV Cache.
        """
        max_length = len(prompt_tokens)
        best_index: int | None = None
        best_length = 0
        is_exact = False
        # Find best cache match
        for i, cached_prompt in enumerate(self.prompts):
            length = get_prefix_length(prompt_tokens, cached_prompt)
            if length >= max_length - 1:
                best_index, best_length = i, length
                is_exact = True
                break
            if length > best_length:
                best_index, best_length = i, length
        if best_index is None:
            return make_kv_cache(model), prompt_tokens, None
        # For exact match: trim to max_length-1 so remaining has the last token
        # For partial match: trim to best_length, remaining has suffix to prefill
        # This ensures stream_generate always has at least one token to start with
        has_ssm = has_non_kv_caches(self.caches[best_index])
        target = (max_length - 1) if is_exact and not has_ssm else best_length
        restore_pos, restore_snap = self._get_snapshot(best_index, target)
        # No usable snapshot — need fresh cache
        if restore_snap is None and has_ssm:
            return make_kv_cache(model), prompt_tokens, None
        # Work on a deep copy; the stored entry stays intact for future hits.
        prompt_cache = deepcopy(self.caches[best_index])
        cached_length = cache_length(self.caches[best_index])
        tokens_to_trim = cached_length - restore_pos
        if tokens_to_trim > 0:
            trim_cache(prompt_cache, tokens_to_trim, restore_snap)
        # Reset cache offset to match trimmed length
        for c in prompt_cache:
            if hasattr(c, "offset"):
                c.offset = restore_pos
        self._access_counter += 1
        self._last_used[best_index] = self._access_counter
        remaining = prompt_tokens[restore_pos:]
        return prompt_cache, remaining, best_index

    def _evict_if_needed(self):
        """Evict least recently used entries while memory usage is high."""
        if len(self.caches) == 0:
            return
        # Evict LRU entries until below threshold
        while (
            len(self.caches) > 0
            and self.get_memory_used_percentage() > _MEMORY_THRESHOLD
        ):
            lru_index = self._last_used.index(min(self._last_used))
            evicted_tokens = len(self.prompts[lru_index])
            self.prompts.pop(lru_index)
            self.caches.pop(lru_index)
            self._snapshots.pop(lru_index)
            self._last_used.pop(lru_index)
            logger.info(
                f"KV cache evicted LRU entry ({evicted_tokens} tokens) due to memory usage"
            )

    def get_memory_used_percentage(self) -> float:
        """Memory pressure in [0, 1]; the max across all ranks when distributed."""
        local_pressure: float = get_memory_used_percentage()
        if self._group is None:
            return local_pressure
        # NOTE: all ranks must call this together — all_gather is collective.
        all_pressure = mx.distributed.all_gather(
            mx.array([local_pressure], dtype=mx.float32),
            group=self._group,
        )
        # .item() evals.
        max_pressure = float(mx.max(all_pressure).item())
        return max_pressure
def trim_cache(
    cache: KVCacheType,
    num_tokens: int,
    snapshot: CacheSnapshot | None = None,
) -> None:
    """Rewind `cache` in place by `num_tokens`.

    Plain KV entries support trimming directly. Stateful entries (SSM /
    rotating) cannot be trimmed: they are either replaced with the matching
    deep-copied state from `snapshot` (index-aligned with `cache`) or, when
    no snapshot state exists for that position, reset to empty.
    """
    for i, c in enumerate(cache):
        if isinstance(c, (ArraysCache, RotatingKVCache)):
            if snapshot is not None and snapshot.states[i] is not None:
                cache[i] = deepcopy(snapshot.states[i])  # type: ignore
            else:
                # No snapshot available: blank the state entirely.
                c.state = [None] * len(c.state)
        else:
            c.trim(num_tokens)
def encode_prompt(tokenizer: TokenizerWrapper, prompt: str) -> mx.array:
    """Encode a prompt string to token array.

    Chat-templated prompts carry their own structure markers (for example
    <|im_user|>, <|im_middle|>), so BOS/EOS tokens must not be inserted —
    doing so would corrupt the prompt structure.
    """
    return mx.array(tokenizer.encode(prompt, add_special_tokens=False))
def _entry_length(
c: KVCache | RotatingKVCache | QuantizedKVCache | ArraysCache | CacheList,
) -> int:
# Use .offset attribute which KVCache types have (len() not implemented in older QuantizedKVCache).
if hasattr(c, "offset"):
return c.offset
# For CacheList
if hasattr(c, "size"):
return int(c.size()) # type: ignore
return 0
def cache_length(cache: KVCacheType) -> int:
    """Get the number of tokens in a KV cache."""
    # The longest entry defines the cache's logical length.
    return max(map(_entry_length, cache))
def get_prefix_length(prompt: mx.array, cached_prompt: mx.array) -> int:
    """Find the length of the common prefix between two token arrays."""
    n = min(int(prompt.shape[0]), int(cached_prompt.shape[0]))
    if n == 0:
        return 0
    equal = mx.equal(prompt[:n], cached_prompt[:n]).astype(mx.int32)
    # cumprod of the 0/1 equality mask stays 1 up to the first mismatch and
    # 0 afterwards, so its sum is exactly the common-prefix length — a single
    # vectorized pass instead of a Python loop over tokens.
    prefix_mask = mx.cumprod(equal)  # stays 1 until first mismatch, then 0 forever
    return int(mx.sum(prefix_mask).item())
def get_available_memory() -> Memory:
    """Currently available system memory."""
    return Memory.from_bytes(psutil.virtual_memory().available)
def get_memory_used_percentage() -> float:
    """System memory utilization as a fraction in [0, 1]."""
    # psutil reports percent on a 0-100 scale.
    return float(psutil.virtual_memory().percent / 100)
def make_kv_cache(
    model: Model, max_kv_size: int | None = None, keep: int = 0
) -> KVCacheType:
    """Build a fresh per-layer KV cache for `model`.

    Preference order: the model's own make_cache() if it defines one, then a
    rotating cache when a max size is requested, then quantized or plain
    KV caches depending on KV_CACHE_BITS.
    """
    assert hasattr(model, "layers")
    if hasattr(model, "make_cache"):
        logger.info("Using MLX LM's make cache")
        return model.make_cache()  # type: ignore
    if max_kv_size is not None:
        logger.info(f"Using rotating KV cache with {max_kv_size=} with {keep=}")
        return [RotatingKVCache(max_size=max_kv_size, keep=keep) for _ in model.layers]
    if KV_CACHE_BITS is None:
        logger.info("Using default KV cache")
        return [KVCache() for _ in model.layers]
    logger.info("Using quantized KV cache")
    return [
        QuantizedKVCache(group_size=CACHE_GROUP_SIZE, bits=KV_CACHE_BITS)
        for _ in model.layers
    ]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/cache.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/mlx/constants.py | # TODO: Do we want so many constants?
# I think we want a lot of these as parameters?
KV_GROUP_SIZE: int | None = 32
KV_BITS: int | None = None
ATTENTION_KV_BITS: int | None = 4
MAX_TOKENS: int = 32168
MAX_KV_SIZE: int | None = 3200
KEEP_KV_SIZE: int | None = 1600
QUANTIZE_MODEL_MODE: str | None = "affine"
CACHE_GROUP_SIZE: int = 64
KV_CACHE_BITS: int | None = None
DEFAULT_TOP_LOGPROBS: int = 5
# TODO: We should really make this opt-in, but Kimi requires trust_remote_code=True
TRUST_REMOTE_CODE: bool = True
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/constants.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/engines/mlx/generator/generate.py | import functools
import math
import time
from copy import deepcopy
from typing import Callable, Generator, cast, get_args
import mlx.core as mx
from mlx_lm.generate import (
maybe_quantize_kv_cache,
stream_generate,
)
from mlx_lm.models.cache import ArraysCache, RotatingKVCache
from mlx_lm.sample_utils import make_sampler
from mlx_lm.tokenizer_utils import TokenizerWrapper
from exo.shared.types.api import (
CompletionTokensDetails,
FinishReason,
GenerationStats,
PromptTokensDetails,
TopLogprobItem,
Usage,
)
from exo.shared.types.common import ModelId
from exo.shared.types.memory import Memory
from exo.shared.types.mlx import KVCacheType, Model
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.worker.runner_response import (
GenerationResponse,
)
from exo.worker.engines.mlx.auto_parallel import (
PipelineFirstLayer,
PipelineLastLayer,
clear_prefill_sends,
flush_prefill_sends,
set_pipeline_prefill,
set_pipeline_queue_sends,
)
from exo.worker.engines.mlx.cache import (
CacheSnapshot,
KVPrefixCache,
encode_prompt,
has_non_kv_caches,
make_kv_cache,
snapshot_ssm_states,
)
from exo.worker.engines.mlx.constants import (
DEFAULT_TOP_LOGPROBS,
KV_BITS,
KV_GROUP_SIZE,
MAX_TOKENS,
)
from exo.worker.engines.mlx.utils_mlx import (
apply_chat_template,
fix_unmatched_think_end_tokens,
mx_barrier,
)
from exo.worker.runner.bootstrap import logger
# Dedicated MLX stream for prefill work (see mx.stream(generation_stream) below).
generation_stream = mx.new_stream(mx.default_device())

# Minimum fraction of the prompt that must hit the prefix cache before we
# overwrite the matched entry in place instead of adding a new one.
_MIN_PREFIX_HIT_RATIO_TO_UPDATE = 0.5
class PrefillCancelled(BaseException):
    """Raised when prefill is cancelled via the progress callback.

    NOTE(review): derives from BaseException, presumably so that generic
    ``except Exception`` handlers do not swallow a cancellation — confirm
    before changing the base class.
    """
def _has_pipeline_communication_layer(model: Model):
    """Return True when the model contains pipeline-parallel boundary layers."""
    return any(
        isinstance(layer, (PipelineFirstLayer, PipelineLastLayer))
        for layer in model.layers
    )
def pipeline_parallel_prefill(
    model: Model,
    prompt: mx.array,
    prompt_cache: KVCacheType,
    prefill_step_size: int,
    kv_group_size: int | None,
    kv_bits: int | None,
    prompt_progress_callback: Callable[[int, int], None],
    distributed_prompt_progress_callback: Callable[[], None] | None,
    group: mx.distributed.Group,
) -> None:
    """Prefill the KV cache for pipeline parallel with overlapping stages.

    Each rank processes the full prompt through its real cache, offset by leading
    and trailing dummy iterations.

    Total iterations per rank = N_real_chunks + world_size - 1:
      - rank r leading dummies (skip_pipeline_io, throwaway cache)
      - N_real_chunks real (pipeline IO active, real cache)
      - (world_size-1-r) trailing dummies (skip_pipeline_io, throwaway cache)

    e.g.
    Timeline (2 ranks, 3 chunks of 10240 tokens @ step=4096):
      iter 0: R0 real[0:4096]      R1 dummy
      iter 1: R0 real[4096:8192]   R1 real[0:4096]
      iter 2: R0 real[8192:10240]  R1 real[4096:8192]
      iter 3: R0 dummy             R1 real[8192:10240]

    This function is designed to match mlx_lm's stream_generate exactly in terms of
    side effects (given the same prefill step size)

    NOTE: every rank must call this together and with the same prompt length —
    the iteration counts below are what keep the ranks in lockstep.
    """
    # Smaller chunks when more ranks pipeline, so stages overlap sooner.
    prefill_step_size = prefill_step_size // min(4, group.size())
    quantize_cache_fn: Callable[..., None] = functools.partial(
        maybe_quantize_kv_cache,
        quantized_kv_start=0,
        kv_group_size=kv_group_size,
        kv_bits=kv_bits,
    )
    _prompt_cache: KVCacheType = prompt_cache
    rank = group.rank()
    world_size = group.size()
    # Build list of real prompt chunk sizes
    # (all but the final token; that token is handled in the post-loop below).
    total = len(prompt)
    real_chunk_sizes: list[int] = []
    remaining = total - 1
    while remaining:
        n = min(prefill_step_size, remaining)
        real_chunk_sizes.append(n)
        remaining -= n
    n_real = len(real_chunk_sizes)
    # Each rank does: [rank leading dummies] [N real chunks] [world_size-1-rank trailing dummies]
    n_leading = rank
    n_trailing = world_size - 1 - rank
    n_total = n_leading + n_real + n_trailing
    t_start = time.perf_counter()
    processed = 0
    logger.info(
        f"[R{rank}] Pipeline prefill: {n_real} real + {n_leading} leading + {n_trailing} trailing = {n_total} iterations"
    )
    clear_prefill_sends()
    # Initial callback matching generate_step
    prompt_progress_callback(0, total)
    try:
        with mx.stream(generation_stream):
            # Leading dummies: keep this rank in step with earlier stages.
            for _ in range(n_leading):
                if distributed_prompt_progress_callback is not None:
                    distributed_prompt_progress_callback()
            for i in range(n_real):
                chunk_size = real_chunk_sizes[i]
                model(
                    prompt[processed : processed + chunk_size][None],
                    cache=_prompt_cache,
                )
                quantize_cache_fn(_prompt_cache)
                processed += chunk_size
                if distributed_prompt_progress_callback is not None:
                    distributed_prompt_progress_callback()
                flush_prefill_sends()
                prompt_progress_callback(processed, total)
            # Trailing dummies: let later stages drain their real chunks.
            for _ in range(n_trailing):
                if distributed_prompt_progress_callback is not None:
                    distributed_prompt_progress_callback()
    finally:
        clear_prefill_sends()
    # Post-loop: process remaining 1 token + add +1 entry to match stream_generate.
    for _ in range(2):
        with mx.stream(generation_stream):
            model(prompt[-1:][None], cache=_prompt_cache)
            quantize_cache_fn(_prompt_cache)
        flush_prefill_sends()
    assert _prompt_cache is not None
    mx.eval([c.state for c in _prompt_cache])  # type: ignore
    # Final callback matching generate_step
    prompt_progress_callback(total, total)
    logger.info(
        f"[R{rank}] Prefill: {n_real} real + {n_leading}+{n_trailing} dummy iterations, "
        f"Processed {processed} tokens in {(time.perf_counter() - t_start) * 1000:.1f}ms"
    )
def prefill(
    model: Model,
    tokenizer: TokenizerWrapper,
    sampler: Callable[[mx.array], mx.array],
    prompt_tokens: mx.array,
    cache: KVCacheType,
    group: mx.distributed.Group | None,
    on_prefill_progress: Callable[[int, int], None] | None,
    distributed_prompt_progress_callback: Callable[[], None] | None,
) -> tuple[float, int, list[CacheSnapshot]]:
    """Prefill the KV cache with prompt tokens.

    This runs the model over the prompt tokens to populate the cache,
    then trims off the extra generated token.

    For models with stateful (SSM / rotating) caches, a snapshot of those
    states is captured at every progress step so the prefix cache can later
    rewind to an earlier position.

    Raises:
        PrefillCancelled: propagated from the progress callbacks; pipeline
            state flags are reset before re-raising.

    Returns:
        (tokens_per_sec, num_tokens, snapshots)
    """
    num_tokens = len(prompt_tokens)
    if num_tokens == 0:
        return 0.0, 0, []
    logger.debug(f"Prefilling {num_tokens} tokens...")
    start_time = time.perf_counter()
    has_ssm = has_non_kv_caches(cache)
    snapshots: list[CacheSnapshot] = []

    # TODO(evan): kill the callbacks/runner refactor
    def progress_callback(processed: int, total: int) -> None:
        # Logs throughput, snapshots stateful caches, and forwards progress.
        elapsed = time.perf_counter() - start_time
        tok_per_sec = processed / elapsed if elapsed > 0 else 0
        logger.debug(
            f"Prefill progress: {processed}/{total} tokens ({tok_per_sec:.1f} tok/s)"
        )
        if has_ssm:
            snapshots.append(snapshot_ssm_states(cache))
        if on_prefill_progress is not None:
            on_prefill_progress(processed, total)

    def combined_progress_callback(processed: int, total: int) -> None:
        if distributed_prompt_progress_callback is not None:
            distributed_prompt_progress_callback()
        progress_callback(processed, total)

    set_pipeline_prefill(model, is_prefill=True)
    mx_barrier(group)
    logger.info("Starting prefill")
    is_pipeline = _has_pipeline_communication_layer(model)
    prefill_step_size = 4096
    try:
        if is_pipeline and num_tokens >= prefill_step_size:
            # Overlapped multi-rank prefill; only worthwhile for long prompts.
            set_pipeline_queue_sends(model, queue_sends=True)
            assert group is not None, "Pipeline prefill requires a distributed group"
            pipeline_parallel_prefill(
                model=model,
                prompt=prompt_tokens,
                prompt_cache=cache,
                prefill_step_size=prefill_step_size,
                kv_group_size=KV_GROUP_SIZE,
                kv_bits=KV_BITS,
                prompt_progress_callback=progress_callback,
                distributed_prompt_progress_callback=distributed_prompt_progress_callback,
                group=group,
            )
        else:
            # Use max_tokens=1 because max_tokens=0 does not work.
            # We just throw away the generated token - we only care about filling the cache
            for _ in stream_generate(
                model=model,
                tokenizer=tokenizer,
                prompt=prompt_tokens,
                max_tokens=1,
                sampler=sampler,
                prompt_cache=cache,
                prefill_step_size=prefill_step_size,
                kv_group_size=KV_GROUP_SIZE,
                kv_bits=KV_BITS,
                prompt_progress_callback=combined_progress_callback,
            ):
                break  # Stop after first iteration - cache is now filled
    except PrefillCancelled:
        set_pipeline_queue_sends(model, queue_sends=False)
        set_pipeline_prefill(model, is_prefill=False)
        raise
    set_pipeline_queue_sends(model, queue_sends=False)
    set_pipeline_prefill(model, is_prefill=False)
    # stream_generate added 1 extra generated token to the cache, so we should trim it.
    # Because of needing to roll back arrays cache, we will generate on 2 tokens so trim 1 more.
    # NOTE(review): snapshots[-2] assumes at least two progress snapshots were
    # taken when has_ssm — verify for very short SSM prompts.
    pre_gen = deepcopy(snapshots[-2]) if has_ssm else None
    for i, c in enumerate(cache):
        if has_ssm and isinstance(c, (ArraysCache, RotatingKVCache)):
            assert pre_gen is not None
            # Restore the stateful entry to its pre-generation snapshot.
            if pre_gen.states[i] is not None:
                cache[i] = deepcopy(pre_gen.states[i])  # type: ignore
        else:
            assert not isinstance(c, (ArraysCache, RotatingKVCache))
            c.trim(2)
    elapsed = time.perf_counter() - start_time
    tokens_per_sec = num_tokens / elapsed if elapsed > 0 else 0.0
    logger.debug(
        f"Prefill complete: {num_tokens} tokens in {elapsed:.2f}s "
        f"({tokens_per_sec:.1f} tok/s)"
    )
    # Exclude the last snapshot
    return tokens_per_sec, num_tokens, snapshots[:-1] if snapshots else []
def warmup_inference(
    model: Model,
    tokenizer: TokenizerWrapper,
    group: mx.distributed.Group | None,
) -> int:
    """Run a short throwaway generation to warm up the inference engine.

    Uses a fixed prompt and greedy sampling; the output is discarded. When a
    distributed group is given, barriers keep all ranks in lockstep around
    the generation. Returns the number of warmup tokens generated.
    """
    content = "Prompt to warm up the inference engine. Repeat this."
    warmup_prompt = apply_chat_template(
        tokenizer=tokenizer,
        task_params=TextGenerationTaskParams(
            model=ModelId(""),
            input=[InputMessage(role="user", content=content)],
        ),
    )
    tokens_generated = 0
    # Fresh cache; warmup must not pollute any prefix cache.
    cache = make_kv_cache(
        model=model,
    )
    # Use a default sampler for warmup
    sampler = make_sampler(temp=0.0)
    mx_barrier(group)
    logger.info("Generating warmup tokens")
    for _r in stream_generate(
        model=model,
        tokenizer=tokenizer,
        prompt=warmup_prompt,
        max_tokens=50,
        sampler=sampler,
        prompt_cache=cache,
        prefill_step_size=2048,
        kv_group_size=KV_GROUP_SIZE,
        kv_bits=KV_BITS,
    ):
        logger.info("Generated warmup token: " + str(_r.text))
        tokens_generated += 1
    logger.info("Generated ALL warmup tokens")
    mx_barrier(group)
    return tokens_generated
def ban_token_ids(token_ids: list[int]) -> Callable[[mx.array, mx.array], mx.array]:
    """Build a logits processor that masks out the given token ids.

    The returned callable mutates `logits` in place, writing a large
    negative value over each banned token, and returns it.
    """
    banned = [int(t) for t in token_ids]

    def proc(_history: mx.array, logits: mx.array) -> mx.array:
        for token_id in banned:
            logits[..., token_id] = -1e9
        return logits

    return proc
def eos_ids_from_tokenizer(tokenizer: TokenizerWrapper) -> list[int]:
    """Return the tokenizer's EOS token ids, or [] when none are declared."""
    eos = getattr(tokenizer, "eos_token_ids", None)
    return [] if eos is None else eos
def extract_top_logprobs(
    logprobs: mx.array,
    tokenizer: TokenizerWrapper,
    top_logprobs: int,
    selected_token: int,
) -> tuple[float, list[TopLogprobItem]]:
    """Extract the selected token's logprob and top alternative tokens.

    Args:
        logprobs: Full vocabulary logprobs array from MLX
        tokenizer: Tokenizer for decoding token IDs to strings
        top_logprobs: Number of top alternatives to return
        selected_token: The token ID that was actually sampled

    Returns:
        Tuple of (selected_token_logprob, list of TopLogprobItem for top alternatives)
    """
    # Get the logprob of the selected token
    selected_logprob = float(logprobs[selected_token].item())
    # Get top indices (most probable tokens)
    # mx.argpartition gives indices that would partition the array
    # We negate logprobs since argpartition finds smallest, and we want largest
    top_logprobs = min(top_logprobs, logprobs.shape[0])  # Don't exceed vocab size
    # NOTE(review): when top_logprobs == vocab size the kth argument equals the
    # array length — confirm mx.argpartition accepts that boundary.
    top_indices = mx.argpartition(-logprobs, top_logprobs)[:top_logprobs]
    # Get the actual logprob values for these indices
    top_values = logprobs[top_indices]
    # Sort by logprob (descending) for consistent ordering
    sort_order = mx.argsort(-top_values)
    top_indices = top_indices[sort_order]
    top_values = top_values[sort_order]
    # Convert to list of TopLogprobItem
    top_logprob_items: list[TopLogprobItem] = []
    for i in range(top_logprobs):
        token_id = int(top_indices[i].item())
        token_logprob = float(top_values[i].item())
        # Skip padding/NaN entries rather than reporting bogus alternatives.
        if math.isnan(token_logprob):
            continue
        # Decode token ID to string
        token_str = tokenizer.decode([token_id])
        # Get byte representation
        token_bytes = list(token_str.encode("utf-8"))
        top_logprob_items.append(
            TopLogprobItem(
                token=token_str,
                logprob=token_logprob,
                bytes=token_bytes,
            )
        )
    return selected_logprob, top_logprob_items
def mlx_generate(
    model: Model,
    tokenizer: TokenizerWrapper,
    task: TextGenerationTaskParams,
    prompt: str,
    kv_prefix_cache: KVPrefixCache | None,
    group: mx.distributed.Group | None,
    on_prefill_progress: Callable[[int, int], None] | None = None,
    distributed_prompt_progress_callback: Callable[[], None] | None = None,
) -> Generator[GenerationResponse]:
    """Generate text for `task`, yielding one GenerationResponse per token.

    Pipeline: encode the prompt, look it up in the prefix cache (unless
    benchmarking), prefill the remaining tokens, then stream-decode with
    stop-sequence handling, optional logprobs, and usage/stats reporting on
    the final token. On completion the (prompt + generated) tokens and cache
    are written back to the prefix cache.

    Args:
        model: The (possibly sharded) MLX model.
        tokenizer: Tokenizer wrapper used for encoding/decoding.
        task: Generation parameters (sampling, stop, max tokens, logprobs...).
        prompt: Fully chat-templated prompt string.
        kv_prefix_cache: Optional prompt-prefix cache; ignored for benchmarks.
        group: Optional distributed group; barriers keep ranks in lockstep.
        on_prefill_progress: Local prefill progress callback (processed, total).
        distributed_prompt_progress_callback: Per-step distributed progress hook.

    Yields:
        GenerationResponse per generated token; the final one carries
        finish_reason, stats and usage.
    """
    # Ensure that generation stats only contains peak memory for this generation
    mx.reset_peak_memory()

    # TODO: Randomise task seed and set in taskparams, instead of hard coding as 42.
    seed = task.seed or 42
    mx.random.seed(seed)

    # Encode prompt once at the top and fix unmatched think tags
    all_prompt_tokens = encode_prompt(tokenizer, prompt)
    all_prompt_tokens = fix_unmatched_think_end_tokens(all_prompt_tokens, tokenizer)

    # Do not use the prefix cache if we are trying to do benchmarks.
    is_bench = task.bench
    if is_bench:
        kv_prefix_cache = None

    # Use prefix cache if available, otherwise create fresh cache
    prefix_hit_length = 0
    matched_index: int | None = None
    if kv_prefix_cache is None:
        caches = make_kv_cache(model=model)
        prompt_tokens = all_prompt_tokens
    else:
        caches, prompt_tokens, matched_index = kv_prefix_cache.get_kv_cache(
            model, all_prompt_tokens
        )
        prefix_hit_length = len(all_prompt_tokens) - len(prompt_tokens)
        if prefix_hit_length > 0:
            logger.info(
                f"KV cache hit: {prefix_hit_length}/{len(all_prompt_tokens)} tokens cached ({100 * prefix_hit_length / len(all_prompt_tokens):.1f}%)"
            )

    logits_processors: list[Callable[[mx.array, mx.array], mx.array]] = []
    if is_bench:
        # Only sample length eos tokens
        eos_ids = eos_ids_from_tokenizer(tokenizer)
        logits_processors = [ban_token_ids(eos_ids)]

    sampler = make_sampler(
        temp=task.temperature if task.temperature is not None else 0.7,
        top_p=task.top_p if task.top_p is not None else 1.0,
        top_k=task.top_k if task.top_k is not None else 0,
    )

    # Normalize stop sequences to a list
    stop_sequences: list[str] = (
        ([task.stop] if isinstance(task.stop, str) else task.stop)
        if task.stop is not None
        else []
    )
    max_stop_len = max((len(s) for s in stop_sequences), default=0)

    # Prefill cache with all tokens except the last one
    prefill_tps, prefill_tokens, ssm_snapshots_list = prefill(
        model,
        tokenizer,
        sampler,
        prompt_tokens[:-1],
        caches,
        group,
        on_prefill_progress,
        distributed_prompt_progress_callback,
    )
    cache_snapshots: list[CacheSnapshot] | None = ssm_snapshots_list or None

    # stream_generate starts from the last token
    # (two tokens are replayed here because prefill trimmed two from the cache).
    last_token = prompt_tokens[-2:]
    max_tokens = task.max_output_tokens or MAX_TOKENS

    accumulated_text = ""
    generated_text_parts: list[str] = []
    generation_start_time = time.perf_counter()
    usage: Usage | None = None
    # Track think-tag spans so reasoning tokens can be reported separately.
    in_thinking = False
    reasoning_tokens = 0
    think_start = tokenizer.think_start
    think_end = tokenizer.think_end
    logger.info("Starting decode")
    mx_barrier(group)
    for completion_tokens, out in enumerate(
        stream_generate(
            model=model,
            tokenizer=tokenizer,
            prompt=last_token,
            max_tokens=max_tokens,
            sampler=sampler,
            logits_processors=logits_processors,
            prompt_cache=caches,
            prefill_step_size=1,
            kv_group_size=KV_GROUP_SIZE,
            kv_bits=KV_BITS,
        ),
        start=1,
    ):
        generated_text_parts.append(out.text)
        accumulated_text += out.text

        if think_start is not None and out.text == think_start:
            in_thinking = True
        elif think_end is not None and out.text == think_end:
            in_thinking = False
        if in_thinking:
            reasoning_tokens += 1

        # Check for stop sequences
        text = out.text
        finish_reason: FinishReason | None = cast(
            FinishReason | None, out.finish_reason
        )
        stop_matched = False
        if stop_sequences:
            for stop_seq in stop_sequences:
                if stop_seq in accumulated_text:
                    # Trim text to just before the stop sequence
                    stop_index = accumulated_text.find(stop_seq)
                    text_before_stop = accumulated_text[:stop_index]
                    # Emit only the part of this chunk that precedes the stop.
                    chunk_start = len(accumulated_text) - len(out.text)
                    text = text_before_stop[chunk_start:]
                    finish_reason = "stop"
                    stop_matched = True
                    break

        is_done = finish_reason is not None

        stats: GenerationStats | None = None
        if is_done:
            stats = GenerationStats(
                prompt_tps=float(prefill_tps or out.prompt_tps),
                generation_tps=float(out.generation_tps),
                prompt_tokens=int(prefill_tokens + out.prompt_tokens),
                generation_tokens=int(out.generation_tokens),
                peak_memory_usage=Memory.from_gb(out.peak_memory),
            )
            if not stop_matched and out.finish_reason not in get_args(FinishReason):
                logger.warning(
                    f"Model generated unexpected finish_reason: {out.finish_reason}"
                )
            total_prompt_tokens = len(all_prompt_tokens)
            usage = Usage(
                prompt_tokens=total_prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_prompt_tokens + completion_tokens,
                prompt_tokens_details=PromptTokensDetails(
                    cached_tokens=prefix_hit_length
                ),
                completion_tokens_details=CompletionTokensDetails(
                    reasoning_tokens=reasoning_tokens
                ),
            )

        # Extract logprobs from the full vocabulary logprobs array
        logprob: float | None = None
        top_logprobs: list[TopLogprobItem] | None = None
        if task.logprobs:
            logprob, top_logprobs = extract_top_logprobs(
                logprobs=out.logprobs,
                tokenizer=tokenizer,
                top_logprobs=task.top_logprobs or DEFAULT_TOP_LOGPROBS,
                selected_token=out.token,
            )

        if is_done:
            # Log generation stats
            generation_elapsed = time.perf_counter() - generation_start_time
            generated_tokens = len(generated_text_parts)
            generation_tps = (
                generated_tokens / generation_elapsed if generation_elapsed > 0 else 0.0
            )
            logger.debug(
                f"Generation complete: prefill {prompt_tokens} tokens @ "
                f"{prefill_tps:.1f} tok/s, generated {generated_tokens} tokens @ "
                f"{generation_tps:.1f} tok/s"
            )

            if kv_prefix_cache is not None:
                # Store prompt + generation so a follow-up turn can reuse it.
                generated_tokens_array = mx.array(
                    tokenizer.encode(
                        "".join(generated_text_parts), add_special_tokens=False
                    )
                )
                full_prompt_tokens = mx.concatenate(
                    [all_prompt_tokens, generated_tokens_array]
                )
                hit_ratio = (
                    prefix_hit_length / len(all_prompt_tokens)
                    if len(all_prompt_tokens) > 0
                    else 0.0
                )
                # Overwrite the matched entry only when most of it was reused;
                # otherwise keep it and add a new entry.
                if (
                    matched_index is not None
                    and hit_ratio >= _MIN_PREFIX_HIT_RATIO_TO_UPDATE
                ):
                    kv_prefix_cache.update_kv_cache(
                        matched_index,
                        full_prompt_tokens,
                        caches,
                        cache_snapshots,
                        restore_pos=prefix_hit_length,
                    )
                else:
                    kv_prefix_cache.add_kv_cache(
                        full_prompt_tokens, caches, cache_snapshots
                    )

        yield GenerationResponse(
            text=text,
            token=out.token,
            logprob=logprob,
            top_logprobs=top_logprobs,
            finish_reason=finish_reason,
            stats=stats,
            usage=usage,
        )
        if is_done:
            mx_barrier(group)
            break

        # Limit accumulated_text to what's needed for stop sequence detection
        if max_stop_len > 0 and len(accumulated_text) > max_stop_len:
            accumulated_text = accumulated_text[-max_stop_len:]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/generator/generate.py",
"license": "Apache License 2.0",
"lines": 565,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/engines/mlx/utils_mlx.py | import json
import os
import re
import sys
import tempfile
import time
from pathlib import Path
from typing import Any, cast
# Monkey-patch for transformers 5.x compatibility
# Kimi's tokenization_kimi.py imports bytes_to_unicode from the old location
# which was moved in transformers 5.0.0rc2
try:
import transformers.models.gpt2.tokenization_gpt2 as gpt2_tokenization
from transformers.convert_slow_tokenizer import bytes_to_unicode
if not hasattr(gpt2_tokenization, "bytes_to_unicode"):
gpt2_tokenization.bytes_to_unicode = bytes_to_unicode # type: ignore[attr-defined]
except ImportError:
pass # transformers < 5.0 or bytes_to_unicode not available
from mlx_lm.models.cache import KVCache
from mlx_lm.models.deepseek_v3 import DeepseekV3Model
from mlx_lm.tokenizer_utils import TokenizerWrapper
from exo.shared.models.model_cards import ModelId
from exo.worker.engines.mlx.constants import TRUST_REMOTE_CODE
try:
from mlx_lm.tokenizer_utils import load_tokenizer
except ImportError:
from mlx_lm.tokenizer_utils import load as load_tokenizer
import contextlib
import mlx.core as mx
import mlx.nn as nn
from mlx_lm.utils import load_model
from pydantic import RootModel
from exo.download.download_utils import build_model_path
from exo.shared.types.common import Host
from exo.shared.types.memory import Memory
from exo.shared.types.mlx import Model
from exo.shared.types.text_generation import TextGenerationTaskParams
from exo.shared.types.worker.instances import (
BoundInstance,
MlxJacclInstance,
MlxRingInstance,
)
from exo.shared.types.worker.shards import (
CfgShardMetadata,
PipelineShardMetadata,
ShardMetadata,
TensorShardMetadata,
)
from exo.worker.engines.mlx.auto_parallel import (
LayerLoadedCallback,
TimeoutCallback,
eval_with_timeout,
get_inner_model,
get_layers,
pipeline_auto_parallel,
tensor_auto_parallel,
)
from exo.worker.runner.bootstrap import logger
Group = mx.distributed.Group
def get_weights_size(model_shard_meta: ShardMetadata) -> Memory:
    """Estimate the on-device weight footprint of one shard.

    Scales the model's total storage size by the fraction of layers the
    shard owns. Tensor-parallel shards additionally split every layer
    across the world, so their share is divided by the world size;
    pipeline shards hold their layers whole.
    """
    layer_fraction = (
        model_shard_meta.end_layer - model_shard_meta.start_layer
    ) / model_shard_meta.n_layers
    if isinstance(model_shard_meta, PipelineShardMetadata):
        split_factor = 1
    else:
        split_factor = model_shard_meta.world_size
    total_kb = model_shard_meta.model_card.storage_size.in_kb
    return Memory.from_float_kb(layer_fraction * total_kb / split_factor)
class ModelLoadingTimeoutError(Exception):
    """Raised when evaluating/loading model weights exceeds the allotted timeout."""

    pass
class HostList(RootModel[list[str]]):
    # Pydantic root model wrapping a plain list of host strings; used to
    # serialize the MLX ring hostfile as a JSON array.
    @classmethod
    def from_hosts(cls, hosts: list[Host]) -> "HostList":
        """Build a HostList by stringifying each Host."""
        return cls(root=[str(host) for host in hosts])
def mlx_distributed_init(
    bound_instance: BoundInstance,
) -> Group:
    """
    Initialize MLX distributed for this node's rank.

    Writes a per-rank coordination file (ring hostfile or JACCL device
    matrix) into a temp directory, points MLX at it via environment
    variables, and calls mx.distributed.init with the matching backend.

    Returns the initialized communication Group.
    """
    rank = bound_instance.bound_shard.device_rank
    logger.info(f"Starting initialization for rank {rank}")
    with tempfile.TemporaryDirectory() as tmpdir:
        coordination_file = str(
            Path(tmpdir) / f"hosts_{bound_instance.instance.instance_id}_{rank}.json"
        )
        # TODO: singleton instances
        match bound_instance.instance:
            case MlxRingInstance(hosts_by_node=hosts_by_node, ephemeral_port=_):
                # Ring backend: MLX reads a JSON hostfile listing every peer.
                hosts_for_node = hosts_by_node[bound_instance.bound_node_id]
                hosts_json = HostList.from_hosts(hosts_for_node).model_dump_json()
                with open(coordination_file, "w") as f:
                    _ = f.write(hosts_json)
                logger.info(
                    f"rank {rank} hostfile: {coordination_file} hosts: {hosts_json}"
                )
                os.environ["MLX_HOSTFILE"] = coordination_file
                os.environ["MLX_RANK"] = str(rank)
                os.environ["MLX_RING_VERBOSE"] = "1"
                group = mx.distributed.init(backend="ring", strict=True)
            case MlxJacclInstance(
                jaccl_devices=jaccl_devices, jaccl_coordinators=jaccl_coordinators
            ):
                # JACCL backend: the diagonal of the device matrix must be
                # empty (a node has no RDMA link to itself).
                assert all(
                    jaccl_devices[i][i] is None for i in range(len(jaccl_devices))
                )
                # Use RDMA connectivity matrix
                jaccl_devices_json = json.dumps(jaccl_devices)
                with open(coordination_file, "w") as f:
                    _ = f.write(jaccl_devices_json)
                jaccl_coordinator = jaccl_coordinators[bound_instance.bound_node_id]
                logger.info(
                    f"rank {rank} MLX_IBV_DEVICES: {coordination_file} with devices: {jaccl_devices_json}"
                )
                logger.info(f"rank {rank} MLX_JACCL_COORDINATOR: {jaccl_coordinator}")
                os.environ["MLX_IBV_DEVICES"] = coordination_file
                os.environ["MLX_RANK"] = str(rank)
                os.environ["MLX_JACCL_COORDINATOR"] = jaccl_coordinator
                group = mx.distributed.init(backend="jaccl", strict=True)
    logger.info(f"Rank {rank} mlx distributed initialization complete")
    return group
def initialize_mlx(
    bound_instance: BoundInstance,
) -> Group:
    """Seed MLX's RNG and bring up the distributed group for this instance.

    Only valid for multi-node instances; a single-node instance never
    needs a communication group.
    """
    # Fixed seed for reproducibility across ranks.
    # should we unseed it?
    # TODO: pass in seed from params
    mx.random.seed(42)
    node_count = len(bound_instance.instance.shard_assignments.node_to_runner)
    assert node_count > 1, "Tried to initialize mlx for a single node instance"
    return mlx_distributed_init(bound_instance)
def load_mlx_items(
    bound_instance: BoundInstance,
    group: Group | None,
    on_timeout: TimeoutCallback | None,
    on_layer_loaded: LayerLoadedCallback | None,
) -> tuple[Model, TokenizerWrapper]:
    """Load the model and tokenizer for this node's shard.

    With no group (single device) the full model is loaded locally,
    evaluating layer by layer so `on_layer_loaded` can report progress.
    With a group, loading is delegated to `shard_and_load`. In both
    cases the Metal wired limit is raised to fit the shard's weights.
    """
    if group is None:
        logger.info(f"Single device used for {bound_instance.instance}")
        model_path = build_model_path(bound_instance.bound_shard.model_card.model_id)
        start_time = time.perf_counter()
        # lazy=True defers weight materialization until the evals below.
        model, _ = load_model(model_path, lazy=True, strict=False)
        # Eval layers one by one for progress reporting
        try:
            inner = get_inner_model(model)
            layers = get_layers(inner)
            total = len(layers)
            for i, layer in enumerate(layers):
                mx.eval(layer)  # type: ignore
                if on_layer_loaded is not None:
                    on_layer_loaded(i, total)
        except ValueError as e:
            # Fall back to a single whole-model eval with no progress.
            logger.opt(exception=e).debug(
                "Model architecture doesn't support layer-by-layer progress tracking",
            )
            mx.eval(model)
        end_time = time.perf_counter()
        logger.info(f"Time taken to load model: {(end_time - start_time):.2f}s")
        tokenizer = get_tokenizer(model_path, bound_instance.bound_shard)
    else:
        logger.info("Starting distributed init")
        start_time = time.perf_counter()
        model, tokenizer = shard_and_load(
            bound_instance.bound_shard,
            group=group,
            on_timeout=on_timeout,
            on_layer_loaded=on_layer_loaded,
        )
        end_time = time.perf_counter()
        logger.info(
            f"Time taken to shard and load model: {(end_time - start_time):.2f}s"
        )
    set_wired_limit_for_model(get_weights_size(bound_instance.bound_shard))
    mx.clear_cache()
    return cast(Model, model), tokenizer
def shard_and_load(
    shard_metadata: ShardMetadata,
    group: Group,
    on_timeout: TimeoutCallback | None,
    on_layer_loaded: LayerLoadedCallback | None,
) -> tuple[nn.Module, TokenizerWrapper]:
    """Lazily load the model, shard it across `group`, and evaluate weights.

    Dispatches on the shard type: tensor parallelism or pipeline
    parallelism. Weight evaluation runs under a size-scaled timeout;
    all ranks synchronize on a barrier before returning.

    Raises:
        ValueError: for CfgShardMetadata (image-generation only).
    """
    model_path = build_model_path(shard_metadata.model_card.model_id)
    model, _ = load_model(model_path, lazy=True, strict=False)
    logger.debug(model)
    if hasattr(model, "model") and isinstance(model.model, DeepseekV3Model):  # type: ignore
        pass
    # TODO: See if we should quantize the model.
    # def is_attention_layer(path: str) -> bool:
    #     path = path.lower()
    #     return "self_attn" in path and "layernorm" not in path
    # def quant_predicate(path: str, module: nn.Module):
    #     if not isinstance(module, nn.Linear):
    #         return False
    #     return is_attention_layer(path)
    # model, config = quantize_model(
    #     model, config, group_size=KV_GROUP_SIZE, bits=ATTENTION_KV_BITS, quant_predicate=quant_predicate, mode=QUANTIZE_MODEL_MODE
    # )
    assert isinstance(model, nn.Module)
    tokenizer = get_tokenizer(model_path, shard_metadata)
    logger.info(f"Group size: {group.size()}, group rank: {group.rank()}")
    # Estimate timeout based on model size (5x default for large queued workloads)
    base_timeout = float(os.environ.get("EXO_MODEL_LOAD_TIMEOUT", "300"))
    model_size = get_weights_size(shard_metadata)
    # One extra second of budget per GB of shard weights.
    timeout_seconds = base_timeout + model_size.in_gb
    logger.info(
        f"Evaluating model parameters with timeout of {timeout_seconds:.0f}s "
        f"(model size: {model_size.in_gb:.1f}GB)"
    )
    match shard_metadata:
        case TensorShardMetadata():
            logger.info(f"loading model from {model_path} with tensor parallelism")
            model = tensor_auto_parallel(
                model, group, timeout_seconds, on_timeout, on_layer_loaded
            )
        case PipelineShardMetadata():
            logger.info(f"loading model from {model_path} with pipeline parallelism")
            model = pipeline_auto_parallel(
                model, group, shard_metadata, on_layer_loaded=on_layer_loaded
            )
            eval_with_timeout(model.parameters(), timeout_seconds, on_timeout)
        case CfgShardMetadata():
            raise ValueError(
                "CfgShardMetadata is not supported for text model loading - "
                "this metadata type is only for image generation models"
            )
    # TODO: Do we need this?
    mx.eval(model)
    logger.debug("SHARDED")
    logger.debug(model)
    # Synchronize processes before generation to avoid timeout
    mx_barrier(group)
    return model, tokenizer
def get_tokenizer(model_path: Path, shard_metadata: ShardMetadata) -> TokenizerWrapper:
    """Load tokenizer for a model shard. Delegates to load_tokenizer_for_model_id."""
    card = shard_metadata.model_card
    return load_tokenizer_for_model_id(
        card.model_id,
        model_path,
        trust_remote_code=card.trust_remote_code,
    )
def get_eos_token_ids_for_model(model_id: ModelId) -> list[int] | None:
    """
    Look up hard-coded EOS token IDs for model families that need them.

    Some models require explicit EOS token configuration that isn't in
    their tokenizer config. Rules are checked in order so the more
    specific GLM-5/GLM-4.7 entry wins over the generic GLM one.

    Args:
        model_id: The HuggingFace model ID.

    Returns:
        List of EOS token IDs, or None if the model uses its standard
        tokenizer configuration.
    """
    overrides: list[tuple[tuple[str, ...], list[int]]] = [
        (("kimi-k2",), [163586]),
        # GLM-5 / GLM-4.7:
        # 154820: <|endoftext|>, 154827: <|user|>, 154829: <|observation|>
        (("glm-5", "glm-4.7"), [154820, 154827, 154829]),
        # GLM-4.5 and older
        (("glm",), [151336, 151329, 151338]),
        (("gpt-oss",), [200002, 200012]),
    ]
    lowered = model_id.lower()
    for needles, eos_ids in overrides:
        if any(needle in lowered for needle in needles):
            return eos_ids
    return None
def load_tokenizer_for_model_id(
    model_id: ModelId, model_path: Path, *, trust_remote_code: bool = TRUST_REMOTE_CODE
) -> TokenizerWrapper:
    """
    Load tokenizer for a model given its ID and local path.

    This is the core tokenizer loading logic, handling special cases for different
    model families (Kimi, GLM, etc.) and transformers 5.x compatibility.

    Args:
        model_id: The HuggingFace model ID (e.g., "moonshotai/Kimi-K2-Instruct")
        model_path: Local path where the model/tokenizer files are stored
        trust_remote_code: forwarded to the tokenizer config for non-Kimi models

    Returns:
        TokenizerWrapper instance configured for the model
    """
    model_id_lower = model_id.lower()
    eos_token_ids = get_eos_token_ids_for_model(model_id)
    # Kimi uses a custom TikTokenTokenizer that transformers 5.x can't load via AutoTokenizer
    if "kimi-k2" in model_id_lower:
        import importlib.util
        import types

        # Make the model directory importable as a fallback (see the
        # final `from tokenization_kimi import ...` branch below).
        sys.path.insert(0, str(model_path))
        # Load tool_declaration_ts first (tokenization_kimi imports it with relative import)
        tool_decl_path = model_path / "tool_declaration_ts.py"
        if tool_decl_path.exists():
            spec = importlib.util.spec_from_file_location(
                "tool_declaration_ts", tool_decl_path
            )
            if spec and spec.loader:
                tool_decl_module = importlib.util.module_from_spec(spec)
                sys.modules["tool_declaration_ts"] = tool_decl_module
                spec.loader.exec_module(tool_decl_module)
        # Load tokenization_kimi with patched source (convert relative to absolute import)
        tok_path = model_path / "tokenization_kimi.py"
        source = tok_path.read_text()
        source = source.replace("from .tool_declaration_ts", "from tool_declaration_ts")
        spec = importlib.util.spec_from_file_location("tokenization_kimi", tok_path)
        if spec:
            tok_module = types.ModuleType("tokenization_kimi")
            tok_module.__file__ = str(tok_path)
            sys.modules["tokenization_kimi"] = tok_module
            exec(compile(source, tok_path, "exec"), tok_module.__dict__)  # noqa: S102
            TikTokenTokenizer = tok_module.TikTokenTokenizer  # type: ignore[attr-defined] # noqa: N806
        else:
            from tokenization_kimi import TikTokenTokenizer  # type: ignore[import-not-found] # noqa: I001
        hf_tokenizer: Any = TikTokenTokenizer.from_pretrained(model_path)  # pyright: ignore[reportUnknownVariableType,reportUnknownMemberType]

        # Patch encode to use internal tiktoken model directly
        # transformers 5.x has a bug in the encode->pad path for slow tokenizers
        def _patched_encode(text: str, **_kwargs: object) -> list[int]:
            # Pass allowed_special="all" to handle special tokens like <|im_user|>
            return list(hf_tokenizer.model.encode(text, allowed_special="all"))  # pyright: ignore[reportUnknownMemberType,reportUnknownArgumentType]

        hf_tokenizer.encode = _patched_encode
        return TokenizerWrapper(
            hf_tokenizer,
            eos_token_ids=eos_token_ids,
            tool_call_start="<|tool_calls_section_begin|>",
            tool_call_end="<|tool_calls_section_end|>",
            tool_parser=_parse_kimi_tool_calls,
        )
    # Non-Kimi models: standard mlx_lm tokenizer loading.
    tokenizer = load_tokenizer(
        model_path,
        tokenizer_config_extra={"trust_remote_code": trust_remote_code},
        eos_token_ids=eos_token_ids,
    )
    if "gemma-3" in model_id_lower:
        # Gemma-3 ends turns with <end_of_turn> (106), which may be
        # missing from the tokenizer's configured EOS set.
        gemma_3_eos_id = 1
        gemma_3_end_of_turn_id = 106
        if tokenizer.eos_token_ids is not None:
            if gemma_3_end_of_turn_id not in tokenizer.eos_token_ids:
                tokenizer.eos_token_ids = list(tokenizer.eos_token_ids) + [
                    gemma_3_end_of_turn_id
                ]
        else:
            tokenizer.eos_token_ids = [gemma_3_eos_id, gemma_3_end_of_turn_id]
    return tokenizer
def _normalize_tool_calls(msg_dict: dict[str, Any]) -> None:
"""Normalize tool_calls in a message dict.
OpenAI format has tool_calls[].function.arguments as a JSON string,
but some chat templates (e.g., GLM) expect it as a dict.
"""
tool_calls = msg_dict.get("tool_calls")
if not tool_calls or not isinstance(tool_calls, list):
return
for tc in tool_calls: # pyright: ignore[reportUnknownVariableType]
if not isinstance(tc, dict):
continue
func = tc.get("function") # pyright: ignore[reportUnknownMemberType,reportUnknownVariableType]
if not isinstance(func, dict):
continue
args = func.get("arguments") # pyright: ignore[reportUnknownMemberType,reportUnknownVariableType]
if isinstance(args, str):
with contextlib.suppress(json.JSONDecodeError):
func["arguments"] = json.loads(args)
def _collect_nested_property_names(schema: dict[str, Any]) -> set[str]:
names: set[str] = set()
properties: dict[str, Any] = schema.get("properties", {}) # type: ignore[reportAny]
for prop_spec in properties.values(): # pyright: ignore[reportAny]
if not isinstance(prop_spec, dict):
continue
if prop_spec.get("type") == "array": # type: ignore[reportAny]
items: dict[str, Any] | None = prop_spec.get("items") # type: ignore[reportAny]
if isinstance(items, dict) and items.get("type") == "object": # type: ignore[reportAny]
inner_props: dict[str, Any] = items.get("properties", {}) # type: ignore[reportAny]
for k in inner_props: # pyright: ignore[reportUnknownVariableType]
names.add(str(k)) # pyright: ignore[reportUnknownArgumentType]
names.update(_collect_nested_property_names(items)) # pyright: ignore[reportUnknownArgumentType]
return names
def _schemas_lost_in_prompt(prompt: str, tools: list[dict[str, Any]]) -> bool:
    """Return True if nested property names from any tool schema are absent."""
    for tool in tools:
        params = tool.get("function", {}).get("parameters", {})
        nested_names = _collect_nested_property_names(params)
        if not nested_names:
            continue
        # Every nested name must survive templating; any miss means loss.
        if any(name not in prompt for name in nested_names):
            return True
    return False
# Matches template guards of the form:
#   inner_type == "object | object" or inner_type|length > <N>
# The `|length > N` clause makes some templates silently collapse complex
# array-of-object schemas; `_patch_lossy_chat_template` strips it.
_LOSSY_TEMPLATE_PATTERN = re.compile(
    r"""inner_type\s*==\s*["']object \| object["']\s*or\s*inner_type\|length\s*>\s*\d+""",
)
def _patch_lossy_chat_template(template: str) -> str | None:
"""Patch chat templates that collapse nested object schemas to ``any[]``.
Some templates (e.g., GPT-OSS) have a guard like::
inner_type == "object | object" or inner_type|length > 50
The length check silently drops complex array-of-object schemas.
We remove the length guard, keeping only the object-union check.
Returns the patched template, or *None* if no patch was needed.
"""
patched, n = _LOSSY_TEMPLATE_PATTERN.subn(
lambda m: m.group(0).split(" or ")[0], # keep only the object-union check
template,
)
return patched if n > 0 else None
def _needs_dsml_encoding(task_params: TextGenerationTaskParams) -> bool:
if "deepseek-v3.2" not in task_params.model.lower():
return False
# Use DSML encoding when tools are provided or tool results are in the conversation
if task_params.tools:
return True
if task_params.chat_template_messages:
return any(
msg.get("role") == "tool" for msg in task_params.chat_template_messages
)
return False
def apply_chat_template(
    tokenizer: TokenizerWrapper,
    task_params: TextGenerationTaskParams,
) -> str:
    """Convert TextGenerationTaskParams to a chat template prompt.

    Converts the internal format (input + instructions) to a messages list
    that can be processed by the tokenizer's chat template.

    When chat_template_messages is available (from Chat Completions API),
    uses those directly to preserve tool_calls, thinking, and other fields.
    A trailing assistant message is treated as a prefill: it is removed
    before templating and its content appended to the rendered prompt.
    DeepSeek-V3.2 tool-calling conversations bypass the Jinja template
    entirely and use DSML encoding instead.
    """
    formatted_messages: list[dict[str, Any]] = []
    if task_params.chat_template_messages is not None:
        # Use pre-formatted messages that preserve tool_calls, thinking, etc.
        formatted_messages = list(task_params.chat_template_messages)
        for msg in formatted_messages:
            _normalize_tool_calls(msg)
    else:
        # Add system message (instructions) if present
        if task_params.instructions:
            formatted_messages.append(
                {"role": "system", "content": task_params.instructions}
            )
        # Convert input to messages
        for msg in task_params.input:
            if not msg.content:
                logger.warning("Received message with empty content, skipping")
                continue
            formatted_messages.append({"role": msg.role, "content": msg.content})
    # For assistant prefilling, append content after templating to avoid a closing turn token.
    partial_assistant_content: str | None = None
    if formatted_messages and formatted_messages[-1].get("role") == "assistant":
        partial_assistant_content = cast(str, formatted_messages[-1].get("content", ""))
        formatted_messages = formatted_messages[:-1]
    if _needs_dsml_encoding(task_params):
        # Imported lazily: only DeepSeek-V3.2 tool flows need this module.
        from exo.worker.engines.mlx.dsml_encoding import encode_messages

        prompt = encode_messages(
            messages=formatted_messages,
            thinking_mode="thinking" if task_params.enable_thinking else "chat",
            tools=task_params.tools,
        )
        if partial_assistant_content:
            prompt += partial_assistant_content
        logger.info(prompt)
        return prompt
    extra_kwargs: dict[str, Any] = {}
    if task_params.enable_thinking is not None:
        # Qwen3 and GLM use "enable_thinking"; DeepSeek uses "thinking".
        # Jinja ignores unknown variables, so passing both is safe.
        extra_kwargs["enable_thinking"] = task_params.enable_thinking
        extra_kwargs["thinking"] = task_params.enable_thinking
    patched_template: str | None = None
    if task_params.tools:
        original_template: str | None = getattr(tokenizer, "chat_template", None)
        if isinstance(original_template, str):
            patched_template = _patch_lossy_chat_template(original_template)
            if patched_template is not None:
                logger.info(
                    "Patched lossy chat template (removed inner_type length guard)"
                )
    prompt: str = tokenizer.apply_chat_template(
        formatted_messages,
        tokenize=False,
        add_generation_prompt=True,
        tools=task_params.tools,
        **({"chat_template": patched_template} if patched_template is not None else {}),
        **extra_kwargs,
    )
    # Sanity check: warn if nested tool schemas were still dropped.
    if task_params.tools and _schemas_lost_in_prompt(prompt, task_params.tools):
        logger.warning("Chat template lost nested tool schemas even after patching")
    if partial_assistant_content:
        prompt += partial_assistant_content
    logger.info(prompt)
    return prompt
def detect_thinking_prompt_suffix(prompt: str, tokenizer: TokenizerWrapper) -> bool:
    """Check whether the prompt already opens a thinking block.

    True when the tokenizer defines a think-start tag and the prompt
    (ignoring trailing whitespace) ends with it, meaning the opening tag
    should be prepended to the output stream.
    """
    opening_tag = tokenizer.think_start
    if opening_tag is None:
        return False
    return prompt.rstrip().endswith(opening_tag)
def fix_unmatched_think_end_tokens(
    tokens: mx.array, tokenizer: TokenizerWrapper
) -> mx.array:
    """Balance thinking-tag tokens in a generated sequence.

    Any think-end token without a matching open think-start gets a
    think-start token inserted immediately before it, so downstream
    parsing never sees an unmatched closing tag.

    Args:
        tokens: 1-D array of generated token ids.
        tokenizer: wrapper exposing ``has_thinking`` plus
            ``think_start_id`` / ``think_end_id``.

    Returns:
        A (possibly longer) token array with balanced think tags; the
        input is returned unchanged when the tokenizer has no thinking
        tokens.
    """
    if not tokenizer.has_thinking:
        return tokens
    # Compare against None explicitly: the original truthiness asserts
    # (`assert tokenizer.think_start_id`) would wrongly reject a valid
    # token id of 0.
    assert tokenizer.think_start_id is not None
    assert tokenizer.think_end_id is not None
    think_start_id: int = tokenizer.think_start_id
    think_end_id: int = tokenizer.think_end_id
    token_list: list[int] = cast(list[int], tokens.tolist())
    result: list[int] = []
    depth = 0  # number of currently-open think blocks
    for token in token_list:
        if token == think_start_id:
            depth += 1
        elif token == think_end_id:
            if depth == 0:
                # Unmatched close: synthesize the missing opening tag.
                result.append(think_start_id)
            else:
                depth -= 1
        result.append(token)
    return mx.array(result)
class NullKVCache(KVCache):
    """
    A KVCache that pretends to exist but holds zero tokens.
    It satisfies .state/.meta_state and never allocates real keys/values.
    """

    def __init__(self, dtype: mx.Dtype = mx.float16):
        super().__init__()
        # zero-length K/V so shapes/dtypes are defined but empty
        self.keys = mx.zeros((1, 1, 0, 1), dtype=dtype)
        self.values = mx.zeros((1, 1, 0, 1), dtype=dtype)
        self.offset = 0

    @property
    def state(self) -> tuple[mx.array, mx.array]:
        # matches what mx.save_safetensors / mx.eval expect
        return self.keys, self.values

    @state.setter
    def state(self, v: tuple[mx.array, mx.array]) -> None:
        # Writing state into a placeholder cache is always a bug — fail loudly.
        raise NotImplementedError("We should not be setting a NullKVCache.")
def mlx_force_oom(size: int = 200000) -> None:
    """
    Force an Out-Of-Memory (OOM) error in MLX by performing large tensor operations.
    """
    mx.set_default_device(mx.gpu)
    # Two size x size fp32 operands, then chained matmuls so the graph
    # needs several such buffers alive at once.
    lhs = mx.random.uniform(shape=(size, size), dtype=mx.float32)
    rhs = mx.random.uniform(shape=(size, size), dtype=mx.float32)
    mx.eval(lhs, rhs)
    prod = mx.matmul(lhs, rhs)
    left = mx.matmul(lhs, prod)
    right = mx.matmul(rhs, prod)
    squashed = mx.sigmoid(left + right)
    mx.eval(squashed)
def set_wired_limit_for_model(model_size: Memory):
    """
    Raise Metal's wired-memory limit to the max recommended working-set size.

    Warns when the model needs more than 90% of that limit, since
    generation may then be slow. No-op when Metal is unavailable.

    NOTE(review): despite the original wording, this is a plain function,
    not a context manager — the previous limit is not restored afterwards.
    """
    if not mx.metal.is_available():
        return
    max_rec_size = Memory.from_bytes(
        int(mx.device_info()["max_recommended_working_set_size"])
    )
    if model_size > 0.9 * max_rec_size:
        logger.warning(
            f"Generating with a model that requires {model_size.in_float_mb:.1f} MB "
            f"which is close to the maximum recommended size of {max_rec_size.in_float_mb:.1f} "
            "MB. This can be slow. See the documentation for possible work-arounds: "
            "https://github.com/ml-explore/mlx-lm/tree/main#large-models"
        )
    mx.set_wired_limit(max_rec_size.in_bytes)
    logger.info(f"Wired limit set to {max_rec_size}.")
def mlx_cleanup(
    model: Model | None, tokenizer: TokenizerWrapper | None, group: Group | None
) -> None:
    """Drop references to heavyweight MLX objects and reclaim memory."""
    import gc

    # Release our references first so clear_cache/collect can free them.
    del model, tokenizer, group
    mx.clear_cache()
    gc.collect()
def mx_any(bool_: bool, group: Group | None) -> bool:
    """Distributed logical OR: True when any rank's flag is set."""
    if group is None:
        # No communication group: the local flag is the global answer.
        return bool_
    # Sum the 0/1 flags across ranks on the CPU stream; any nonzero total
    # means at least one rank voted True.
    total = mx.distributed.all_sum(
        mx.array(bool_), group=group, stream=mx.default_stream(mx.Device(mx.cpu))
    )
    mx.eval(total)
    return total.item() > 0
def mx_barrier(group: Group | None):
    """Synchronize all ranks in the group; no-op without a group."""
    if group is None:
        return
    # An all_sum every rank must participate in acts as a barrier.
    sync = mx.distributed.all_sum(
        mx.array(1.0), group=group, stream=mx.default_stream(mx.Device(mx.cpu))
    )
    mx.eval(sync)
def _parse_kimi_tool_calls(text: str):
    """Parse Kimi-format tool calls out of generated text.

    Kimi uses a fixed call layout, e.g.::

        functions.multiply:0<|tool_call_argument_begin|>{"a": 2, "b": 3}

    optionally wrapped in <|tool_call_begin|>/<|tool_call_end|> pairs.
    Returns a list of dicts with ``id``, ``name`` and parsed ``arguments``
    (``None`` when the argument payload is not valid JSON).
    """
    import regex as re

    name_pattern = re.compile(
        r"^\s*((?:functions\.)?(.+?):\d+)\s*<\|tool_call_argument_begin\|>", re.DOTALL
    )
    args_pattern = re.compile(r"<\|tool_call_argument_begin\|>\s*(.*)\s*", re.DOTALL)
    splitter = re.compile(
        r"<\|tool_call_begin\|>(.*?)<\|tool_call_end\|>", re.DOTALL
    )

    def _parse_one(segment: str) -> dict[str, Any]:
        name_match = name_pattern.search(segment)
        if name_match is None:
            raise ValueError("No tool call found.")
        args_match = args_pattern.search(segment)
        if args_match is None:
            raise ValueError("No tool call arguments found.")
        try:
            parsed_args = json.loads(args_match.group(1))  # pyright: ignore[reportAny]
        except Exception:
            parsed_args = None
        return dict(
            id=name_match.group(1),  # e.g. "functions.get_weather:0"
            name=name_match.group(2),  # e.g. "get_weather"
            arguments=parsed_args,
        )

    segments = splitter.findall(text)
    if segments:
        return [_parse_one(segment) for segment in segments]  # pyright: ignore[reportAny]
    return [_parse_one(text)]
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/engines/mlx/utils_mlx.py",
"license": "Apache License 2.0",
"lines": 613,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/main.py | from collections import defaultdict
from datetime import datetime, timezone
import anyio
from anyio import fail_after
from loguru import logger
from exo.download.download_utils import resolve_model_in_path
from exo.shared.apply import apply
from exo.shared.models.model_cards import ModelId
from exo.shared.types.api import ImageEditsTaskParams
from exo.shared.types.commands import (
ForwarderCommand,
ForwarderDownloadCommand,
StartDownload,
)
from exo.shared.types.common import CommandId, NodeId, SystemId
from exo.shared.types.events import (
Event,
IndexedEvent,
InputChunkReceived,
NodeDownloadProgress,
NodeGatheredInfo,
TaskCreated,
TaskStatusUpdated,
TopologyEdgeCreated,
TopologyEdgeDeleted,
)
from exo.shared.types.multiaddr import Multiaddr
from exo.shared.types.state import State
from exo.shared.types.tasks import (
CancelTask,
CreateRunner,
DownloadModel,
ImageEdits,
Shutdown,
Task,
TaskStatus,
)
from exo.shared.types.topology import Connection, SocketConnection
from exo.shared.types.worker.downloads import DownloadCompleted
from exo.shared.types.worker.runners import RunnerId
from exo.utils.channels import Receiver, Sender, channel
from exo.utils.info_gatherer.info_gatherer import GatheredInfo, InfoGatherer
from exo.utils.info_gatherer.net_profile import check_reachable
from exo.utils.keyed_backoff import KeyedBackoff
from exo.utils.task_group import TaskGroup
from exo.worker.plan import plan
from exo.worker.runner.runner_supervisor import RunnerSupervisor
class Worker:
def __init__(
self,
node_id: NodeId,
*,
event_receiver: Receiver[IndexedEvent],
event_sender: Sender[Event],
# This is for requesting updates. It doesn't need to be a general command sender right now,
# but I think it's the correct way to be thinking about commands
command_sender: Sender[ForwarderCommand],
download_command_sender: Sender[ForwarderDownloadCommand],
):
self.node_id: NodeId = node_id
self.event_receiver = event_receiver
self.event_sender = event_sender
self.command_sender = command_sender
self.download_command_sender = download_command_sender
self.state: State = State()
self.runners: dict[RunnerId, RunnerSupervisor] = {}
self._tg: TaskGroup = TaskGroup()
self._system_id = SystemId()
# Buffer for input image chunks (for image editing)
self.input_chunk_buffer: dict[CommandId, dict[int, str]] = {}
self.input_chunk_counts: dict[CommandId, int] = {}
self._download_backoff: KeyedBackoff[ModelId] = KeyedBackoff(base=0.5, cap=10.0)
async def run(self):
logger.info("Starting Worker")
info_send, info_recv = channel[GatheredInfo]()
info_gatherer: InfoGatherer = InfoGatherer(info_send)
try:
async with self._tg as tg:
tg.start_soon(info_gatherer.run)
tg.start_soon(self._forward_info, info_recv)
tg.start_soon(self.plan_step)
tg.start_soon(self._event_applier)
tg.start_soon(self._poll_connection_updates)
finally:
# Actual shutdown code - waits for all tasks to complete before executing.
logger.info("Stopping Worker")
self.event_sender.close()
self.command_sender.close()
self.download_command_sender.close()
for runner in self.runners.values():
runner.shutdown()
async def _forward_info(self, recv: Receiver[GatheredInfo]):
with recv as info_stream:
async for info in info_stream:
await self.event_sender.send(
NodeGatheredInfo(
node_id=self.node_id,
when=str(datetime.now(tz=timezone.utc)),
info=info,
)
)
async def _event_applier(self):
with self.event_receiver as events:
async for event in events:
# 2. for each event, apply it to the state
self.state = apply(self.state, event=event)
event = event.event
# Buffer input image chunks for image editing
if isinstance(event, InputChunkReceived):
cmd_id = event.command_id
if cmd_id not in self.input_chunk_buffer:
self.input_chunk_buffer[cmd_id] = {}
self.input_chunk_counts[cmd_id] = event.chunk.total_chunks
self.input_chunk_buffer[cmd_id][event.chunk.chunk_index] = (
event.chunk.data
)
async def plan_step(self):
while True:
await anyio.sleep(0.1)
task: Task | None = plan(
self.node_id,
self.runners,
self.state.downloads,
self.state.instances,
self.state.runners,
self.state.tasks,
self.input_chunk_buffer,
self.input_chunk_counts,
)
if task is None:
continue
# Gate DownloadModel on backoff BEFORE emitting TaskCreated
# to prevent flooding the event log with useless events
if isinstance(task, DownloadModel):
model_id = task.shard_metadata.model_card.model_id
if not self._download_backoff.should_proceed(model_id):
continue
logger.info(f"Worker plan: {task.__class__.__name__}")
assert task.task_status
await self.event_sender.send(TaskCreated(task_id=task.task_id, task=task))
# lets not kill the worker if a runner is unresponsive
match task:
case CreateRunner():
self._create_supervisor(task)
await self.event_sender.send(
TaskStatusUpdated(
task_id=task.task_id, task_status=TaskStatus.Complete
)
)
case DownloadModel(shard_metadata=shard):
model_id = shard.model_card.model_id
self._download_backoff.record_attempt(model_id)
found_path = resolve_model_in_path(model_id)
if found_path is not None:
logger.info(
f"Model {model_id} found in EXO_MODELS_PATH at {found_path}"
)
await self.event_sender.send(
NodeDownloadProgress(
download_progress=DownloadCompleted(
node_id=self.node_id,
shard_metadata=shard,
model_directory=str(found_path),
total=shard.model_card.storage_size,
read_only=True,
)
)
)
await self.event_sender.send(
TaskStatusUpdated(
task_id=task.task_id,
task_status=TaskStatus.Complete,
)
)
else:
await self.download_command_sender.send(
ForwarderDownloadCommand(
origin=self._system_id,
command=StartDownload(
target_node_id=self.node_id,
shard_metadata=shard,
),
)
)
await self.event_sender.send(
TaskStatusUpdated(
task_id=task.task_id,
task_status=TaskStatus.Running,
)
)
case Shutdown(runner_id=runner_id):
runner = self.runners.pop(runner_id)
try:
with fail_after(3):
await runner.start_task(task)
except TimeoutError:
await self.event_sender.send(
TaskStatusUpdated(
task_id=task.task_id, task_status=TaskStatus.TimedOut
)
)
finally:
runner.shutdown()
case CancelTask(
cancelled_task_id=cancelled_task_id, runner_id=runner_id
):
await self.runners[runner_id].cancel_task(cancelled_task_id)
await self.event_sender.send(
TaskStatusUpdated(
task_id=task.task_id, task_status=TaskStatus.Complete
)
)
case ImageEdits() if task.task_params.total_input_chunks > 0:
# Assemble image from chunks and inject into task
cmd_id = task.command_id
chunks = self.input_chunk_buffer.get(cmd_id, {})
assembled = "".join(chunks[i] for i in range(len(chunks)))
logger.info(
f"Assembled input image from {len(chunks)} chunks, "
f"total size: {len(assembled)} bytes"
)
# Create modified task with assembled image data
modified_task = ImageEdits(
task_id=task.task_id,
command_id=task.command_id,
instance_id=task.instance_id,
task_status=task.task_status,
task_params=ImageEditsTaskParams(
image_data=assembled,
total_input_chunks=task.task_params.total_input_chunks,
prompt=task.task_params.prompt,
model=task.task_params.model,
n=task.task_params.n,
quality=task.task_params.quality,
output_format=task.task_params.output_format,
response_format=task.task_params.response_format,
size=task.task_params.size,
image_strength=task.task_params.image_strength,
bench=task.task_params.bench,
stream=task.task_params.stream,
partial_images=task.task_params.partial_images,
advanced_params=task.task_params.advanced_params,
),
)
# Cleanup buffers
if cmd_id in self.input_chunk_buffer:
del self.input_chunk_buffer[cmd_id]
if cmd_id in self.input_chunk_counts:
del self.input_chunk_counts[cmd_id]
await self._start_runner_task(modified_task)
case task:
await self._start_runner_task(task)
def shutdown(self):
self._tg.cancel_tasks()
async def _start_runner_task(self, task: Task):
if (instance := self.state.instances.get(task.instance_id)) is not None:
await self.runners[
instance.shard_assignments.node_to_runner[self.node_id]
].start_task(task)
def _create_supervisor(self, task: CreateRunner) -> RunnerSupervisor:
"""Creates and stores a new AssignedRunner with initial downloading status."""
runner = RunnerSupervisor.create(
bound_instance=task.bound_instance,
event_sender=self.event_sender.clone(),
)
self.runners[task.bound_instance.bound_runner_id] = runner
self._tg.start_soon(runner.run)
return runner
    async def _poll_connection_updates(self):
        """Periodically probe peer reachability and reconcile topology edges.

        Every 10 seconds: ping peers via check_reachable, emit
        TopologyEdgeCreated for newly reachable (ip, node) pairs and
        TopologyEdgeDeleted for known socket edges the probe no longer sees.
        Runs forever; cancelled externally via the owning task group.
        """
        while True:
            # Edges currently known to local topology state for this node.
            edges = set(
                conn.edge for conn in self.state.topology.out_edges(self.node_id)
            )
            # node -> set of IPs confirmed reachable in this probe round.
            conns: defaultdict[NodeId, set[str]] = defaultdict(set)
            async for ip, nid in check_reachable(
                self.state.topology,
                self.node_id,
                self.state.node_network,
            ):
                if ip in conns[nid]:
                    continue
                conns[nid].add(ip)
                edge = SocketConnection(
                    # nonsense multiaddr
                    sink_multiaddr=Multiaddr(address=f"/ip4/{ip}/tcp/52415")
                    if "." in ip
                    # nonsense multiaddr
                    else Multiaddr(address=f"/ip6/{ip}/tcp/52415"),
                )
                if edge not in edges:
                    logger.debug(f"ping discovered {edge=}")
                    await self.event_sender.send(
                        TopologyEdgeCreated(
                            conn=Connection(source=self.node_id, sink=nid, edge=edge)
                        )
                    )
            # Retire socket edges that this probe round failed to confirm.
            for conn in self.state.topology.out_edges(self.node_id):
                if not isinstance(conn.edge, SocketConnection):
                    continue
                # ignore mDNS discovered connections (non-52415 ports)
                if conn.edge.sink_multiaddr.port != 52415:
                    continue
                if (
                    conn.sink not in conns
                    or conn.edge.sink_multiaddr.ip_address not in conns[conn.sink]
                ):
                    logger.debug(f"ping failed to discover {conn=}")
                    await self.event_sender.send(TopologyEdgeDeleted(conn=conn))
            await anyio.sleep(10)
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/main.py",
"license": "Apache License 2.0",
"lines": 309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/plan.py | # pyright: reportUnusedImport = false
from collections.abc import Mapping, Sequence
from exo.shared.types.common import CommandId, NodeId
from exo.shared.types.tasks import (
CancelTask,
ConnectToGroup,
CreateRunner,
DownloadModel,
ImageEdits,
ImageGeneration,
LoadModel,
Shutdown,
StartWarmup,
Task,
TaskId,
TaskStatus,
TextGeneration,
)
from exo.shared.types.worker.downloads import (
DownloadCompleted,
DownloadFailed,
DownloadOngoing,
DownloadProgress,
)
from exo.shared.types.worker.instances import BoundInstance, Instance, InstanceId
from exo.shared.types.worker.runners import (
RunnerConnected,
RunnerConnecting,
RunnerFailed,
RunnerId,
RunnerIdle,
RunnerLoaded,
RunnerLoading,
RunnerReady,
RunnerRunning,
RunnerStatus,
RunnerWarmingUp,
)
from exo.worker.runner.runner_supervisor import RunnerSupervisor
def plan(
    node_id: NodeId,
    # Runners is expected to be FRESH and so should not come from state
    runners: Mapping[RunnerId, RunnerSupervisor],
    global_download_status: Mapping[NodeId, Sequence[DownloadProgress]],
    instances: Mapping[InstanceId, Instance],
    all_runners: Mapping[RunnerId, RunnerStatus],  # all global
    tasks: Mapping[TaskId, Task],
    input_chunk_buffer: Mapping[CommandId, dict[int, str]] | None = None,
    input_chunk_counts: Mapping[CommandId, int] | None = None,
) -> Task | None:
    """Return the next task this node should perform, or None if idle.

    Planners are consulted strictly in priority order; the first one to
    propose a task wins and no later planner is evaluated.
    """
    planners = (
        lambda: _cancel_tasks(runners, tasks),
        lambda: _kill_runner(runners, all_runners, instances),
        lambda: _create_runner(node_id, runners, instances),
        lambda: _model_needs_download(node_id, runners, global_download_status),
        lambda: _init_distributed_backend(runners, all_runners),
        lambda: _load_model(runners, all_runners, global_download_status),
        lambda: _ready_to_warmup(runners, all_runners),
        lambda: _pending_tasks(runners, tasks, all_runners, input_chunk_buffer or {}),
    )
    for planner in planners:
        proposed = planner()
        if proposed is not None:
            return proposed
    return None
def _kill_runner(
    runners: Mapping[RunnerId, RunnerSupervisor],
    all_runners: Mapping[RunnerId, RunnerStatus],
    instances: Mapping[InstanceId, Instance],
) -> Shutdown | None:
    """Shut down a local runner whose instance vanished or whose sibling failed."""
    for supervisor in runners.values():
        local_runner_id = supervisor.bound_instance.bound_runner_id
        instance = supervisor.bound_instance.instance
        instance_id = instance.instance_id
        # Instance no longer exists in state: the runner is orphaned.
        if instance_id not in instances:
            return Shutdown(instance_id=instance_id, runner_id=local_runner_id)
        # A sibling runner of the same instance failed: tear this one down too.
        for sibling_id in instance.shard_assignments.node_to_runner.values():
            if sibling_id == local_runner_id:
                continue
            if isinstance(all_runners.get(sibling_id, None), RunnerFailed):
                return Shutdown(instance_id=instance_id, runner_id=local_runner_id)
    return None
def _create_runner(
    node_id: NodeId,
    runners: Mapping[RunnerId, RunnerSupervisor],
    instances: Mapping[InstanceId, Instance],
) -> CreateRunner | None:
    """Propose a runner for any instance assigned to this node without a local supervisor."""
    for instance in instances.values():
        assigned_runner = instance.shard_assignments.node_to_runner.get(node_id, None)
        # Skip instances not assigned to us, or already supervised locally.
        if assigned_runner is None or assigned_runner in runners:
            continue
        # An assigned runner must always have a shard to serve.
        shard = instance.shard(assigned_runner)
        assert shard is not None
        return CreateRunner(
            instance_id=instance.instance_id,
            bound_instance=BoundInstance(
                instance=instance,
                bound_runner_id=assigned_runner,
                bound_node_id=node_id,
            ),
        )
    return None
def _model_needs_download(
    node_id: NodeId,
    runners: Mapping[RunnerId, RunnerSupervisor],
    global_download_status: Mapping[NodeId, Sequence[DownloadProgress]],
) -> DownloadModel | None:
    """Propose a model download for an idle runner whose shard has no local
    download record (neither ongoing, completed, nor failed)."""
    local_downloads = global_download_status.get(node_id, [])
    # Index this node's download progress by model id for O(1) lookup.
    download_status = {
        dp.shard_metadata.model_card.model_id: dp for dp in local_downloads
    }
    for runner in runners.values():
        model_id = runner.bound_instance.bound_shard.model_card.model_id
        if isinstance(runner.status, RunnerIdle) and (
            model_id not in download_status
            or not isinstance(
                download_status[model_id],
                (DownloadOngoing, DownloadCompleted, DownloadFailed),
            )
        ):
            # We don't invalidate download_status randomly in case a file gets deleted on disk
            return DownloadModel(
                instance_id=runner.bound_instance.instance.instance_id,
                shard_metadata=runner.bound_instance.bound_shard,
            )
def _init_distributed_backend(
    runners: Mapping[RunnerId, RunnerSupervisor],
    all_runners: Mapping[RunnerId, RunnerStatus],
):
    """Propose ConnectToGroup for multi-node instances once all members are
    idle/connecting.

    Ranks 0..n-2 may start accepting as soon as the group is idle/connecting;
    rank n-1 (the connecting rank) waits until every other member reports
    RunnerConnecting before joining.
    """
    for runner in runners.values():
        instance = runner.bound_instance.instance
        shard_assignments = instance.shard_assignments
        is_single_node_instance = len(shard_assignments.runner_to_shard) == 1
        # Single-node instances need no distributed backend.
        if is_single_node_instance:
            continue
        runner_is_idle = isinstance(runner.status, RunnerIdle)
        all_runners_connecting = all(
            isinstance(
                all_runners.get(global_runner_id),
                (RunnerConnecting, RunnerIdle),
            )
            for global_runner_id in shard_assignments.runner_to_shard
        )
        if not (runner_is_idle and all_runners_connecting):
            continue
        runner_id = runner.bound_instance.bound_runner_id
        shard = runner.bound_instance.bound_shard
        device_rank = shard.device_rank
        world_size = shard.world_size
        assert device_rank < world_size
        assert device_rank >= 0
        accepting_ranks = device_rank < world_size - 1
        # Rank = n-1
        connecting_rank_ready = device_rank == world_size - 1 and all(
            isinstance(all_runners.get(global_runner_id, None), RunnerConnecting)
            for global_runner_id in shard_assignments.runner_to_shard
            if global_runner_id != runner_id
        )
        if not (accepting_ranks or connecting_rank_ready):
            continue
        return ConnectToGroup(instance_id=instance.instance_id)
    return None
def _load_model(
    runners: Mapping[RunnerId, RunnerSupervisor],
    all_runners: Mapping[RunnerId, RunnerStatus],
    global_download_status: Mapping[NodeId, Sequence[DownloadProgress]],
) -> LoadModel | None:
    """Propose LoadModel once every participating node has completed its
    shard download and all runners are connected (or already loading)."""
    for runner in runners.values():
        instance = runner.bound_instance.instance
        shard_assignments = instance.shard_assignments
        # Every node of the instance must report a completed download of this model.
        all_local_downloads_complete = all(
            nid in global_download_status
            and any(
                isinstance(dp, DownloadCompleted)
                and dp.shard_metadata.model_card.model_id == shard_assignments.model_id
                for dp in global_download_status[nid]
            )
            for nid in shard_assignments.node_to_runner
        )
        if not all_local_downloads_complete:
            continue
        is_single_node_instance = len(instance.shard_assignments.runner_to_shard) == 1
        # Single-node instances skip the distributed-connect phase entirely.
        if is_single_node_instance and isinstance(runner.status, RunnerIdle):
            return LoadModel(instance_id=instance.instance_id)
        is_runner_waiting = isinstance(runner.status, RunnerConnected)
        all_ready_for_model = all(
            isinstance(
                all_runners.get(global_runner_id, None),
                (RunnerConnected, RunnerLoading, RunnerLoaded),
            )
            for global_runner_id in shard_assignments.runner_to_shard
        )
        if is_runner_waiting and all_ready_for_model:
            return LoadModel(instance_id=instance.instance_id)
    return None
def _ready_to_warmup(
    runners: Mapping[RunnerId, RunnerSupervisor],
    all_runners: Mapping[RunnerId, RunnerStatus],
) -> StartWarmup | None:
    """Propose StartWarmup for a loaded runner, ordered by rank.

    Ranks > 0 warm up once all members are loaded/warming; rank 0 goes
    last, after every other member already reports RunnerWarmingUp.
    """
    for runner in runners.values():
        instance = runner.bound_instance.instance
        shard_assignments = instance.shard_assignments
        shard = runner.bound_instance.bound_shard
        device_rank = shard.device_rank
        runner_id = runner.bound_instance.bound_runner_id
        world_size = shard.world_size
        is_runner_loaded = isinstance(runner.status, RunnerLoaded)
        assert device_rank < world_size
        assert device_rank >= 0
        # Rank != 0
        accepting_ranks_ready = device_rank > 0 and all(
            isinstance(
                all_runners.get(global_runner_id, None),
                (RunnerLoaded, RunnerWarmingUp),
            )
            for global_runner_id in shard_assignments.runner_to_shard
        )
        # Rank = 0
        connecting_rank_ready = device_rank == 0 and all(
            isinstance(all_runners.get(global_runner_id, None), RunnerWarmingUp)
            for global_runner_id in shard_assignments.runner_to_shard
            if global_runner_id != runner_id
        )
        if is_runner_loaded and (accepting_ranks_ready or connecting_rank_ready):
            return StartWarmup(instance_id=instance.instance_id)
    return None
def _pending_tasks(
    runners: Mapping[RunnerId, RunnerSupervisor],
    tasks: Mapping[TaskId, Task],
    all_runners: Mapping[RunnerId, RunnerStatus],
    input_chunk_buffer: Mapping[CommandId, dict[int, str]],
) -> Task | None:
    """Forward the first runnable generation task to a ready local runner.

    A task is runnable when it is Pending/Running, its instance is served by a
    local ready runner that hasn't completed it, every sibling runner is
    Ready/Running, and (for chunked ImageEdits) all input chunks have arrived.
    """
    for task in tasks.values():
        # for now, just forward chat completions
        # TODO(ciaran): do this better!
        if not isinstance(task, (TextGeneration, ImageGeneration, ImageEdits)):
            continue
        if task.task_status not in (TaskStatus.Pending, TaskStatus.Running):
            continue
        # For ImageEdits tasks, verify all input chunks have been received
        if isinstance(task, ImageEdits) and task.task_params.total_input_chunks > 0:
            cmd_id = task.command_id
            expected = task.task_params.total_input_chunks
            received = len(input_chunk_buffer.get(cmd_id, {}))
            if received < expected:
                continue  # Wait for all chunks to arrive
        for runner in runners.values():
            if task.instance_id != runner.bound_instance.instance.instance_id:
                continue
            # the task status _should_ be set to completed by the LAST runner
            # it is currently set by the first
            # this is definitely a hack
            if task.task_id in runner.completed:
                continue
            # Use .get(): a sibling runner may not have reported a status yet
            # (global state lagging), which must read as "not ready" rather
            # than raise KeyError. All other planners in this module use .get
            # for the same reason.
            if isinstance(runner.status, RunnerReady) and all(
                isinstance(
                    all_runners.get(global_runner_id), (RunnerReady, RunnerRunning)
                )
                for global_runner_id in runner.bound_instance.instance.shard_assignments.runner_to_shard
            ):
                return task
    return None
def _cancel_tasks(
    runners: Mapping[RunnerId, RunnerSupervisor],
    tasks: Mapping[TaskId, Task],
) -> Task | None:
    """Issue a CancelTask for the first cancelled task a local runner still runs."""
    for task in tasks.values():
        # Only tasks marked cancelled in state are eligible.
        if task.task_status != TaskStatus.Cancelled:
            continue
        for runner_id, supervisor in runners.items():
            serves_instance = (
                task.instance_id == supervisor.bound_instance.instance.instance_id
            )
            # Skip runners of other instances and tasks already cancelled locally.
            if not serves_instance or task.task_id in supervisor.cancelled:
                continue
            return CancelTask(
                instance_id=task.instance_id,
                cancelled_task_id=task.task_id,
                runner_id=runner_id,
            )
    return None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/plan.py",
"license": "Apache License 2.0",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/runner/bootstrap.py | import os
import loguru
from exo.shared.types.events import Event, RunnerStatusUpdated
from exo.shared.types.tasks import Task, TaskId
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runners import RunnerFailed
from exo.utils.channels import ClosedResourceError, MpReceiver, MpSender
logger: "loguru.Logger" = loguru.logger
def entrypoint(
    bound_instance: BoundInstance,
    event_sender: MpSender[Event],
    task_receiver: MpReceiver[Task],
    cancel_receiver: MpReceiver[TaskId],
    _logger: "loguru.Logger",
) -> None:
    """Child-process entry point for a runner.

    Installs the parent's logger as this module's global, configures the
    MLX fast-synch env flag, dispatches to the image or LLM runner main
    loop, reports crashes as RunnerFailed, and always closes/joins the
    IPC channels on exit.
    """
    global logger
    logger = _logger
    # EXO_FAST_SYNCH=off disables MLX_METAL_FAST_SYNCH; any other value
    # (including unset) enables it.
    fast_synch_override = os.environ.get("EXO_FAST_SYNCH")
    if fast_synch_override != "off":
        os.environ["MLX_METAL_FAST_SYNCH"] = "1"
    else:
        os.environ["MLX_METAL_FAST_SYNCH"] = "0"
    logger.info(f"Fast synch flag: {os.environ['MLX_METAL_FAST_SYNCH']}")
    # Import main after setting global logger - this lets us just import logger from this module
    try:
        if bound_instance.is_image_model:
            from exo.worker.runner.image_models.runner import main
        else:
            from exo.worker.runner.llm_inference.runner import main
        main(bound_instance, event_sender, task_receiver, cancel_receiver)
    except ClosedResourceError:
        # Parent closed the pipes; treated as a normal shutdown path.
        logger.warning("Runner communication closed unexpectedly")
    except Exception as e:
        logger.opt(exception=e).warning(
            f"Runner {bound_instance.bound_runner_id} crashed with critical exception {e}"
        )
        # Best-effort: tell the supervisor why we died before closing channels.
        event_sender.send(
            RunnerStatusUpdated(
                runner_id=bound_instance.bound_runner_id,
                runner_status=RunnerFailed(error_message=str(e)),
            )
        )
    finally:
        # Close then join both channels; the nested try guarantees joins run
        # even if a close raises.
        try:
            event_sender.close()
            task_receiver.close()
        finally:
            event_sender.join()
            task_receiver.join()
        logger.info("bye from the runner")
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/runner/bootstrap.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
exo-explore/exo:src/exo/worker/runner/runner_supervisor.py | import contextlib
import multiprocessing as mp
import signal
from dataclasses import dataclass, field
from typing import Self
import anyio
from anyio import (
BrokenResourceError,
ClosedResourceError,
to_thread,
)
from loguru import logger
from exo.shared.types.events import (
Event,
RunnerStatusUpdated,
TaskAcknowledged,
TaskStatusUpdated,
)
from exo.shared.types.tasks import Task, TaskId, TaskStatus
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runners import (
RunnerConnecting,
RunnerFailed,
RunnerIdle,
RunnerLoading,
RunnerRunning,
RunnerShuttingDown,
RunnerStatus,
RunnerWarmingUp,
)
from exo.shared.types.worker.shards import ShardMetadata
from exo.utils.channels import MpReceiver, MpSender, Sender, mp_channel
from exo.utils.task_group import TaskGroup
from exo.worker.runner.bootstrap import entrypoint
# Timeouts for runner inference phases (seconds).
# NOTE(review): not referenced inside this class in the visible source —
# presumably consumed elsewhere in the worker.
PREFILL_TIMEOUT_SECONDS = 60
DECODE_TIMEOUT_SECONDS = 5
@dataclass(eq=False)
class RunnerSupervisor:
    """Owns a runner child process and the IPC channels to/from it.

    Responsibilities: spawn the process (`create` + `run`), forward tasks in
    (`start_task`) and events out (`_forward_events`), watch for silent death
    (`_watch_runner`), diagnose/report failures (`_check_runner`), and tear
    everything down (`shutdown`).
    """

    shard_metadata: ShardMetadata
    bound_instance: BoundInstance
    runner_process: mp.Process
    initialize_timeout: float
    # Channels: events from the child, tasks/cancellations to the child,
    # and the worker-side event sink we forward into.
    _ev_recv: MpReceiver[Event]
    _task_sender: MpSender[Task]
    _event_sender: Sender[Event]
    _cancel_sender: MpSender[TaskId]
    _tg: TaskGroup = field(default_factory=TaskGroup, init=False)
    # Last runner status reported by the child.
    status: RunnerStatus = field(default_factory=RunnerIdle, init=False)
    # Tasks submitted but not yet acknowledged by the child.
    pending: dict[TaskId, anyio.Event] = field(default_factory=dict, init=False)
    completed: set[TaskId] = field(default_factory=set, init=False)
    cancelled: set[TaskId] = field(default_factory=set, init=False)
    _cancel_watch_runner: anyio.CancelScope = field(
        default_factory=anyio.CancelScope, init=False
    )
    @classmethod
    def create(
        cls,
        *,
        bound_instance: BoundInstance,
        event_sender: Sender[Event],
        initialize_timeout: float = 400,
    ) -> Self:
        """Build the supervisor and its (not yet started) daemon child process."""
        ev_send, ev_recv = mp_channel[Event]()
        task_sender, task_recv = mp_channel[Task]()
        cancel_sender, cancel_recv = mp_channel[TaskId]()
        runner_process = mp.Process(
            target=entrypoint,
            args=(
                bound_instance,
                ev_send,
                task_recv,
                cancel_recv,
                logger,
            ),
            daemon=True,
        )
        shard_metadata = bound_instance.bound_shard
        self = cls(
            bound_instance=bound_instance,
            shard_metadata=shard_metadata,
            runner_process=runner_process,
            initialize_timeout=initialize_timeout,
            _ev_recv=ev_recv,
            _task_sender=task_sender,
            _cancel_sender=cancel_sender,
            _event_sender=event_sender,
        )
        return self
    async def run(self):
        """Start the child process and run the watcher/forwarder until cancelled."""
        self.runner_process.start()
        async with self._tg as tg:
            tg.start_soon(self._watch_runner)
            tg.start_soon(self._forward_events)
    def shutdown(self):
        """Tear down channels and the child process: join, then SIGTERM, then kill."""
        logger.info("Runner supervisor shutting down")
        self._tg.cancel_tasks()
        if not self._cancel_watch_runner.cancel_called:
            self._cancel_watch_runner.cancel()
        # Close every channel; each may already be closed, hence the suppress.
        with contextlib.suppress(ClosedResourceError):
            self._ev_recv.close()
        with contextlib.suppress(ClosedResourceError):
            self._task_sender.close()
        with contextlib.suppress(ClosedResourceError):
            self._event_sender.close()
        # Sentinel telling the child to abandon its current task.
        with contextlib.suppress(ClosedResourceError):
            self._cancel_sender.send(TaskId("CANCEL_CURRENT_TASK"))
        with contextlib.suppress(ClosedResourceError):
            self._cancel_sender.close()
        self.runner_process.join(5)
        if not self.runner_process.is_alive():
            logger.info("Runner process succesfully terminated")
            return
        # This is overkill but it's not technically bad, just unnecessary.
        logger.warning("Runner process didn't shutdown succesfully, terminating")
        self.runner_process.terminate()
        self.runner_process.join(1)
        if not self.runner_process.is_alive():
            return
        logger.critical("Runner process didn't respond to SIGTERM, killing")
        self.runner_process.kill()
    async def start_task(self, task: Task):
        """Send a task to the child and wait for it to be acknowledged.

        Duplicate submissions (already pending or completed) are dropped
        with a warning.
        """
        if task.task_id in self.pending:
            logger.warning(
                f"Skipping invalid task {task} as it has already been submitted"
            )
            return
        if task.task_id in self.completed:
            logger.warning(
                f"Skipping invalid task {task} as it has already been completed"
            )
            return
        logger.info(f"Starting task {task}")
        event = anyio.Event()
        self.pending[task.task_id] = event
        try:
            await self._task_sender.send_async(task)
        except ClosedResourceError:
            logger.warning(f"Task {task} dropped, runner closed communication.")
            return
        # Released by _forward_events when the child sends TaskAcknowledged.
        await event.wait()
    async def cancel_task(self, task_id: TaskId):
        """Ask the child to cancel a task; escalate if the pipe is blocked."""
        if task_id in self.completed:
            logger.info(f"Unable to cancel {task_id} as it has been completed")
            return
        self.cancelled.add(task_id)
        # If the send doesn't complete within 0.5s, assume the child is wedged.
        with anyio.move_on_after(0.5) as scope:
            await self._cancel_sender.send_async(task_id)
        if scope.cancel_called:
            logger.error("RunnerSupervisor cancel pipe blocked")
            await self._check_runner(TimeoutError("cancel pipe blocked"))
    async def _forward_events(self):
        """Relay child events to the worker, tracking status/acks/completions."""
        try:
            with self._ev_recv as events:
                async for event in events:
                    if isinstance(event, RunnerStatusUpdated):
                        self.status = event.runner_status
                    if isinstance(event, TaskAcknowledged):
                        # Ack is consumed locally; it never reaches the worker.
                        self.pending.pop(event.task_id).set()
                        continue
                    if (
                        isinstance(event, TaskStatusUpdated)
                        and event.task_status == TaskStatus.Complete
                    ):
                        # If a task has just been completed, we should be working on it.
                        assert isinstance(
                            self.status,
                            (
                                RunnerRunning,
                                RunnerWarmingUp,
                                RunnerLoading,
                                RunnerConnecting,
                                RunnerShuttingDown,
                            ),
                        )
                        self.completed.add(event.task_id)
                    await self._event_sender.send(event)
        except (ClosedResourceError, BrokenResourceError) as e:
            await self._check_runner(e)
        finally:
            # Unblock every start_task() waiter so callers don't hang forever.
            for tid in self.pending:
                self.pending[tid].set()
    def __del__(self) -> None:
        # Last-resort cleanup: a live child here means shutdown() was skipped.
        if self.runner_process.is_alive():
            logger.critical("RunnerSupervisor was not stopped cleanly.")
            with contextlib.suppress(ValueError):
                self.runner_process.kill()
    async def _watch_runner(self) -> None:
        """Poll the child's liveness every 5s; diagnose if it died."""
        with self._cancel_watch_runner:
            while True:
                await anyio.sleep(5)
                if not self.runner_process.is_alive():
                    await self._check_runner(RuntimeError("Runner found to be dead"))
    async def _check_runner(self, e: Exception) -> None:
        """Diagnose a (suspected) dead child, report RunnerFailed, and shut down.

        A clean exit (exitcode 0) returns without reporting a failure.
        """
        if not self._cancel_watch_runner.cancel_called:
            self._cancel_watch_runner.cancel()
        logger.info("Checking runner's status")
        if self.runner_process.is_alive():
            logger.info("Runner was found to be alive, attempting to join process")
            await to_thread.run_sync(self.runner_process.join, 5)
        rc = self.runner_process.exitcode
        logger.info(f"Runner exited with exit code {rc}")
        if rc == 0:
            return
        # Negative exitcode means the process was killed by a signal.
        if isinstance(rc, int) and rc < 0:
            sig = -rc
            try:
                cause = f"signal={sig} ({signal.strsignal(sig)})"
            except Exception:
                cause = f"signal={sig}"
        else:
            cause = f"exitcode={rc}"
        logger.opt(exception=e).error(f"Runner terminated with {cause}")
        try:
            self.status = RunnerFailed(error_message=f"Terminated ({cause})")
            # Shielded: the failure report must go out even while cancelling.
            with anyio.CancelScope(shield=True):
                await self._event_sender.send(
                    RunnerStatusUpdated(
                        runner_id=self.bound_instance.bound_runner_id,
                        runner_status=RunnerFailed(
                            error_message=f"Terminated ({cause})"
                        ),
                    )
                )
        except (ClosedResourceError, BrokenResourceError):
            logger.warning(
                "Event sender already closed, unable to report runner failure"
            )
        self.shutdown()
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/runner/runner_supervisor.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
exo-explore/exo:src/exo/worker/tests/constants.py | from typing import Final
from exo.shared.models.model_cards import ModelId
from exo.shared.types.common import CommandId, NodeId
from exo.shared.types.tasks import TaskId
from exo.shared.types.worker.instances import InstanceId, RunnerId
# Fixed, deterministic identifiers shared by the worker unit tests.
MASTER_NODE_ID: Final[NodeId] = NodeId("ffffffff-aaaa-4aaa-8aaa-aaaaaaaaaaaa")
NODE_A: Final[NodeId] = NodeId("aaaaaaaa-aaaa-4aaa-8aaa-aaaaaaaaaaaa")
NODE_B: Final[NodeId] = NodeId("bbbbbbbb-bbbb-4bbb-8bbb-bbbbbbbbbbbb")
NODE_C: Final[NodeId] = NodeId("cccccccc-cccc-4ccc-8ccc-cccccccccccc")
RUNNER_1_ID: Final[RunnerId] = RunnerId("11111111-1111-4111-8111-111111111111")
RUNNER_2_ID: Final[RunnerId] = RunnerId("33333333-3333-4333-8333-333333333333")
# NOTE(review): unlike its siblings this is not a UUID-shaped id — confirm intentional.
RUNNER_3_ID: Final[RunnerId] = RunnerId("Runner3")
INSTANCE_1_ID: Final[InstanceId] = InstanceId("22222222-2222-4222-8222-222222222222")
INSTANCE_2_ID: Final[InstanceId] = InstanceId("44444444-4444-4444-8444-444444444444")
MODEL_A_ID: Final[ModelId] = ModelId("mlx-community/Llama-3.2-1B-Instruct-4bit")
MODEL_B_ID: Final[ModelId] = ModelId("mlx-community/TinyLlama-1.1B-Chat-v1.0")
TASK_1_ID: Final[TaskId] = TaskId("55555555-5555-4555-8555-555555555555")
TASK_2_ID: Final[TaskId] = TaskId("66666666-6666-4666-8666-666666666666")
COMMAND_1_ID: Final[CommandId] = CommandId("77777777-7777-4777-8777-777777777777")
COMMAND_2_ID: Final[CommandId] = CommandId("88888888-8888-4888-8888-888888888888")
# Well-known task ids for lifecycle phases exercised in tests.
SHUTDOWN_TASK_ID: Final[TaskId] = TaskId("shutdown")
CHAT_COMPLETION_TASK_ID: Final[TaskId] = TaskId("chat-completion")
INITIALIZATION_TASK_ID: Final[TaskId] = TaskId("initialisation")
LOAD_TASK_ID: Final[TaskId] = TaskId("load")
WARMUP_TASK_ID: Final[TaskId] = TaskId("warmup")
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/constants.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_plan/test_download_and_loading.py | import exo.worker.plan as plan_mod
from exo.shared.types.common import NodeId
from exo.shared.types.memory import Memory
from exo.shared.types.tasks import LoadModel
from exo.shared.types.worker.downloads import DownloadCompleted, DownloadProgress
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runners import (
RunnerConnected,
RunnerIdle,
)
from exo.worker.tests.constants import (
INSTANCE_1_ID,
MODEL_A_ID,
NODE_A,
NODE_B,
RUNNER_1_ID,
RUNNER_2_ID,
)
from exo.worker.tests.unittests.conftest import (
FakeRunnerSupervisor,
get_mlx_ring_instance,
get_pipeline_shard_metadata,
)
def test_plan_requests_download_when_waiting_and_shard_not_downloaded():
    """
    When a runner is waiting for a model and its shard is not in the
    local download_status map, plan() should emit DownloadModel.
    """
    # Single-node instance whose only runner is idle.
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    runner = FakeRunnerSupervisor(bound_instance=bound_instance, status=RunnerIdle())
    runners = {RUNNER_1_ID: runner}
    instances = {INSTANCE_1_ID: instance}
    all_runners = {RUNNER_1_ID: RunnerIdle()}
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore
        # No local download record for the shard's model.
        global_download_status={NODE_A: []},
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert isinstance(result, plan_mod.DownloadModel)
    assert result.instance_id == INSTANCE_1_ID
    assert result.shard_metadata == shard
def test_plan_loads_model_when_all_shards_downloaded_and_waiting():
    """
    When all shards for an instance are DownloadCompleted (globally) and
    all runners are in waiting/loading/loaded states, plan() should emit
    LoadModel once.
    """
    # Two-node instance, one shard per node.
    shard1 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0, world_size=2)
    shard2 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=1, world_size=2)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={RUNNER_1_ID: shard1, RUNNER_2_ID: shard2},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    # Local runner already finished the distributed-connect phase.
    local_runner = FakeRunnerSupervisor(
        bound_instance=bound_instance, status=RunnerConnected()
    )
    runners = {RUNNER_1_ID: local_runner}
    instances = {INSTANCE_1_ID: instance}
    all_runners = {
        RUNNER_1_ID: RunnerConnected(),
        RUNNER_2_ID: RunnerConnected(),
    }
    # Both nodes report completed downloads of their shard.
    global_download_status = {
        NODE_A: [
            DownloadCompleted(shard_metadata=shard1, node_id=NODE_A, total=Memory())
        ],
        NODE_B: [
            DownloadCompleted(shard_metadata=shard2, node_id=NODE_B, total=Memory())
        ],
    }
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore
        global_download_status=global_download_status,
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert isinstance(result, LoadModel)
    assert result.instance_id == INSTANCE_1_ID
def test_plan_does_not_request_download_when_shard_already_downloaded():
    """
    If the local shard already has a DownloadCompleted entry, plan()
    should not re-emit DownloadModel while global state is still catching up.
    """
    shard = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    runner = FakeRunnerSupervisor(bound_instance=bound_instance, status=RunnerIdle())
    runners = {RUNNER_1_ID: runner}
    instances = {INSTANCE_1_ID: instance}
    all_runners = {RUNNER_1_ID: RunnerIdle()}
    # Global state shows shard is downloaded for NODE_A
    global_download_status: dict[NodeId, list[DownloadProgress]] = {
        NODE_A: [
            DownloadCompleted(shard_metadata=shard, node_id=NODE_A, total=Memory())
        ],
        NODE_B: [],
    }
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore
        global_download_status=global_download_status,
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    # Another DownloadModel would be redundant; any other plan is acceptable.
    assert not isinstance(result, plan_mod.DownloadModel)
def test_plan_does_not_load_model_until_all_shards_downloaded_globally():
    """
    LoadModel should not be emitted while some shards are still missing from
    the global_download_status.
    """
    shard1 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0, world_size=2)
    shard2 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=1, world_size=2)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={RUNNER_1_ID: shard1, RUNNER_2_ID: shard2},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    local_runner = FakeRunnerSupervisor(
        bound_instance=bound_instance, status=RunnerConnected()
    )
    runners = {RUNNER_1_ID: local_runner}
    instances = {INSTANCE_1_ID: instance}
    all_runners = {
        RUNNER_1_ID: RunnerConnected(),
        RUNNER_2_ID: RunnerConnected(),
    }
    global_download_status = {
        NODE_A: [
            DownloadCompleted(shard_metadata=shard1, node_id=NODE_A, total=Memory())
        ],
        NODE_B: [],  # NODE_B has no downloads completed yet
    }
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore
        global_download_status=global_download_status,
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert result is None
    global_download_status = {
        NODE_A: [
            DownloadCompleted(shard_metadata=shard1, node_id=NODE_A, total=Memory())
        ],
        NODE_B: [
            DownloadCompleted(shard_metadata=shard2, node_id=NODE_B, total=Memory())
        ],  # NODE_B's download is now complete, so planning may proceed
    }
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore
        global_download_status=global_download_status,
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert result is not None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_plan/test_download_and_loading.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_plan/test_runner_lifecycle.py | from typing import Any
import exo.worker.plan as plan_mod
from exo.shared.types.tasks import Shutdown
from exo.shared.types.worker.instances import BoundInstance, Instance, InstanceId
from exo.shared.types.worker.runners import (
RunnerFailed,
RunnerId,
RunnerReady,
RunnerStatus,
)
from exo.worker.tests.constants import (
INSTANCE_1_ID,
MODEL_A_ID,
NODE_A,
NODE_B,
RUNNER_1_ID,
RUNNER_2_ID,
)
from exo.worker.tests.unittests.conftest import (
FakeRunnerSupervisor,
get_mlx_ring_instance,
get_pipeline_shard_metadata,
)
def test_plan_kills_runner_when_instance_missing():
    """
    If a local runner's instance is no longer present in state,
    plan() should return a Shutdown for that runner.
    """
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    runner = FakeRunnerSupervisor(bound_instance=bound_instance, status=RunnerReady())
    runners = {RUNNER_1_ID: runner}
    # Empty: the instance the runner is bound to has been removed from state.
    instances: dict[InstanceId, Instance] = {}
    all_runners = {RUNNER_1_ID: RunnerReady()}
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore[arg-type]
        global_download_status={NODE_A: []},
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert isinstance(result, Shutdown)
    assert result.instance_id == INSTANCE_1_ID
    assert result.runner_id == RUNNER_1_ID
def test_plan_kills_runner_when_sibling_failed():
    """
    If a sibling runner in the same instance has failed, the local runner
    should be shut down.
    """
    shard1 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=0, world_size=2)
    shard2 = get_pipeline_shard_metadata(MODEL_A_ID, device_rank=1, world_size=2)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={RUNNER_1_ID: shard1, RUNNER_2_ID: shard2},
    )
    bound_instance = BoundInstance(
        instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
    )
    runner = FakeRunnerSupervisor(bound_instance=bound_instance, status=RunnerReady())
    runners = {RUNNER_1_ID: runner}
    instances = {INSTANCE_1_ID: instance}
    # The sibling on NODE_B has failed; the local runner is still ready.
    all_runners = {
        RUNNER_1_ID: RunnerReady(),
        RUNNER_2_ID: RunnerFailed(error_message="boom"),
    }
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,  # type: ignore[arg-type]
        global_download_status={NODE_A: []},
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    assert isinstance(result, Shutdown)
    assert result.instance_id == INSTANCE_1_ID
    assert result.runner_id == RUNNER_1_ID
def test_plan_creates_runner_when_missing_for_node():
    """
    If shard_assignments specify a runner for this node but we don't have
    a local supervisor yet, plan() should emit a CreateRunner.
    """
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    runners: dict[Any, Any] = {}  # nothing local yet
    instances = {INSTANCE_1_ID: instance}
    all_runners: dict[Any, Any] = {}
    result = plan_mod.plan(
        node_id=NODE_A,
        runners=runners,
        global_download_status={NODE_A: []},
        instances=instances,
        all_runners=all_runners,
        tasks={},
    )
    # plan() should propose creating the missing runner, bound to this node.
    assert isinstance(result, plan_mod.CreateRunner)
    assert result.instance_id == INSTANCE_1_ID
    assert isinstance(result.bound_instance, BoundInstance)
    assert result.bound_instance.instance is instance
    assert result.bound_instance.bound_runner_id == RUNNER_1_ID
def test_plan_does_not_create_runner_when_supervisor_already_present():
    """plan() must not emit CreateRunner for a runner that already has a
    local supervisor on this node."""
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerReady(),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore[arg-type]
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerReady()},
        tasks={},
    )
    assert action is None
def test_plan_does_not_create_runner_for_unassigned_node():
    """plan() must not create a runner on a node that is absent from the
    instance's node_to_runner assignments."""
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_B: RUNNER_2_ID},
        runner_to_shard={RUNNER_2_ID: shard},
    )
    no_local_runners: dict[RunnerId, FakeRunnerSupervisor] = {}
    no_statuses: dict[RunnerId, RunnerStatus] = {}
    action = plan_mod.plan(
        node_id=NODE_A,
        runners=no_local_runners,  # type: ignore
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: instance},
        all_runners=no_statuses,
        tasks={},
    )
    assert action is None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_plan/test_runner_lifecycle.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_plan/test_task_forwarding.py | from typing import cast
import exo.worker.plan as plan_mod
from exo.shared.types.tasks import Task, TaskId, TaskStatus, TextGeneration
from exo.shared.types.text_generation import InputMessage, TextGenerationTaskParams
from exo.shared.types.worker.instances import BoundInstance, InstanceId
from exo.shared.types.worker.runners import (
RunnerIdle,
RunnerReady,
RunnerRunning,
)
from exo.worker.tests.constants import (
COMMAND_1_ID,
INSTANCE_1_ID,
MODEL_A_ID,
NODE_A,
NODE_B,
RUNNER_1_ID,
RUNNER_2_ID,
TASK_1_ID,
)
from exo.worker.tests.unittests.conftest import (
FakeRunnerSupervisor,
OtherTask,
get_mlx_ring_instance,
get_pipeline_shard_metadata,
)
def test_plan_forwards_pending_chat_completion_when_runner_ready():
    """A pending TextGeneration for the local instance is forwarded once
    every runner in the instance is Ready/Running."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerReady(),
    )
    pending = TextGeneration(
        task_id=TASK_1_ID,
        instance_id=INSTANCE_1_ID,
        task_status=TaskStatus.Pending,
        command_id=COMMAND_1_ID,
        task_params=TextGenerationTaskParams(
            model=MODEL_A_ID, input=[InputMessage(role="user", content="")]
        ),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerReady(), RUNNER_2_ID: RunnerReady()},
        tasks={TASK_1_ID: pending},
    )
    assert action is pending
def test_plan_does_not_forward_chat_completion_if_any_runner_not_ready():
    """A pending TextGeneration stays queued while any runner of the
    instance is not yet Ready/Running."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerReady(),
    )
    pending = TextGeneration(
        task_id=TASK_1_ID,
        instance_id=INSTANCE_1_ID,
        task_status=TaskStatus.Pending,
        command_id=COMMAND_1_ID,
        task_params=TextGenerationTaskParams(
            model=MODEL_A_ID, input=[InputMessage(role="user", content="")]
        ),
    )
    # RUNNER_2 is still Idle, so the task must not be forwarded yet.
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerReady(), RUNNER_2_ID: RunnerIdle()},
        tasks={TASK_1_ID: pending},
    )
    assert action is None
def test_plan_does_not_forward_tasks_for_other_instances():
    """Pending TextGeneration tasks addressed to a different instance are
    ignored by plan()."""
    shard = get_pipeline_shard_metadata(model_id=MODEL_A_ID, device_rank=0)
    local_instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID},
        runner_to_shard={RUNNER_1_ID: shard},
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=local_instance,
            bound_runner_id=RUNNER_1_ID,
            bound_node_id=NODE_A,
        ),
        status=RunnerReady(),
    )
    foreign_task = TextGeneration(
        task_id=TaskId("other-task"),
        instance_id=InstanceId("instance-2"),
        task_status=TaskStatus.Pending,
        command_id=COMMAND_1_ID,
        task_params=TextGenerationTaskParams(
            model=MODEL_A_ID, input=[InputMessage(role="user", content="")]
        ),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: local_instance},
        all_runners={RUNNER_1_ID: RunnerReady()},
        tasks={foreign_task.task_id: foreign_task},
    )
    assert action is None
def test_plan_ignores_non_pending_or_non_chat_tasks():
    """plan() forwards neither completed TextGeneration tasks nor tasks of
    other types, even when all runners are Ready."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerReady(),
    )
    finished = TextGeneration(
        task_id=TASK_1_ID,
        instance_id=INSTANCE_1_ID,
        task_status=TaskStatus.Complete,
        command_id=COMMAND_1_ID,
        task_params=TextGenerationTaskParams(
            model=MODEL_A_ID, input=[InputMessage(role="user", content="")]
        ),
    )
    other_task_id = TaskId("other-task")
    # OtherTask is not a TextGeneration; double-cast so the type checker
    # accepts it in the Task-keyed mapping.
    non_chat = cast(
        Task,
        cast(
            object,
            OtherTask(
                task_id=other_task_id,
                instance_id=INSTANCE_1_ID,
                task_status=TaskStatus.Pending,
            ),
        ),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerReady(), RUNNER_2_ID: RunnerReady()},
        tasks={TASK_1_ID: finished, other_task_id: non_chat},
    )
    assert action is None
def test_plan_returns_none_when_nothing_to_do():
    """Steady state: healthy runners, no downloads needed, no pending
    tasks — plan() returns None."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerRunning(),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerRunning(), RUNNER_2_ID: RunnerRunning()},
        tasks={},
    )
    assert action is None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_plan/test_task_forwarding.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:src/exo/worker/tests/unittests/test_plan/test_warmup.py | import exo.worker.plan as plan_mod
from exo.shared.types.tasks import StartWarmup
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runners import (
RunnerIdle,
RunnerLoaded,
RunnerLoading,
RunnerWarmingUp,
)
from exo.worker.tests.constants import (
INSTANCE_1_ID,
MODEL_A_ID,
NODE_A,
NODE_B,
NODE_C,
RUNNER_1_ID,
RUNNER_2_ID,
RUNNER_3_ID,
)
from exo.worker.tests.unittests.conftest import (
FakeRunnerSupervisor,
get_mlx_ring_instance,
get_pipeline_shard_metadata,
)
def test_plan_starts_warmup_for_accepting_rank_when_all_loaded_or_warming():
    """A non-zero device_rank shard emits StartWarmup once every shard in
    the instance is Loaded/WarmingUp."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID, NODE_C: RUNNER_3_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=3
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=3
            ),
            RUNNER_3_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=2, world_size=3
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_2_ID, bound_node_id=NODE_B
        ),
        status=RunnerLoaded(),
    )
    action = plan_mod.plan(
        node_id=NODE_B,
        runners={RUNNER_2_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={
            RUNNER_1_ID: RunnerLoaded(),
            RUNNER_2_ID: RunnerLoaded(),
            RUNNER_3_ID: RunnerWarmingUp(),
        },
        tasks={},
    )
    assert isinstance(action, StartWarmup)
    assert action.instance_id == INSTANCE_1_ID
def test_plan_starts_warmup_for_rank_zero_after_others_warming():
    """Rank 0 emits StartWarmup once every other runner in the instance is
    already WarmingUp."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerLoaded(),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerLoaded(), RUNNER_2_ID: RunnerWarmingUp()},
        tasks={},
    )
    assert isinstance(action, StartWarmup)
    assert action.instance_id == INSTANCE_1_ID
def test_plan_does_not_start_warmup_for_non_zero_rank_until_all_loaded_or_warming():
    """A non-zero rank must not start warmup while any shard in the
    instance is still outside Loaded/WarmingUp."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_2_ID, bound_node_id=NODE_B
        ),
        status=RunnerLoaded(),
    )
    # RUNNER_1 is still Idle, so warmup must not begin.
    action = plan_mod.plan(
        node_id=NODE_B,
        runners={RUNNER_2_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerIdle(), RUNNER_2_ID: RunnerLoaded()},
        tasks={},
    )
    assert action is None
def test_plan_does_not_start_warmup_for_rank_zero_until_others_warming():
    """Rank 0 holds off warmup until all non-zero ranks report WarmingUp,
    and starts as soon as they do."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerLoaded(),
    )

    def run_plan(statuses):
        # Helper: invoke plan() with everything fixed except runner statuses.
        return plan_mod.plan(
            node_id=NODE_A,
            runners={RUNNER_1_ID: supervisor},  # type: ignore
            global_download_status={NODE_A: []},
            instances={INSTANCE_1_ID: instance},
            all_runners=statuses,
            tasks={},
        )

    # Peer only Loaded: rank 0 must wait.
    assert run_plan({RUNNER_1_ID: RunnerLoaded(), RUNNER_2_ID: RunnerLoaded()}) is None

    # Peer WarmingUp: rank 0 may now start.
    action = run_plan({RUNNER_1_ID: RunnerLoaded(), RUNNER_2_ID: RunnerWarmingUp()})
    assert isinstance(action, StartWarmup)
    assert action.instance_id == INSTANCE_1_ID
def test_plan_starts_warmup_for_connecting_rank_after_others_warming():
    """The connecting rank (device_rank == world_size - 1) emits
    StartWarmup once every other runner is already WarmingUp."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    # Rank 1 is the connecting rank in a 2-node ring.
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_2_ID, bound_node_id=NODE_B
        ),
        status=RunnerLoaded(),
    )
    action = plan_mod.plan(
        node_id=NODE_B,
        runners={RUNNER_2_ID: supervisor},  # type: ignore
        global_download_status={NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerWarmingUp(), RUNNER_2_ID: RunnerLoaded()},
        tasks={},
    )
    assert isinstance(action, StartWarmup)
    assert action.instance_id == INSTANCE_1_ID
def test_plan_does_not_start_warmup_for_accepting_rank_until_all_loaded_or_warming():
    """The accepting rank (rank 0 in a 2-node ring) must not start warmup
    while any shard is still Loading."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    # Rank 0 is the accepting rank.
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerLoaded(),
    )
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerLoaded(), RUNNER_2_ID: RunnerLoading()},
        tasks={},
    )
    assert action is None
def test_plan_does_not_start_warmup_for_connecting_rank_until_others_warming():
    """Rank 0 must not start warmup while its peers are merely Loaded and
    not yet WarmingUp."""
    instance = get_mlx_ring_instance(
        instance_id=INSTANCE_1_ID,
        model_id=MODEL_A_ID,
        node_to_runner={NODE_A: RUNNER_1_ID, NODE_B: RUNNER_2_ID},
        runner_to_shard={
            RUNNER_1_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=0, world_size=2
            ),
            RUNNER_2_ID: get_pipeline_shard_metadata(
                MODEL_A_ID, device_rank=1, world_size=2
            ),
        },
    )
    supervisor = FakeRunnerSupervisor(
        bound_instance=BoundInstance(
            instance=instance, bound_runner_id=RUNNER_1_ID, bound_node_id=NODE_A
        ),
        status=RunnerLoaded(),
    )
    # Both shards Loaded, none WarmingUp: rank 0 keeps waiting.
    action = plan_mod.plan(
        node_id=NODE_A,
        runners={RUNNER_1_ID: supervisor},  # type: ignore
        global_download_status={NODE_A: [], NODE_B: []},
        instances={INSTANCE_1_ID: instance},
        all_runners={RUNNER_1_ID: RunnerLoaded(), RUNNER_2_ID: RunnerLoaded()},
        tasks={},
    )
    assert action is None
| {
"repo_id": "exo-explore/exo",
"file_path": "src/exo/worker/tests/unittests/test_plan/test_warmup.py",
"license": "Apache License 2.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
exo-explore/exo:tmp/run_llm.py | #!/usr/bin/env python3
import argparse
import json
import sys
import requests
def stream_chat(host: str, query: str, timeout: float = 60.0) -> None:
    """Stream a chat completion from ``host`` and print tokens as they arrive.

    Args:
        host: Hostname (without protocol) of the completion server.
        query: User message to send.
        timeout: Seconds to wait for the connection and between chunks.
            Without it, a stalled server would hang this client forever.

    Exits the process with status 1 on any request failure.
    """
    url = f"http://{host}:52415/v1/chat/completions"
    headers = {"Content-Type": "application/json"}
    payload = {
        "model": "mlx-community/Llama-3.2-1B-Instruct-4bit",
        # "model": "mlx-community/Llama-3_3-Nemotron-Super-49B-v1_5-mlx-4Bit",
        "stream": True,
        "messages": [{"role": "user", "content": query}],
    }
    try:
        with requests.post(
            url, headers=headers, json=payload, stream=True, timeout=timeout
        ) as resp:
            resp.raise_for_status()
            for line in resp.iter_lines(decode_unicode=True):
                if not line:
                    continue
                # SSE lines look like: "data: {...}" or "data: [DONE]"
                if not line.startswith("data:"):
                    continue
                data = line[len("data:") :].strip()
                if data == "[DONE]":
                    break
                try:
                    obj = json.loads(data)
                except json.JSONDecodeError:
                    # Skip malformed keep-alive/partial payloads.
                    continue
                for choice in obj.get("choices", []):
                    delta = choice.get("delta") or {}
                    content = delta.get("content")
                    if content:
                        print(content, end="", flush=True)
    except requests.RequestException as e:
        print(f"Request failed: {e}", file=sys.stderr)
        sys.exit(1)
    # Terminate the streamed output with a newline.
    print()
def main() -> None:
    """Parse command-line arguments and kick off the streaming request."""
    parser = argparse.ArgumentParser(
        description="Stream chat completions from a local server."
    )
    parser.add_argument("host", help="Hostname (without protocol), e.g. localhost")
    parser.add_argument(
        "-f",
        "--file",
        help="Path to a text file whose contents will be used as the query",
    )
    parser.add_argument(
        "query",
        nargs="*",
        help="Query text (if not using -f/--file). All remaining arguments are joined with spaces.",
    )
    args = parser.parse_args()

    # Guard first: a query must come from somewhere.
    if not args.file and not args.query:
        parser.error("You must provide either a query or a file (-f/--file).")

    if args.file:
        try:
            with open(args.file, "r", encoding="utf-8") as fh:
                question = fh.read().strip()
        except OSError as err:
            print(f"Error reading file {args.file}: {err}", file=sys.stderr)
            sys.exit(1)
    else:
        question = " ".join(args.query)

    stream_chat(args.host, question)
if __name__ == "__main__":
    # Entry point when this module is executed directly as a CLI script.
    main()
| {
"repo_id": "exo-explore/exo",
"file_path": "tmp/run_llm.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
explosion/spaCy:spacy/lang/ht/lemmatizer.py | from typing import List, Tuple
from ...lookups import Lookups
from ...pipeline import Lemmatizer
from ...tokens import Token
class HaitianCreoleLemmatizer(Lemmatizer):
    """
    Minimal Haitian Creole lemmatizer.
    Returns a word's base form based on rules and lookup,
    or defaults to the original (lowercased) form.
    """

    def is_base_form(self, token: Token) -> bool:
        """Return True if *token* already looks like a base form.

        A token counts as base when it carries no morphology at all, or
        when its morphology matches the unmarked value for its coarse POS.
        """
        morph = token.morph.to_dict()
        upos = token.pos_.lower()

        # Consider unmarked forms to be base
        if upos in {"noun", "verb", "adj", "adv"}:
            if not morph:
                return True
            if upos == "noun" and morph.get("Number") == "Sing":
                return True
            if upos == "verb" and morph.get("VerbForm") == "Inf":
                return True
            if upos == "adj" and morph.get("Degree") == "Pos":
                return True
        return False

    def rule_lemmatize(self, token: Token) -> List[str]:
        """Return candidate lemmas for *token*, memoized by (orth, pos).

        The only rule so far is the fallback: the lowercased surface form
        is its own lemma.
        """
        # Check the cache before doing any string work.
        cache_key = (token.orth, token.pos)
        if cache_key in self.cache:
            return self.cache[cache_key]

        # fallback rule: just return lowercased form
        forms = [token.text.lower()]
        self.cache[cache_key] = forms
        return forms

    @classmethod
    def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
        """Declare (required, optional) lookup tables for *mode*."""
        if mode == "rule":
            required = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
            return (required, [])
        return super().get_lookups_config(mode)
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/lang/ht/lemmatizer.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/lang/ht/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
LIST_PUNCT,
LIST_QUOTES,
merge_chars,
)
# Apostrophe characters that mark elision in Haitian Creole orthography.
# (The previous `.replace(" ", "")` was a no-op: the literal has no spaces.)
ELISION = "'’"

# Single-letter pronoun/particle prefixes that elide before a word,
# e.g. "m'ap" -> "m'" + "ap"; upper-case variants included.
_prefixes_elision = "m n l y t k w"
_prefixes_elision += " " + _prefixes_elision.upper()

TOKENIZER_PREFIXES = (
    LIST_PUNCT
    + LIST_QUOTES
    + [
        # Split an elided prefix (e.g. "m'") off the following word.
        r"(?:({pe})[{el}])(?=[{a}])".format(
            a=ALPHA, el=ELISION, pe=merge_chars(_prefixes_elision)
        )
    ]
)

TOKENIZER_SUFFIXES = (
    LIST_PUNCT
    + LIST_QUOTES
    + LIST_ELLIPSES
    + [
        r"(?<=[0-9])%",  # numbers like 10%
        r"(?<=[0-9])(?:{h})".format(h=HYPHENS),  # hyphens after numbers
        r"(?<=[{a}])['’]".format(a=ALPHA),  # apostrophes after letters
        r"(?<=[{a}])['’][mwlnytk](?=\s|$)".format(a=ALPHA),  # contractions
        r"(?<=[{a}0-9])\)",  # right parenthesis after letter/number
        r"(?<=[{a}])\.(?=\s|$)".format(
            a=ALPHA
        ),  # period after letter if space or end of string
        r"(?<=\))[\.\?!]",  # punctuation immediately after right parenthesis
    ]
)

TOKENIZER_INFIXES = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",  # arithmetic operators between digits
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),  # comma between letters
        r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        # Split after an elision apostrophe inside a word.
        r"(?<=[{a}][{el}])(?=[{a}])".format(a=ALPHA, el=ELISION),
    ]
)
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/lang/ht/punctuation.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/lang/ht/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    """
    Detect base noun phrases from a dependency parse for Haitian Creole.
    Works on both Doc and Span objects.

    Yields (start, end, label) triples of token indices, with `end`
    exclusive and `label` the string-store hash of "NP".

    Raises ValueError (Errors.E029) when the Doc has no dependency parse.
    """
    # Core nominal dependencies common in Haitian Creole
    labels = [
        "nsubj",
        "obj",
        "obl",
        "nmod",
        "appos",
        "ROOT",
    ]
    # Modifiers to optionally include in chunk (to the right)
    post_modifiers = ["compound", "flat", "flat:name", "fixed"]

    doc = doclike.doc
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)

    # Hash all label strings once, up front, for fast integer comparisons.
    np_deps = {doc.vocab.strings.add(label) for label in labels}
    np_mods = {doc.vocab.strings.add(mod) for mod in post_modifiers}
    conj_label = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
    adp_pos = doc.vocab.strings.add("ADP")
    cc_pos = doc.vocab.strings.add("CCONJ")

    # Index of the last token already emitted — prevents nested/overlapping
    # chunks when a later head's left edge reaches into an earlier chunk.
    prev_end = -1
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        if word.left_edge.i <= prev_end:
            continue
        if word.dep in np_deps:
            right_end = word
            # expand to include known modifiers to the right
            for child in word.rights:
                if child.dep in np_mods:
                    right_end = child.right_edge
                elif child.pos == NOUN:
                    right_end = child.right_edge
            left_index = word.left_edge.i
            # Skip prepositions at the start
            if word.left_edge.pos == adp_pos:
                left_index += 1
            prev_end = right_end.i
            yield left_index, right_end.i + 1, np_label
        elif word.dep == conj_label:
            # Conjuncts inherit chunk-hood from the head of the conj chain:
            # walk left until we reach the first conjunct.
            head = word.head
            while head.dep == conj_label and head.head.i < head.i:
                head = head.head
            if head.dep in np_deps:
                left_index = word.left_edge.i
                # Drop a leading coordinating conjunction from the chunk.
                if word.left_edge.pos == cc_pos:
                    left_index += 1
                prev_end = word.i
                yield left_index, word.i + 1, np_label


# Registered under the language's Defaults so Doc/Span.noun_chunks work.
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/lang/ht/syntax_iterators.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
explosion/spaCy:spacy/lang/ht/tag_map.py | from spacy.symbols import (
ADJ,
ADP,
ADV,
AUX,
CCONJ,
DET,
INTJ,
NOUN,
NUM,
PART,
PRON,
PROPN,
PUNCT,
SCONJ,
SYM,
VERB,
X,
)
# Each coarse tag string maps one-to-one onto its Universal POS symbol.
_TAG_SYMBOLS = {
    "NOUN": NOUN,
    "VERB": VERB,
    "AUX": AUX,
    "ADJ": ADJ,
    "ADV": ADV,
    "PRON": PRON,
    "DET": DET,
    "ADP": ADP,
    "SCONJ": SCONJ,
    "CCONJ": CCONJ,
    "PART": PART,
    "INTJ": INTJ,
    "NUM": NUM,
    "PROPN": PROPN,
    "PUNCT": PUNCT,
    "SYM": SYM,
    "X": X,
}

TAG_MAP = {tag: {"pos": symbol} for tag, symbol in _TAG_SYMBOLS.items()}
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/lang/ht/tag_map.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/lang/ht/tokenizer_exceptions.py | from spacy.symbols import NORM, ORTH
def make_variants(base, first_norm, second_orth, second_norm):
    """Build tokenizer exceptions for an apostrophe contraction.

    Returns entries for *base* (e.g. "m'ap") and its capitalized variant,
    each split into an elided first token (stem + apostrophe) and a second
    token, with NORM values attached.
    """
    stem = base.split("'")[0]
    lower_entry = [
        {ORTH: stem + "'", NORM: first_norm},
        {ORTH: second_orth, NORM: second_norm},
    ]
    upper_entry = [
        {ORTH: stem.capitalize() + "'", NORM: first_norm.capitalize()},
        {ORTH: second_orth, NORM: second_norm},
    ]
    return {base: lower_entry, base.capitalize(): upper_entry}
TOKENIZER_EXCEPTIONS = {"Dr.": [{ORTH: "Dr."}]}

# Apostrophe forms: (base, norm of the elided prefix, second token).
for _base, _first_norm, _second in [
    ("m'ap", "mwen", "ap"),
    ("n'ap", "nou", "ap"),
    ("l'ap", "li", "ap"),
    ("y'ap", "yo", "ap"),
    ("m'te", "mwen", "te"),
    ("m'pral", "mwen", "pral"),
    ("w'ap", "ou", "ap"),
    ("k'ap", "ki", "ap"),
    ("p'ap", "pa", "ap"),
    ("t'ap", "te", "ap"),
]:
    TOKENIZER_EXCEPTIONS.update(make_variants(_base, _first_norm, _second, _second))

# Non-apostrophe contractions: base -> ((orth1, norm1), (orth2, norm2)).
# The capitalized variant capitalizes the first token's ORTH and NORM only.
for _base, ((_o1, _n1), (_o2, _n2)) in {
    "map": (("m", "mwen"), ("ap", "ap")),
    "lem": (("le", "le"), ("m", "mwen")),
    "lew": (("le", "le"), ("w", "ou")),
    "nap": (("n", "nou"), ("ap", "ap")),
    "lap": (("l", "li"), ("ap", "ap")),
    "yap": (("y", "yo"), ("ap", "ap")),
    "mte": (("m", "mwen"), ("te", "te")),
    "mpral": (("m", "mwen"), ("pral", "pral")),
    "wap": (("w", "ou"), ("ap", "ap")),
    "kap": (("k", "ki"), ("ap", "ap")),
    "tap": (("t", "te"), ("ap", "ap")),
}.items():
    TOKENIZER_EXCEPTIONS[_base] = [
        {ORTH: _o1, NORM: _n1},
        {ORTH: _o2, NORM: _n2},
    ]
    TOKENIZER_EXCEPTIONS[_base.capitalize()] = [
        {ORTH: _o1.capitalize(), NORM: _n1.capitalize()},
        {ORTH: _o2, NORM: _n2},
    ]
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/lang/ht/tokenizer_exceptions.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/tests/lang/ht/test_exceptions.py | import pytest
def test_ht_tokenizer_handles_basic_contraction(ht_tokenizer):
    """Apostrophe contractions split into their component tokens."""
    assert [t.text for t in ht_tokenizer("m'ap ri")] == ["m'", "ap", "ri"]
    assert [t.text for t in ht_tokenizer("mwen di'w non!")] == [
        "mwen",
        "di",
        "'w",
        "non",
        "!",
    ]
@pytest.mark.parametrize("text", ["Dr."])
def test_ht_tokenizer_handles_basic_abbreviation(ht_tokenizer, text):
    """Known abbreviations stay a single token."""
    assert [t.text for t in ht_tokenizer(text)] == [text]
def test_ht_tokenizer_full_sentence(ht_tokenizer):
    """A sentence mixing contractions and punctuation tokenizes as expected."""
    expected = [
        "Si",
        "'m",
        "ka",
        "vini",
        ",",
        "m'",
        "ap",
        "pale",
        "ak",
        "li",
        ".",
    ]
    assert [t.text for t in ht_tokenizer("Si'm ka vini, m'ap pale ak li.")] == expected
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/lang/ht/test_exceptions.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
explosion/spaCy:spacy/tests/lang/ht/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
@pytest.fixture
def doc(ht_vocab):
    """A hand-parsed Haitian Creole sentence: 'Pitit gen gwo pwoblèm ak kontwòl'."""
    return Doc(
        ht_vocab,
        words=["Pitit", "gen", "gwo", "pwoblèm", "ak", "kontwòl"],
        heads=[1, 1, 5, 5, 3, 3],
        deps=["nsubj", "ROOT", "amod", "obj", "case", "nmod"],
        pos=["NOUN", "VERB", "ADJ", "NOUN", "ADP", "NOUN"],
    )
def test_noun_chunks_is_parsed(ht_tokenizer):
    """Test that noun_chunks raises Value Error for 'ht' language if Doc is not parsed."""
    unparsed = ht_tokenizer("Sa a se yon fraz")
    with pytest.raises(ValueError):
        list(unparsed.noun_chunks)
def test_ht_noun_chunks_not_nested(doc, ht_vocab):
    """No token may appear in more than one noun chunk."""
    chunks = list(doc.noun_chunks)
    assert len(chunks) > 1
    # Count how often each surface form shows up across all chunks.
    counts = {}
    for chunk in chunks:
        for word in chunk:
            counts[word.text] = counts.get(word.text, 0) + 1
    assert len(counts) > 0
    for word, freq in counts.items():
        assert freq == 1, (word, [chunk.text for chunk in doc.noun_chunks])
def test_noun_chunks_span(doc, ht_tokenizer):
    """span.noun_chunks yields only the chunks fully contained in the span."""
    doc_chunks = list(doc.noun_chunks)
    span_chunks = list(doc[0:3].noun_chunks)
    assert 0 < len(span_chunks) < len(doc_chunks)
    for chunk in span_chunks:
        assert chunk in doc_chunks
        assert chunk.start >= 0 and chunk.end <= 3
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/lang/ht/test_noun_chunks.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
explosion/spaCy:spacy/tests/lang/ht/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["(ka)"])
def test_ht_tokenizer_splits_no_special(ht_tokenizer, text):
    # Both parentheses split off: "(", "ka", ")".
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["m'ap"])
def test_ht_tokenizer_splits_no_punct(ht_tokenizer, text):
    # The contraction alone yields two tokens: "m'" and "ap".
    assert len(ht_tokenizer(text)) == 2
@pytest.mark.parametrize("text", ["(m'ap"])
def test_ht_tokenizer_splits_prefix_punct(ht_tokenizer, text):
    # A leading "(" splits off in addition to the contraction split.
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["m'ap)"])
def test_ht_tokenizer_splits_suffix_punct(ht_tokenizer, text):
    # A trailing ")" splits off in addition to the contraction split.
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["(m'ap)"])
def test_ht_tokenizer_splits_even_wrap(ht_tokenizer, text):
    # Matched parens around a contraction: "(", "m'", "ap", ")".
    assert len(ht_tokenizer(text)) == 4
@pytest.mark.parametrize("text", ["(m'ap?)"])
def test_ht_tokenizer_splits_uneven_wrap(ht_tokenizer, text):
    # Parens plus "?" all split: "(", "m'", "ap", "?", ")".
    assert len(ht_tokenizer(text)) == 5
@pytest.mark.parametrize(
    "text,length", [("Ozetazini.", 2), ("Frans.", 2), ("(Ozetazini.", 3)]
)
def test_ht_tokenizer_splits_prefix_interact(ht_tokenizer, text, length):
    # Prefix punctuation and a sentence-final period split independently.
    assert len(ht_tokenizer(text)) == length
@pytest.mark.parametrize("text", ["Ozetazini.)"])
def test_ht_tokenizer_splits_suffix_interact(ht_tokenizer, text):
    # Period and closing paren both split: word, ".", ")".
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["(Ozetazini.)"])
def test_ht_tokenizer_splits_even_wrap_interact(ht_tokenizer, text):
    # "(", word, ".", ")" — four tokens in total.
    assert len(ht_tokenizer(text)) == 4
@pytest.mark.parametrize("text", ["(Ozetazini?)"])
def test_ht_tokenizer_splits_uneven_wrap_interact(ht_tokenizer, text):
    # "(", word, "?", ")" — four tokens in total.
    assert len(ht_tokenizer(text)) == 4
@pytest.mark.parametrize("text", ["pi-bon"])
def test_ht_tokenizer_splits_hyphens(ht_tokenizer, text):
    # An infix hyphen is split into its own token.
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_ht_tokenizer_splits_numeric_range(ht_tokenizer, text):
    # The hyphen in a numeric range splits, leaving number, "-", number.
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["pi.Bon", "Bon.Jour"])
def test_ht_tokenizer_splits_period_infix(ht_tokenizer, text):
    # A period between two words is split off as an infix.
    assert len(ht_tokenizer(text)) == 3
@pytest.mark.parametrize("text", ["Bonjou,moun", "youn,de"])
def test_ht_tokenizer_splits_comma_infix(ht_tokenizer, text):
    # An infix comma becomes its own token between the two words.
    left, _, right = text.partition(",")
    assert [t.text for t in ht_tokenizer(text)] == [left, ",", right]
@pytest.mark.parametrize("text", ["pi...Bon", "pi...bon"])
def test_ht_tokenizer_splits_ellipsis_infix(ht_tokenizer, text):
    # An infix ellipsis splits into word, "...", word.
    assert len(ht_tokenizer(text)) == 3
def test_ht_tokenizer_splits_double_hyphen_infix(ht_tokenizer):
    """A double hyphen acts as an infix and is kept as its own token."""
    tokens = ht_tokenizer("Pa vrè--men ou konnen--mwen renmen w.")
    expected = [
        "Pa", "vrè", "--", "men", "ou", "konnen", "--", "mwen", "renmen", "w", ".",
    ]
    for i, text in enumerate(expected):
        assert tokens[i].text == text
def test_ht_tokenizer_splits_period_abbr(ht_tokenizer):
    """A sentence-final period followed by an abbreviation splits cleanly."""
    tokens = ht_tokenizer("Jodi a se Madi.Mr.")
    assert len(tokens) == 7
    observed = [t.text for t in tokens]
    assert observed == ["Jodi", "a", "se", "Madi", ".", "Mr", "."]
def test_ht_tokenizer_splits_paren_period(ht_tokenizer):
    """Closing paren and final period are split off as separate tokens."""
    words = [t.text for t in ht_tokenizer("M ap teste sa (pou kounye a).")]
    for expected in ("a", ")", "."):
        assert expected in words
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/lang/ht/test_prefix_suffix_infix.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
explosion/spaCy:spacy/pipeline/factories.py | from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from thinc.api import Model
from thinc.types import Floats2d, Ragged
from ..kb import Candidate, KnowledgeBase
from ..language import Language
from ..pipeline._parser_internals.transition_system import TransitionSystem
from ..pipeline.attributeruler import AttributeRuler
from ..pipeline.dep_parser import DEFAULT_PARSER_MODEL, DependencyParser
from ..pipeline.edit_tree_lemmatizer import (
DEFAULT_EDIT_TREE_LEMMATIZER_MODEL,
EditTreeLemmatizer,
)
# Import factory default configurations
from ..pipeline.entity_linker import DEFAULT_NEL_MODEL, EntityLinker, EntityLinker_v1
from ..pipeline.entityruler import DEFAULT_ENT_ID_SEP, EntityRuler
from ..pipeline.functions import DocCleaner, TokenSplitter
from ..pipeline.lemmatizer import Lemmatizer
from ..pipeline.morphologizer import DEFAULT_MORPH_MODEL, Morphologizer
from ..pipeline.multitask import DEFAULT_MT_MODEL, MultitaskObjective
from ..pipeline.ner import DEFAULT_NER_MODEL, EntityRecognizer
from ..pipeline.sentencizer import Sentencizer
from ..pipeline.senter import DEFAULT_SENTER_MODEL, SentenceRecognizer
from ..pipeline.span_finder import DEFAULT_SPAN_FINDER_MODEL, SpanFinder
from ..pipeline.span_ruler import DEFAULT_SPANS_KEY as SPAN_RULER_DEFAULT_SPANS_KEY
from ..pipeline.span_ruler import (
SpanRuler,
prioritize_existing_ents_filter,
prioritize_new_ents_filter,
)
from ..pipeline.spancat import (
DEFAULT_SPANCAT_MODEL,
DEFAULT_SPANCAT_SINGLELABEL_MODEL,
DEFAULT_SPANS_KEY,
SpanCategorizer,
Suggester,
)
from ..pipeline.tagger import DEFAULT_TAGGER_MODEL, Tagger
from ..pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL, TextCategorizer
from ..pipeline.textcat_multilabel import (
DEFAULT_MULTI_TEXTCAT_MODEL,
MultiLabel_TextCategorizer,
)
from ..pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL, Tok2Vec
from ..tokens.doc import Doc
from ..tokens.span import Span
from ..vocab import Vocab
# Module-level guard flag: set to True once register_factories() has run,
# so the (idempotent) registration work is performed only once per process.
FACTORIES_REGISTERED = False
def register_factories() -> None:
    """Register all factories with the registry.

    This function registers all pipeline component factories, centralizing
    the registrations that were previously done with @Language.factory decorators.

    Idempotent: guarded by the module-level FACTORIES_REGISTERED flag, so
    calling it more than once is a no-op after the first call.  Each call to
    ``Language.factory(...)(make_fn)`` mirrors exactly what the decorator form
    ``@Language.factory(...)`` would have done on the ``make_*`` function.
    """
    global FACTORIES_REGISTERED

    if FACTORIES_REGISTERED:
        return

    # Register factories using the same pattern as Language.factory decorator
    # We use Language.factory()() pattern which exactly mimics the decorator

    # attributeruler
    Language.factory(
        "attribute_ruler",
        default_config={
            "validate": False,
            "scorer": {"@scorers": "spacy.attribute_ruler_scorer.v1"},
        },
    )(make_attribute_ruler)

    # entity_linker
    Language.factory(
        "entity_linker",
        requires=["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
        assigns=["token.ent_kb_id"],
        default_config={
            "model": DEFAULT_NEL_MODEL,
            "labels_discard": [],
            "n_sents": 0,
            "incl_prior": True,
            "incl_context": True,
            "entity_vector_length": 64,
            "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
            "get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
            "generate_empty_kb": {"@misc": "spacy.EmptyKB.v2"},
            "overwrite": True,
            "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
            "use_gold_ents": True,
            "candidates_batch_size": 1,
            "threshold": None,
        },
        default_score_weights={
            "nel_micro_f": 1.0,
            "nel_micro_r": None,
            "nel_micro_p": None,
        },
    )(make_entity_linker)

    # entity_ruler (legacy EntityRuler; see future_entity_ruler below)
    Language.factory(
        "entity_ruler",
        assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
        default_config={
            "phrase_matcher_attr": None,
            "matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
            "validate": False,
            "overwrite_ents": False,
            "ent_id_sep": DEFAULT_ENT_ID_SEP,
            "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
        },
        default_score_weights={
            "ents_f": 1.0,
            "ents_p": 0.0,
            "ents_r": 0.0,
            "ents_per_type": None,
        },
    )(make_entity_ruler)

    # lemmatizer
    Language.factory(
        "lemmatizer",
        assigns=["token.lemma"],
        default_config={
            "model": None,
            "mode": "lookup",
            "overwrite": False,
            "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
        },
        default_score_weights={"lemma_acc": 1.0},
    )(make_lemmatizer)

    # textcat
    Language.factory(
        "textcat",
        assigns=["doc.cats"],
        default_config={
            "threshold": 0.0,
            "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
            "scorer": {"@scorers": "spacy.textcat_scorer.v2"},
        },
        default_score_weights={
            "cats_score": 1.0,
            "cats_score_desc": None,
            "cats_micro_p": None,
            "cats_micro_r": None,
            "cats_micro_f": None,
            "cats_macro_p": None,
            "cats_macro_r": None,
            "cats_macro_f": None,
            "cats_macro_auc": None,
            "cats_f_per_type": None,
        },
    )(make_textcat)

    # token_splitter
    Language.factory(
        "token_splitter",
        default_config={"min_length": 25, "split_length": 10},
        retokenizes=True,
    )(make_token_splitter)

    # doc_cleaner
    Language.factory(
        "doc_cleaner",
        default_config={"attrs": {"tensor": None, "_.trf_data": None}, "silent": True},
    )(make_doc_cleaner)

    # tok2vec
    Language.factory(
        "tok2vec",
        assigns=["doc.tensor"],
        default_config={"model": DEFAULT_TOK2VEC_MODEL},
    )(make_tok2vec)

    # senter
    Language.factory(
        "senter",
        assigns=["token.is_sent_start"],
        default_config={
            "model": DEFAULT_SENTER_MODEL,
            "overwrite": False,
            "scorer": {"@scorers": "spacy.senter_scorer.v1"},
        },
        default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0},
    )(make_senter)

    # morphologizer
    Language.factory(
        "morphologizer",
        assigns=["token.morph", "token.pos"],
        default_config={
            "model": DEFAULT_MORPH_MODEL,
            "overwrite": True,
            "extend": False,
            "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"},
            "label_smoothing": 0.0,
        },
        default_score_weights={
            "pos_acc": 0.5,
            "morph_acc": 0.5,
            "morph_per_feat": None,
        },
    )(make_morphologizer)

    # spancat
    Language.factory(
        "spancat",
        assigns=["doc.spans"],
        default_config={
            "threshold": 0.5,
            "spans_key": DEFAULT_SPANS_KEY,
            "max_positive": None,
            "model": DEFAULT_SPANCAT_MODEL,
            "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
            "scorer": {"@scorers": "spacy.spancat_scorer.v1"},
        },
        default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0},
    )(make_spancat)

    # spancat_singlelabel
    Language.factory(
        "spancat_singlelabel",
        assigns=["doc.spans"],
        default_config={
            "spans_key": DEFAULT_SPANS_KEY,
            "model": DEFAULT_SPANCAT_SINGLELABEL_MODEL,
            "negative_weight": 1.0,
            "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
            "scorer": {"@scorers": "spacy.spancat_scorer.v1"},
            "allow_overlap": True,
        },
        default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0},
    )(make_spancat_singlelabel)

    # future_entity_ruler (SpanRuler-backed replacement for entity_ruler)
    Language.factory(
        "future_entity_ruler",
        assigns=["doc.ents"],
        default_config={
            "phrase_matcher_attr": None,
            "validate": False,
            "overwrite_ents": False,
            "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
            "ent_id_sep": "__unused__",
            "matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
        },
        default_score_weights={
            "ents_f": 1.0,
            "ents_p": 0.0,
            "ents_r": 0.0,
            "ents_per_type": None,
        },
    )(make_future_entity_ruler)

    # span_ruler
    Language.factory(
        "span_ruler",
        assigns=["doc.spans"],
        default_config={
            "spans_key": SPAN_RULER_DEFAULT_SPANS_KEY,
            "spans_filter": None,
            "annotate_ents": False,
            "ents_filter": {"@misc": "spacy.first_longest_spans_filter.v1"},
            "phrase_matcher_attr": None,
            "matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
            "validate": False,
            "overwrite": True,
            "scorer": {
                "@scorers": "spacy.overlapping_labeled_spans_scorer.v1",
                "spans_key": SPAN_RULER_DEFAULT_SPANS_KEY,
            },
        },
        default_score_weights={
            f"spans_{SPAN_RULER_DEFAULT_SPANS_KEY}_f": 1.0,
            f"spans_{SPAN_RULER_DEFAULT_SPANS_KEY}_p": 0.0,
            f"spans_{SPAN_RULER_DEFAULT_SPANS_KEY}_r": 0.0,
            f"spans_{SPAN_RULER_DEFAULT_SPANS_KEY}_per_type": None,
        },
    )(make_span_ruler)

    # trainable_lemmatizer
    Language.factory(
        "trainable_lemmatizer",
        assigns=["token.lemma"],
        requires=[],
        default_config={
            "model": DEFAULT_EDIT_TREE_LEMMATIZER_MODEL,
            "backoff": "orth",
            "min_tree_freq": 3,
            "overwrite": False,
            "top_k": 1,
            "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
        },
        default_score_weights={"lemma_acc": 1.0},
    )(make_edit_tree_lemmatizer)

    # textcat_multilabel
    Language.factory(
        "textcat_multilabel",
        assigns=["doc.cats"],
        default_config={
            "threshold": 0.5,
            "model": DEFAULT_MULTI_TEXTCAT_MODEL,
            "scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v2"},
        },
        default_score_weights={
            "cats_score": 1.0,
            "cats_score_desc": None,
            "cats_micro_p": None,
            "cats_micro_r": None,
            "cats_micro_f": None,
            "cats_macro_p": None,
            "cats_macro_r": None,
            "cats_macro_f": None,
            "cats_macro_auc": None,
            "cats_f_per_type": None,
        },
    )(make_multilabel_textcat)

    # span_finder
    Language.factory(
        "span_finder",
        assigns=["doc.spans"],
        default_config={
            "threshold": 0.5,
            "model": DEFAULT_SPAN_FINDER_MODEL,
            "spans_key": DEFAULT_SPANS_KEY,
            "max_length": 25,
            "min_length": None,
            "scorer": {"@scorers": "spacy.span_finder_scorer.v1"},
        },
        default_score_weights={
            f"spans_{DEFAULT_SPANS_KEY}_f": 1.0,
            f"spans_{DEFAULT_SPANS_KEY}_p": 0.0,
            f"spans_{DEFAULT_SPANS_KEY}_r": 0.0,
        },
    )(make_span_finder)

    # ner
    Language.factory(
        "ner",
        assigns=["doc.ents", "token.ent_iob", "token.ent_type"],
        default_config={
            "moves": None,
            "update_with_oracle_cut_size": 100,
            "model": DEFAULT_NER_MODEL,
            "incorrect_spans_key": None,
            "scorer": {"@scorers": "spacy.ner_scorer.v1"},
        },
        default_score_weights={
            "ents_f": 1.0,
            "ents_p": 0.0,
            "ents_r": 0.0,
            "ents_per_type": None,
        },
    )(make_ner)

    # beam_ner
    Language.factory(
        "beam_ner",
        assigns=["doc.ents", "token.ent_iob", "token.ent_type"],
        default_config={
            "moves": None,
            "update_with_oracle_cut_size": 100,
            "model": DEFAULT_NER_MODEL,
            "beam_density": 0.01,
            "beam_update_prob": 0.5,
            "beam_width": 32,
            "incorrect_spans_key": None,
            "scorer": {"@scorers": "spacy.ner_scorer.v1"},
        },
        default_score_weights={
            "ents_f": 1.0,
            "ents_p": 0.0,
            "ents_r": 0.0,
            "ents_per_type": None,
        },
    )(make_beam_ner)

    # parser
    Language.factory(
        "parser",
        assigns=["token.dep", "token.head", "token.is_sent_start", "doc.sents"],
        default_config={
            "moves": None,
            "update_with_oracle_cut_size": 100,
            "learn_tokens": False,
            "min_action_freq": 30,
            "model": DEFAULT_PARSER_MODEL,
            "scorer": {"@scorers": "spacy.parser_scorer.v1"},
        },
        default_score_weights={
            "dep_uas": 0.5,
            "dep_las": 0.5,
            "dep_las_per_type": None,
            "sents_p": None,
            "sents_r": None,
            "sents_f": 0.0,
        },
    )(make_parser)

    # beam_parser
    Language.factory(
        "beam_parser",
        assigns=["token.dep", "token.head", "token.is_sent_start", "doc.sents"],
        default_config={
            "moves": None,
            "update_with_oracle_cut_size": 100,
            "learn_tokens": False,
            "min_action_freq": 30,
            "beam_width": 8,
            "beam_density": 0.0001,
            "beam_update_prob": 0.5,
            "model": DEFAULT_PARSER_MODEL,
            "scorer": {"@scorers": "spacy.parser_scorer.v1"},
        },
        default_score_weights={
            "dep_uas": 0.5,
            "dep_las": 0.5,
            "dep_las_per_type": None,
            "sents_p": None,
            "sents_r": None,
            "sents_f": 0.0,
        },
    )(make_beam_parser)

    # tagger
    Language.factory(
        "tagger",
        assigns=["token.tag"],
        default_config={
            "model": DEFAULT_TAGGER_MODEL,
            "overwrite": False,
            "scorer": {"@scorers": "spacy.tagger_scorer.v1"},
            "neg_prefix": "!",
            "label_smoothing": 0.0,
        },
        default_score_weights={
            "tag_acc": 1.0,
            "pos_acc": 0.0,
            "tag_micro_p": None,
            "tag_micro_r": None,
            "tag_micro_f": None,
        },
    )(make_tagger)

    # nn_labeller
    Language.factory(
        "nn_labeller",
        default_config={
            "labels": None,
            "target": "dep_tag_offset",
            "model": DEFAULT_MT_MODEL,
        },
    )(make_nn_labeller)

    # sentencizer
    Language.factory(
        "sentencizer",
        assigns=["token.is_sent_start", "doc.sents"],
        default_config={
            "punct_chars": None,
            "overwrite": False,
            "scorer": {"@scorers": "spacy.senter_scorer.v1"},
        },
        default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0},
    )(make_sentencizer)

    # Set the flag to indicate that all factories have been registered
    FACTORIES_REGISTERED = True
# We can't have function implementations for these factories in Cython, because
# we need to build a Pydantic model for them dynamically, reading their argument
# structure from the signature. In Cython 3, this doesn't work because the
# from __future__ import annotations semantics are used, which means the types
# are stored as strings.
def make_sentencizer(
    nlp: Language,
    name: str,
    punct_chars: Optional[List[str]],
    overwrite: bool,
    scorer: Optional[Callable],
):
    """Construct a rule-based Sentencizer from its factory config."""
    component = Sentencizer(
        name,
        punct_chars=punct_chars,
        overwrite=overwrite,
        scorer=scorer,
    )
    return component
def make_attribute_ruler(
    nlp: Language, name: str, validate: bool, scorer: Optional[Callable]
):
    """Construct an AttributeRuler sharing the pipeline's vocab."""
    ruler = AttributeRuler(nlp.vocab, name, validate=validate, scorer=scorer)
    return ruler
def make_entity_linker(
    nlp: Language,
    name: str,
    model: Model,
    *,
    labels_discard: Iterable[str],
    n_sents: int,
    incl_prior: bool,
    incl_context: bool,
    entity_vector_length: int,
    get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
    get_candidates_batch: Callable[
        [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
    ],
    generate_empty_kb: Callable[[Vocab, int], KnowledgeBase],
    overwrite: bool,
    scorer: Optional[Callable],
    use_gold_ents: bool,
    candidates_batch_size: int,
    threshold: Optional[float] = None,
):
    """Construct an EntityLinker, falling back to the legacy EntityLinker_v1
    when the model architecture does not include a span maker."""
    # Options shared by both the current and the legacy implementation.
    shared = dict(
        labels_discard=labels_discard,
        n_sents=n_sents,
        incl_prior=incl_prior,
        incl_context=incl_context,
        entity_vector_length=entity_vector_length,
        get_candidates=get_candidates,
        overwrite=overwrite,
        scorer=scorer,
    )
    if not model.attrs.get("include_span_maker", False):
        # The only difference in arguments here is that use_gold_ents and
        # threshold aren't available on the legacy implementation.
        return EntityLinker_v1(nlp.vocab, model, name, **shared)
    return EntityLinker(
        nlp.vocab,
        model,
        name,
        get_candidates_batch=get_candidates_batch,
        generate_empty_kb=generate_empty_kb,
        use_gold_ents=use_gold_ents,
        candidates_batch_size=candidates_batch_size,
        threshold=threshold,
        **shared,
    )
def make_lemmatizer(
    nlp: Language,
    model: Optional[Model],
    name: str,
    mode: str,
    overwrite: bool,
    scorer: Optional[Callable],
):
    """Construct a lookup/rule-based Lemmatizer component."""
    return Lemmatizer(
        nlp.vocab,
        model,
        name,
        mode=mode,
        overwrite=overwrite,
        scorer=scorer,
    )
def make_textcat(
    nlp: Language,
    name: str,
    model: Model[List[Doc], List[Floats2d]],
    threshold: float,
    scorer: Optional[Callable],
) -> TextCategorizer:
    """Construct a single-label, mutually-exclusive TextCategorizer."""
    textcat = TextCategorizer(
        nlp.vocab, model, name, threshold=threshold, scorer=scorer
    )
    return textcat
def make_token_splitter(
    nlp: Language, name: str, *, min_length: int = 0, split_length: int = 0
):
    """Construct a TokenSplitter that breaks up overly long tokens."""
    splitter = TokenSplitter(min_length=min_length, split_length=split_length)
    return splitter
def make_doc_cleaner(nlp: Language, name: str, *, attrs: Dict[str, Any], silent: bool):
    """Construct a DocCleaner that resets the configured Doc attributes."""
    cleaner = DocCleaner(attrs, silent=silent)
    return cleaner
def make_tok2vec(nlp: Language, name: str, model: Model) -> Tok2Vec:
    """Construct a Tok2Vec embedding component."""
    tok2vec = Tok2Vec(nlp.vocab, model, name)
    return tok2vec
def make_spancat(
    nlp: Language,
    name: str,
    suggester: Suggester,
    model: Model[Tuple[List[Doc], Ragged], Floats2d],
    spans_key: str,
    scorer: Optional[Callable],
    threshold: float,
    max_positive: Optional[int],
) -> SpanCategorizer:
    """Construct a multi-label SpanCategorizer (overlapping spans allowed)."""
    options = dict(
        model=model,
        suggester=suggester,
        name=name,
        spans_key=spans_key,
        max_positive=max_positive,
        threshold=threshold,
        scorer=scorer,
    )
    # Multi-label mode: overlaps permitted, no explicit negative label.
    return SpanCategorizer(
        nlp.vocab,
        negative_weight=None,
        allow_overlap=True,
        add_negative_label=False,
        **options,
    )
def make_spancat_singlelabel(
    nlp: Language,
    name: str,
    suggester: Suggester,
    model: Model[Tuple[List[Doc], Ragged], Floats2d],
    spans_key: str,
    negative_weight: float,
    allow_overlap: bool,
    scorer: Optional[Callable],
) -> SpanCategorizer:
    """Construct a single-label SpanCategorizer (one class per span)."""
    options = dict(
        model=model,
        suggester=suggester,
        name=name,
        spans_key=spans_key,
        negative_weight=negative_weight,
        allow_overlap=allow_overlap,
        scorer=scorer,
    )
    # Single-label mode: exactly one positive class, explicit negative label.
    return SpanCategorizer(
        nlp.vocab,
        max_positive=1,
        add_negative_label=True,
        threshold=None,
        **options,
    )
def make_future_entity_ruler(
    nlp: Language,
    name: str,
    phrase_matcher_attr: Optional[Union[int, str]],
    matcher_fuzzy_compare: Callable,
    validate: bool,
    overwrite_ents: bool,
    scorer: Optional[Callable],
    ent_id_sep: str,
):
    """Construct the SpanRuler-backed replacement for the legacy EntityRuler.

    NOTE: ent_id_sep is accepted for config compatibility but not forwarded.
    """
    # Pick the ents filter matching the requested overwrite behaviour.
    ents_filter = (
        prioritize_new_ents_filter
        if overwrite_ents
        else prioritize_existing_ents_filter
    )
    return SpanRuler(
        nlp,
        name,
        spans_key=None,
        spans_filter=None,
        annotate_ents=True,
        ents_filter=ents_filter,
        phrase_matcher_attr=phrase_matcher_attr,
        matcher_fuzzy_compare=matcher_fuzzy_compare,
        validate=validate,
        overwrite=False,
        scorer=scorer,
    )
def make_entity_ruler(
    nlp: Language,
    name: str,
    phrase_matcher_attr: Optional[Union[int, str]],
    matcher_fuzzy_compare: Callable,
    validate: bool,
    overwrite_ents: bool,
    ent_id_sep: str,
    scorer: Optional[Callable],
):
    """Construct the legacy EntityRuler component."""
    options = dict(
        phrase_matcher_attr=phrase_matcher_attr,
        matcher_fuzzy_compare=matcher_fuzzy_compare,
        validate=validate,
        overwrite_ents=overwrite_ents,
        ent_id_sep=ent_id_sep,
        scorer=scorer,
    )
    return EntityRuler(nlp, name, **options)
def make_span_ruler(
    nlp: Language,
    name: str,
    spans_key: Optional[str],
    spans_filter: Optional[Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]]],
    annotate_ents: bool,
    ents_filter: Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]],
    phrase_matcher_attr: Optional[Union[int, str]],
    matcher_fuzzy_compare: Callable,
    validate: bool,
    overwrite: bool,
    scorer: Optional[Callable],
):
    """Construct a SpanRuler from its factory config."""
    options = dict(
        spans_key=spans_key,
        spans_filter=spans_filter,
        annotate_ents=annotate_ents,
        ents_filter=ents_filter,
        phrase_matcher_attr=phrase_matcher_attr,
        matcher_fuzzy_compare=matcher_fuzzy_compare,
        validate=validate,
        overwrite=overwrite,
        scorer=scorer,
    )
    return SpanRuler(nlp, name, **options)
def make_edit_tree_lemmatizer(
    nlp: Language,
    name: str,
    model: Model,
    backoff: Optional[str],
    min_tree_freq: int,
    overwrite: bool,
    top_k: int,
    scorer: Optional[Callable],
):
    """Construct a trainable edit-tree lemmatizer."""
    options = dict(
        backoff=backoff,
        min_tree_freq=min_tree_freq,
        overwrite=overwrite,
        top_k=top_k,
        scorer=scorer,
    )
    return EditTreeLemmatizer(nlp.vocab, model, name, **options)
def make_multilabel_textcat(
    nlp: Language,
    name: str,
    model: Model[List[Doc], List[Floats2d]],
    threshold: float,
    scorer: Optional[Callable],
) -> MultiLabel_TextCategorizer:
    """Construct a multi-label (non-exclusive classes) TextCategorizer."""
    textcat = MultiLabel_TextCategorizer(
        nlp.vocab,
        model,
        name,
        threshold=threshold,
        scorer=scorer,
    )
    return textcat
def make_span_finder(
    nlp: Language,
    name: str,
    model: Model[Iterable[Doc], Floats2d],
    spans_key: str,
    threshold: float,
    max_length: Optional[int],
    min_length: Optional[int],
    scorer: Optional[Callable],
) -> SpanFinder:
    """Construct a SpanFinder that proposes candidate spans."""
    options = dict(
        model=model,
        name=name,
        spans_key=spans_key,
        threshold=threshold,
        max_length=max_length,
        min_length=min_length,
        scorer=scorer,
    )
    return SpanFinder(nlp, **options)
def make_ner(
    nlp: Language,
    name: str,
    model: Model,
    moves: Optional[TransitionSystem],
    update_with_oracle_cut_size: int,
    incorrect_spans_key: Optional[str],
    scorer: Optional[Callable],
):
    """Construct a greedy transition-based EntityRecognizer."""
    options = dict(
        name=name,
        moves=moves,
        update_with_oracle_cut_size=update_with_oracle_cut_size,
        incorrect_spans_key=incorrect_spans_key,
        scorer=scorer,
    )
    return EntityRecognizer(nlp.vocab, model, **options)
def make_beam_ner(
    nlp: Language,
    name: str,
    model: Model,
    moves: Optional[TransitionSystem],
    update_with_oracle_cut_size: int,
    beam_width: int,
    beam_density: float,
    beam_update_prob: float,
    incorrect_spans_key: Optional[str],
    scorer: Optional[Callable],
):
    """Construct a beam-search EntityRecognizer."""
    options = dict(
        name=name,
        moves=moves,
        update_with_oracle_cut_size=update_with_oracle_cut_size,
        beam_width=beam_width,
        beam_density=beam_density,
        beam_update_prob=beam_update_prob,
        incorrect_spans_key=incorrect_spans_key,
        scorer=scorer,
    )
    return EntityRecognizer(nlp.vocab, model, **options)
def make_parser(
    nlp: Language,
    name: str,
    model: Model,
    moves: Optional[TransitionSystem],
    update_with_oracle_cut_size: int,
    learn_tokens: bool,
    min_action_freq: int,
    scorer: Optional[Callable],
):
    """Construct a greedy transition-based DependencyParser."""
    options = dict(
        name=name,
        moves=moves,
        update_with_oracle_cut_size=update_with_oracle_cut_size,
        learn_tokens=learn_tokens,
        min_action_freq=min_action_freq,
        scorer=scorer,
    )
    return DependencyParser(nlp.vocab, model, **options)
def make_beam_parser(
    nlp: Language,
    name: str,
    model: Model,
    moves: Optional[TransitionSystem],
    update_with_oracle_cut_size: int,
    learn_tokens: bool,
    min_action_freq: int,
    beam_width: int,
    beam_density: float,
    beam_update_prob: float,
    scorer: Optional[Callable],
):
    """Construct a beam-search DependencyParser."""
    options = dict(
        name=name,
        moves=moves,
        update_with_oracle_cut_size=update_with_oracle_cut_size,
        learn_tokens=learn_tokens,
        min_action_freq=min_action_freq,
        beam_width=beam_width,
        beam_density=beam_density,
        beam_update_prob=beam_update_prob,
        scorer=scorer,
    )
    return DependencyParser(nlp.vocab, model, **options)
def make_tagger(
    nlp: Language,
    name: str,
    model: Model,
    overwrite: bool,
    scorer: Optional[Callable],
    neg_prefix: str,
    label_smoothing: float,
):
    """Construct a trainable part-of-speech Tagger."""
    options = dict(
        name=name,
        overwrite=overwrite,
        scorer=scorer,
        neg_prefix=neg_prefix,
        label_smoothing=label_smoothing,
    )
    return Tagger(nlp.vocab, model, **options)
def make_nn_labeller(
    nlp: Language, name: str, model: Model, labels: Optional[dict], target: str
):
    """Construct a MultitaskObjective auxiliary component.

    NOTE: `labels` is accepted for config compatibility but not forwarded.
    """
    objective = MultitaskObjective(nlp.vocab, model, name, target=target)
    return objective
def make_morphologizer(
    nlp: Language,
    model: Model,
    name: str,
    overwrite: bool,
    extend: bool,
    label_smoothing: float,
    scorer: Optional[Callable],
):
    """Construct a Morphologizer predicting morphology and coarse POS."""
    options = dict(
        overwrite=overwrite,
        extend=extend,
        label_smoothing=label_smoothing,
        scorer=scorer,
    )
    return Morphologizer(nlp.vocab, model, name, **options)
def make_senter(
    nlp: Language, name: str, model: Model, overwrite: bool, scorer: Optional[Callable]
):
    """Construct a trainable SentenceRecognizer."""
    senter = SentenceRecognizer(
        nlp.vocab, model, name, overwrite=overwrite, scorer=scorer
    )
    return senter
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/pipeline/factories.py",
"license": "MIT License",
"lines": 846,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/registrations.py | """Centralized registry population for spaCy config
This module centralizes registry decorations to prevent circular import issues
with Cython annotation changes from __future__ import annotations. Functions
remain in their original locations, but decoration is moved here.
Component definitions and registrations are in spacy/pipeline/factories.py
"""
# Module-level guard flag: set to True once populate_registry() has run, so
# repeated calls skip the (idempotent) registration work.
REGISTRY_POPULATED = False
def populate_registry() -> None:
"""Populate the registry with all necessary components.
This function should be called before accessing the registry, to ensure
it's populated. The function uses a global flag to prevent repopulation.
"""
global REGISTRY_POPULATED
if REGISTRY_POPULATED:
return
# Import all necessary modules
from .lang.ja import create_tokenizer as create_japanese_tokenizer
from .lang.ko import create_tokenizer as create_korean_tokenizer
from .lang.th import create_thai_tokenizer
from .lang.vi import create_vietnamese_tokenizer
from .lang.zh import create_chinese_tokenizer
from .language import load_lookups_data
from .matcher.levenshtein import make_levenshtein_compare
from .ml.models.entity_linker import (
create_candidates,
create_candidates_batch,
empty_kb,
empty_kb_for_config,
load_kb,
)
from .pipeline.attributeruler import make_attribute_ruler_scorer
from .pipeline.dep_parser import make_parser_scorer
# Import the functions we refactored by removing direct registry decorators
from .pipeline.entity_linker import make_entity_linker_scorer
from .pipeline.entityruler import (
make_entity_ruler_scorer as make_entityruler_scorer,
)
from .pipeline.lemmatizer import make_lemmatizer_scorer
from .pipeline.morphologizer import make_morphologizer_scorer
from .pipeline.ner import make_ner_scorer
from .pipeline.senter import make_senter_scorer
from .pipeline.span_finder import make_span_finder_scorer
from .pipeline.span_ruler import (
make_overlapping_labeled_spans_scorer,
make_preserve_existing_ents_filter,
make_prioritize_new_ents_filter,
)
from .pipeline.spancat import (
build_ngram_range_suggester,
build_ngram_suggester,
build_preset_spans_suggester,
make_spancat_scorer,
)
# Import all pipeline components that were using registry decorators
from .pipeline.tagger import make_tagger_scorer
from .pipeline.textcat import make_textcat_scorer
from .pipeline.textcat_multilabel import make_textcat_multilabel_scorer
from .util import make_first_longest_spans_filter, registry
# Register miscellaneous components
registry.misc("spacy.first_longest_spans_filter.v1")(
make_first_longest_spans_filter
)
registry.misc("spacy.ngram_suggester.v1")(build_ngram_suggester)
registry.misc("spacy.ngram_range_suggester.v1")(build_ngram_range_suggester)
registry.misc("spacy.preset_spans_suggester.v1")(build_preset_spans_suggester)
registry.misc("spacy.prioritize_new_ents_filter.v1")(
make_prioritize_new_ents_filter
)
registry.misc("spacy.prioritize_existing_ents_filter.v1")(
make_preserve_existing_ents_filter
)
registry.misc("spacy.levenshtein_compare.v1")(make_levenshtein_compare)
# KB-related registrations
registry.misc("spacy.KBFromFile.v1")(load_kb)
registry.misc("spacy.EmptyKB.v2")(empty_kb_for_config)
registry.misc("spacy.EmptyKB.v1")(empty_kb)
registry.misc("spacy.CandidateGenerator.v1")(create_candidates)
registry.misc("spacy.CandidateBatchGenerator.v1")(create_candidates_batch)
registry.misc("spacy.LookupsDataLoader.v1")(load_lookups_data)
# Need to get references to the existing functions in registry by importing the function that is there
# For the registry that was previously decorated
# Import ML components that use registry
from .language import create_tokenizer
from .ml._precomputable_affine import PrecomputableAffine
from .ml.callbacks import (
create_models_and_pipes_with_nvtx_range,
create_models_with_nvtx_range,
)
from .ml.extract_ngrams import extract_ngrams
from .ml.extract_spans import extract_spans
# Import decorator-removed ML components
from .ml.featureextractor import FeatureExtractor
from .ml.models.entity_linker import build_nel_encoder
from .ml.models.multi_task import (
create_pretrain_characters,
create_pretrain_vectors,
)
from .ml.models.parser import build_tb_parser_model
from .ml.models.span_finder import build_finder_model
from .ml.models.spancat import (
build_linear_logistic,
build_mean_max_reducer,
build_spancat_model,
)
from .ml.models.tagger import build_tagger_model
from .ml.models.textcat import (
build_bow_text_classifier,
build_bow_text_classifier_v3,
build_reduce_text_classifier,
build_simple_cnn_text_classifier,
build_text_classifier_lowdata,
build_text_classifier_v2,
build_textcat_parametric_attention_v1,
)
from .ml.models.tok2vec import (
BiLSTMEncoder,
CharacterEmbed,
MaxoutWindowEncoder,
MishWindowEncoder,
MultiHashEmbed,
build_hash_embed_cnn_tok2vec,
build_Tok2Vec_model,
tok2vec_listener_v1,
)
from .ml.staticvectors import StaticVectors
from .ml.tb_framework import TransitionModel
from .training.augment import (
create_combined_augmenter,
create_lower_casing_augmenter,
create_orth_variants_augmenter,
)
from .training.batchers import (
configure_minibatch,
configure_minibatch_by_padded_size,
configure_minibatch_by_words,
)
from .training.callbacks import create_copy_from_base_model
from .training.loggers import console_logger, console_logger_v3
# Register scorers
registry.scorers("spacy.tagger_scorer.v1")(make_tagger_scorer)
registry.scorers("spacy.ner_scorer.v1")(make_ner_scorer)
# span_ruler_scorer removed as it's not in span_ruler.py
registry.scorers("spacy.entity_ruler_scorer.v1")(make_entityruler_scorer)
registry.scorers("spacy.senter_scorer.v1")(make_senter_scorer)
registry.scorers("spacy.textcat_scorer.v1")(make_textcat_scorer)
registry.scorers("spacy.textcat_scorer.v2")(make_textcat_scorer)
registry.scorers("spacy.textcat_multilabel_scorer.v1")(
make_textcat_multilabel_scorer
)
registry.scorers("spacy.textcat_multilabel_scorer.v2")(
make_textcat_multilabel_scorer
)
registry.scorers("spacy.lemmatizer_scorer.v1")(make_lemmatizer_scorer)
registry.scorers("spacy.span_finder_scorer.v1")(make_span_finder_scorer)
registry.scorers("spacy.spancat_scorer.v1")(make_spancat_scorer)
registry.scorers("spacy.entity_linker_scorer.v1")(make_entity_linker_scorer)
registry.scorers("spacy.overlapping_labeled_spans_scorer.v1")(
make_overlapping_labeled_spans_scorer
)
registry.scorers("spacy.attribute_ruler_scorer.v1")(make_attribute_ruler_scorer)
registry.scorers("spacy.parser_scorer.v1")(make_parser_scorer)
registry.scorers("spacy.morphologizer_scorer.v1")(make_morphologizer_scorer)
# Register tokenizers
registry.tokenizers("spacy.Tokenizer.v1")(create_tokenizer)
registry.tokenizers("spacy.ja.JapaneseTokenizer")(create_japanese_tokenizer)
registry.tokenizers("spacy.zh.ChineseTokenizer")(create_chinese_tokenizer)
registry.tokenizers("spacy.ko.KoreanTokenizer")(create_korean_tokenizer)
registry.tokenizers("spacy.vi.VietnameseTokenizer")(create_vietnamese_tokenizer)
registry.tokenizers("spacy.th.ThaiTokenizer")(create_thai_tokenizer)
# Register tok2vec architectures we've modified
registry.architectures("spacy.Tok2VecListener.v1")(tok2vec_listener_v1)
registry.architectures("spacy.HashEmbedCNN.v2")(build_hash_embed_cnn_tok2vec)
registry.architectures("spacy.Tok2Vec.v2")(build_Tok2Vec_model)
registry.architectures("spacy.MultiHashEmbed.v2")(MultiHashEmbed)
registry.architectures("spacy.CharacterEmbed.v2")(CharacterEmbed)
registry.architectures("spacy.MaxoutWindowEncoder.v2")(MaxoutWindowEncoder)
registry.architectures("spacy.MishWindowEncoder.v2")(MishWindowEncoder)
registry.architectures("spacy.TorchBiLSTMEncoder.v1")(BiLSTMEncoder)
registry.architectures("spacy.EntityLinker.v2")(build_nel_encoder)
registry.architectures("spacy.TextCatCNN.v2")(build_simple_cnn_text_classifier)
registry.architectures("spacy.TextCatBOW.v2")(build_bow_text_classifier)
registry.architectures("spacy.TextCatBOW.v3")(build_bow_text_classifier_v3)
registry.architectures("spacy.TextCatEnsemble.v2")(build_text_classifier_v2)
registry.architectures("spacy.TextCatLowData.v1")(build_text_classifier_lowdata)
registry.architectures("spacy.TextCatParametricAttention.v1")(
build_textcat_parametric_attention_v1
)
registry.architectures("spacy.TextCatReduce.v1")(build_reduce_text_classifier)
registry.architectures("spacy.SpanCategorizer.v1")(build_spancat_model)
registry.architectures("spacy.SpanFinder.v1")(build_finder_model)
registry.architectures("spacy.TransitionBasedParser.v2")(build_tb_parser_model)
registry.architectures("spacy.PretrainVectors.v1")(create_pretrain_vectors)
registry.architectures("spacy.PretrainCharacters.v1")(create_pretrain_characters)
registry.architectures("spacy.Tagger.v2")(build_tagger_model)
# Register layers
registry.layers("spacy.FeatureExtractor.v1")(FeatureExtractor)
registry.layers("spacy.extract_spans.v1")(extract_spans)
registry.layers("spacy.extract_ngrams.v1")(extract_ngrams)
registry.layers("spacy.LinearLogistic.v1")(build_linear_logistic)
registry.layers("spacy.mean_max_reducer.v1")(build_mean_max_reducer)
registry.layers("spacy.StaticVectors.v2")(StaticVectors)
registry.layers("spacy.PrecomputableAffine.v1")(PrecomputableAffine)
registry.layers("spacy.CharEmbed.v1")(CharacterEmbed)
registry.layers("spacy.TransitionModel.v1")(TransitionModel)
# Register callbacks
registry.callbacks("spacy.copy_from_base_model.v1")(create_copy_from_base_model)
registry.callbacks("spacy.models_with_nvtx_range.v1")(create_models_with_nvtx_range)
registry.callbacks("spacy.models_and_pipes_with_nvtx_range.v1")(
create_models_and_pipes_with_nvtx_range
)
# Register loggers
registry.loggers("spacy.ConsoleLogger.v2")(console_logger)
registry.loggers("spacy.ConsoleLogger.v3")(console_logger_v3)
# Register batchers
registry.batchers("spacy.batch_by_padded.v1")(configure_minibatch_by_padded_size)
registry.batchers("spacy.batch_by_words.v1")(configure_minibatch_by_words)
registry.batchers("spacy.batch_by_sequence.v1")(configure_minibatch)
# Register augmenters
registry.augmenters("spacy.combined_augmenter.v1")(create_combined_augmenter)
registry.augmenters("spacy.lower_case.v1")(create_lower_casing_augmenter)
registry.augmenters("spacy.orth_variants.v1")(create_orth_variants_augmenter)
# Set the flag to indicate that the registry has been populated
REGISTRY_POPULATED = True
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/registrations.py",
"license": "MIT License",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
explosion/spaCy:spacy/tests/test_factory_imports.py | # coding: utf-8
"""Test factory import compatibility from original and new locations."""
import importlib
import pytest
# Each parametrize entry is (factory name, canonical module, backward-compat module).
@pytest.mark.parametrize(
    "factory_name,original_module,compat_module",
    [
        ("make_tagger", "spacy.pipeline.factories", "spacy.pipeline.tagger"),
        ("make_sentencizer", "spacy.pipeline.factories", "spacy.pipeline.sentencizer"),
        ("make_ner", "spacy.pipeline.factories", "spacy.pipeline.ner"),
        ("make_parser", "spacy.pipeline.factories", "spacy.pipeline.dep_parser"),
        ("make_tok2vec", "spacy.pipeline.factories", "spacy.pipeline.tok2vec"),
        ("make_spancat", "spacy.pipeline.factories", "spacy.pipeline.spancat"),
        (
            "make_spancat_singlelabel",
            "spacy.pipeline.factories",
            "spacy.pipeline.spancat",
        ),
        ("make_lemmatizer", "spacy.pipeline.factories", "spacy.pipeline.lemmatizer"),
        ("make_entity_ruler", "spacy.pipeline.factories", "spacy.pipeline.entityruler"),
        ("make_span_ruler", "spacy.pipeline.factories", "spacy.pipeline.span_ruler"),
        (
            "make_edit_tree_lemmatizer",
            "spacy.pipeline.factories",
            "spacy.pipeline.edit_tree_lemmatizer",
        ),
        (
            "make_attribute_ruler",
            "spacy.pipeline.factories",
            "spacy.pipeline.attributeruler",
        ),
        (
            "make_entity_linker",
            "spacy.pipeline.factories",
            "spacy.pipeline.entity_linker",
        ),
        ("make_textcat", "spacy.pipeline.factories", "spacy.pipeline.textcat"),
        ("make_token_splitter", "spacy.pipeline.factories", "spacy.pipeline.functions"),
        ("make_doc_cleaner", "spacy.pipeline.factories", "spacy.pipeline.functions"),
        (
            "make_morphologizer",
            "spacy.pipeline.factories",
            "spacy.pipeline.morphologizer",
        ),
        ("make_senter", "spacy.pipeline.factories", "spacy.pipeline.senter"),
        ("make_span_finder", "spacy.pipeline.factories", "spacy.pipeline.span_finder"),
        (
            "make_multilabel_textcat",
            "spacy.pipeline.factories",
            "spacy.pipeline.textcat_multilabel",
        ),
        ("make_beam_ner", "spacy.pipeline.factories", "spacy.pipeline.ner"),
        ("make_beam_parser", "spacy.pipeline.factories", "spacy.pipeline.dep_parser"),
        ("make_nn_labeller", "spacy.pipeline.factories", "spacy.pipeline.multitask"),
        # This one's special because the function was named make_span_ruler, so
        # the name in the registrations.py doesn't match the name we make the import hook
        # point to. We could make a test just for this but shrug
        # ("make_future_entity_ruler", "spacy.pipeline.factories", "spacy.pipeline.span_ruler"),
    ],
)
def test_factory_import_compatibility(factory_name, original_module, compat_module):
    """Test that factory functions can be imported from both original and compatibility locations."""
    # Import from the original module (registrations.py)
    original_module_obj = importlib.import_module(original_module)
    # NOTE(review): getattr with no default raises AttributeError if the name
    # is missing, so the `is not None` asserts below only fire for a factory
    # that exists but is bound to None.
    original_factory = getattr(original_module_obj, factory_name)
    assert (
        original_factory is not None
    ), f"Could not import {factory_name} from {original_module}"
    # Import from the compatibility module (component file)
    compat_module_obj = importlib.import_module(compat_module)
    compat_factory = getattr(compat_module_obj, factory_name)
    assert (
        compat_factory is not None
    ), f"Could not import {factory_name} from {compat_module}"
    # Test that they're the same function (identity)
    assert original_factory is compat_factory, (
        f"Factory {factory_name} imported from {original_module} is not the same object "
        f"as the one imported from {compat_module}"
    )
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/test_factory_imports.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
explosion/spaCy:spacy/tests/test_factory_registrations.py | import inspect
import json
from pathlib import Path
import pytest
from spacy.language import Language
from spacy.util import registry
# Path to the reference factory registrations, relative to this file
REFERENCE_FILE = Path(__file__).parent / "factory_registrations.json"
# Monkey patch the util.is_same_func to handle Cython functions
# NOTE(review): `inspect` is already imported at the top of this file; this
# second import is redundant (but harmless).
import inspect
from spacy import util
# Keep a reference to the original so the patched wrapper below can delegate.
original_is_same_func = util.is_same_func
def patched_is_same_func(func1, func2):
    """Wrapper around ``util.is_same_func`` that tolerates Cython callables.

    The stock implementation can raise ``TypeError`` for Cython functions; in
    that case fall back to comparing the string representations instead.
    """
    try:
        same = original_is_same_func(func1, func2)
    except TypeError:
        # Cython functions: compare reprs as a best-effort identity check.
        same = str(func1) == str(func2)
    return same


util.is_same_func = patched_is_same_func
@pytest.fixture
def reference_factory_registrations():
    """Return the reference factory registrations parsed from the JSON snapshot."""
    # Fail loudly when the snapshot file has not been generated yet.
    if not REFERENCE_FILE.exists():
        msg = f"Reference file {REFERENCE_FILE} not found. Run export_factory_registrations.py first."
        pytest.fail(msg)
    with REFERENCE_FILE.open("r") as handle:
        return json.load(handle)
def test_factory_registrations_preserved(reference_factory_registrations):
    """Test that all factory registrations from the reference file are still present."""

    def describe(name, func):
        # Build the metadata record for one registered factory. Cython
        # callables may lack __module__/__qualname__ (or raise TypeError on
        # access), so fall back to parsing their repr.
        try:
            module_name = func.__module__
        except (AttributeError, TypeError):
            module_name = str(func).split()[1].split(".")[0]
        try:
            func_name = func.__qualname__
        except (AttributeError, TypeError):
            func_name = (
                func.__name__
                if hasattr(func, "__name__")
                else str(func).split()[1].split(".")[-1]
            )
        return {"name": name, "module": module_name, "function": func_name}

    # Make sure every lazy registration has run before snapshotting.
    registry.ensure_populated()
    current_registrations = {
        name: describe(name, func)
        for name, func in registry.factories.get_all().items()
    }
    expected_names = set(reference_factory_registrations.keys())
    current_names = set(current_registrations.keys())
    # Anything recorded in the reference but absent now is a regression.
    missing_registrations = expected_names - current_names
    assert (
        not missing_registrations
    ), f"Missing factory registrations: {', '.join(sorted(missing_registrations))}"
    # Extra registrations are fine -- report them for information only.
    new_registrations = current_names - expected_names
    if new_registrations:
        print(
            f"New factory registrations found: {', '.join(sorted(new_registrations))}"
        )
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/test_factory_registrations.py",
"license": "MIT License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
explosion/spaCy:spacy/tests/test_registry_population.py | import json
import os
from pathlib import Path
import pytest
from spacy.util import registry
# Path to the reference registry contents, relative to this file
# (snapshot consumed by the reference_registry fixture below).
REFERENCE_FILE = Path(__file__).parent / "registry_contents.json"
@pytest.fixture
def reference_registry():
    """Return the expected registry contents, parsed from the JSON snapshot."""
    if REFERENCE_FILE.exists():
        with REFERENCE_FILE.open("r") as handle:
            return json.load(handle)
    # pytest.fail raises, so this is the error path.
    pytest.fail(f"Reference file {REFERENCE_FILE} not found.")
def test_registry_types(reference_registry):
    """Every registry type recorded in the reference must still exist."""
    present = set(registry.get_registry_names())
    # Any reference key that is no longer registered is a regression.
    missing_types = {name for name in reference_registry if name not in present}
    assert not missing_types, f"Missing registry types: {', '.join(missing_types)}"
def test_registry_entries(reference_registry):
    """Test that every entry recorded in the reference is still registered.

    Iterates over each registry named in the reference snapshot and asserts
    that none of its expected entries have disappeared. A registry type that
    no longer exists is itself a failure (the old comment said "skip", but
    the code has always failed -- the comment was wrong, not the code).
    """
    for registry_name, expected_entries in reference_registry.items():
        if not hasattr(registry, registry_name):
            pytest.fail(f"Registry '{registry_name}' does not exist.")
        reg = getattr(registry, registry_name)
        # Only set membership matters here; the previous sorted(list(...))
        # round-trip was wasted work since the names went straight into a set.
        current_set = set(reg.get_all().keys())
        missing_entries = set(expected_entries) - current_set
        assert (
            not missing_entries
        ), f"Registry '{registry_name}' missing entries: {', '.join(missing_entries)}"
| {
"repo_id": "explosion/spaCy",
"file_path": "spacy/tests/test_registry_population.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
facebookresearch/detectron2:detectron2/utils/torch_version_utils.py | from packaging import version
def min_torch_version(min_version: str) -> bool:
    """
    Returns True when torch's version is at least `min_version`.

    Args:
        min_version: minimum required version string, e.g. "1.8" or "1.10.0".

    Returns:
        bool: False when torch is not importable; otherwise whether the
        installed torch (ignoring any local build suffix such as "+cu118")
        is >= `min_version`.
    """
    try:
        import torch
    except ImportError:
        return False
    # Strip the local-version suffix ("+cu118", "+cpu", ...) before parsing.
    installed = version.parse(torch.__version__.split("+")[0])
    # Parse into a new name instead of rebinding the string parameter.
    required = version.parse(min_version)
    return installed >= required
| {
"repo_id": "facebookresearch/detectron2",
"file_path": "detectron2/utils/torch_version_utils.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:docs_src/server_sent_events/tutorial001_py310.py | from collections.abc import AsyncIterable, Iterable
from fastapi import FastAPI
from fastapi.sse import EventSourceResponse
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    """One demo catalog entry; ``description`` is allowed to be ``None``."""
    name: str
    description: str | None
# Demo data yielded by the SSE endpoints below.
items = [
    Item(name="Plumbus", description="A multi-purpose household device."),
    Item(name="Portal Gun", description="A portal opening device."),
    Item(name="Meeseeks Box", description="A box that summons a Meeseeks."),
]
@app.get("/items/stream", response_class=EventSourceResponse)
async def sse_items() -> AsyncIterable[Item]:
for item in items:
yield item
@app.get("/items/stream-no-async", response_class=EventSourceResponse)
def sse_items_no_async() -> Iterable[Item]:
for item in items:
yield item
@app.get("/items/stream-no-annotation", response_class=EventSourceResponse)
async def sse_items_no_annotation():
for item in items:
yield item
@app.get("/items/stream-no-async-no-annotation", response_class=EventSourceResponse)
def sse_items_no_async_no_annotation():
for item in items:
yield item
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/server_sent_events/tutorial001_py310.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:docs_src/server_sent_events/tutorial002_py310.py | from collections.abc import AsyncIterable
from fastapi import FastAPI
from fastapi.sse import EventSourceResponse, ServerSentEvent
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    """A priced catalog entry streamed as SSE payloads."""
    name: str
    price: float
# Sample inventory streamed by the endpoint below.
items = [
    Item(name="Plumbus", price=32.99),
    Item(name="Portal Gun", price=999.99),
    Item(name="Meeseeks Box", price=49.99),
]
@app.get("/items/stream", response_class=EventSourceResponse)
async def stream_items() -> AsyncIterable[ServerSentEvent]:
yield ServerSentEvent(comment="stream of item updates")
for i, item in enumerate(items):
yield ServerSentEvent(data=item, event="item_update", id=str(i + 1), retry=5000)
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/server_sent_events/tutorial002_py310.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:docs_src/server_sent_events/tutorial003_py310.py | from collections.abc import AsyncIterable
from fastapi import FastAPI
from fastapi.sse import EventSourceResponse, ServerSentEvent
app = FastAPI()
@app.get("/logs/stream", response_class=EventSourceResponse)
async def stream_logs() -> AsyncIterable[ServerSentEvent]:
logs = [
"2025-01-01 INFO Application started",
"2025-01-01 DEBUG Connected to database",
"2025-01-01 WARN High memory usage detected",
]
for log_line in logs:
yield ServerSentEvent(raw_data=log_line)
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/server_sent_events/tutorial003_py310.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:docs_src/server_sent_events/tutorial004_py310.py | from collections.abc import AsyncIterable
from typing import Annotated
from fastapi import FastAPI, Header
from fastapi.sse import EventSourceResponse, ServerSentEvent
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    """A priced catalog entry streamed as SSE payloads."""
    name: str
    price: float
# Sample inventory; list index doubles as the SSE event id below.
items = [
    Item(name="Plumbus", price=32.99),
    Item(name="Portal Gun", price=999.99),
    Item(name="Meeseeks Box", price=49.99),
]
@app.get("/items/stream", response_class=EventSourceResponse)
async def stream_items(
last_event_id: Annotated[int | None, Header()] = None,
) -> AsyncIterable[ServerSentEvent]:
start = last_event_id + 1 if last_event_id is not None else 0
for i, item in enumerate(items):
if i < start:
continue
yield ServerSentEvent(data=item, id=str(i))
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/server_sent_events/tutorial004_py310.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:docs_src/server_sent_events/tutorial005_py310.py | from collections.abc import AsyncIterable
from fastapi import FastAPI
from fastapi.sse import EventSourceResponse, ServerSentEvent
from pydantic import BaseModel
app = FastAPI()
class Prompt(BaseModel):
    """Request body carrying the chat prompt text."""
    text: str
@app.post("/chat/stream", response_class=EventSourceResponse)
async def stream_chat(prompt: Prompt) -> AsyncIterable[ServerSentEvent]:
words = prompt.text.split()
for word in words:
yield ServerSentEvent(data=word, event="token")
yield ServerSentEvent(raw_data="[DONE]", event="done")
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/server_sent_events/tutorial005_py310.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:fastapi/sse.py | from typing import Annotated, Any
from annotated_doc import Doc
from pydantic import AfterValidator, BaseModel, Field, model_validator
from starlette.responses import StreamingResponse
# Canonical SSE event schema matching the OpenAPI 3.2 spec
# (Section 4.14.4 "Special Considerations for Server-Sent Events")
# All four SSE fields are optional here (no "required" list); `retry`
# must be a non-negative integer.
_SSE_EVENT_SCHEMA: dict[str, Any] = {
    "type": "object",
    "properties": {
        "data": {"type": "string"},
        "event": {"type": "string"},
        "id": {"type": "string"},
        "retry": {"type": "integer", "minimum": 0},
    },
}
class EventSourceResponse(StreamingResponse):
    """Streaming response with `text/event-stream` media type.
    Use as `response_class=EventSourceResponse` on a *path operation* that uses `yield`
    to enable Server Sent Events (SSE) responses.
    Works with **any HTTP method** (`GET`, `POST`, etc.), which makes it compatible
    with protocols like MCP that stream SSE over `POST`.
    The actual encoding logic lives in the FastAPI routing layer. This class
    serves mainly as a marker and sets the correct `Content-Type`.
    """
    # Starlette uses this class attribute as the response Content-Type.
    media_type = "text/event-stream"
def _check_id_no_null(v: str | None) -> str | None:
if v is not None and "\0" in v:
raise ValueError("SSE 'id' must not contain null characters")
return v
class ServerSentEvent(BaseModel):
    """Represents a single Server-Sent Event.
    When `yield`ed from a *path operation function* that uses
    `response_class=EventSourceResponse`, each `ServerSentEvent` is encoded
    into the [SSE wire format](https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream)
    (`text/event-stream`).
    If you yield a plain object (dict, Pydantic model, etc.) instead, it is
    automatically JSON-encoded and sent as the `data:` field.
    All `data` values **including plain strings** are JSON-serialized.
    For example, `data="hello"` produces `data: "hello"` on the wire (with
    quotes).
    """
    data: Annotated[
        Any,
        Doc(
            """
            The event payload.
            Can be any JSON-serializable value: a Pydantic model, dict, list,
            string, number, etc. It is **always** serialized to JSON: strings
            are quoted (`"hello"` becomes `data: "hello"` on the wire).
            Mutually exclusive with `raw_data`.
            """
        ),
    ] = None
    raw_data: Annotated[
        str | None,
        Doc(
            """
            Raw string to send as the `data:` field **without** JSON encoding.
            Use this when you need to send pre-formatted text, HTML fragments,
            CSV lines, or any non-JSON payload. The string is placed directly
            into the `data:` field as-is.
            Mutually exclusive with `data`.
            """
        ),
    ] = None
    event: Annotated[
        str | None,
        Doc(
            """
            Optional event type name.
            Maps to `addEventListener(event, ...)` on the browser. When omitted,
            the browser dispatches on the generic `message` event.
            """
        ),
    ] = None
    # NUL rejection runs via the AfterValidator (_check_id_no_null).
    id: Annotated[
        str | None,
        AfterValidator(_check_id_no_null),
        Doc(
            """
            Optional event ID.
            The browser sends this value back as the `Last-Event-ID` header on
            automatic reconnection. **Must not contain null (`\\0`) characters.**
            """
        ),
    ] = None
    retry: Annotated[
        int | None,
        Field(ge=0),
        Doc(
            """
            Optional reconnection time in **milliseconds**.
            Tells the browser how long to wait before reconnecting after the
            connection is lost. Must be a non-negative integer.
            """
        ),
    ] = None
    comment: Annotated[
        str | None,
        Doc(
            """
            Optional comment line(s).
            Comment lines start with `:` in the SSE wire format and are ignored by
            `EventSource` clients. Useful for keep-alive pings to prevent
            proxy/load-balancer timeouts.
            """
        ),
    ] = None
    # Runs after field validation; enforces that `data` and `raw_data`
    # are mutually exclusive on a single event.
    @model_validator(mode="after")
    def _check_data_exclusive(self) -> "ServerSentEvent":
        if self.data is not None and self.raw_data is not None:
            raise ValueError(
                "Cannot set both 'data' and 'raw_data' on the same "
                "ServerSentEvent. Use 'data' for JSON-serialized payloads "
                "or 'raw_data' for pre-formatted strings."
            )
        return self
def format_sse_event(
    *,
    data_str: Annotated[
        str | None,
        Doc(
            """
            Pre-serialized data string to use as the `data:` field.
            """
        ),
    ] = None,
    event: Annotated[
        str | None,
        Doc(
            """
            Optional event type name (`event:` field).
            """
        ),
    ] = None,
    id: Annotated[
        str | None,
        Doc(
            """
            Optional event ID (`id:` field).
            """
        ),
    ] = None,
    retry: Annotated[
        int | None,
        Doc(
            """
            Optional reconnection time in milliseconds (`retry:` field).
            """
        ),
    ] = None,
    comment: Annotated[
        str | None,
        Doc(
            """
            Optional comment line(s) (`:` prefix).
            """
        ),
    ] = None,
) -> bytes:
    """Assemble the `text/event-stream` wire bytes for one event.

    Inputs must already be serialized strings; multi-line `data_str` and
    `comment` values are split into one `data:` / `:` line each. The output
    is terminated by a blank line (`\n\n` after the last field line).
    """
    field_lines: list[str] = []
    if comment is not None:
        field_lines.extend(f": {part}" for part in comment.splitlines())
    if event is not None:
        field_lines.append(f"event: {event}")
    if data_str is not None:
        field_lines.extend(f"data: {part}" for part in data_str.splitlines())
    if id is not None:
        field_lines.append(f"id: {id}")
    if retry is not None:
        field_lines.append(f"retry: {retry}")
    # Two trailing empty strings yield the blank-line event terminator.
    field_lines.extend(("", ""))
    return "\n".join(field_lines).encode("utf-8")
# Keep-alive comment, per the SSE spec recommendation
# (a ":"-prefixed line is ignored by EventSource clients).
KEEPALIVE_COMMENT = b": ping\n\n"
# Seconds between keep-alive pings when a generator is idle.
# Private but importable so tests can monkeypatch it.
_PING_INTERVAL: float = 15.0
| {
"repo_id": "fastapi/fastapi",
"file_path": "fastapi/sse.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
fastapi/fastapi:tests/test_sse.py | import asyncio
import time
from collections.abc import AsyncIterable, Iterable
import fastapi.routing
import pytest
from fastapi import APIRouter, FastAPI
from fastapi.responses import EventSourceResponse
from fastapi.sse import ServerSentEvent
from fastapi.testclient import TestClient
from pydantic import BaseModel
class Item(BaseModel):
    """Payload model used by the SSE test endpoints."""
    name: str
    description: str | None = None
# Fixture data plus the application under test.
items = [
    Item(name="Plumbus", description="A multi-purpose household device."),
    Item(name="Portal Gun", description="A portal opening device."),
    Item(name="Meeseeks Box", description="A box that summons a Meeseeks."),
]
app = FastAPI()
@app.get("/items/stream", response_class=EventSourceResponse)
async def sse_items() -> AsyncIterable[Item]:
for item in items:
yield item
@app.get("/items/stream-sync", response_class=EventSourceResponse)
def sse_items_sync() -> Iterable[Item]:
yield from items
@app.get("/items/stream-no-annotation", response_class=EventSourceResponse)
async def sse_items_no_annotation():
for item in items:
yield item
@app.get("/items/stream-sync-no-annotation", response_class=EventSourceResponse)
def sse_items_sync_no_annotation():
yield from items
@app.get("/items/stream-dict", response_class=EventSourceResponse)
async def sse_items_dict():
for item in items:
yield {"name": item.name, "description": item.description}
@app.get("/items/stream-sse-event", response_class=EventSourceResponse)
async def sse_items_event():
yield ServerSentEvent(data="hello", event="greeting", id="1")
yield ServerSentEvent(data={"key": "value"}, event="json-data", id="2")
yield ServerSentEvent(comment="just a comment")
yield ServerSentEvent(data="retry-test", retry=5000)
@app.get("/items/stream-mixed", response_class=EventSourceResponse)
async def sse_items_mixed() -> AsyncIterable[Item]:
yield items[0]
yield ServerSentEvent(data="custom-event", event="special")
yield items[1]
@app.get("/items/stream-string", response_class=EventSourceResponse)
async def sse_items_string():
yield ServerSentEvent(data="plain text data")
@app.post("/items/stream-post", response_class=EventSourceResponse)
async def sse_items_post() -> AsyncIterable[Item]:
for item in items:
yield item
@app.get("/items/stream-raw", response_class=EventSourceResponse)
async def sse_items_raw():
yield ServerSentEvent(raw_data="plain text without quotes")
yield ServerSentEvent(raw_data="<div>html fragment</div>", event="html")
yield ServerSentEvent(raw_data="cpu,87.3,1709145600", event="csv")
# Router used to verify SSE endpoints work through include_router.
router = APIRouter()
@router.get("/events", response_class=EventSourceResponse)
async def stream_events():
yield {"msg": "hello"}
yield {"msg": "world"}
# Mount the router so /api/events is served by the app under test.
app.include_router(router, prefix="/api")
@pytest.fixture(name="client")
def client_fixture():
with TestClient(app) as c:
yield c
def test_async_generator_with_model(client: TestClient):
    # Basic async SSE stream: check headers and one data line per item.
    response = client.get("/items/stream")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
    assert response.headers["cache-control"] == "no-cache"
    assert response.headers["x-accel-buffering"] == "no"
    lines = response.text.strip().split("\n")
    data_lines = [line for line in lines if line.startswith("data: ")]
    assert len(data_lines) == 3
    # Accept both compact and spaced JSON key separators.
    assert '"name":"Plumbus"' in data_lines[0] or '"name": "Plumbus"' in data_lines[0]
    assert (
        '"name":"Portal Gun"' in data_lines[1]
        or '"name": "Portal Gun"' in data_lines[1]
    )
    assert (
        '"name":"Meeseeks Box"' in data_lines[2]
        or '"name": "Meeseeks Box"' in data_lines[2]
    )
def test_sync_generator_with_model(client: TestClient):
    # Sync generator endpoints must stream the same way as async ones.
    response = client.get("/items/stream-sync")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
    data_lines = [
        line for line in response.text.strip().split("\n") if line.startswith("data: ")
    ]
    assert len(data_lines) == 3
def test_async_generator_no_annotation(client: TestClient):
    # The return-type annotation is optional for async SSE endpoints.
    response = client.get("/items/stream-no-annotation")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
    data_lines = [
        line for line in response.text.strip().split("\n") if line.startswith("data: ")
    ]
    assert len(data_lines) == 3
def test_sync_generator_no_annotation(client: TestClient):
    # The return-type annotation is optional for sync SSE endpoints too.
    response = client.get("/items/stream-sync-no-annotation")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
    data_lines = [
        line for line in response.text.strip().split("\n") if line.startswith("data: ")
    ]
    assert len(data_lines) == 3
def test_dict_items(client: TestClient):
    # Plain dict payloads are JSON-encoded into data lines.
    response = client.get("/items/stream-dict")
    assert response.status_code == 200
    data_lines = [
        line for line in response.text.strip().split("\n") if line.startswith("data: ")
    ]
    assert len(data_lines) == 3
    assert '"name"' in data_lines[0]
def test_post_method_sse(client: TestClient):
    """SSE should work with POST (needed for MCP compatibility)."""
    response = client.post("/items/stream-post")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
    # All three items should arrive even over POST.
    data_lines = [
        line for line in response.text.strip().split("\n") if line.startswith("data: ")
    ]
    assert len(data_lines) == 3
def test_sse_events_with_fields(client: TestClient):
    # Each ServerSentEvent field must map to its wire-format line.
    response = client.get("/items/stream-sse-event")
    assert response.status_code == 200
    text = response.text
    assert "event: greeting\n" in text
    assert 'data: "hello"\n' in text
    assert "id: 1\n" in text
    assert "event: json-data\n" in text
    assert "id: 2\n" in text
    assert 'data: {"key": "value"}\n' in text
    # Comment lines use the ":" prefix.
    assert ": just a comment\n" in text
    assert "retry: 5000\n" in text
    assert 'data: "retry-test"\n' in text
def test_mixed_plain_and_sse_events(client: TestClient):
    # Plain models and explicit ServerSentEvents can share one stream.
    response = client.get("/items/stream-mixed")
    assert response.status_code == 200
    text = response.text
    assert "event: special\n" in text
    assert 'data: "custom-event"\n' in text
    assert '"name"' in text
def test_string_data_json_encoded(client: TestClient):
    """Strings are always JSON-encoded (quoted)."""
    response = client.get("/items/stream-string")
    assert response.status_code == 200
    # The quotes around the payload prove JSON serialization happened.
    assert 'data: "plain text data"\n' in response.text
def test_server_sent_event_null_id_rejected():
    # NUL bytes are rejected by the id validator.
    with pytest.raises(ValueError, match="null"):
        ServerSentEvent(data="test", id="has\0null")
def test_server_sent_event_negative_retry_rejected():
    # retry is validated as a non-negative integer.
    with pytest.raises(ValueError):
        ServerSentEvent(data="test", retry=-1)
def test_server_sent_event_float_retry_rejected():
    # Non-integer retry values are rejected by the model.
    with pytest.raises(ValueError):
        ServerSentEvent(data="test", retry=1.5)  # type: ignore[arg-type]
def test_raw_data_sent_without_json_encoding(client: TestClient):
    """raw_data is sent as-is, not JSON-encoded."""
    response = client.get("/items/stream-raw")
    assert response.status_code == 200
    text = response.text
    # raw_data should appear without JSON quotes
    assert "data: plain text without quotes\n" in text
    # Not JSON-quoted
    assert 'data: "plain text without quotes"' not in text
    # Custom event names still work alongside raw payloads.
    assert "event: html\n" in text
    assert "data: <div>html fragment</div>\n" in text
    assert "event: csv\n" in text
    assert "data: cpu,87.3,1709145600\n" in text
def test_data_and_raw_data_mutually_exclusive():
"""Cannot set both data and raw_data."""
with pytest.raises(ValueError, match="Cannot set both"):
ServerSentEvent(data="json", raw_data="raw")
def test_sse_on_router_included_in_app(client: TestClient):
response = client.get("/api/events")
assert response.status_code == 200
assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
data_lines = [
line for line in response.text.strip().split("\n") if line.startswith("data: ")
]
assert len(data_lines) == 2
# Keepalive ping tests
keepalive_app = FastAPI()
@keepalive_app.get("/slow-async", response_class=EventSourceResponse)
async def slow_async_stream():
yield {"n": 1}
# Sleep longer than the (monkeypatched) ping interval so a keepalive
# comment is emitted before the next item.
await asyncio.sleep(0.3)
yield {"n": 2}
@keepalive_app.get("/slow-sync", response_class=EventSourceResponse)
def slow_sync_stream():
yield {"n": 1}
time.sleep(0.3)
yield {"n": 2}
def test_keepalive_ping_async(monkeypatch: pytest.MonkeyPatch):
monkeypatch.setattr(fastapi.routing, "_PING_INTERVAL", 0.05)
with TestClient(keepalive_app) as c:
response = c.get("/slow-async")
assert response.status_code == 200
text = response.text
# The keepalive comment ": ping" should appear between the two data events
assert ": ping\n" in text
data_lines = [line for line in text.split("\n") if line.startswith("data: ")]
assert len(data_lines) == 2
def test_keepalive_ping_sync(monkeypatch: pytest.MonkeyPatch):
monkeypatch.setattr(fastapi.routing, "_PING_INTERVAL", 0.05)
with TestClient(keepalive_app) as c:
response = c.get("/slow-sync")
assert response.status_code == 200
text = response.text
assert ": ping\n" in text
data_lines = [line for line in text.split("\n") if line.startswith("data: ")]
assert len(data_lines) == 2
def test_no_keepalive_when_fast(client: TestClient):
"""No keepalive comment when items arrive quickly."""
response = client.get("/items/stream")
assert response.status_code == 200
# KEEPALIVE_COMMENT is ": ping\n\n".
assert ": ping\n" not in response.text
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_sse.py",
"license": "MIT License",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_server_sent_events/test_tutorial002.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial002_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.server_sent_events.{request.param}")
client = TestClient(mod.app)
return client
def test_stream_items(client: TestClient):
response = client.get("/items/stream")
assert response.status_code == 200, response.text
assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
lines = response.text.strip().split("\n")
# First event is a comment-only event
assert lines[0] == ": stream of item updates"
# Remaining lines contain event:, data:, id:, retry: fields
event_lines = [line for line in lines if line.startswith("event: ")]
assert len(event_lines) == 3
assert all(line == "event: item_update" for line in event_lines)
data_lines = [line for line in lines if line.startswith("data: ")]
assert len(data_lines) == 3
id_lines = [line for line in lines if line.startswith("id: ")]
assert id_lines == ["id: 1", "id: 2", "id: 3"]
retry_lines = [line for line in lines if line.startswith("retry: ")]
assert len(retry_lines) == 3
assert all(line == "retry: 5000" for line in retry_lines)
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/stream": {
"get": {
"summary": "Stream Items",
"operationId": "stream_items_items_stream_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"text/event-stream": {
"itemSchema": {
"type": "object",
"properties": {
"data": {"type": "string"},
"event": {"type": "string"},
"id": {"type": "string"},
"retry": {
"type": "integer",
"minimum": 0,
},
},
}
}
},
}
},
}
}
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_server_sent_events/test_tutorial002.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_server_sent_events/test_tutorial003.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial003_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.server_sent_events.{request.param}")
client = TestClient(mod.app)
return client
def test_stream_logs(client: TestClient):
response = client.get("/logs/stream")
assert response.status_code == 200, response.text
assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
data_lines = [
line for line in response.text.strip().split("\n") if line.startswith("data: ")
]
assert len(data_lines) == 3
# raw_data is sent without JSON encoding (no quotes around the string)
assert data_lines[0] == "data: 2025-01-01 INFO Application started"
assert data_lines[1] == "data: 2025-01-01 DEBUG Connected to database"
assert data_lines[2] == "data: 2025-01-01 WARN High memory usage detected"
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/logs/stream": {
"get": {
"summary": "Stream Logs",
"operationId": "stream_logs_logs_stream_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"text/event-stream": {
"itemSchema": {
"type": "object",
"properties": {
"data": {"type": "string"},
"event": {"type": "string"},
"id": {"type": "string"},
"retry": {
"type": "integer",
"minimum": 0,
},
},
}
}
},
}
},
}
}
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_server_sent_events/test_tutorial003.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_server_sent_events/test_tutorial004.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial004_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.server_sent_events.{request.param}")
client = TestClient(mod.app)
return client
def test_stream_all_items(client: TestClient):
response = client.get("/items/stream")
assert response.status_code == 200, response.text
data_lines = [
line for line in response.text.strip().split("\n") if line.startswith("data: ")
]
assert len(data_lines) == 3
id_lines = [
line for line in response.text.strip().split("\n") if line.startswith("id: ")
]
assert id_lines == ["id: 0", "id: 1", "id: 2"]
def test_resume_from_last_event_id(client: TestClient):
response = client.get(
"/items/stream",
headers={"last-event-id": "0"},
)
assert response.status_code == 200, response.text
data_lines = [
line for line in response.text.strip().split("\n") if line.startswith("data: ")
]
assert len(data_lines) == 2
id_lines = [
line for line in response.text.strip().split("\n") if line.startswith("id: ")
]
assert id_lines == ["id: 1", "id: 2"]
def test_resume_from_last_item(client: TestClient):
response = client.get(
"/items/stream",
headers={"last-event-id": "1"},
)
assert response.status_code == 200, response.text
data_lines = [
line for line in response.text.strip().split("\n") if line.startswith("data: ")
]
assert len(data_lines) == 1
id_lines = [
line for line in response.text.strip().split("\n") if line.startswith("id: ")
]
assert id_lines == ["id: 2"]
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/stream": {
"get": {
"summary": "Stream Items",
"operationId": "stream_items_items_stream_get",
"parameters": [
{
"name": "last-event-id",
"in": "header",
"required": False,
"schema": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"title": "Last-Event-Id",
},
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"text/event-stream": {
"itemSchema": {
"type": "object",
"properties": {
"data": {"type": "string"},
"event": {"type": "string"},
"id": {"type": "string"},
"retry": {
"type": "integer",
"minimum": 0,
},
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {
"$ref": "#/components/schemas/ValidationError"
},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
"input": {"title": "Input"},
"ctx": {"type": "object", "title": "Context"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_server_sent_events/test_tutorial004.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_server_sent_events/test_tutorial005.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial005_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.server_sent_events.{request.param}")
client = TestClient(mod.app)
return client
def test_stream_chat(client: TestClient):
response = client.post(
"/chat/stream",
json={"text": "hello world"},
)
assert response.status_code == 200, response.text
assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
lines = response.text.strip().split("\n")
event_lines = [line for line in lines if line.startswith("event: ")]
assert event_lines == [
"event: token",
"event: token",
"event: done",
]
data_lines = [line for line in lines if line.startswith("data: ")]
assert data_lines == [
'data: "hello"',
'data: "world"',
"data: [DONE]",
]
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/chat/stream": {
"post": {
"summary": "Stream Chat",
"operationId": "stream_chat_chat_stream_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Prompt"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"text/event-stream": {
"itemSchema": {
"type": "object",
"properties": {
"data": {"type": "string"},
"event": {"type": "string"},
"id": {"type": "string"},
"retry": {
"type": "integer",
"minimum": 0,
},
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {
"$ref": "#/components/schemas/ValidationError"
},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Prompt": {
"properties": {"text": {"type": "string", "title": "Text"}},
"type": "object",
"required": ["text"],
"title": "Prompt",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
"input": {"title": "Input"},
"ctx": {"type": "object", "title": "Context"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_server_sent_events/test_tutorial005.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:docs_src/stream_data/tutorial001_py310.py | from collections.abc import AsyncIterable, Iterable
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
app = FastAPI()
message = """
Rick: (stumbles in drunkenly, and turns on the lights) Morty! You gotta come on. You got--... you gotta come with me.
Morty: (rubs his eyes) What, Rick? What's going on?
Rick: I got a surprise for you, Morty.
Morty: It's the middle of the night. What are you talking about?
Rick: (spills alcohol on Morty's bed) Come on, I got a surprise for you. (drags Morty by the ankle) Come on, hurry up. (pulls Morty out of his bed and into the hall)
Morty: Ow! Ow! You're tugging me too hard!
Rick: We gotta go, gotta get outta here, come on. Got a surprise for you Morty.
"""
@app.get("/story/stream", response_class=StreamingResponse)
async def stream_story() -> AsyncIterable[str]:
for line in message.splitlines():
yield line
@app.get("/story/stream-no-async", response_class=StreamingResponse)
def stream_story_no_async() -> Iterable[str]:
for line in message.splitlines():
yield line
@app.get("/story/stream-no-annotation", response_class=StreamingResponse)
async def stream_story_no_annotation():
for line in message.splitlines():
yield line
@app.get("/story/stream-no-async-no-annotation", response_class=StreamingResponse)
def stream_story_no_async_no_annotation():
for line in message.splitlines():
yield line
@app.get("/story/stream-bytes", response_class=StreamingResponse)
async def stream_story_bytes() -> AsyncIterable[bytes]:
for line in message.splitlines():
yield line.encode("utf-8")
@app.get("/story/stream-no-async-bytes", response_class=StreamingResponse)
def stream_story_no_async_bytes() -> Iterable[bytes]:
for line in message.splitlines():
yield line.encode("utf-8")
@app.get("/story/stream-no-annotation-bytes", response_class=StreamingResponse)
async def stream_story_no_annotation_bytes():
for line in message.splitlines():
yield line.encode("utf-8")
@app.get("/story/stream-no-async-no-annotation-bytes", response_class=StreamingResponse)
def stream_story_no_async_no_annotation_bytes():
for line in message.splitlines():
yield line.encode("utf-8")
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/stream_data/tutorial001_py310.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
fastapi/fastapi:docs_src/stream_json_lines/tutorial001_py310.py | from collections.abc import AsyncIterable, Iterable
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
description: str | None
items = [
Item(name="Plumbus", description="A multi-purpose household device."),
Item(name="Portal Gun", description="A portal opening device."),
Item(name="Meeseeks Box", description="A box that summons a Meeseeks."),
]
@app.get("/items/stream")
async def stream_items() -> AsyncIterable[Item]:
for item in items:
yield item
@app.get("/items/stream-no-async")
def stream_items_no_async() -> Iterable[Item]:
for item in items:
yield item
@app.get("/items/stream-no-annotation")
async def stream_items_no_annotation():
for item in items:
yield item
@app.get("/items/stream-no-async-no-annotation")
def stream_items_no_async_no_annotation():
for item in items:
yield item
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/stream_json_lines/tutorial001_py310.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
fastapi/fastapi:tests/test_stream_bare_type.py | import json
from typing import AsyncIterable, Iterable # noqa: UP035 to test coverage
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel
class Item(BaseModel):
name: str
app = FastAPI()
@app.get("/items/stream-bare-async")
async def stream_bare_async() -> AsyncIterable:
yield {"name": "foo"}
@app.get("/items/stream-bare-sync")
def stream_bare_sync() -> Iterable:
yield {"name": "bar"}
client = TestClient(app)
def test_stream_bare_async_iterable():
response = client.get("/items/stream-bare-async")
assert response.status_code == 200
assert response.headers["content-type"] == "application/jsonl"
lines = [json.loads(line) for line in response.text.strip().splitlines()]
assert lines == [{"name": "foo"}]
def test_stream_bare_sync_iterable():
response = client.get("/items/stream-bare-sync")
assert response.status_code == 200
assert response.headers["content-type"] == "application/jsonl"
lines = [json.loads(line) for line in response.text.strip().splitlines()]
assert lines == [{"name": "bar"}]
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_stream_bare_type.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_stream_cancellation.py | """
Test that async streaming endpoints can be cancelled without hanging.
Ref: https://github.com/fastapi/fastapi/issues/14680
"""
from collections.abc import AsyncIterable
import anyio
import pytest
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
pytestmark = [
pytest.mark.anyio,
pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning"),
]
app = FastAPI()
@app.get("/stream-raw", response_class=StreamingResponse)
async def stream_raw() -> AsyncIterable[str]:
"""Async generator with no internal await - would hang without checkpoint."""
i = 0
while True:
yield f"item {i}\n"
i += 1
@app.get("/stream-jsonl")
async def stream_jsonl() -> AsyncIterable[int]:
"""JSONL async generator with no internal await."""
i = 0
while True:
yield i
i += 1
async def _run_asgi_and_cancel(app: FastAPI, path: str, timeout: float) -> bool:
"""Call the ASGI app for *path* and cancel after *timeout* seconds.
Returns `True` if the cancellation was delivered (i.e. it did not hang).
"""
chunks: list[bytes] = []
async def receive(): # type: ignore[no-untyped-def]
# Simulate a client that never disconnects, rely on cancellation
await anyio.sleep(float("inf"))
return {"type": "http.disconnect"} # pragma: no cover
async def send(message: dict) -> None: # type: ignore[type-arg]
if message["type"] == "http.response.body":
chunks.append(message.get("body", b""))
scope = {
"type": "http",
"asgi": {"version": "3.0", "spec_version": "2.0"},
"http_version": "1.1",
"method": "GET",
"path": path,
"query_string": b"",
"root_path": "",
"headers": [],
"server": ("test", 80),
}
with anyio.move_on_after(timeout) as cancel_scope:
await app(scope, receive, send) # type: ignore[arg-type]
# If we got here within the timeout the generator was cancellable.
# cancel_scope.cancelled_caught is True when move_on_after fired.
return cancel_scope.cancelled_caught or len(chunks) > 0
async def test_raw_stream_cancellation() -> None:
"""Raw streaming endpoint should be cancellable within a reasonable time."""
cancelled = await _run_asgi_and_cancel(app, "/stream-raw", timeout=3.0)
# The key assertion: we reached this line at all (didn't hang).
# cancelled will be True because the infinite generator was interrupted.
assert cancelled
async def test_jsonl_stream_cancellation() -> None:
"""JSONL streaming endpoint should be cancellable within a reasonable time."""
cancelled = await _run_asgi_and_cancel(app, "/stream-jsonl", timeout=3.0)
assert cancelled
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_stream_cancellation.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_stream_json_validation_error.py | from collections.abc import AsyncIterable, Iterable
import pytest
from fastapi import FastAPI
from fastapi.exceptions import ResponseValidationError
from fastapi.testclient import TestClient
from pydantic import BaseModel
class Item(BaseModel):
name: str
price: float
app = FastAPI()
@app.get("/items/stream-invalid")
async def stream_items_invalid() -> AsyncIterable[Item]:
yield {"name": "valid", "price": 1.0}
yield {"name": "invalid", "price": "not-a-number"}
@app.get("/items/stream-invalid-sync")
def stream_items_invalid_sync() -> Iterable[Item]:
yield {"name": "valid", "price": 1.0}
yield {"name": "invalid", "price": "not-a-number"}
client = TestClient(app)
def test_stream_json_validation_error_async():
with pytest.raises(ResponseValidationError):
client.get("/items/stream-invalid")
def test_stream_json_validation_error_sync():
with pytest.raises(ResponseValidationError):
client.get("/items/stream-invalid-sync")
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_stream_json_validation_error.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_stream_data/test_tutorial001.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial001_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.stream_data.{request.param}")
client = TestClient(mod.app)
return client
expected_text = (
""
"Rick: (stumbles in drunkenly, and turns on the lights)"
" Morty! You gotta come on. You got--... you gotta come with me."
"Morty: (rubs his eyes) What, Rick? What's going on?"
"Rick: I got a surprise for you, Morty."
"Morty: It's the middle of the night. What are you talking about?"
"Rick: (spills alcohol on Morty's bed) Come on, I got a surprise for you."
" (drags Morty by the ankle) Come on, hurry up."
" (pulls Morty out of his bed and into the hall)"
"Morty: Ow! Ow! You're tugging me too hard!"
"Rick: We gotta go, gotta get outta here, come on."
" Got a surprise for you Morty."
)
@pytest.mark.parametrize(
"path",
[
"/story/stream",
"/story/stream-no-async",
"/story/stream-no-annotation",
"/story/stream-no-async-no-annotation",
"/story/stream-bytes",
"/story/stream-no-async-bytes",
"/story/stream-no-annotation-bytes",
"/story/stream-no-async-no-annotation-bytes",
],
)
def test_stream_story(client: TestClient, path: str):
response = client.get(path)
assert response.status_code == 200, response.text
assert response.text == expected_text
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/story/stream": {
"get": {
"summary": "Stream Story",
"operationId": "stream_story_story_stream_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-async": {
"get": {
"summary": "Stream Story No Async",
"operationId": "stream_story_no_async_story_stream_no_async_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-annotation": {
"get": {
"summary": "Stream Story No Annotation",
"operationId": "stream_story_no_annotation_story_stream_no_annotation_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-async-no-annotation": {
"get": {
"summary": "Stream Story No Async No Annotation",
"operationId": "stream_story_no_async_no_annotation_story_stream_no_async_no_annotation_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-bytes": {
"get": {
"summary": "Stream Story Bytes",
"operationId": "stream_story_bytes_story_stream_bytes_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-async-bytes": {
"get": {
"summary": "Stream Story No Async Bytes",
"operationId": "stream_story_no_async_bytes_story_stream_no_async_bytes_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-annotation-bytes": {
"get": {
"summary": "Stream Story No Annotation Bytes",
"operationId": "stream_story_no_annotation_bytes_story_stream_no_annotation_bytes_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
"/story/stream-no-async-no-annotation-bytes": {
"get": {
"summary": "Stream Story No Async No Annotation Bytes",
"operationId": "stream_story_no_async_no_annotation_bytes_story_stream_no_async_no_annotation_bytes_get",
"responses": {
"200": {
"description": "Successful Response",
}
},
}
},
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_stream_data/test_tutorial001.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_stream_data/test_tutorial002.py | import importlib
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="mod",
params=[
pytest.param("tutorial002_py310"),
],
)
def get_mod(request: pytest.FixtureRequest):
return importlib.import_module(f"docs_src.stream_data.{request.param}")
@pytest.fixture(name="client")
def get_client(mod):
client = TestClient(mod.app)
return client
@pytest.mark.parametrize(
"path",
[
"/image/stream",
"/image/stream-no-async",
"/image/stream-no-async-yield-from",
"/image/stream-no-annotation",
"/image/stream-no-async-no-annotation",
],
)
def test_stream_image(mod, client: TestClient, path: str):
response = client.get(path)
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
assert response.content == mod.binary_image
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/image/stream": {
"get": {
"summary": "Stream Image",
"operationId": "stream_image_image_stream_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"image/png": {"schema": {"type": "string"}}
},
}
},
}
},
"/image/stream-no-async": {
"get": {
"summary": "Stream Image No Async",
"operationId": "stream_image_no_async_image_stream_no_async_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"image/png": {"schema": {"type": "string"}}
},
}
},
}
},
"/image/stream-no-async-yield-from": {
"get": {
"summary": "Stream Image No Async Yield From",
"operationId": "stream_image_no_async_yield_from_image_stream_no_async_yield_from_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"image/png": {"schema": {"type": "string"}}
},
}
},
}
},
"/image/stream-no-annotation": {
"get": {
"summary": "Stream Image No Annotation",
"operationId": "stream_image_no_annotation_image_stream_no_annotation_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"image/png": {"schema": {"type": "string"}}
},
}
},
}
},
"/image/stream-no-async-no-annotation": {
"get": {
"summary": "Stream Image No Async No Annotation",
"operationId": "stream_image_no_async_no_annotation_image_stream_no_async_no_annotation_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"image/png": {"schema": {"type": "string"}}
},
}
},
}
},
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_stream_data/test_tutorial002.py",
"license": "MIT License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_tutorial/test_stream_json_lines/test_tutorial001.py | import importlib
import json
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
@pytest.fixture(
name="client",
params=[
pytest.param("tutorial001_py310"),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.stream_json_lines.{request.param}")
client = TestClient(mod.app)
return client
expected_items = [
{"name": "Plumbus", "description": "A multi-purpose household device."},
{"name": "Portal Gun", "description": "A portal opening device."},
{"name": "Meeseeks Box", "description": "A box that summons a Meeseeks."},
]
@pytest.mark.parametrize(
"path",
[
"/items/stream",
"/items/stream-no-async",
"/items/stream-no-annotation",
"/items/stream-no-async-no-annotation",
],
)
def test_stream_items(client: TestClient, path: str):
response = client.get(path)
assert response.status_code == 200, response.text
assert response.headers["content-type"] == "application/jsonl"
lines = [json.loads(line) for line in response.text.strip().splitlines()]
assert lines == expected_items
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/stream": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/jsonl": {
"itemSchema": {
"$ref": "#/components/schemas/Item"
},
}
},
}
},
"summary": "Stream Items",
"operationId": "stream_items_items_stream_get",
}
},
"/items/stream-no-async": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/jsonl": {
"itemSchema": {
"$ref": "#/components/schemas/Item"
},
}
},
}
},
"summary": "Stream Items No Async",
"operationId": "stream_items_no_async_items_stream_no_async_get",
}
},
"/items/stream-no-annotation": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/jsonl": {
"itemSchema": {},
}
},
}
},
"summary": "Stream Items No Annotation",
"operationId": "stream_items_no_annotation_items_stream_no_annotation_get",
}
},
"/items/stream-no-async-no-annotation": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/jsonl": {
"itemSchema": {},
}
},
}
},
"summary": "Stream Items No Async No Annotation",
"operationId": "stream_items_no_async_no_annotation_items_stream_no_async_no_annotation_get",
}
},
},
"components": {
"schemas": {
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {
"anyOf": [
{"type": "string"},
{"type": "null"},
],
"title": "Description",
},
},
"type": "object",
"required": ["name", "description"],
"title": "Item",
}
}
},
}
)
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_tutorial/test_stream_json_lines/test_tutorial001.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_openapi_cache_root_path.py | from fastapi import FastAPI
from fastapi.testclient import TestClient
def test_root_path_does_not_persist_across_requests():
    """A spoofed root_path seen in one request must not leak into later requests.

    Regression test for a cache-poisoning issue: the OpenAPI document is
    generated per request, and an attacker-supplied root_path must not be
    persisted into the schema served to other clients.
    """
    app = FastAPI()

    @app.get("/")
    def read_root():  # pragma: no cover
        return {"ok": True}

    # Attacker request with a spoofed root_path
    attacker_client = TestClient(app, root_path="/evil-api")
    response1 = attacker_client.get("/openapi.json")
    data1 = response1.json()
    # The spoofed prefix is reflected for the attacker's own request...
    assert any(s.get("url") == "/evil-api" for s in data1.get("servers", []))

    # Subsequent legitimate request with no root_path
    clean_client = TestClient(app)
    response2 = clean_client.get("/openapi.json")
    data2 = response2.json()
    servers = [s.get("url") for s in data2.get("servers", [])]
    # ...but must not appear for anyone else.
    assert "/evil-api" not in servers
def test_multiple_different_root_paths_do_not_accumulate():
    """Distinct root_path values from earlier requests must not accumulate.

    Fires three requests with different root_path prefixes, then verifies a
    clean request's OpenAPI servers list contains none of them.
    """
    app = FastAPI()

    @app.get("/")
    def read_root():  # pragma: no cover
        return {"ok": True}

    for prefix in ["/path-a", "/path-b", "/path-c"]:
        c = TestClient(app, root_path=prefix)
        c.get("/openapi.json")

    # A clean request should not have any of them
    clean_client = TestClient(app)
    response = clean_client.get("/openapi.json")
    data = response.json()
    servers = [s.get("url") for s in data.get("servers", [])]
    for prefix in ["/path-a", "/path-b", "/path-c"]:
        assert prefix not in servers, (
            f"root_path '{prefix}' leaked into clean request: {servers}"
        )
def test_legitimate_root_path_still_appears():
    """A genuine root_path must still be reflected in the OpenAPI servers list.

    Guards against over-correcting the leak fix: per-request root_path handling
    must keep working for the request that actually supplies it.
    """
    app = FastAPI()

    @app.get("/")
    def read_root():  # pragma: no cover
        return {"ok": True}

    client = TestClient(app, root_path="/api/v1")
    response = client.get("/openapi.json")
    data = response.json()
    servers = [s.get("url") for s in data.get("servers", [])]
    assert "/api/v1" in servers
def test_configured_servers_not_mutated():
    """Generating the schema with a rogue root_path must not mutate app config.

    The caller-provided ``servers`` list passed to FastAPI() must be left
    byte-identical after a request that injects a spoofed root_path.
    """
    configured_servers = [{"url": "https://prod.example.com"}]
    app = FastAPI(servers=configured_servers)

    @app.get("/")
    def read_root():  # pragma: no cover
        return {"ok": True}

    # Request with a rogue root_path
    attacker_client = TestClient(app, root_path="/evil")
    attacker_client.get("/openapi.json")

    # The original servers list must be untouched
    assert configured_servers == [{"url": "https://prod.example.com"}]
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_openapi_cache_root_path.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:tests/test_swagger_ui_escape.py | from fastapi.openapi.docs import get_swagger_ui_html
def test_init_oauth_html_chars_are_escaped():
    """init_oauth values must be escaped so they cannot close the <script> tag.

    An embedded ``</script><script>`` payload must appear only in its
    ``\\u003c``/``\\u003e`` JSON-escaped form in the rendered HTML.
    """
    xss_payload = "Evil</script><script>alert(1)</script>"
    html = get_swagger_ui_html(
        openapi_url="/openapi.json",
        title="Test",
        init_oauth={"appName": xss_payload},
    )
    body = html.body.decode()
    # Raw tag-breaking sequence must be absent...
    assert "</script><script>" not in body
    # ...and present only as escaped unicode sequences.
    assert "\\u003c/script\\u003e\\u003cscript\\u003e" in body
def test_swagger_ui_parameters_html_chars_are_escaped():
    """swagger_ui_parameters values must be escaped to block HTML injection."""
    html = get_swagger_ui_html(
        openapi_url="/openapi.json",
        title="Test",
        swagger_ui_parameters={"customKey": "<img src=x onerror=alert(1)>"},
    )
    body = html.body.decode()
    # The raw payload must never reach the page...
    assert "<img src=x onerror=alert(1)>" not in body
    # ...only its escaped form.
    assert "\\u003cimg" in body
def test_normal_init_oauth_still_works():
    """Benign init_oauth values must survive escaping unchanged and be wired up."""
    html = get_swagger_ui_html(
        openapi_url="/openapi.json",
        title="Test",
        init_oauth={"clientId": "my-client", "appName": "My App"},
    )
    body = html.body.decode()
    assert '"clientId": "my-client"' in body
    assert '"appName": "My App"' in body
    # The page must still invoke Swagger UI's OAuth initialization.
    assert "ui.initOAuth" in body
| {
"repo_id": "fastapi/fastapi",
"file_path": "tests/test_swagger_ui_escape.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
fastapi/fastapi:docs_src/strict_content_type/tutorial001_py310.py | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI(strict_content_type=False)
class Item(BaseModel):
    """Request body schema for item creation."""

    # Display name of the item.
    name: str
    # Unit price; pydantic coerces/validates numeric input.
    price: float
@app.post("/items/")
async def create_item(item: Item):
    """Validate the posted body against Item and echo it back to the client."""
    return item
| {
"repo_id": "fastapi/fastapi",
"file_path": "docs_src/strict_content_type/tutorial001_py310.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.