sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
PrefectHQ/fastmcp:src/fastmcp/mcp_config.py | """Canonical MCP Configuration Format.
This module defines the standard configuration format for Model Context Protocol (MCP) servers.
It provides a client-agnostic, extensible format that can be used across all MCP implementations.
The configuration format supports both stdio and remote (HTTP/SSE) transports, with comprehensive
field definitions for server metadata, authentication, and execution parameters.
Example configuration:
```json
{
"mcpServers": {
"my-server": {
"command": "npx",
"args": ["-y", "@my/mcp-server"],
"env": {"API_KEY": "secret"},
"timeout": 30000,
"description": "My MCP server"
}
}
}
```
"""
from __future__ import annotations
import datetime
import re
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, Any, Literal, cast
from urllib.parse import urlparse
import httpx
from pydantic import (
AnyUrl,
BaseModel,
ConfigDict,
Field,
model_validator,
)
from typing_extensions import Self, override
from fastmcp.tools.tool_transform import ToolTransformConfig
from fastmcp.utilities.types import FastMCPBaseModel
if TYPE_CHECKING:
from fastmcp.client.transports import (
ClientTransport,
SSETransport,
StdioTransport,
StreamableHttpTransport,
)
from fastmcp.server.server import FastMCP
def infer_transport_type_from_url(
url: str | AnyUrl,
) -> Literal["http", "sse"]:
"""
Infer the appropriate transport type from the given URL.
"""
url = str(url)
if not url.startswith("http"):
raise ValueError(f"Invalid URL: {url}")
parsed_url = urlparse(url)
path = parsed_url.path
# Match /sse followed by /, ?, &, or end of string
if re.search(r"/sse(/|\?|&|$)", path):
return "sse"
else:
return "http"
class _TransformingMCPServerMixin(FastMCPBaseModel):
"""A mixin that enables wrapping an MCP Server with tool transforms."""
tools: dict[str, ToolTransformConfig] = Field(default_factory=dict)
"""The multi-tool transform to apply to the tools."""
include_tags: set[str] | None = Field(
default=None,
description="The tags to include in the proxy.",
)
exclude_tags: set[str] | None = Field(
default=None,
description="The tags to exclude in the proxy.",
)
@model_validator(mode="before")
@classmethod
def _require_at_least_one_transform_field(
cls, values: dict[str, Any]
) -> dict[str, Any]:
"""Reject if none of the transforming fields are set.
This ensures that plain server configs (without tools, include_tags,
or exclude_tags) fall through to the base server types during union
validation, avoiding unnecessary proxy wrapping.
"""
if isinstance(values, dict):
has_tools = bool(values.get("tools"))
has_include = values.get("include_tags") is not None
has_exclude = values.get("exclude_tags") is not None
if not (has_tools or has_include or has_exclude):
raise ValueError(
"At least one of 'tools', 'include_tags', or 'exclude_tags' is required"
)
return values
def _to_server_and_underlying_transport(
self,
server_name: str | None = None,
client_name: str | None = None,
) -> tuple[FastMCP[Any], ClientTransport]:
"""Turn the Transforming MCPServer into a FastMCP Server and also return the underlying transport."""
from fastmcp.client import Client
from fastmcp.client.transports import (
ClientTransport, # pyright: ignore[reportUnusedImport]
)
from fastmcp.server import create_proxy
transport: ClientTransport = super().to_transport() # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue, reportUnknownVariableType] # ty: ignore[unresolved-attribute]
transport = cast(ClientTransport, transport)
client: Client[ClientTransport] = Client(transport=transport, name=client_name)
wrapped_mcp_server = create_proxy(
client,
name=server_name,
)
if self.include_tags is not None:
wrapped_mcp_server.enable(tags=self.include_tags, only=True)
if self.exclude_tags is not None:
wrapped_mcp_server.disable(tags=self.exclude_tags)
# Apply tool transforms if configured
if self.tools:
from fastmcp.server.transforms import ToolTransform
wrapped_mcp_server.add_transform(ToolTransform(self.tools))
return wrapped_mcp_server, transport
def to_transport(self) -> ClientTransport:
"""Get the transport for the transforming MCP server."""
from fastmcp.client.transports import FastMCPTransport
return FastMCPTransport(mcp=self._to_server_and_underlying_transport()[0])
class StdioMCPServer(BaseModel):
"""MCP server configuration for stdio transport.
This is the canonical configuration format for MCP servers using stdio transport.
"""
# Required fields
command: str
# Common optional fields
args: list[str] = Field(default_factory=list)
env: dict[str, Any] = Field(default_factory=dict)
# Transport specification
transport: Literal["stdio"] = "stdio"
type: Literal["stdio"] | None = None # Alternative transport field name
# Execution context
cwd: str | None = None # Working directory for command execution
timeout: int | None = None # Maximum response time in milliseconds
keep_alive: bool | None = (
None # Whether to keep the subprocess alive between connections
)
# Metadata
description: str | None = None # Human-readable server description
icon: str | None = None # Icon path or URL for UI display
# Authentication configuration
authentication: dict[str, Any] | None = None # Auth configuration object
model_config = ConfigDict(extra="allow") # Preserve unknown fields
def to_transport(self) -> StdioTransport:
from fastmcp.client.transports import StdioTransport
return StdioTransport(
command=self.command,
args=self.args,
env=self.env,
cwd=self.cwd,
keep_alive=self.keep_alive,
)
class TransformingStdioMCPServer(_TransformingMCPServerMixin, StdioMCPServer):
"""A Stdio server with tool transforms."""
class RemoteMCPServer(BaseModel):
"""MCP server configuration for HTTP/SSE transport.
This is the canonical configuration format for MCP servers using remote transports.
"""
# Required fields
url: str
# Transport configuration
transport: Literal["http", "streamable-http", "sse"] | None = None
headers: dict[str, str] = Field(default_factory=dict)
# Authentication
auth: Annotated[
str | Literal["oauth"] | httpx.Auth | None,
Field(
description='Either a string representing a Bearer token, the literal "oauth" to use OAuth authentication, or an httpx.Auth instance for custom authentication.',
),
] = None
# Timeout configuration
sse_read_timeout: datetime.timedelta | int | float | None = None
timeout: int | None = None # Maximum response time in milliseconds
# Metadata
description: str | None = None # Human-readable server description
icon: str | None = None # Icon path or URL for UI display
# Authentication configuration
authentication: dict[str, Any] | None = None # Auth configuration object
model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True
) # Preserve unknown fields
def to_transport(self) -> StreamableHttpTransport | SSETransport:
from fastmcp.client.transports import SSETransport, StreamableHttpTransport
if self.transport is None:
transport = infer_transport_type_from_url(self.url)
else:
transport = self.transport
if transport == "sse":
return SSETransport(
self.url,
headers=self.headers,
auth=self.auth,
sse_read_timeout=self.sse_read_timeout,
)
else:
# Both "http" and "streamable-http" map to StreamableHttpTransport
return StreamableHttpTransport(
self.url,
headers=self.headers,
auth=self.auth,
sse_read_timeout=self.sse_read_timeout,
)
class TransformingRemoteMCPServer(_TransformingMCPServerMixin, RemoteMCPServer):
"""A Remote server with tool transforms."""
TransformingMCPServerTypes = TransformingStdioMCPServer | TransformingRemoteMCPServer
CanonicalMCPServerTypes = StdioMCPServer | RemoteMCPServer
MCPServerTypes = TransformingMCPServerTypes | CanonicalMCPServerTypes
class MCPConfig(BaseModel):
"""A configuration object for MCP Servers that conforms to the canonical MCP configuration format
while adding additional fields for enabling FastMCP-specific features like tool transformations
and filtering by tags.
For an MCPConfig that is strictly canonical, see the `CanonicalMCPConfig` class.
"""
mcpServers: dict[str, MCPServerTypes] = Field(default_factory=dict)
model_config = ConfigDict(extra="allow") # Preserve unknown top-level fields
@model_validator(mode="before")
@classmethod
def wrap_servers_at_root(cls, values: dict[str, Any]) -> dict[str, Any]:
"""If there's no mcpServers key but there are server configs at root, wrap them."""
if "mcpServers" not in values:
# Check if any values look like server configs
has_servers = any(
isinstance(v, dict) and ("command" in v or "url" in v)
for v in values.values()
)
if has_servers:
# Move all server-like configs under mcpServers
return {"mcpServers": values}
return values
def add_server(self, name: str, server: MCPServerTypes) -> None:
"""Add or update a server in the configuration."""
self.mcpServers[name] = server
@classmethod
def from_dict(cls, config: dict[str, Any]) -> Self:
"""Parse MCP configuration from dictionary format."""
return cls.model_validate(config)
def to_dict(self) -> dict[str, Any]:
"""Convert MCPConfig to dictionary format, preserving all fields."""
return self.model_dump(exclude_none=True)
def write_to_file(self, file_path: Path) -> None:
"""Write configuration to JSON file."""
file_path.parent.mkdir(parents=True, exist_ok=True)
file_path.write_text(self.model_dump_json(indent=2))
@classmethod
def from_file(cls, file_path: Path) -> Self:
"""Load configuration from JSON file."""
if file_path.exists() and (content := file_path.read_text().strip()):
return cls.model_validate_json(content)
raise ValueError(f"No MCP servers defined in the config: {file_path}")
class CanonicalMCPConfig(MCPConfig):
"""Canonical MCP configuration format.
This defines the standard configuration format for Model Context Protocol servers.
The format is designed to be client-agnostic and extensible for future use cases.
"""
mcpServers: dict[str, CanonicalMCPServerTypes] = Field(default_factory=dict)
@override
def add_server(self, name: str, server: CanonicalMCPServerTypes) -> None:
"""Add or update a server in the configuration."""
self.mcpServers[name] = server
def update_config_file(
file_path: Path,
server_name: str,
server_config: CanonicalMCPServerTypes,
) -> None:
"""Update an MCP configuration file from a server object, preserving existing fields.
This is used for updating the mcpServer configurations of third-party tools so we do not
worry about transforming server objects here."""
config = MCPConfig.from_file(file_path)
# If updating an existing server, merge with existing configuration
# to preserve any unknown fields
if existing_server := config.mcpServers.get(server_name):
# Get the raw dict representation of both servers
existing_dict = existing_server.model_dump()
new_dict = server_config.model_dump(exclude_none=True)
# Merge, with new values taking precedence
merged_config = server_config.model_validate({**existing_dict, **new_dict})
config.add_server(server_name, merged_config)
else:
config.add_server(server_name, server_config)
config.write_to_file(file_path)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/mcp_config.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/utilities/cli.py | from __future__ import annotations
import json
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any
from pydantic import ValidationError
from rich.align import Align
from rich.console import Console, Group
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
import fastmcp
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.mcp_server_config import MCPServerConfig
from fastmcp.utilities.mcp_server_config.v1.sources.filesystem import FileSystemSource
from fastmcp.utilities.types import get_cached_typeadapter
from fastmcp.utilities.version_check import check_for_newer_version
if TYPE_CHECKING:
from fastmcp import FastMCP
logger = get_logger("cli.config")
def is_already_in_uv_subprocess() -> bool:
"""Check if we're already running in a FastMCP uv subprocess."""
return bool(os.environ.get("FASTMCP_UV_SPAWNED"))
def load_and_merge_config(
server_spec: str | None,
**cli_overrides,
) -> tuple[MCPServerConfig, str]:
"""Load config from server_spec and apply CLI overrides.
This consolidates the config parsing logic that was duplicated across
run, inspect, and dev commands.
Args:
server_spec: Python file, config file, URL, or None to auto-detect
cli_overrides: CLI arguments that override config values
Returns:
Tuple of (MCPServerConfig, resolved_server_spec)
"""
config = None
config_path = None
# Auto-detect fastmcp.json if no server_spec provided
if server_spec is None:
config_path = Path("fastmcp.json")
if not config_path.exists():
found_config = MCPServerConfig.find_config()
if found_config:
config_path = found_config
else:
logger.error(
"No server specification provided and no fastmcp.json found in current directory.\n"
"Please specify a server file or create a fastmcp.json configuration."
)
raise FileNotFoundError("No server specification or fastmcp.json found")
resolved_spec = str(config_path)
logger.info(f"Using configuration from {config_path}")
else:
resolved_spec = server_spec
# Load config if server_spec is a .json file
if resolved_spec.endswith(".json"):
config_path = Path(resolved_spec)
if config_path.exists():
try:
with open(config_path) as f:
data = json.load(f)
# Check if it's an MCPConfig first (has canonical mcpServers key)
if "mcpServers" in data:
# MCPConfig - we don't process these here, just pass through
pass
else:
# Try to parse as MCPServerConfig
try:
adapter = get_cached_typeadapter(MCPServerConfig)
config = adapter.validate_python(data)
# Apply deployment settings
if config.deployment:
config.deployment.apply_runtime_settings(config_path)
except ValidationError:
# Not a valid MCPServerConfig, just pass through
pass
except (json.JSONDecodeError, FileNotFoundError):
# Not a valid JSON file, just pass through
pass
# If we don't have a config object yet, create one from filesystem source
if config is None:
source = FileSystemSource(path=resolved_spec)
config = MCPServerConfig(source=source)
# Convert to dict for immutable transformation
config_dict = config.model_dump()
# Apply CLI overrides to config's environment (always exists due to default_factory)
if python_override := cli_overrides.get("python"):
config_dict["environment"]["python"] = python_override
if packages_override := cli_overrides.get("with_packages"):
# Merge packages - CLI packages are added to config packages
existing = config_dict["environment"].get("dependencies") or []
config_dict["environment"]["dependencies"] = packages_override + existing
if requirements_override := cli_overrides.get("with_requirements"):
config_dict["environment"]["requirements"] = str(requirements_override)
if project_override := cli_overrides.get("project"):
config_dict["environment"]["project"] = str(project_override)
if editable_override := cli_overrides.get("editable"):
config_dict["environment"]["editable"] = editable_override
# Apply deployment CLI overrides (always exists due to default_factory)
if transport_override := cli_overrides.get("transport"):
config_dict["deployment"]["transport"] = transport_override
if host_override := cli_overrides.get("host"):
config_dict["deployment"]["host"] = host_override
if port_override := cli_overrides.get("port"):
config_dict["deployment"]["port"] = port_override
if path_override := cli_overrides.get("path"):
config_dict["deployment"]["path"] = path_override
if log_level_override := cli_overrides.get("log_level"):
config_dict["deployment"]["log_level"] = log_level_override
if server_args_override := cli_overrides.get("server_args"):
config_dict["deployment"]["args"] = server_args_override
# Create new config from modified dict
new_config = MCPServerConfig(**config_dict)
return new_config, resolved_spec
LOGO_ASCII_1 = r"""
_ __ ___ _____ __ __ _____________ ____ ____
_ __ ___ .'____/___ ______/ /_/ |/ / ____/ __ \ |___ \ / __ \
_ __ ___ / /_ / __ `/ ___/ __/ /|_/ / / / /_/ / ___/ / / / / /
_ __ ___ / __/ / /_/ (__ ) /_/ / / / /___/ ____/ / __/_/ /_/ /
_ __ ___ /_/ \____/____/\__/_/ /_/\____/_/ /_____(*)____/
""".lstrip("\n")
# This prints the below in a blue gradient
# βββ βββ βββ βββ βββββ βββ βββ
# ββ βββ βββ β β β β βββ βββ
LOGO_ASCII_2 = (
"\x1b[38;2;0;198;255m \x1b[38;2;0;195;255mβ\x1b[38;2;0;192;255mβ\x1b[38;2;0;189;255mβ\x1b[38;2;0;186;255m "
"\x1b[38;2;0;184;255mβ\x1b[38;2;0;181;255mβ\x1b[38;2;0;178;255mβ\x1b[38;2;0;175;255m "
"\x1b[38;2;0;172;255mβ\x1b[38;2;0;169;255mβ\x1b[38;2;0;166;255mβ\x1b[38;2;0;163;255m "
"\x1b[38;2;0;160;255mβ\x1b[38;2;0;157;255mβ\x1b[38;2;0;155;255mβ\x1b[38;2;0;152;255m "
"\x1b[38;2;0;149;255mβ\x1b[38;2;0;146;255mβ\x1b[38;2;0;143;255mβ\x1b[38;2;0;140;255mβ\x1b[38;2;0;137;255mβ\x1b[38;2;0;134;255m "
"\x1b[38;2;0;131;255mβ\x1b[38;2;0;128;255mβ\x1b[38;2;0;126;255mβ\x1b[38;2;0;123;255m "
"\x1b[38;2;0;120;255mβ\x1b[38;2;0;117;255mβ\x1b[38;2;0;114;255mβ\x1b[39m\n"
"\x1b[38;2;0;198;255m \x1b[38;2;0;195;255mβ\x1b[38;2;0;192;255mβ\x1b[38;2;0;189;255m \x1b[38;2;0;186;255m "
"\x1b[38;2;0;184;255mβ\x1b[38;2;0;181;255mβ\x1b[38;2;0;178;255mβ\x1b[38;2;0;175;255m "
"\x1b[38;2;0;172;255mβ\x1b[38;2;0;169;255mβ\x1b[38;2;0;166;255mβ\x1b[38;2;0;163;255m "
"\x1b[38;2;0;160;255m \x1b[38;2;0;157;255mβ\x1b[38;2;0;155;255m \x1b[38;2;0;152;255m "
"\x1b[38;2;0;149;255mβ\x1b[38;2;0;146;255m \x1b[38;2;0;143;255mβ\x1b[38;2;0;140;255m \x1b[38;2;0;137;255mβ\x1b[38;2;0;134;255m "
"\x1b[38;2;0;131;255mβ\x1b[38;2;0;128;255mβ\x1b[38;2;0;126;255mβ\x1b[38;2;0;123;255m "
"\x1b[38;2;0;120;255mβ\x1b[38;2;0;117;255mβ\x1b[38;2;0;114;255mβ\x1b[39m"
).strip()
# Prints the below in a blue gradient - stylized F
# ββββ
# βββ
# β
LOGO_ASCII_3 = (
" \x1b[38;2;0;170;255mβ\x1b[38;2;0;142;255mβ\x1b[38;2;0;114;255mβ\x1b[38;2;0;86;255mβ\x1b[39m\n"
" \x1b[38;2;0;170;255mβ\x1b[38;2;0;142;255mβ\x1b[38;2;0;114;255mβ\x1b[39m\n"
"\x1b[38;2;0;170;255mβ\x1b[39m\n"
"\x1b[0m"
)
# Prints the below in a blue gradient - block logo with slightly stylized F
# βββ βββ βββ βββ βββββ βββ βββ
# ββ βββ βββ β β β β βββ βββ
LOGO_ASCII_4 = (
"\x1b[38;2;0;198;255m \x1b[38;2;0;195;255mβ\x1b[38;2;0;192;255mβ\x1b[38;2;0;189;255mβ\x1b[38;2;0;186;255m \x1b[38;2;0;184;255mβ\x1b[38;2;0;181;255mβ\x1b[38;2;0;178;255mβ\x1b[38;2;0;175;255m "
"\x1b[38;2;0;172;255mβ\x1b[38;2;0;169;255mβ\x1b[38;2;0;166;255mβ\x1b[38;2;0;163;255m "
"\x1b[38;2;0;160;255mβ\x1b[38;2;0;157;255mβ\x1b[38;2;0;155;255mβ\x1b[38;2;0;152;255m "
"\x1b[38;2;0;149;255mβ\x1b[38;2;0;146;255mβ\x1b[38;2;0;143;255mβ\x1b[38;2;0;140;255mβ\x1b[38;2;0;137;255mβ\x1b[38;2;0;134;255m "
"\x1b[38;2;0;131;255mβ\x1b[38;2;0;128;255mβ\x1b[38;2;0;126;255mβ\x1b[38;2;0;123;255m "
"\x1b[38;2;0;120;255mβ\x1b[38;2;0;117;255mβ\x1b[38;2;0;114;255mβ\x1b[39m\n"
"\x1b[38;2;0;198;255m \x1b[38;2;0;195;255mβ\x1b[38;2;0;192;255mβ\x1b[38;2;0;189;255m \x1b[38;2;0;186;255m \x1b[38;2;0;184;255mβ\x1b[38;2;0;181;255mβ\x1b[38;2;0;178;255mβ\x1b[38;2;0;175;255m "
"\x1b[38;2;0;172;255mβ\x1b[38;2;0;169;255mβ\x1b[38;2;0;166;255mβ\x1b[38;2;0;163;255m "
"\x1b[38;2;0;160;255m \x1b[38;2;0;157;255mβ\x1b[38;2;0;155;255m \x1b[38;2;0;152;255m "
"\x1b[38;2;0;149;255mβ\x1b[38;2;0;146;255m \x1b[38;2;0;143;255mβ\x1b[38;2;0;140;255m \x1b[38;2;0;137;255mβ\x1b[38;2;0;134;255m "
"\x1b[38;2;0;131;255mβ\x1b[38;2;0;128;255mβ\x1b[38;2;0;126;255mβ\x1b[38;2;0;123;255m "
"\x1b[38;2;0;120;255mβ\x1b[38;2;0;117;255mβ\x1b[38;2;0;114;255mβ\x1b[39m\n"
)
def log_server_banner(server: FastMCP[Any]) -> None:
"""Creates and logs a formatted banner with server information and logo."""
# Check for updates (non-blocking, fails silently)
newer_version = check_for_newer_version()
# Create the logo text
# Use Text with no_wrap and markup disabled to preserve ANSI escape codes
logo_text = Text.from_ansi(LOGO_ASCII_4, no_wrap=True)
# Create the main title
title_text = Text(f"FastMCP {fastmcp.__version__}", style="bold blue")
# Create the information table
info_table = Table.grid(padding=(0, 1))
info_table.add_column(style="bold", justify="center") # Emoji column
info_table.add_column(style="cyan", justify="left") # Label column
info_table.add_column(style="dim", justify="left") # Value column
server_info = server.name
if server.version:
server_info += f", {server.version}"
info_table.add_row("π₯", "Server:", Text(server_info, style="dim"))
info_table.add_row("π", "Deploy free:", "https://fastmcp.cloud")
# Create panel with logo, title, and information using Group
docs_url = Text("https://gofastmcp.com", style="dim")
panel_content = Group(
"",
Align.center(logo_text),
"",
"",
Align.center(title_text),
Align.center(docs_url),
"",
Align.center(info_table),
)
panel = Panel(
panel_content,
border_style="dim",
padding=(1, 4),
# expand=False,
width=80, # Set max width for the panel
)
console = Console(stderr=True)
# Build output elements
output_elements: list[Align | Panel | str] = ["\n", Align.center(panel)]
# Add update notice if a newer version is available (shown last for visibility)
if newer_version:
update_line1 = Text.assemble(
("π Update available: ", "bold"),
(newer_version, "bold green"),
)
update_line2 = Text("Run: pip install --upgrade fastmcp", style="dim")
update_notice = Panel(
Group(Align.center(update_line1), Align.center(update_line2)),
border_style="blue",
padding=(0, 2),
width=80,
)
output_elements.append(Align.center(update_notice))
output_elements.append("\n")
console.print(Group(*output_elements))
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/cli.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/client/elicitation.py | from __future__ import annotations
from collections.abc import Awaitable, Callable
from typing import Any, Generic, TypeAlias
import mcp.types
from mcp import ClientSession
from mcp.client.session import ElicitationFnT
from mcp.shared.context import LifespanContextT, RequestContext
from mcp.types import ElicitRequestFormParams, ElicitRequestParams
from mcp.types import ElicitResult as MCPElicitResult
from pydantic_core import to_jsonable_python
from typing_extensions import TypeVar
from fastmcp.utilities.json_schema_type import json_schema_to_type
__all__ = ["ElicitRequestParams", "ElicitResult", "ElicitationHandler"]
T = TypeVar("T", default=Any)
class ElicitResult(MCPElicitResult, Generic[T]):
content: T | None = None
ElicitationHandler: TypeAlias = Callable[
[
str, # message
type[T]
| None, # a class for creating a structured response (None for URL elicitation)
ElicitRequestParams,
RequestContext[ClientSession, LifespanContextT],
],
Awaitable[T | dict[str, Any] | ElicitResult[T | dict[str, Any]]],
]
def create_elicitation_callback(
elicitation_handler: ElicitationHandler,
) -> ElicitationFnT:
async def _elicitation_handler(
context: RequestContext[ClientSession, LifespanContextT],
params: ElicitRequestParams,
) -> MCPElicitResult | mcp.types.ErrorData:
try:
# requestedSchema only exists on ElicitRequestFormParams, not ElicitRequestURLParams
if isinstance(params, ElicitRequestFormParams):
if params.requestedSchema == {"type": "object", "properties": {}}:
response_type = None
else:
response_type = json_schema_to_type(params.requestedSchema)
else:
# URL-based elicitation doesn't have a schema
response_type = None
result = await elicitation_handler(
params.message, response_type, params, context
)
# if the user returns data, we assume they've accepted the elicitation
if not isinstance(result, ElicitResult):
result = ElicitResult(action="accept", content=result)
content = to_jsonable_python(result.content)
if not isinstance(content, dict | None):
raise ValueError(
"Elicitation responses must be serializable as a JSON object (dict). Received: "
f"{result.content!r}"
)
return MCPElicitResult(
_meta=result.meta, # type: ignore[call-arg] # _meta is Pydantic alias for meta field
action=result.action,
content=content,
)
except Exception as e:
return mcp.types.ErrorData(
code=mcp.types.INTERNAL_ERROR,
message=str(e),
)
return _elicitation_handler
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/elicitation.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/contrib/component_manager/component_manager.py | """
HTTP routes for enabling/disabling components in FastMCP.
Provides REST endpoints for controlling component enabled state with optional
authentication scopes.
"""
from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Mount, Route
from fastmcp.server.server import FastMCP
def set_up_component_manager(
server: FastMCP, path: str = "/", required_scopes: list[str] | None = None
) -> None:
"""Set up HTTP routes for enabling/disabling tools, resources, and prompts.
Args:
server: The FastMCP server instance.
path: Base path for component management routes.
required_scopes: Optional list of scopes required for these routes.
Applies only if authentication is enabled.
Routes created:
POST /tools/{name}/enable[?version=v1]
POST /tools/{name}/disable[?version=v1]
POST /resources/{uri}/enable[?version=v1]
POST /resources/{uri}/disable[?version=v1]
POST /prompts/{name}/enable[?version=v1]
POST /prompts/{name}/disable[?version=v1]
"""
if required_scopes is None:
# No auth - include path prefix in routes
routes = _build_routes(server, path)
server._additional_http_routes.extend(routes)
else:
# With auth - Mount handles path prefix, routes shouldn't have it
routes = _build_routes(server, "/")
mount = Mount(
path if path != "/" else "",
app=RequireAuthMiddleware(Starlette(routes=routes), required_scopes),
)
server._additional_http_routes.append(mount)
def _build_routes(server: FastMCP, base_path: str) -> list[Route]:
"""Build all component management routes."""
prefix = base_path.rstrip("/") if base_path != "/" else ""
return [
# Tools
Route(
f"{prefix}/tools/{{name}}/enable",
endpoint=_make_endpoint(server, "tool", "enable"),
methods=["POST"],
),
Route(
f"{prefix}/tools/{{name}}/disable",
endpoint=_make_endpoint(server, "tool", "disable"),
methods=["POST"],
),
# Resources
Route(
f"{prefix}/resources/{{uri:path}}/enable",
endpoint=_make_endpoint(server, "resource", "enable"),
methods=["POST"],
),
Route(
f"{prefix}/resources/{{uri:path}}/disable",
endpoint=_make_endpoint(server, "resource", "disable"),
methods=["POST"],
),
# Prompts
Route(
f"{prefix}/prompts/{{name}}/enable",
endpoint=_make_endpoint(server, "prompt", "enable"),
methods=["POST"],
),
Route(
f"{prefix}/prompts/{{name}}/disable",
endpoint=_make_endpoint(server, "prompt", "disable"),
methods=["POST"],
),
]
def _make_endpoint(server: FastMCP, component_type: str, action: str):
"""Create an endpoint function for enabling/disabling a component type."""
async def endpoint(request: Request) -> JSONResponse:
# Get name from path params (tools/prompts use 'name', resources use 'uri')
name = request.path_params.get("name") or request.path_params.get("uri")
version = request.query_params.get("version")
# Map component type to components list
# Note: "resource" in the route can refer to either a resource or template
# We need to check if it's a template (contains {}) and use "template" if so
if component_type == "resource" and name is not None and "{" in name:
components = ["template"]
elif component_type == "resource":
components = ["resource"]
else:
component_map = {
"tool": ["tool"],
"prompt": ["prompt"],
}
components = component_map[component_type]
# Call server.enable() or server.disable()
method = getattr(server, action)
method(names={name} if name else None, version=version, components=components)
return JSONResponse(
{"message": f"{action.capitalize()}d {component_type}: {name}"}
)
return endpoint
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/contrib/component_manager/component_manager.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/contrib/component_manager/example.py | from fastmcp import FastMCP
from fastmcp.contrib.component_manager import set_up_component_manager
from fastmcp.server.auth.providers.jwt import JWTVerifier, RSAKeyPair
key_pair = RSAKeyPair.generate()
auth = JWTVerifier(
public_key=key_pair.public_key,
issuer="https://dev.example.com",
audience="my-dev-server",
required_scopes=["mcp:read"],
)
# Build main server
mcp_token = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=["mcp:write", "mcp:read"],
)
mcp = FastMCP(
name="Component Manager",
instructions="This is a test server with component manager.",
auth=auth,
)
# Set up main server component manager
set_up_component_manager(server=mcp, required_scopes=["mcp:write"])
# Build mounted server
mounted_token = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=["mounted:write", "mcp:read"],
)
mounted = FastMCP(
name="Component Manager",
instructions="This is a test server with component manager.",
auth=auth,
)
# Set up mounted server component manager
set_up_component_manager(server=mounted, required_scopes=["mounted:write"])
# Mount
mcp.mount(server=mounted, namespace="mo")
@mcp.resource("resource://greeting")
def get_greeting() -> str:
"""Provides a simple greeting message."""
return "Hello from FastMCP Resources!"
@mounted.tool("greeting")
def get_info() -> str:
"""Provides a simple info."""
return "You are using component manager contrib module!"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/contrib/component_manager/example.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/server/elicitation.py | from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import Any, Generic, Literal, cast, get_origin
from mcp.server.elicitation import (
CancelledElicitation,
DeclinedElicitation,
)
from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
from pydantic_core import core_schema
from typing_extensions import TypeVar
from fastmcp.utilities.json_schema import compress_schema
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.types import get_cached_typeadapter
__all__ = [
"AcceptedElicitation",
"CancelledElicitation",
"DeclinedElicitation",
"ElicitConfig",
"ScalarElicitationType",
"get_elicitation_schema",
"handle_elicit_accept",
"parse_elicit_response_type",
]
logger = get_logger(__name__)
T = TypeVar("T", default=Any)
class ElicitationJsonSchema(GenerateJsonSchema):
    """Custom JSON schema generator for MCP elicitation that always inlines enums.

    MCP elicitation requires inline enum schemas without $ref/$defs references.
    This generator ensures enums are always generated inline for compatibility.
    Optionally adds enumNames for better UI display when available.
    """

    def generate_inner(self, schema: core_schema.CoreSchema) -> JsonSchemaValue:  # type: ignore[override]
        """Override to prevent ref generation for enums and handle list schemas."""
        # For enum schemas, bypass the ref mechanism entirely
        if schema["type"] == "enum":
            # Directly call our custom enum_schema without going through handler
            # This prevents the ref/defs mechanism from being invoked
            return self.enum_schema(schema)
        # For list schemas, check if items are enums
        if schema["type"] == "list":
            return self.list_schema(schema)
        # For all other types, use the default implementation
        return super().generate_inner(schema)

    def list_schema(self, schema: core_schema.ListSchema) -> JsonSchemaValue:
        """Generate schema for list types, detecting enum items for multi-select."""
        items_schema = schema.get("items_schema")
        # Check if items are enum/Literal
        if items_schema and items_schema.get("type") == "enum":
            # Generate array with enum items
            items = self.enum_schema(items_schema)  # type: ignore[arg-type]
            # If items have oneOf pattern, convert to anyOf for multi-select per SEP-1330
            if "oneOf" in items:
                items = {"anyOf": items["oneOf"]}
            return {
                "type": "array",
                "items": items,  # Will be {"enum": [...]} or {"anyOf": [...]}
            }
        # Check if items are Literal (which Pydantic represents differently)
        if items_schema:
            # Try to detect Literal patterns
            items_result = super().generate_inner(items_schema)
            # If it's a const pattern or enum-like, allow it
            if (
                "const" in items_result
                or "enum" in items_result
                or "oneOf" in items_result
            ):
                # Convert oneOf to anyOf for multi-select
                if "oneOf" in items_result:
                    items_result = {"anyOf": items_result["oneOf"]}
                return {
                    "type": "array",
                    "items": items_result,
                }
        # Default behavior for non-enum arrays
        return super().list_schema(schema)

    def enum_schema(self, schema: core_schema.EnumSchema) -> JsonSchemaValue:
        """Generate inline enum schema.

        Always generates enum pattern: `{"enum": [value, ...]}`

        Titled enums are handled separately via dict-based syntax in ctx.elicit().
        """
        # Get the base schema from parent - always use simple enum pattern
        return super().enum_schema(schema)
# we can't use the low-level AcceptedElicitation because it only works with BaseModels
class AcceptedElicitation(BaseModel, Generic[T]):
    """Result when user accepts the elicitation."""

    action: Literal["accept"] = "accept"
    # The validated (or raw-extracted) response payload.
    data: T
@dataclass
class ScalarElicitationType(Generic[T]):
    """Wrapper that lifts a bare scalar type into an object schema with a
    single required ``value`` field, as MCP elicitation requires objects."""

    value: T
@dataclass
class ElicitConfig:
    """Configuration for an elicitation request.

    Attributes:
        schema: The JSON schema to send to the client
        response_type: The type to validate responses with (None for raw schemas)
        is_raw: True if schema was built directly (extract "value" from response)
    """

    # JSON schema advertised to the client.
    schema: dict[str, Any]
    # Type used for Pydantic validation of the accepted response; None means
    # the schema was hand-built and no validation model exists.
    response_type: type | None
    # When True, the accepted payload is a {"value": ...} wrapper to unwrap.
    is_raw: bool
def parse_elicit_response_type(response_type: Any) -> ElicitConfig:
    """Parse response_type into schema and handling configuration.

    Supports multiple syntaxes:
    - None: Empty object schema, expect empty response
    - dict: `{"low": {"title": "..."}}` -> single-select titled enum
    - list patterns:
        - `[["a", "b"]]` -> multi-select untitled
        - `[{"low": {...}}]` -> multi-select titled
        - `["a", "b"]` -> single-select untitled
    - `list[X]` type annotation: multi-select with type
    - Scalar types (bool, int, float, str, Literal, Enum): single value
    - Other types (dataclass, BaseModel): use directly

    Returns:
        ElicitConfig describing the schema to send and how to interpret the
        accepted response.
    """
    if response_type is None:
        # No data requested: advertise an empty object schema.
        return ElicitConfig(
            schema={"type": "object", "properties": {}},
            response_type=None,
            is_raw=False,
        )
    # Literal dict/list container syntaxes are checked before type annotations.
    if isinstance(response_type, dict):
        return _parse_dict_syntax(response_type)
    if isinstance(response_type, list):
        return _parse_list_syntax(response_type)
    # list[X] annotation -> multi-select of X.
    if get_origin(response_type) is list:
        return _parse_generic_list(response_type)
    # Bare scalars are wrapped so the advertised schema is still an object.
    if _is_scalar_type(response_type):
        return _parse_scalar_type(response_type)
    # Other types (dataclass, BaseModel, etc.) - use directly
    return ElicitConfig(
        schema=get_elicitation_schema(response_type),
        response_type=response_type,
        is_raw=False,
    )
def _is_scalar_type(response_type: Any) -> bool:
"""Check if response_type is a scalar type that needs wrapping."""
return (
response_type in {bool, int, float, str}
or get_origin(response_type) is Literal
or (isinstance(response_type, type) and issubclass(response_type, Enum))
)
def _parse_dict_syntax(d: dict[str, Any]) -> ElicitConfig:
    """Parse dict syntax: {"low": {"title": "..."}} -> single-select titled.

    Raises:
        ValueError: If the dict is empty.
    """
    if not d:
        raise ValueError("Dict response_type cannot be empty.")
    # Wrap the titled enum in an object schema with a single required "value".
    value_schema = _dict_to_enum_schema(d, multi_select=False)
    object_schema = {
        "type": "object",
        "properties": {"value": value_schema},
        "required": ["value"],
    }
    return ElicitConfig(schema=object_schema, response_type=None, is_raw=True)
def _parse_list_syntax(lst: list[Any]) -> ElicitConfig:
    """Parse list patterns: [[...]], [{...}], or [...].

    Raises:
        ValueError: If the list matches none of the supported patterns.
    """
    # [["a", "b", "c"]] -> multi-select untitled
    if (
        len(lst) == 1
        and isinstance(lst[0], list)
        and lst[0]
        and all(isinstance(item, str) for item in lst[0])
    ):
        return ElicitConfig(
            schema={
                "type": "object",
                "properties": {"value": {"type": "array", "items": {"enum": lst[0]}}},
                "required": ["value"],
            },
            response_type=None,
            is_raw=True,
        )
    # [{"low": {"title": "..."}}] -> multi-select titled
    if len(lst) == 1 and isinstance(lst[0], dict) and lst[0]:
        enum_schema = _dict_to_enum_schema(lst[0], multi_select=True)
        return ElicitConfig(
            schema={
                "type": "object",
                "properties": {"value": {"type": "array", "items": enum_schema}},
                "required": ["value"],
            },
            response_type=None,
            is_raw=True,
        )
    # ["a", "b", "c"] -> single-select untitled
    if lst and all(isinstance(item, str) for item in lst):
        # Construct Literal type from tuple - use cast since we can't construct Literal dynamically
        # but we know the values are all strings
        choice_literal: type[Any] = cast(type[Any], Literal[tuple(lst)])  # type: ignore[valid-type]
        wrapped = ScalarElicitationType[choice_literal]  # type: ignore[valid-type]
        return ElicitConfig(
            schema=get_elicitation_schema(wrapped),
            response_type=wrapped,
            is_raw=False,
        )
    raise ValueError(f"Invalid list response_type format. Received: {lst}")
def _parse_generic_list(response_type: Any) -> ElicitConfig:
    """Parse list[X] type annotation -> multi-select.

    Wraps the annotation in ScalarElicitationType so the advertised schema is
    an object with a single required "value" array property.
    """
    wrapped = ScalarElicitationType[response_type]
    return ElicitConfig(
        schema=get_elicitation_schema(wrapped),
        response_type=wrapped,
        is_raw=False,
    )
def _parse_scalar_type(response_type: Any) -> ElicitConfig:
    """Parse scalar types (bool, int, float, str, Literal, Enum).

    Wraps the scalar in ScalarElicitationType so the advertised schema is an
    object with a single required "value" property.
    """
    wrapped = ScalarElicitationType[response_type]
    return ElicitConfig(
        schema=get_elicitation_schema(wrapped),
        response_type=wrapped,
        is_raw=False,
    )
def handle_elicit_accept(
    config: ElicitConfig, content: Any
) -> AcceptedElicitation[Any]:
    """Handle an accepted elicitation response.

    Args:
        config: The elicitation configuration from parse_elicit_response_type
        content: The response content from the client

    Returns:
        AcceptedElicitation with the extracted/validated data

    Raises:
        ValueError: When the payload does not match the expected shape.
    """
    # Raw schemas (dict / nested-list syntax) always wrap the answer in "value".
    if config.is_raw:
        if isinstance(content, dict) and "value" in content:
            return AcceptedElicitation[Any](data=content["value"])
        raise ValueError("Elicitation response missing required 'value' field.")

    # Typed schemas: run the payload through a cached Pydantic TypeAdapter.
    if config.response_type is not None:
        adapter = get_cached_typeadapter(config.response_type)
        parsed = adapter.validate_python(content)
        # Scalars were wrapped in ScalarElicitationType; unwrap before returning.
        if isinstance(parsed, ScalarElicitationType):
            parsed = parsed.value
        return AcceptedElicitation[Any](data=parsed)

    # response_type is None: the client must send nothing back.
    if content:
        raise ValueError(
            f"Elicitation expected an empty response, but received: {content}"
        )
    return AcceptedElicitation[dict[str, Any]](data={})
def _dict_to_enum_schema(
enum_dict: dict[str, dict[str, str]], multi_select: bool = False
) -> dict[str, Any]:
"""Convert dict enum to SEP-1330 compliant schema pattern.
Args:
enum_dict: {"low": {"title": "Low Priority"}, "medium": {"title": "Medium Priority"}}
multi_select: If True, use anyOf pattern; if False, use oneOf pattern
Returns:
{"type": "string", "oneOf": [...]} for single-select
{"anyOf": [...]} for multi-select (used as array items)
"""
pattern_key = "anyOf" if multi_select else "oneOf"
pattern = []
for value, metadata in enum_dict.items():
title = metadata.get("title", value)
pattern.append({"const": value, "title": title})
result: dict[str, Any] = {pattern_key: pattern}
if not multi_select:
result["type"] = "string"
return result
def get_elicitation_schema(response_type: type[T]) -> dict[str, Any]:
    """Get the schema for an elicitation response.

    Args:
        response_type: The type of the response

    Returns:
        A flat object JSON schema suitable for MCP elicitation.

    Raises:
        TypeError: If the generated schema violates MCP elicitation rules.
    """
    # Use custom schema generator that inlines enums for MCP compatibility
    schema = get_cached_typeadapter(response_type).json_schema(
        schema_generator=ElicitationJsonSchema
    )
    # compress_schema presumably prunes unused $defs/noise — confirm in
    # fastmcp.utilities.json_schema.
    schema = compress_schema(schema)
    # Validate the schema to ensure it follows MCP elicitation requirements
    validate_elicitation_json_schema(schema)
    return schema
def validate_elicitation_json_schema(schema: dict[str, Any]) -> None:
    """Validate that a JSON schema follows MCP elicitation requirements.

    This ensures the schema is compatible with MCP elicitation requirements:
    - Must be an object schema
    - Must only contain primitive field types (string, number, integer, boolean)
    - Must be flat (no nested objects or arrays of objects)
    - Allows const fields (for Literal types) and enum fields (for Enum types)
    - Only primitive types and their nullable variants are allowed

    Args:
        schema: The JSON schema to validate

    Raises:
        TypeError: If the schema doesn't meet MCP elicitation requirements
    """
    ALLOWED_TYPES = {"string", "number", "integer", "boolean"}

    # The top-level schema must describe an object.
    if schema.get("type") != "object":
        raise TypeError(
            f"Elicitation schema must be an object schema, got type '{schema.get('type')}'. "
            "Elicitation schemas are limited to flat objects with primitive properties only."
        )

    for field_name, field_schema in schema.get("properties", {}).items():
        field_type = field_schema.get("type")

        # Collapse nullable unions like ["string", "null"] down to "string".
        if isinstance(field_type, list):
            if "null" in field_type:
                field_type = [t for t in field_type if t != "null"]
                if len(field_type) == 1:
                    field_type = field_type[0]
        elif field_schema.get("nullable", False):
            continue  # Nullable with no other type is fine

        # const (Literal) and enum fields are always allowed.
        if "const" in field_schema:
            continue
        if "enum" in field_schema:
            continue

        # $ref: only allow references resolving to enums or primitive types.
        if "$ref" in field_schema:
            ref_path = field_schema["$ref"]
            if ref_path.startswith("#/$defs/"):
                target = schema.get("$defs", {}).get(ref_path[len("#/$defs/") :], {})
                if "enum" in target:
                    continue
                if target.get("type") in ALLOWED_TYPES:
                    continue
            # Unresolvable or non-primitive reference: reject for safety.
            raise TypeError(
                f"Elicitation schema field '{field_name}' contains a reference '{ref_path}' "
                "that could not be validated. Only references to enum types or primitive types are allowed."
            )

        # Unions (oneOf/anyOf): every member must be const/enum or primitive.
        if "oneOf" in field_schema or "anyOf" in field_schema:
            members = field_schema.get("oneOf", []) + field_schema.get("anyOf", [])
            for member in members:
                if "const" in member or "enum" in member:
                    continue
                member_type = member.get("type")
                if member_type not in ALLOWED_TYPES:
                    raise TypeError(
                        f"Elicitation schema field '{field_name}' has union type '{member_type}' which is not "
                        f"a primitive type. Only {ALLOWED_TYPES} are allowed in elicitation schemas."
                    )
            continue

        # Arrays are only permitted for enum-style multi-select items.
        if field_type == "array":
            items_schema = field_schema.get("items", {})
            if items_schema.get("type") == "object":
                raise TypeError(
                    f"Elicitation schema field '{field_name}' is an array of objects, but arrays of objects are not allowed. "
                    "Elicitation schemas must be flat objects with primitive properties only."
                )
            if "enum" in items_schema:
                continue  # Allowed: {"type": "array", "items": {"enum": [...]}}
            # SEP-1330 style: every oneOf/anyOf item must carry a const.
            item_members = items_schema.get("oneOf", []) + items_schema.get("anyOf", [])
            if item_members and all("const" in m for m in item_members):
                continue
            raise TypeError(
                f"Elicitation schema field '{field_name}' is an array, but arrays are only allowed "
                "when items are enums (for multi-select). Only enum arrays are supported in elicitation schemas."
            )

        # Nested objects are never allowed.
        if field_type == "object":
            raise TypeError(
                f"Elicitation schema field '{field_name}' is an object, but nested objects are not allowed. "
                "Elicitation schemas must be flat objects with primitive properties only."
            )

        # Everything else must be one of the primitive types.
        if field_type not in ALLOWED_TYPES:
            raise TypeError(
                f"Elicitation schema field '{field_name}' has type '{field_type}' which is not "
                f"a primitive type. Only {ALLOWED_TYPES} are allowed in elicitation schemas."
            )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/elicitation.py",
"license": "Apache License 2.0",
"lines": 382,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/utilities/json_schema_type.py | """Convert JSON Schema to Python types with validation.
The json_schema_to_type function converts a JSON Schema into a Python type that can be used
for validation with Pydantic. It supports:
- Basic types (string, number, integer, boolean, null)
- Complex types (arrays, objects)
- Format constraints (date-time, email, uri)
- Numeric constraints (minimum, maximum, multipleOf)
- String constraints (minLength, maxLength, pattern)
- Array constraints (minItems, maxItems, uniqueItems)
- Object properties with defaults
- References and recursive schemas
- Enums and constants
- Union types
Example:
```python
schema = {
"type": "object",
"properties": {
"name": {"type": "string", "minLength": 1},
"age": {"type": "integer", "minimum": 0},
"email": {"type": "string", "format": "email"}
},
"required": ["name", "age"]
}
# Name is optional and will be inferred from schema's "title" property if not provided
Person = json_schema_to_type(schema)
# Creates a validated dataclass with name, age, and optional email fields
```
"""
from __future__ import annotations
import hashlib
import json
import re
from collections.abc import Callable, Mapping
from copy import deepcopy
from dataclasses import MISSING, field, make_dataclass
from datetime import datetime
from typing import (
Annotated,
Any,
ForwardRef,
Literal,
Union,
cast,
)
from pydantic import (
AnyUrl,
BaseModel,
ConfigDict,
EmailStr,
Field,
Json,
StringConstraints,
model_validator,
)
from typing_extensions import NotRequired, TypedDict
# Public API of this module.
__all__ = ["JSONSchema", "json_schema_to_type"]

# Mapping from JSON Schema "format" values to the Python/Pydantic types used
# to validate them.
FORMAT_TYPES: dict[str, Any] = {
    "date-time": datetime,
    "email": EmailStr,
    "uri": AnyUrl,
    "json": Json,
}

# Cache of generated classes keyed by (schema hash, sanitized name). A None
# value marks a class currently being built, so recursive references resolve
# to a ForwardRef instead of recursing forever.
_classes: dict[tuple[str, Any], type | None] = {}
class JSONSchema(TypedDict):
    """Typed description of the JSON Schema keywords this module understands.

    All keys are optional; keywords not listed here are ignored by the
    conversion logic.
    """

    # Core type and structure keywords
    type: NotRequired[str | list[str]]
    properties: NotRequired[dict[str, JSONSchema]]
    required: NotRequired[list[str]]
    additionalProperties: NotRequired[bool | JSONSchema]
    items: NotRequired[JSONSchema | list[JSONSchema]]
    # Value restriction keywords
    enum: NotRequired[list[Any]]
    const: NotRequired[Any]
    default: NotRequired[Any]
    # Annotation keywords
    description: NotRequired[str]
    title: NotRequired[str]
    examples: NotRequired[list[Any]]
    format: NotRequired[str]
    # Composition keywords
    allOf: NotRequired[list[JSONSchema]]
    anyOf: NotRequired[list[JSONSchema]]
    oneOf: NotRequired[list[JSONSchema]]
    not_: NotRequired[JSONSchema]
    definitions: NotRequired[dict[str, JSONSchema]]
    dependencies: NotRequired[dict[str, JSONSchema | list[str]]]
    # String constraints
    pattern: NotRequired[str]
    minLength: NotRequired[int]
    maxLength: NotRequired[int]
    # Numeric constraints
    minimum: NotRequired[int | float]
    maximum: NotRequired[int | float]
    exclusiveMinimum: NotRequired[int | float]
    exclusiveMaximum: NotRequired[int | float]
    multipleOf: NotRequired[int | float]
    # Array constraints
    uniqueItems: NotRequired[bool]
    minItems: NotRequired[int]
    maxItems: NotRequired[int]
    additionalItems: NotRequired[bool | JSONSchema]
def json_schema_to_type(
    schema: Mapping[str, Any],
    name: str | None = None,
) -> type:
    """Convert JSON schema to appropriate Python type with validation.

    Args:
        schema: A JSON Schema dictionary defining the type structure and validation rules
        name: Optional name for object schemas. Only allowed when schema type is "object".
            If not provided for objects, name will be inferred from schema's "title"
            property or default to "Root".

    Returns:
        A Python type (typically a dataclass for objects) with Pydantic validation

    Raises:
        ValueError: If a name is provided for a non-object schema

    Examples:
        Create a dataclass from an object schema:
        ```python
        schema = {
            "type": "object",
            "title": "Person",
            "properties": {
                "name": {"type": "string", "minLength": 1},
                "age": {"type": "integer", "minimum": 0},
                "email": {"type": "string", "format": "email"}
            },
            "required": ["name", "age"]
        }

        Person = json_schema_to_type(schema)
        # Creates a dataclass with name, age, and optional email fields
        ```

        Create a scalar type with constraints:
        ```python
        schema = {
            "type": "string",
            "minLength": 3,
            "pattern": "^[A-Z][a-z]+$"
        }

        NameType = json_schema_to_type(schema)
        # Creates Annotated[str, StringConstraints(min_length=3, pattern="^[A-Z][a-z]+$")]
        ```
    """
    # Always use the top-level schema for references
    if schema.get("type") == "object":
        # If no properties defined but has additionalProperties, return typed dict
        if not schema.get("properties") and schema.get("additionalProperties"):
            additional_props = schema["additionalProperties"]
            if additional_props is True:
                return dict[str, Any]
            else:
                # Handle typed dictionaries like dict[str, str]
                value_type = _schema_to_type(additional_props, schemas=schema)
                # value_type might be ForwardRef or type - cast to Any for dynamic type construction
                return cast(type[Any], dict[str, value_type])  # type: ignore[valid-type]
        # If no properties and no additionalProperties, default to dict[str, Any] for safety
        elif not schema.get("properties") and not schema.get("additionalProperties"):
            return dict[str, Any]
        # If has properties AND additionalProperties is True, use Pydantic BaseModel
        # (extra="allow" keeps unknown keys); otherwise use a fast dataclass.
        elif schema.get("properties") and schema.get("additionalProperties") is True:
            return _create_pydantic_model(schema, name, schemas=schema)
        # Otherwise use fast dataclass
        return _create_dataclass(schema, name, schemas=schema)
    elif name:
        raise ValueError(f"Can not apply name to non-object schema: {name}")
    result = _schema_to_type(schema, schemas=schema)
    return result  # type: ignore[return-value]
def _hash_schema(schema: Mapping[str, Any]) -> str:
"""Generate a deterministic hash for schema caching."""
return hashlib.sha256(json.dumps(schema, sort_keys=True).encode()).hexdigest()
def _resolve_ref(ref: str, schemas: Mapping[str, Any]) -> Mapping[str, Any]:
"""Resolve JSON Schema reference to target schema."""
path = ref.replace("#/", "").split("/")
current = schemas
for part in path:
current = current.get(part, {})
return current
def _create_string_type(schema: Mapping[str, Any]) -> type | Annotated[Any, ...]:
    """Create string type with optional constraints.

    Returns a Literal for const schemas, a format-specific type when the
    "format" keyword is present, otherwise str (annotated with
    StringConstraints when length/pattern constraints exist).
    """
    if "const" in schema:
        return Literal[schema["const"]]  # type: ignore
    if fmt := schema.get("format"):
        if fmt == "uri":
            return AnyUrl
        elif fmt == "uri-reference":
            # Plain str — presumably because relative references would fail
            # AnyUrl validation; confirm.
            return str
        # NOTE(review): when a "format" is present, the length/pattern
        # constraints below are never applied — looks intentional, but confirm.
        return FORMAT_TYPES.get(fmt, str)
    constraints = {
        k: v
        for k, v in {
            "min_length": schema.get("minLength"),
            "max_length": schema.get("maxLength"),
            "pattern": schema.get("pattern"),
        }.items()
        if v is not None
    }
    return Annotated[str, StringConstraints(**constraints)] if constraints else str
def _create_numeric_type(
    base: type[int | float], schema: Mapping[str, Any]
) -> type | Annotated[Any, ...]:
    """Create numeric type with optional constraints.

    Maps JSON Schema numeric keywords onto Pydantic Field constraints:
    exclusiveMinimum/Maximum -> gt/lt, minimum/maximum -> ge/le,
    multipleOf -> multiple_of.
    """
    if "const" in schema:
        return Literal[schema["const"]]  # type: ignore
    constraints = {
        k: v
        for k, v in {
            "gt": schema.get("exclusiveMinimum"),
            "ge": schema.get("minimum"),
            "lt": schema.get("exclusiveMaximum"),
            "le": schema.get("maximum"),
            "multiple_of": schema.get("multipleOf"),
        }.items()
        if v is not None
    }
    return Annotated[base, Field(**constraints)] if constraints else base  # type: ignore[return-value]
def _create_enum(name: str, values: list[Any]) -> type:
"""Create enum type from list of values."""
# Always return Literal for enum fields to preserve the literal nature
return Literal[tuple(values)] # type: ignore[return-value]
def _create_array_type(
    schema: Mapping[str, Any], schemas: Mapping[str, Any]
) -> type | Annotated[Any, ...]:
    """Create list/set type with optional constraints.

    uniqueItems maps to set; a list-valued "items" (positional schemas) is
    approximated as a list of the union of the item types.
    """
    items = schema.get("items", {})
    if isinstance(items, list):
        # Handle positional item schemas
        item_types = [_schema_to_type(s, schemas) for s in items]
        combined = Union[tuple(item_types)]  # noqa: UP007
        base = list[combined]  # type: ignore[valid-type]
    else:
        # Handle single item schema
        item_type = _schema_to_type(items, schemas)
        base_class = set if schema.get("uniqueItems") else list
        base = base_class[item_type]
    # minItems/maxItems become Pydantic length constraints.
    constraints = {
        k: v
        for k, v in {
            "min_length": schema.get("minItems"),
            "max_length": schema.get("maxItems"),
        }.items()
        if v is not None
    }
    return Annotated[base, Field(**constraints)] if constraints else base  # type: ignore[return-value]
def _return_Any() -> Any:
return Any
def _get_from_type_handler(
    schema: Mapping[str, Any], schemas: Mapping[str, Any]
) -> Callable[..., Any]:
    """Get the appropriate type handler for the schema.

    Returns a callable mapping the schema dict to a Python type; unknown or
    missing "type" values fall back to _return_Any.
    """
    type_handlers: dict[str, Callable[..., Any]] = {  # TODO
        "string": lambda s: _create_string_type(s),
        "integer": lambda s: _create_numeric_type(int, s),
        "number": lambda s: _create_numeric_type(float, s),
        "boolean": lambda _: bool,
        "null": lambda _: type(None),
        "array": lambda s: _create_array_type(s, schemas),
        "object": lambda s: (
            # Objects with open additionalProperties become Pydantic models;
            # everything else becomes a dataclass.
            _create_pydantic_model(s, s.get("title"), schemas)
            if s.get("properties") and s.get("additionalProperties") is True
            else _create_dataclass(s, s.get("title"), schemas)
        ),
    }
    # NOTE(review): a list-valued "type" would be unhashable here; it looks
    # like _schema_to_type splits list types before dispatching — confirm.
    return type_handlers.get(schema.get("type", None), _return_Any)
def _schema_to_type(
    schema: Mapping[str, Any],
    schemas: Mapping[str, Any],
) -> type | ForwardRef:
    """Convert schema to appropriate Python type.

    Args:
        schema: The (sub)schema currently being converted.
        schemas: The top-level schema document, used to resolve $ref paths.

    Returns:
        A Python type, typing construct, or ForwardRef for self-references.
    """
    # An empty schema places no constraints: any value is acceptable.
    if not schema:
        return object
    # Object-like schema with properties but no explicit "type" keyword.
    if "type" not in schema and "properties" in schema:
        return _create_dataclass(schema, schema.get("title", "<unknown>"), schemas)
    # Handle references first
    if "$ref" in schema:
        ref = schema["$ref"]
        # Handle self-reference
        if ref == "#":
            return ForwardRef(schema.get("title", "Root"))
        return _schema_to_type(_resolve_ref(ref, schemas), schemas)
    if "const" in schema:
        return Literal[schema["const"]]  # type: ignore
    if "enum" in schema:
        return _create_enum(f"Enum_{len(_classes)}", schema["enum"])
    # Handle anyOf unions
    if "anyOf" in schema:
        types: list[type | Any] = []
        for subschema in schema["anyOf"]:
            # Special handling for dict-like objects in unions
            if (
                subschema.get("type") == "object"
                and not subschema.get("properties")
                and subschema.get("additionalProperties")
            ):
                # This is a dict type, handle it directly
                additional_props = subschema["additionalProperties"]
                if additional_props is True:
                    types.append(dict[str, Any])
                else:
                    value_type = _schema_to_type(additional_props, schemas)
                    types.append(dict[str, value_type])  # type: ignore
            else:
                types.append(_schema_to_type(subschema, schemas))
        # Check if one of the types is None (null) so it can be re-attached
        # as an optional marker after deduplication.
        has_null = type(None) in types
        types = [t for t in types if t is not type(None)]
        if len(types) == 0:
            return type(None)
        elif len(types) == 1:
            if has_null:
                return types[0] | None  # type: ignore
            else:
                return types[0]
        else:
            if has_null:
                return Union[(*types, type(None))]  # type: ignore
            else:
                return Union[tuple(types)]  # type: ignore # noqa: UP007
    schema_type = schema.get("type")
    if not schema_type:
        return Any
    # A list of types, e.g. {"type": ["string", "null"]}: convert each
    # variant separately (keeping shared constraints) and union the results.
    if isinstance(schema_type, list):
        # Create a copy of the schema for each type, but keep all constraints
        types: list[type | Any] = []
        for t in schema_type:
            type_schema = dict(schema)
            type_schema["type"] = t
            types.append(_schema_to_type(type_schema, schemas))
        has_null = type(None) in types
        types = [t for t in types if t is not type(None)]
        if has_null:
            if len(types) == 1:
                return types[0] | None  # type: ignore
            else:
                return Union[(*types, type(None))]  # type: ignore
        return Union[tuple(types)]  # type: ignore # noqa: UP007
    # Single scalar/array/object type: dispatch to the matching handler.
    return _get_from_type_handler(schema, schemas)(schema)
def _sanitize_name(name: str) -> str:
"""Convert string to valid Python identifier."""
original_name = name
# Step 1: replace everything except [0-9a-zA-Z_] with underscores
cleaned = re.sub(r"[^0-9a-zA-Z_]", "_", name)
# Step 2: deduplicate underscores
cleaned = re.sub(r"__+", "_", cleaned)
# Step 3: if the first char of original name isn't a letter or underscore, prepend field_
if not name or not re.match(r"[a-zA-Z_]", name[0]):
cleaned = f"field_{cleaned}"
# Step 4: deduplicate again
cleaned = re.sub(r"__+", "_", cleaned)
# Step 5: only strip trailing underscores if they weren't in the original name
if not original_name.endswith("_"):
cleaned = cleaned.rstrip("_")
return cleaned
def _get_default_value(
schema: dict[str, Any],
prop_name: str,
parent_default: dict[str, Any] | None = None,
) -> Any:
"""Get default value with proper priority ordering.
1. Value from parent's default if it exists
2. Property's own default if it exists
3. None
"""
if parent_default is not None and prop_name in parent_default:
return parent_default[prop_name]
return schema.get("default")
def _create_field_with_default(
field_type: type,
default_value: Any,
schema: dict[str, Any],
) -> Any:
"""Create a field with simplified default handling."""
# Always use None as default for complex types
if isinstance(default_value, dict | list) or default_value is None:
return field(default=None)
# For simple types, use the value directly
return field(default=default_value)
def _create_pydantic_model(
    schema: Mapping[str, Any],
    name: str | None = None,
    schemas: Mapping[str, Any] | None = None,
) -> type:
    """Create Pydantic BaseModel from object schema with additionalProperties.

    The model uses extra="allow" so unknown keys are retained. Results are
    cached by (schema hash, sanitized name); a None cache entry marks an
    in-progress build and resolves to a ForwardRef.
    """
    name = name or schema.get("title", "Root")
    if name is None:
        raise ValueError("Name is required")
    sanitized_name = _sanitize_name(name)
    schema_hash = _hash_schema(schema)
    cache_key = (schema_hash, sanitized_name)
    # Return existing class if already built
    if cache_key in _classes:
        existing = _classes[cache_key]
        if existing is None:
            return ForwardRef(sanitized_name)  # type: ignore[return-value]
        return existing
    # Place placeholder for recursive references
    _classes[cache_key] = None
    properties = schema.get("properties", {})
    required = schema.get("required", [])
    # Build field annotations and defaults
    annotations = {}
    defaults = {}
    for prop_name, prop_schema in properties.items():
        field_type = _schema_to_type(prop_schema, schemas or {})
        # Handle defaults
        default_value = prop_schema.get("default", MISSING)
        if default_value is not MISSING:
            defaults[prop_name] = default_value
            annotations[prop_name] = field_type
        elif prop_name in required:
            annotations[prop_name] = field_type
        else:
            # Optional without default: widen to nullable and default to None.
            annotations[prop_name] = Union[field_type, type(None)]  # type: ignore[misc] # noqa: UP007
            defaults[prop_name] = None
    # Create Pydantic model class
    cls_dict = {
        "__annotations__": annotations,
        "model_config": ConfigDict(extra="allow"),
        **defaults,
    }
    cls = type(sanitized_name, (BaseModel,), cls_dict)
    # Store completed class
    _classes[cache_key] = cls
    return cls
def _create_dataclass(
    schema: Mapping[str, Any],
    name: str | None = None,
    schemas: Mapping[str, Any] | None = None,
) -> type:
    """Create dataclass from object schema.

    Property names are sanitized into valid identifiers; the original name is
    kept in the field metadata as an alias. A "before" model validator merges
    schema defaults into incoming data. Results are cached by (schema hash,
    sanitized name); a None cache entry marks an in-progress build and
    resolves to a ForwardRef.
    """
    name = name or schema.get("title", "Root")
    # Sanitize name for class creation
    if name is None:
        raise ValueError("Name is required")
    sanitized_name = _sanitize_name(name)
    schema_hash = _hash_schema(schema)
    cache_key = (schema_hash, sanitized_name)
    original_schema = dict(schema)  # Store copy for validator
    # Return existing class if already built
    if cache_key in _classes:
        existing = _classes[cache_key]
        if existing is None:
            return ForwardRef(sanitized_name)  # type: ignore[return-value]
        return existing
    # Place placeholder for recursive references
    _classes[cache_key] = None
    if "$ref" in schema:
        ref = schema["$ref"]
        if ref == "#":
            return ForwardRef(sanitized_name)  # type: ignore[return-value]
        schema = _resolve_ref(ref, schemas or {})
    properties = schema.get("properties", {})
    required = schema.get("required", [])
    fields: list[tuple[Any, ...]] = []
    for prop_name, prop_schema in properties.items():
        field_name = _sanitize_name(prop_name)
        # Check for self-reference in property
        if prop_schema.get("$ref") == "#":
            field_type = ForwardRef(sanitized_name)
        else:
            field_type = _schema_to_type(prop_schema, schemas or {})
        default_val = prop_schema.get("default", MISSING)
        is_required = prop_name in required
        # Include alias in field metadata
        meta = {"alias": prop_name}
        if default_val is not MISSING:
            if isinstance(default_val, dict | list):
                # Mutable defaults go through a factory; deep-copied so
                # instances never share the schema's default object.
                field_def = field(
                    default_factory=lambda d=default_val: deepcopy(d), metadata=meta
                )
            else:
                field_def = field(default=default_val, metadata=meta)
        else:
            if is_required:
                field_def = field(metadata=meta)
            else:
                field_def = field(default=None, metadata=meta)
        if is_required or default_val is not MISSING:
            fields.append((field_name, field_type, field_def))
        else:
            # Optional without default: widen the annotation to nullable.
            fields.append((field_name, Union[field_type, type(None)], field_def))  # type: ignore[misc] # noqa: UP007
    cls = make_dataclass(sanitized_name, fields, kw_only=True)

    # Add model validator for defaults
    @model_validator(mode="before")
    @classmethod
    def _apply_defaults(cls, data: Mapping[str, Any]):
        if isinstance(data, dict):
            return _merge_defaults(data, original_schema)
        return data

    cls._apply_defaults = _apply_defaults  # type: ignore[attr-defined]
    # Store completed class
    _classes[cache_key] = cls
    return cls
def _merge_defaults(
    data: Mapping[str, Any],
    schema: Mapping[str, Any],
    parent_default: Mapping[str, Any] | None = None,
) -> dict[str, Any]:
    """Merge defaults with provided data at all levels.

    Args:
        data: The incoming (possibly partial) data for this object.
        parent_default: Default dict supplied by the enclosing object, which
            takes priority over this schema's own "default".

    Returns:
        A new dict with provided values overlaid on applicable defaults.
    """
    # If we have no data
    if not data:
        # Start with parent default if available
        if parent_default:
            result = dict(parent_default)
        # Otherwise use schema default if available
        # NOTE(review): dict(schema["default"]) assumes the default is a
        # mapping — confirm callers only pass object schemas here.
        elif "default" in schema:
            result = dict(schema["default"])
        # Otherwise start empty
        else:
            result = {}
    # If we have data and a parent default, merge them
    elif parent_default:
        result = dict(parent_default)
        for key, value in data.items():
            if (
                isinstance(value, dict)
                and key in result
                and isinstance(result[key], dict)
            ):
                # recursively merge nested dicts (empty schema: pure overlay)
                result[key] = _merge_defaults(value, {"properties": {}}, result[key])
            else:
                result[key] = value
    # Otherwise just use the data
    else:
        result = dict(data)
    # For each property in the schema
    for prop_name, prop_schema in schema.get("properties", {}).items():
        # If property is missing, apply defaults in priority order
        if prop_name not in result:
            if parent_default and prop_name in parent_default:
                result[prop_name] = parent_default[prop_name]
            elif "default" in prop_schema:
                result[prop_name] = prop_schema["default"]
        # If property exists and is an object, recursively merge
        if (
            prop_name in result
            and isinstance(result[prop_name], dict)
            and prop_schema.get("type") == "object"
        ):
            # Get the appropriate default for this nested object
            nested_default = None
            if parent_default and prop_name in parent_default:
                nested_default = parent_default[prop_name]
            elif "default" in prop_schema:
                nested_default = prop_schema["default"]
            result[prop_name] = _merge_defaults(
                result[prop_name], prop_schema, nested_default
            )
    return result
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/json_schema_type.py",
"license": "Apache License 2.0",
"lines": 551,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/client/test_elicitation.py | from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Literal, cast
import pytest
from mcp.types import ElicitRequestFormParams, ElicitRequestParams
from pydantic import BaseModel
from typing_extensions import TypedDict
from fastmcp import Context, FastMCP
from fastmcp.client.client import Client
from fastmcp.client.elicitation import ElicitResult
from fastmcp.exceptions import ToolError
from fastmcp.server.elicitation import (
AcceptedElicitation,
CancelledElicitation,
DeclinedElicitation,
validate_elicitation_json_schema,
)
from fastmcp.utilities.types import TypeAdapter
@pytest.fixture
def fastmcp_server():
    """Server fixture exposing one eliciting tool and one trivial tool."""
    server = FastMCP("TestServer")

    @dataclass
    class Person:
        name: str

    @server.tool
    async def ask_for_name(context: Context) -> str:
        """Elicit a Person from the client and greet them."""
        result = await context.elicit(
            message="What is your name?",
            response_type=Person,
        )
        # Guard: anything other than an accept falls through to the default reply.
        if result.action != "accept":
            return "No name provided."
        assert isinstance(result, AcceptedElicitation)
        assert isinstance(result.data, Person)
        return f"Hello, {result.data.name}!"

    @server.tool
    def simple_test() -> str:
        """Tool that needs no elicitation at all."""
        return "Hello!"

    return server
async def test_elicitation_with_no_handler(fastmcp_server):
    """Test that a tool using elicitation fails cleanly when the client has no handler."""
    async with Client(fastmcp_server) as client:
        # Without an elicitation_handler the server-side elicit() call cannot be
        # serviced, so the tool call surfaces a ToolError.
        with pytest.raises(ToolError, match="Elicitation not supported"):
            await client.call_tool("ask_for_name")
async def test_elicitation_accept_content(fastmcp_server):
    """An accepting handler supplies content that the tool then uses."""

    async def handle_elicit(message, response_type, params, ctx):
        # Simulate the user filling in their name.
        return ElicitResult(action="accept", content=response_type(name="Alice"))

    client = Client(fastmcp_server, elicitation_handler=handle_elicit)
    async with client:
        result = await client.call_tool("ask_for_name")
        assert result.data == "Hello, Alice!"
async def test_elicitation_decline(fastmcp_server):
    """Test that a declined elicitation takes the tool's fallback path."""
    async def elicitation_handler(message, response_type, params, ctx):
        # Simulate the user declining to answer.
        return ElicitResult(action="decline")
    async with Client(
        fastmcp_server, elicitation_handler=elicitation_handler
    ) as client:
        result = await client.call_tool("ask_for_name")
        assert result.data == "No name provided."
async def test_elicitation_handler_parameters():
    """The handler must receive the message, wrapped type, params, and context."""
    mcp = FastMCP("TestServer")
    seen: dict[str, Any] = {}

    @mcp.tool
    async def test_tool(context: Context) -> str:
        await context.elicit(
            message="Test message",
            response_type=int,
        )
        return "done"

    async def recording_handler(message, response_type, params, ctx):
        # Record everything the client passed us for inspection afterwards.
        seen.update(
            message=message,
            response_type=str(response_type),
            params=params,
            ctx=ctx,
        )
        return ElicitResult(action="accept", content={"value": 42})

    async with Client(mcp, elicitation_handler=recording_handler) as client:
        await client.call_tool("test_tool", {})

    assert seen["message"] == "Test message"
    # Scalar response types are wrapped in a generated object schema.
    assert "ScalarElicitationType" in seen["response_type"]
    assert seen["params"].requestedSchema == {
        "properties": {"value": {"title": "Value", "type": "integer"}},
        "required": ["value"],
        "title": "ScalarElicitationType",
        "type": "object",
    }
    assert seen["ctx"] is not None
async def test_elicitation_cancel_action():
    """A cancelled elicitation is reported to the tool as action='cancel'."""
    mcp = FastMCP("TestServer")

    @mcp.tool
    async def ask_for_optional_info(context: Context) -> str:
        result = await context.elicit(
            message="Optional: What's your age?", response_type=int
        )
        # Early-return per action instead of an if/elif ladder.
        if result.action == "cancel":
            return "Request was canceled"
        if result.action == "accept":
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, int)
            return f"Age: {result.data}"
        return "No response provided"

    async def always_cancel(message, response_type, params, ctx):
        return ElicitResult(action="cancel")

    async with Client(mcp, elicitation_handler=always_cancel) as client:
        outcome = await client.call_tool("ask_for_optional_info", {})
        assert outcome.data == "Request was canceled"
class TestScalarResponseTypes:
    """Elicitation round-trips for None/scalar/literal/enum response types.

    Scalars are wrapped server-side in a generated single-field object schema
    (``ScalarElicitationType`` with a ``value`` property) and unwrapped before
    the tool sees the result.
    """
    async def test_elicitation_no_response(self):
        """response_type=None sends an empty object schema to the client."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> dict[str, Any]:
            result = await context.elicit(message="", response_type=None)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, dict)
            return cast(dict[str, Any], result.data)
        async def elicitation_handler(
            message, response_type, params: ElicitRequestParams, ctx
        ):
            # No response requested: schema is an empty object and no wrapper
            # type is provided to the handler.
            assert isinstance(params, ElicitRequestFormParams)
            assert params.requestedSchema == {"type": "object", "properties": {}}
            assert response_type is None
            return ElicitResult(action="accept")
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            # The tool returns an empty dict, which deserializes to None here.
            assert result.data is None
    async def test_elicitation_empty_response(self):
        """An explicitly empty dict is accepted when no response type is requested."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> dict[str, Any]:
            result = await context.elicit(message="", response_type=None)
            assert result.action == "accept"
            assert isinstance(result, AcceptedElicitation)
            accepted = cast(AcceptedElicitation[dict[str, Any]], result)
            assert isinstance(accepted.data, dict)
            return accepted.data
        async def elicitation_handler(
            message, response_type, params: ElicitRequestParams, ctx
        ):
            return ElicitResult(action="accept", content={})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data is None
    async def test_elicitation_response_when_no_response_requested(self):
        """Returning content when none was requested raises a ToolError."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> dict[str, Any]:
            result = await context.elicit(message="", response_type=None)
            assert result.action == "accept"
            assert isinstance(result, AcceptedElicitation)
            accepted = cast(AcceptedElicitation[dict[str, Any]], result)
            assert isinstance(accepted.data, dict)
            return accepted.data
        async def elicitation_handler(message, response_type, params, ctx):
            # Misbehaving handler: sends content despite the empty schema.
            return ElicitResult(action="accept", content={"value": "hello"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            with pytest.raises(
                ToolError, match="Elicitation expected an empty response"
            ):
                await client.call_tool("my_tool", {})
    async def test_elicitation_str_response(self):
        """Test elicitation with a string schema."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> str:
            result = await context.elicit(message="", response_type=str)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, str)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": "hello"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data == "hello"
    async def test_elicitation_int_response(self):
        """Test elicitation with an integer schema."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> int:
            result = await context.elicit(message="", response_type=int)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, int)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": 42})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data == 42
    async def test_elicitation_float_response(self):
        """Test elicitation with a number schema."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> float:
            result = await context.elicit(message="", response_type=float)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, float)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": 3.14})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            # Exact comparison is safe: the same literal round-trips unchanged.
            assert result.data == 3.14
    async def test_elicitation_bool_response(self):
        """Test elicitation with a boolean schema."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> bool:
            result = await context.elicit(message="", response_type=bool)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, bool)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": True})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data is True
    async def test_elicitation_literal_response(self):
        """Test elicitation with a Literal (enum-of-constants) schema."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> Literal["x", "y"]:
            # Literal types work at runtime but type checker doesn't recognize them in overloads
            result = await context.elicit(message="", response_type=Literal["x", "y"])  # type: ignore[arg-type]
            assert isinstance(result, AcceptedElicitation)
            accepted = cast(AcceptedElicitation[Literal["x", "y"]], result)
            assert isinstance(accepted.data, str)
            return accepted.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": "x"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data == "x"
    async def test_elicitation_enum_response(self):
        """Test elicitation with an Enum schema; values round-trip as strings."""
        mcp = FastMCP("TestServer")
        class ResponseEnum(Enum):
            X = "x"
            Y = "y"
        @mcp.tool
        async def my_tool(context: Context) -> ResponseEnum:
            result = await context.elicit(message="", response_type=ResponseEnum)
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, ResponseEnum)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": "x"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            # The enum is serialized to its value on the way back to the client.
            assert result.data == "x"
    async def test_elicitation_list_of_strings_response(self):
        """Test elicitation with a list of allowed string values (enum-style)."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def my_tool(context: Context) -> str:
            result = await context.elicit(message="", response_type=["x", "y"])
            assert isinstance(result, AcceptedElicitation)
            assert isinstance(result.data, str)
            return result.data
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": "x"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("my_tool", {})
            assert result.data == "x"
async def test_elicitation_handler_error():
    """An exception raised by the handler propagates into the tool's elicit() call."""
    mcp = FastMCP("TestServer")

    @mcp.tool
    async def failing_elicit(context: Context) -> str:
        try:
            result = await context.elicit(message="This will fail", response_type=str)
            assert isinstance(result, AcceptedElicitation)
            assert result.action == "accept"
            return f"Got: {result.data}"
        except Exception as e:
            # The tool converts the propagated handler error into its reply.
            return f"Error: {str(e)}"

    async def broken_handler(message, response_type, params, ctx):
        raise ValueError("Handler failed!")

    async with Client(mcp, elicitation_handler=broken_handler) as client:
        outcome = await client.call_tool("failing_elicit", {})
        assert "Error:" in outcome.data
async def test_elicitation_multiple_calls():
    """Two sequential elicitations inside one tool call both reach the handler."""
    mcp = FastMCP("TestServer")

    @mcp.tool
    async def multi_step_form(context: Context) -> str:
        # First question
        name_result = await context.elicit(
            message="What's your name?", response_type=str
        )
        assert isinstance(name_result, AcceptedElicitation)
        if name_result.action != "accept":
            return "Form abandoned"
        # Second question
        age_result = await context.elicit(message="What's your age?", response_type=int)
        assert isinstance(age_result, AcceptedElicitation)
        if age_result.action != "accept":
            return f"Hello {name_result.data}, form incomplete"
        return f"Hello {name_result.data}, you are {age_result.data} years old"

    call_count = 0
    # Scripted answers keyed by call number, replacing an if/elif chain.
    answers = {1: {"value": "Bob"}, 2: {"value": 25}}

    async def scripted_handler(message, response_type, params, ctx):
        nonlocal call_count
        call_count += 1
        if call_count not in answers:
            raise ValueError("Unexpected call")
        return ElicitResult(action="accept", content=answers[call_count])

    async with Client(mcp, elicitation_handler=scripted_handler) as client:
        result = await client.call_tool("multi_step_form", {})
        assert result.data == "Hello Bob, you are 25 years old"
        assert call_count == 2
@dataclass
class UserInfo:
    """Dataclass flavor of the structured elicitation payload."""
    name: str
    age: int
class UserInfoTypedDict(TypedDict):
    """TypedDict flavor of the same payload."""
    name: str
    age: int
class UserInfoPydantic(BaseModel):
    """Pydantic-model flavor of the same payload."""
    name: str
    age: int
@pytest.mark.parametrize(
    "structured_type", [UserInfo, UserInfoTypedDict, UserInfoPydantic]
)
async def test_structured_response_type(
    structured_type: type[UserInfo | UserInfoTypedDict | UserInfoPydantic],
):
    """Test elicitation with dataclass, TypedDict, and Pydantic response types."""
    mcp = FastMCP("TestServer")
    @mcp.tool
    async def get_user_info(context: Context) -> str:
        result = await context.elicit(
            message="Please provide your information", response_type=structured_type
        )
        assert isinstance(result, AcceptedElicitation)
        if result.action == "accept":
            assert isinstance(result, AcceptedElicitation)
            # TypedDict responses arrive as plain dicts; the other two arrive
            # as attribute-bearing instances.
            if isinstance(result.data, dict):
                data_dict = cast(dict[str, Any], result.data)
                name = data_dict.get("name")
                age = data_dict.get("age")
                assert name is not None
                assert age is not None
                return f"User: {name}, age: {age}"
            else:
                # result.data is a structured type (UserInfo, UserInfoTypedDict, or UserInfoPydantic)
                assert hasattr(result.data, "name")
                assert hasattr(result.data, "age")
                return f"User: {result.data.name}, age: {result.data.age}"
        return "No user info provided"
    async def elicitation_handler(message, response_type, params, ctx):
        # All three flavors must produce the same JSON schema.
        assert (
            TypeAdapter(response_type).json_schema()
            == TypeAdapter(structured_type).json_schema()
        )
        # Verify the schema has the dataclass fields (available in params)
        schema = params.requestedSchema
        assert schema["type"] == "object"
        assert "name" in schema["properties"]
        assert "age" in schema["properties"]
        assert schema["properties"]["name"]["type"] == "string"
        assert schema["properties"]["age"]["type"] == "integer"
        return ElicitResult(action="accept", content=UserInfo(name="Alice", age=30))
    async with Client(mcp, elicitation_handler=elicitation_handler) as client:
        result = await client.call_tool("get_user_info", {})
        assert result.data == "User: Alice, age: 30"
async def test_all_primitive_field_types():
    """Round-trip every primitive field type elicitation supports in one payload."""
    class DataEnum(Enum):
        X = "x"
        Y = "y"
    @dataclass
    class Data:
        # One field per supported primitive schema shape.
        integer: int
        float_: float
        number: int | float
        boolean: bool
        string: str
        constant: Literal["x"]
        union: Literal["x"] | Literal["y"]
        choice: Literal["x", "y"]
        enum: DataEnum
    mcp = FastMCP("TestServer")
    @mcp.tool
    async def get_data(context: Context) -> Data:
        result = await context.elicit(message="Enter data", response_type=Data)
        assert isinstance(result, AcceptedElicitation)
        assert isinstance(result.data, Data)
        return result.data
    async def elicitation_handler(message, response_type, params, ctx):
        return ElicitResult(
            action="accept",
            content=Data(
                integer=1,
                float_=1.0,
                number=1.0,
                boolean=True,
                string="hello",
                constant="x",
                union="x",
                choice="x",
                enum=DataEnum.X,
            ),
        )
    async with Client(mcp, elicitation_handler=elicitation_handler) as client:
        result = await client.call_tool("get_data", {})
        # Now all literal/enum fields should be preserved as strings
        result_data = asdict(result.data)
        result_data_enum = result_data.pop("enum")
        assert result_data_enum == "x"  # Should be a string now, not an enum
        assert result_data == {
            "integer": 1,
            "float_": 1.0,
            "number": 1.0,
            "boolean": True,
            "string": "hello",
            "constant": "x",
            "union": "x",
            "choice": "x",
        }
class TestValidation:
    """validate_elicitation_json_schema rejects schemas elicitation cannot support."""

    async def test_schema_validation_rejects_non_object(self):
        """The top-level schema must be of type 'object'."""
        schema = {"type": "string"}
        with pytest.raises(TypeError, match="must be an object schema"):
            validate_elicitation_json_schema(schema)

    async def test_schema_validation_rejects_nested_objects(self):
        """Properties may not themselves be object schemas."""
        schema = {
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": {"name": {"type": "string"}},
                }
            },
        }
        with pytest.raises(
            TypeError, match="is an object, but nested objects are not allowed"
        ):
            validate_elicitation_json_schema(schema)

    async def test_schema_validation_rejects_arrays(self):
        """Array-typed properties are rejected unless they encode an enum."""
        schema = {
            "type": "object",
            "properties": {"users": {"type": "array", "items": {"type": "string"}}},
        }
        with pytest.raises(TypeError, match="is an array, but arrays are only allowed"):
            validate_elicitation_json_schema(schema)
class TestPatternMatching:
    """The elicitation result classes support structural pattern matching.

    These tests deliberately use ``match``/``case`` — that ergonomics is the
    behavior under test, so the match statements must not be rewritten.
    """
    async def test_pattern_matching_accept(self):
        """Test pattern matching with AcceptedElicitation."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def pattern_match_tool(context: Context) -> str:
            result = await context.elicit("Enter your name:", response_type=str)
            match result:
                # AcceptedElicitation destructures its payload via data=.
                case AcceptedElicitation(data=name):
                    return f"Hello {name}!"
                case DeclinedElicitation():
                    return "You declined"
                case CancelledElicitation():
                    return "Cancelled"
                case _:
                    return "Unknown result"
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="accept", content={"value": "Alice"})
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("pattern_match_tool", {})
            assert result.data == "Hello Alice!"
    async def test_pattern_matching_decline(self):
        """Test pattern matching with DeclinedElicitation."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def pattern_match_tool(context: Context) -> str:
            result = await context.elicit("Enter your name:", response_type=str)
            match result:
                case AcceptedElicitation(data=name):
                    return f"Hello {name}!"
                case DeclinedElicitation():
                    return "You declined"
                case CancelledElicitation():
                    return "Cancelled"
                case _:
                    return "Unknown result"
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="decline")
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("pattern_match_tool", {})
            assert result.data == "You declined"
    async def test_pattern_matching_cancel(self):
        """Test pattern matching with CancelledElicitation."""
        mcp = FastMCP("TestServer")
        @mcp.tool
        async def pattern_match_tool(context: Context) -> str:
            result = await context.elicit("Enter your name:", response_type=str)
            match result:
                case AcceptedElicitation(data=name):
                    return f"Hello {name}!"
                case DeclinedElicitation():
                    return "You declined"
                case CancelledElicitation():
                    return "Cancelled"
                case _:
                    return "Unknown result"
        async def elicitation_handler(message, response_type, params, ctx):
            return ElicitResult(action="cancel")
        async with Client(mcp, elicitation_handler=elicitation_handler) as client:
            result = await client.call_tool("pattern_match_tool", {})
            assert result.data == "Cancelled"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_elicitation.py",
"license": "Apache License 2.0",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/contrib/test_component_manager.py | import pytest
from starlette import status
from starlette.testclient import TestClient
from fastmcp import FastMCP
from fastmcp.contrib.component_manager import set_up_component_manager
from fastmcp.server.auth.providers.jwt import JWTVerifier, RSAKeyPair
class TestComponentManagementRoutes:
    """Test the component management routes for tools, resources, and prompts."""
    @pytest.fixture
    def mcp(self) -> FastMCP:
        """Create a FastMCP server with test tools, resources, and prompts."""
        mcp = FastMCP("TestServer")
        # Mounts the enable/disable HTTP routes under /tools, /resources, /prompts.
        set_up_component_manager(server=mcp)
        # Add a test tool
        @mcp.tool
        def test_tool() -> str:
            """Test tool for tool management routes."""
            return "test_tool_result"
        # Add a test resource
        @mcp.resource("data://test_resource")
        def test_resource() -> str:
            """Test resource for tool management routes."""
            return "test_resource_result"
        # Add a test resource template (parameterized URI)
        @mcp.resource("data://test_resource/{id}")
        def test_template(id: str) -> dict:
            """Test template for tool management routes."""
            return {"id": id, "value": "data"}
        # Add a test prompt
        @mcp.prompt
        def test_prompt() -> str:
            """Test prompt for tool management routes."""
            return "test_prompt_result"
        return mcp
    @pytest.fixture
    def client(self, mcp) -> TestClient:
        """Create a test client for the FastMCP server."""
        return TestClient(mcp.http_app())
    async def test_enable_tool_route(self, client, mcp):
        """Test enabling a tool via the HTTP route."""
        # First disable the tool
        mcp.disable(names={"test_tool"}, components={"tool"})
        tools = await mcp.list_tools()
        assert not any(t.name == "test_tool" for t in tools)
        # Enable the tool via the HTTP route
        response = client.post("/tools/test_tool/enable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Enabled tool: test_tool"}
        # Verify the tool is enabled
        tools = await mcp.list_tools()
        assert any(t.name == "test_tool" for t in tools)
    async def test_disable_tool_route(self, client, mcp):
        """Test disabling a tool via the HTTP route."""
        # First ensure the tool is enabled
        tools = await mcp.list_tools()
        assert any(t.name == "test_tool" for t in tools)
        # Disable the tool via the HTTP route
        response = client.post("/tools/test_tool/disable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Disabled tool: test_tool"}
        # Verify the tool is disabled
        tools = await mcp.list_tools()
        assert not any(t.name == "test_tool" for t in tools)
    async def test_enable_resource_route(self, client, mcp):
        """Test enabling a resource via the HTTP route."""
        # First disable the resource (can use URI as name for resources)
        mcp.disable(names={"data://test_resource"}, components={"resource"})
        resources = await mcp.list_resources()
        assert not any(str(r.uri) == "data://test_resource" for r in resources)
        # Enable the resource via the HTTP route.
        # NOTE(review): the resource URI is embedded in the path unencoded.
        response = client.post("/resources/data://test_resource/enable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Enabled resource: data://test_resource"}
        # Verify the resource is enabled
        resources = await mcp.list_resources()
        assert any(str(r.uri) == "data://test_resource" for r in resources)
    async def test_disable_resource_route(self, client, mcp):
        """Test disabling a resource via the HTTP route."""
        # First ensure the resource is enabled
        resources = await mcp.list_resources()
        assert any(str(r.uri) == "data://test_resource" for r in resources)
        # Disable the resource via the HTTP route
        response = client.post("/resources/data://test_resource/disable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Disabled resource: data://test_resource"}
        # Verify the resource is disabled
        resources = await mcp.list_resources()
        assert not any(str(r.uri) == "data://test_resource" for r in resources)
    async def test_enable_template_route(self, client, mcp):
        """Test enabling a resource template via the HTTP route."""
        key = "data://test_resource/{id}"
        mcp.disable(names={"data://test_resource/{id}"}, components={"template"})
        templates = await mcp.list_resource_templates()
        assert not any(t.uri_template == key for t in templates)
        # The literal "{id}" placeholder is part of the route path here.
        response = client.post("/resources/data://test_resource/{id}/enable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {
            "message": "Enabled resource: data://test_resource/{id}"
        }
        templates = await mcp.list_resource_templates()
        assert any(t.uri_template == key for t in templates)
    async def test_disable_template_route(self, client, mcp):
        """Test disabling a resource template via the HTTP route."""
        key = "data://test_resource/{id}"
        templates = await mcp.list_resource_templates()
        assert any(t.uri_template == key for t in templates)
        response = client.post("/resources/data://test_resource/{id}/disable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {
            "message": "Disabled resource: data://test_resource/{id}"
        }
        templates = await mcp.list_resource_templates()
        assert not any(t.uri_template == key for t in templates)
    async def test_enable_prompt_route(self, client, mcp):
        """Test enabling a prompt via the HTTP route."""
        # First disable the prompt
        mcp.disable(names={"test_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert not any(p.name == "test_prompt" for p in prompts)
        # Enable the prompt via the HTTP route
        response = client.post("/prompts/test_prompt/enable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Enabled prompt: test_prompt"}
        # Verify the prompt is enabled
        prompts = await mcp.list_prompts()
        assert any(p.name == "test_prompt" for p in prompts)
    async def test_disable_prompt_route(self, client, mcp):
        """Test disabling a prompt via the HTTP route."""
        # First ensure the prompt is enabled
        prompts = await mcp.list_prompts()
        assert any(p.name == "test_prompt" for p in prompts)
        # Disable the prompt via the HTTP route
        response = client.post("/prompts/test_prompt/disable")
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == {"message": "Disabled prompt: test_prompt"}
        # Verify the prompt is disabled
        prompts = await mcp.list_prompts()
        assert not any(p.name == "test_prompt" for p in prompts)
class TestAuthComponentManagementRoutes:
"""Test the component management routes with authentication for tools, resources, and prompts."""
def setup_method(self):
"""Set up test fixtures."""
# Generate a key pair and create an auth provider
key_pair = RSAKeyPair.generate()
self.auth = JWTVerifier(
public_key=key_pair.public_key,
issuer="https://dev.example.com",
audience="my-dev-server",
)
self.mcp = FastMCP("TestServerWithAuth", auth=self.auth)
set_up_component_manager(
server=self.mcp, required_scopes=["tool:write", "tool:read"]
)
self.token = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=["tool:write", "tool:read"],
)
self.token_without_scopes = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=["tool:read"],
)
# Add test components
@self.mcp.tool
def test_tool() -> str:
"""Test tool for auth testing."""
return "test_tool_result"
@self.mcp.resource("data://test_resource")
def test_resource() -> str:
"""Test resource for auth testing."""
return "test_resource_result"
@self.mcp.prompt
def test_prompt() -> str:
"""Test prompt for auth testing."""
return "test_prompt_result"
# Create test client
self.client = TestClient(self.mcp.http_app())
async def test_unauthorized_enable_tool(self):
"""Test that unauthenticated requests to enable a tool are rejected."""
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post("/tools/test_tool/enable")
assert response.status_code == 401
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
async def test_authorized_enable_tool(self):
"""Test that authenticated requests to enable a tool are allowed."""
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post(
"/tools/test_tool/enable", headers={"Authorization": "Bearer " + self.token}
)
assert response.status_code == 200
assert response.json() == {"message": "Enabled tool: test_tool"}
tools = await self.mcp.list_tools()
assert any(t.name == "test_tool" for t in tools)
async def test_unauthorized_disable_tool(self):
"""Test that unauthenticated requests to disable a tool are rejected."""
tools = await self.mcp.list_tools()
assert any(t.name == "test_tool" for t in tools)
response = self.client.post("/tools/test_tool/disable")
assert response.status_code == 401
tools = await self.mcp.list_tools()
assert any(t.name == "test_tool" for t in tools)
async def test_authorized_disable_tool(self):
"""Test that authenticated requests to disable a tool are allowed."""
tools = await self.mcp.list_tools()
assert any(t.name == "test_tool" for t in tools)
response = self.client.post(
"/tools/test_tool/disable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Disabled tool: test_tool"}
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
async def test_forbidden_enable_tool(self):
"""Test that requests with insufficient scopes are rejected."""
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post(
"/tools/test_tool/enable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
async def test_authorized_enable_resource(self):
"""Test that authenticated requests to enable a resource are allowed."""
self.mcp.disable(names={"data://test_resource"}, components={"resource"})
resources = await self.mcp.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post(
"/resources/data://test_resource/enable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Enabled resource: data://test_resource"}
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
async def test_unauthorized_disable_resource(self):
"""Test that unauthenticated requests to disable a resource are rejected."""
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post("/resources/data://test_resource/disable")
assert response.status_code == 401
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
async def test_forbidden_enable_resource(self):
"""Test that requests with insufficient scopes are rejected."""
self.mcp.disable(names={"data://test_resource"}, components={"resource"})
resources = await self.mcp.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post(
"/resources/data://test_resource/disable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
resources = await self.mcp.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
async def test_authorized_disable_resource(self):
"""Test that authenticated requests to disable a resource are allowed."""
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post(
"/resources/data://test_resource/disable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Disabled resource: data://test_resource"}
resources = await self.mcp.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
async def test_unauthorized_enable_prompt(self):
"""Test that unauthenticated requests to enable a prompt are rejected."""
self.mcp.disable(names={"test_prompt"}, components={"prompt"})
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = self.client.post("/prompts/test_prompt/enable")
assert response.status_code == 401
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
async def test_authorized_enable_prompt(self):
"""Test that authenticated requests to enable a prompt are allowed."""
self.mcp.disable(names={"test_prompt"}, components={"prompt"})
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = self.client.post(
"/prompts/test_prompt/enable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Enabled prompt: test_prompt"}
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
async def test_unauthorized_disable_prompt(self):
"""Test that unauthenticated requests to disable a prompt are rejected."""
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
response = self.client.post("/prompts/test_prompt/disable")
assert response.status_code == 401
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
async def test_forbidden_disable_prompt(self):
"""Test that requests with insufficient scopes are rejected."""
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
response = self.client.post(
"/prompts/test_prompt/disable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
async def test_authorized_disable_prompt(self):
"""Test that authenticated requests to disable a prompt are allowed."""
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
response = self.client.post(
"/prompts/test_prompt/disable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Disabled prompt: test_prompt"}
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
class TestComponentManagerWithPath:
"""Test component manager routes when mounted at a custom path."""
@pytest.fixture
def mcp_with_path(self):
mcp = FastMCP("TestServerWithPath")
set_up_component_manager(server=mcp, path="/test")
@mcp.tool
def test_tool() -> str:
return "test_tool_result"
@mcp.resource("data://test_resource")
def test_resource() -> str:
return "test_resource_result"
@mcp.prompt
def test_prompt() -> str:
return "test_prompt_result"
return mcp
@pytest.fixture
def client_with_path(self, mcp_with_path):
return TestClient(mcp_with_path.http_app())
async def test_enable_tool_route_with_path(self, client_with_path, mcp_with_path):
mcp_with_path.disable(names={"test_tool"}, components={"tool"})
tools = await mcp_with_path.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = client_with_path.post("/test/tools/test_tool/enable")
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"message": "Enabled tool: test_tool"}
tools = await mcp_with_path.list_tools()
assert any(t.name == "test_tool" for t in tools)
async def test_disable_resource_route_with_path(
self, client_with_path, mcp_with_path
):
resources = await mcp_with_path.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = client_with_path.post("/test/resources/data://test_resource/disable")
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"message": "Disabled resource: data://test_resource"}
resources = await mcp_with_path.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
async def test_enable_prompt_route_with_path(self, client_with_path, mcp_with_path):
mcp_with_path.disable(names={"test_prompt"}, components={"prompt"})
prompts = await mcp_with_path.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = client_with_path.post("/test/prompts/test_prompt/enable")
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"message": "Enabled prompt: test_prompt"}
prompts = await mcp_with_path.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
class TestComponentManagerWithPathAuth:
"""Test component manager routes with auth when mounted at a custom path."""
def setup_method(self):
# Generate a key pair and create an auth provider
key_pair = RSAKeyPair.generate()
self.auth = JWTVerifier(
public_key=key_pair.public_key,
issuer="https://dev.example.com",
audience="my-dev-server",
)
self.mcp = FastMCP("TestServerWithPathAuth", auth=self.auth)
set_up_component_manager(
server=self.mcp, path="/test", required_scopes=["tool:write", "tool:read"]
)
self.token = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=["tool:read", "tool:write"],
)
self.token_without_scopes = key_pair.create_token(
subject="dev-user",
issuer="https://dev.example.com",
audience="my-dev-server",
scopes=[],
)
@self.mcp.tool
def test_tool() -> str:
return "test_tool_result"
@self.mcp.resource("data://test_resource")
def test_resource() -> str:
return "test_resource_result"
@self.mcp.prompt
def test_prompt() -> str:
return "test_prompt_result"
self.client = TestClient(self.mcp.http_app())
async def test_unauthorized_enable_tool(self):
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post("/test/tools/test_tool/enable")
assert response.status_code == 401
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
async def test_forbidden_enable_tool(self):
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post(
"/test/tools/test_tool/enable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
async def test_authorized_enable_tool(self):
self.mcp.disable(names={"test_tool"}, components={"tool"})
tools = await self.mcp.list_tools()
assert not any(t.name == "test_tool" for t in tools)
response = self.client.post(
"/test/tools/test_tool/enable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Enabled tool: test_tool"}
tools = await self.mcp.list_tools()
assert any(t.name == "test_tool" for t in tools)
async def test_unauthorized_disable_resource(self):
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post("/test/resources/data://test_resource/disable")
assert response.status_code == 401
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
async def test_forbidden_disable_resource(self):
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post(
"/test/resources/data://test_resource/disable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
async def test_authorized_disable_resource(self):
resources = await self.mcp.list_resources()
assert any(str(r.uri) == "data://test_resource" for r in resources)
response = self.client.post(
"/test/resources/data://test_resource/disable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Disabled resource: data://test_resource"}
resources = await self.mcp.list_resources()
assert not any(str(r.uri) == "data://test_resource" for r in resources)
async def test_unauthorized_enable_prompt(self):
self.mcp.disable(names={"test_prompt"}, components={"prompt"})
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = self.client.post("/test/prompts/test_prompt/enable")
assert response.status_code == 401
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
async def test_forbidden_enable_prompt(self):
self.mcp.disable(names={"test_prompt"}, components={"prompt"})
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = self.client.post(
"/test/prompts/test_prompt/enable",
headers={"Authorization": "Bearer " + self.token_without_scopes},
)
assert response.status_code == 403
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
async def test_authorized_enable_prompt(self):
self.mcp.disable(names={"test_prompt"}, components={"prompt"})
prompts = await self.mcp.list_prompts()
assert not any(p.name == "test_prompt" for p in prompts)
response = self.client.post(
"/test/prompts/test_prompt/enable",
headers={"Authorization": "Bearer " + self.token},
)
assert response.status_code == 200
assert response.json() == {"message": "Enabled prompt: test_prompt"}
prompts = await self.mcp.list_prompts()
assert any(p.name == "test_prompt" for p in prompts)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/contrib/test_component_manager.py",
"license": "Apache License 2.0",
"lines": 504,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/atproto_mcp/demo.py | """Demo script showing all ATProto MCP server capabilities."""
import argparse
import asyncio
import json
from typing import cast
from atproto_mcp.server import atproto_mcp
from atproto_mcp.types import (
NotificationsResult,
PostResult,
ProfileInfo,
SearchResult,
TimelineResult,
)
from fastmcp import Client
async def main(enable_posting: bool = False):
print("π΅ ATProto MCP Server Demo\n")
async with Client(atproto_mcp) as client:
# 1. Check connection status (resource)
print("1. Checking connection status...")
result = await client.read_resource("atproto://profile/status")
status: ProfileInfo = (
json.loads(result[0].text) if result else cast(ProfileInfo, {})
)
if status.get("connected"):
print(f"β
Connected as: @{status['handle']}")
print(f" Followers: {status['followers']}")
print(f" Following: {status['following']}")
print(f" Posts: {status['posts']}")
else:
print(f"β Connection failed: {status.get('error')}")
return
# 2. Get timeline
print("\n2. Getting timeline...")
result = await client.read_resource("atproto://timeline")
timeline: TimelineResult = (
json.loads(result[0].text) if result else cast(TimelineResult, {})
)
if timeline.get("success") and timeline["posts"]:
print(f"β
Found {timeline['count']} posts")
post = timeline["posts"][0]
print(f" Latest by @{post['author']}: {post['text'][:80]}...")
save_uri = post["uri"] # Save for later interactions
else:
print("β No posts in timeline")
save_uri = None
# 3. Search for posts
print("\n3. Searching for posts about 'Bluesky'...")
result = await client.call_tool("search", {"query": "Bluesky", "limit": 5})
search: SearchResult = (
json.loads(result[0].text) if result else cast(SearchResult, {})
)
if search.get("success") and search["posts"]:
print(f"β
Found {search['count']} posts")
print(f" Sample: {search['posts'][0]['text'][:80]}...")
# 4. Get notifications
print("\n4. Checking notifications...")
result = await client.read_resource("atproto://notifications")
notifs: NotificationsResult = (
json.loads(result[0].text) if result else cast(NotificationsResult, {})
)
if notifs.get("success"):
print(f"β
You have {notifs['count']} notifications")
unread = sum(1 for n in notifs["notifications"] if not n["is_read"])
if unread:
print(f" ({unread} unread)")
# 5. Demo posting capabilities
if enable_posting:
print("\n5. Demonstrating posting capabilities...")
# a. Simple post
print("\n a) Creating a simple post...")
result = await client.call_tool(
"post",
{"text": "π§ͺ Testing the unified ATProto MCP post tool! #FastMCP"},
)
post_result: PostResult = json.loads(result[0].text) if result else {}
if post_result.get("success"):
print(" β
Posted successfully!")
simple_uri = post_result["uri"]
else:
print(f" β Failed: {post_result.get('error')}")
simple_uri = None
# b. Post with rich text (link and mention)
print("\n b) Creating a post with rich text...")
result = await client.call_tool(
"post",
{
"text": "Check out FastMCP and follow @alternatebuild.dev for updates!",
"links": [
{
"text": "FastMCP",
"url": "https://github.com/PrefectHQ/fastmcp",
}
],
"mentions": [
{
"handle": "alternatebuild.dev",
"display_text": "@alternatebuild.dev",
}
],
},
)
if json.loads(result[0].text).get("success"):
print(" β
Rich text post created!")
# c. Reply to a post
if save_uri:
print("\n c) Replying to a post...")
result = await client.call_tool(
"post", {"text": "Great post! π", "reply_to": save_uri}
)
if json.loads(result[0].text).get("success"):
print(" β
Reply posted!")
# d. Quote post
if simple_uri:
print("\n d) Creating a quote post...")
result = await client.call_tool(
"post",
{
"text": "Quoting my own test post for demo purposes π",
"quote": simple_uri,
},
)
if json.loads(result[0].text).get("success"):
print(" β
Quote post created!")
# e. Post with image
print("\n e) Creating a post with image...")
result = await client.call_tool(
"post",
{
"text": "Here's a test image post! πΈ",
"images": ["https://picsum.photos/400/300"],
"image_alts": ["Random test image"],
},
)
if json.loads(result[0].text).get("success"):
print(" β
Image post created!")
# f. Quote with image (advanced)
if simple_uri:
print("\n f) Creating a quote post with image...")
result = await client.call_tool(
"post",
{
"text": "Quote + image combo! π¨",
"quote": simple_uri,
"images": ["https://picsum.photos/300/200"],
"image_alts": ["Another test image"],
},
)
if json.loads(result[0].text).get("success"):
print(" β
Quote with image created!")
# g. Social actions
if save_uri:
print("\n g) Demonstrating social actions...")
# Like
result = await client.call_tool("like", {"uri": save_uri})
if json.loads(result[0].text).get("success"):
print(" β
Liked a post!")
# Repost
result = await client.call_tool("repost", {"uri": save_uri})
if json.loads(result[0].text).get("success"):
print(" β
Reposted!")
# Follow
result = await client.call_tool(
"follow", {"handle": "alternatebuild.dev"}
)
if json.loads(result[0].text).get("success"):
print(" β
Followed @alternatebuild.dev!")
# h. Thread creation (new!)
print("\n h) Creating a thread...")
result = await client.call_tool(
"create_thread",
{
"posts": [
{
"text": "Let me share some thoughts about the ATProto MCP server π§΅"
},
{
"text": "First, it makes posting from the terminal incredibly smooth"
},
{
"text": "The unified post API means one tool handles everything",
"links": [
{
"text": "everything",
"url": "https://github.com/PrefectHQ/fastmcp",
}
],
},
{
"text": "And now with create_thread, multi-post threads are trivial!"
},
]
},
)
if json.loads(result[0].text).get("success"):
thread_result = json.loads(result[0].text)
print(f" β
Thread created with {thread_result['post_count']} posts!")
else:
print("\n5. Posting capabilities (not enabled):")
print(" To test posting, run with --post flag")
print(" Example: python demo.py --post")
# 6. Show available capabilities
print("\n6. Available capabilities:")
print("\n Resources (read-only):")
print(" - atproto://profile/status")
print(" - atproto://timeline")
print(" - atproto://notifications")
print("\n Tools (actions):")
print(" - post: Unified posting with rich features")
print(" β’ Simple text posts")
print(" β’ Images (up to 4)")
print(" β’ Rich text (links, mentions)")
print(" β’ Replies and threads")
print(" β’ Quote posts")
print(" β’ Combinations (quote + image, reply + rich text, etc.)")
print(" - search: Search for posts")
print(" - create_thread: Post multi-part threads")
print(" - follow: Follow users")
print(" - like: Like posts")
print(" - repost: Share posts")
print("\n⨠Demo complete!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ATProto MCP Server Demo")
parser.add_argument(
"--post",
action="store_true",
help="Enable posting test messages to Bluesky",
)
args = parser.parse_args()
asyncio.run(main(enable_posting=args.post))
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/demo.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/__main__.py | from atproto_mcp.server import atproto_mcp
def main():
atproto_mcp.run()
if __name__ == "__main__":
main()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/__main__.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/_atproto/_client.py | """ATProto client management."""
from atproto import Client
from atproto_mcp.settings import settings
_client: Client | None = None
def get_client() -> Client:
"""Get or create an authenticated ATProto client."""
global _client
if _client is None:
_client = Client()
_client.login(settings.atproto_handle, settings.atproto_password)
return _client
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/_atproto/_client.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/_atproto/_posts.py | """Unified posting functionality."""
import time
from datetime import datetime
from atproto import models
from atproto_mcp.types import (
PostResult,
RichTextLink,
RichTextMention,
ThreadPost,
ThreadResult,
)
from ._client import get_client
def create_post(
text: str,
images: list[str] | None = None,
image_alts: list[str] | None = None,
links: list[RichTextLink] | None = None,
mentions: list[RichTextMention] | None = None,
reply_to: str | None = None,
reply_root: str | None = None,
quote: str | None = None,
) -> PostResult:
"""Create a unified post with optional features.
Args:
text: Post text (max 300 chars)
images: URLs of images to attach (max 4)
image_alts: Alt text for images
links: Links to embed in rich text
mentions: User mentions to embed
reply_to: URI of post to reply to
reply_root: URI of thread root (defaults to reply_to)
quote: URI of post to quote
"""
try:
client = get_client()
facets = []
embed = None
reply_ref = None
# Always build facets to handle auto-detected URLs and explicit links/mentions
facets = _build_facets(text, links, mentions, client)
# Handle replies
if reply_to:
reply_ref = _build_reply_ref(reply_to, reply_root, client)
# Handle quotes and images
if quote and images:
# Quote with images - create record with media embed
embed = _build_quote_with_images_embed(quote, images, image_alts, client)
elif quote:
# Quote only
embed = _build_quote_embed(quote, client)
elif images:
# Images only - use send_images for proper handling
return _send_images(text, images, image_alts, facets, reply_ref, client)
# Send the post (always include facets if any were created)
post = client.send_post(
text=text,
facets=facets if facets else None,
embed=embed,
reply_to=reply_ref,
)
return PostResult(
success=True,
uri=post.uri,
cid=post.cid,
text=text,
created_at=datetime.now().isoformat(),
error=None,
)
except Exception as e:
return PostResult(
success=False,
uri=None,
cid=None,
text=None,
created_at=None,
error=str(e),
)
def _build_facets(
text: str,
links: list[RichTextLink] | None,
mentions: list[RichTextMention] | None,
client,
):
"""Build facets for rich text formatting, including auto-detected URLs."""
import re
facets = []
covered_ranges = []
# URL regex pattern for auto-detection
url_pattern = re.compile(
r"https?://(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b(?:[-a-zA-Z0-9()@:%_\+.~#?&/=]*)"
)
# Process explicit links first
if links:
for link in links:
start = text.find(link["text"])
if start == -1:
continue
end = start + len(link["text"])
# Track this range as covered
covered_ranges.append((start, end))
facets.append(
models.AppBskyRichtextFacet.Main(
features=[models.AppBskyRichtextFacet.Link(uri=link["url"])],
index=models.AppBskyRichtextFacet.ByteSlice(
byte_start=len(text[:start].encode("UTF-8")),
byte_end=len(text[:end].encode("UTF-8")),
),
)
)
# Auto-detect URLs that aren't already covered by explicit links
for match in url_pattern.finditer(text):
url = match.group()
start = match.start()
end = match.end()
# Check if this URL overlaps with any explicit link
overlaps = False
for covered_start, covered_end in covered_ranges:
if not (end <= covered_start or start >= covered_end):
overlaps = True
break
if not overlaps:
facets.append(
models.AppBskyRichtextFacet.Main(
features=[models.AppBskyRichtextFacet.Link(uri=url)],
index=models.AppBskyRichtextFacet.ByteSlice(
byte_start=len(text[:start].encode("UTF-8")),
byte_end=len(text[:end].encode("UTF-8")),
),
)
)
# Process mentions
if mentions:
for mention in mentions:
display_text = mention.get("display_text") or f"@{mention['handle']}"
start = text.find(display_text)
if start == -1:
continue
end = start + len(display_text)
# Resolve handle to DID
resolved = client.app.bsky.actor.search_actors(
params={"q": mention["handle"], "limit": 1}
)
if not resolved.actors:
continue
did = resolved.actors[0].did
facets.append(
models.AppBskyRichtextFacet.Main(
features=[models.AppBskyRichtextFacet.Mention(did=did)],
index=models.AppBskyRichtextFacet.ByteSlice(
byte_start=len(text[:start].encode("UTF-8")),
byte_end=len(text[:end].encode("UTF-8")),
),
)
)
return facets
def _build_reply_ref(reply_to: str, reply_root: str | None, client):
"""Build reply reference."""
# Get parent post to extract CID
parent_post = client.app.bsky.feed.get_posts(params={"uris": [reply_to]})
if not parent_post.posts:
raise ValueError("Parent post not found")
parent_cid = parent_post.posts[0].cid
parent_ref = models.ComAtprotoRepoStrongRef.Main(uri=reply_to, cid=parent_cid)
# If no root_uri provided, parent is the root
if reply_root is None:
root_ref = parent_ref
else:
# Get root post CID
root_post = client.app.bsky.feed.get_posts(params={"uris": [reply_root]})
if not root_post.posts:
raise ValueError("Root post not found")
root_cid = root_post.posts[0].cid
root_ref = models.ComAtprotoRepoStrongRef.Main(uri=reply_root, cid=root_cid)
return models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref)
def _build_quote_embed(quote_uri: str, client):
"""Build quote embed."""
# Get the post to quote
quoted_post = client.app.bsky.feed.get_posts(params={"uris": [quote_uri]})
if not quoted_post.posts:
raise ValueError("Quoted post not found")
# Create strong ref for the quoted post
quoted_cid = quoted_post.posts[0].cid
quoted_ref = models.ComAtprotoRepoStrongRef.Main(uri=quote_uri, cid=quoted_cid)
# Create the embed
return models.AppBskyEmbedRecord.Main(record=quoted_ref)
def _build_quote_with_images_embed(
quote_uri: str, image_urls: list[str], image_alts: list[str] | None, client
):
"""Build quote embed with images."""
import httpx
# Get the quoted post
quoted_post = client.app.bsky.feed.get_posts(params={"uris": [quote_uri]})
if not quoted_post.posts:
raise ValueError("Quoted post not found")
quoted_cid = quoted_post.posts[0].cid
quoted_ref = models.ComAtprotoRepoStrongRef.Main(uri=quote_uri, cid=quoted_cid)
# Download and upload images
images = []
alts = image_alts or [""] * len(image_urls)
for i, url in enumerate(image_urls[:4]):
response = httpx.get(url, follow_redirects=True)
response.raise_for_status()
# Upload to blob storage
upload = client.upload_blob(response.content)
images.append(
models.AppBskyEmbedImages.Image(
alt=alts[i] if i < len(alts) else "",
image=upload.blob,
)
)
# Create record with media embed
return models.AppBskyEmbedRecordWithMedia.Main(
record=models.AppBskyEmbedRecord.Main(record=quoted_ref),
media=models.AppBskyEmbedImages.Main(images=images),
)
def _send_images(
text: str,
image_urls: list[str],
image_alts: list[str] | None,
facets,
reply_ref,
client,
):
"""Send post with images using the client's send_images method."""
import httpx
# Ensure alt_texts has same length as images
if image_alts is None:
image_alts = [""] * len(image_urls)
elif len(image_alts) < len(image_urls):
image_alts.extend([""] * (len(image_urls) - len(image_alts)))
image_data = []
alts = []
for i, url in enumerate(image_urls[:4]): # Max 4 images
# Download image (follow redirects)
response = httpx.get(url, follow_redirects=True)
response.raise_for_status()
image_data.append(response.content)
alts.append(image_alts[i] if i < len(image_alts) else "")
# Send post with images
# Note: send_images doesn't support facets or reply_to directly
# So we need to use send_post with manual image upload if we have facets or replies
# Since we always create facets now (for URL auto-detection), we'll always use this path
if facets or reply_ref:
# Manual image upload
images = []
for i, data in enumerate(image_data):
upload = client.upload_blob(data)
images.append(
models.AppBskyEmbedImages.Image(
alt=alts[i],
image=upload.blob,
)
)
embed = models.AppBskyEmbedImages.Main(images=images)
post = client.send_post(
text=text,
facets=facets if facets else None,
embed=embed,
reply_to=reply_ref,
)
else:
# Use simple send_images
post = client.send_images(
text=text,
images=image_data,
image_alts=alts,
)
return PostResult(
success=True,
uri=post.uri,
cid=post.cid,
text=text,
created_at=datetime.now().isoformat(),
error=None,
)
def create_thread(posts: list[ThreadPost]) -> ThreadResult:
"""Create a thread of posts with automatic linking.
Args:
posts: List of posts to create as a thread. First post is the root.
"""
if not posts:
return ThreadResult(
success=False,
thread_uri=None,
post_uris=[],
post_count=0,
error="No posts provided",
)
try:
post_uris = []
root_uri = None
parent_uri = None
for i, post_data in enumerate(posts):
# First post is the root
if i == 0:
result = create_post(
text=post_data["text"],
images=post_data.get("images"),
image_alts=post_data.get("image_alts"),
links=post_data.get("links"),
mentions=post_data.get("mentions"),
quote=post_data.get("quote"),
)
if not result["success"]:
return ThreadResult(
success=False,
thread_uri=None,
post_uris=post_uris,
post_count=len(post_uris),
error=f"Failed to create root post: {result['error']}",
)
root_uri = result["uri"]
parent_uri = root_uri
post_uris.append(root_uri)
# Small delay to ensure post is indexed
time.sleep(0.5)
else:
# Subsequent posts reply to the previous one
result = create_post(
text=post_data["text"],
images=post_data.get("images"),
image_alts=post_data.get("image_alts"),
links=post_data.get("links"),
mentions=post_data.get("mentions"),
quote=post_data.get("quote"),
reply_to=parent_uri,
reply_root=root_uri,
)
if not result["success"]:
return ThreadResult(
success=False,
thread_uri=root_uri,
post_uris=post_uris,
post_count=len(post_uris),
error=f"Failed to create post {i + 1}: {result['error']}",
)
parent_uri = result["uri"]
post_uris.append(parent_uri)
# Small delay between posts
if i < len(posts) - 1:
time.sleep(0.5)
return ThreadResult(
success=True,
thread_uri=root_uri,
post_uris=post_uris,
post_count=len(post_uris),
error=None,
)
except Exception as e:
return ThreadResult(
success=False,
thread_uri=None,
post_uris=post_uris,
post_count=len(post_uris),
error=str(e),
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/_atproto/_posts.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/_atproto/_profile.py | """Profile-related operations."""
from atproto_mcp.types import ProfileInfo
from ._client import get_client
def get_profile_info() -> ProfileInfo:
"""Get profile information for the authenticated user."""
try:
client = get_client()
profile = client.get_profile(client.me.did)
return ProfileInfo(
connected=True,
handle=profile.handle,
display_name=profile.display_name,
did=client.me.did,
followers=profile.followers_count,
following=profile.follows_count,
posts=profile.posts_count,
error=None,
)
except Exception as e:
return ProfileInfo(
connected=False,
handle=None,
display_name=None,
did=None,
followers=None,
following=None,
posts=None,
error=str(e),
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/_atproto/_profile.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/_atproto/_read.py | """Read-only operations for timeline, search, and notifications."""
from atproto_mcp.types import (
Notification,
NotificationsResult,
Post,
SearchResult,
TimelineResult,
)
from ._client import get_client
def fetch_timeline(limit: int = 10) -> TimelineResult:
"""Fetch the authenticated user's timeline."""
try:
client = get_client()
timeline = client.get_timeline(limit=limit)
posts = []
for feed_view in timeline.feed:
post = feed_view.post
posts.append(
Post(
uri=post.uri,
cid=post.cid,
text=post.record.text if hasattr(post.record, "text") else "",
author=post.author.handle,
created_at=post.record.created_at,
likes=post.like_count or 0,
reposts=post.repost_count or 0,
replies=post.reply_count or 0,
)
)
return TimelineResult(
success=True,
posts=posts,
count=len(posts),
error=None,
)
except Exception as e:
return TimelineResult(
success=False,
posts=[],
count=0,
error=str(e),
)
def search_for_posts(query: str, limit: int = 10) -> SearchResult:
"""Search for posts containing specific text."""
try:
client = get_client()
search_results = client.app.bsky.feed.search_posts(
params={"q": query, "limit": limit}
)
posts = []
for post in search_results.posts:
posts.append(
Post(
uri=post.uri,
cid=post.cid,
text=post.record.text if hasattr(post.record, "text") else "",
author=post.author.handle,
created_at=post.record.created_at,
likes=post.like_count or 0,
reposts=post.repost_count or 0,
replies=post.reply_count or 0,
)
)
return SearchResult(
success=True,
query=query,
posts=posts,
count=len(posts),
error=None,
)
except Exception as e:
return SearchResult(
success=False,
query=query,
posts=[],
count=0,
error=str(e),
)
def fetch_notifications(limit: int = 10) -> NotificationsResult:
"""Fetch recent notifications."""
try:
client = get_client()
notifs = client.app.bsky.notification.list_notifications(
params={"limit": limit}
)
notifications = []
for notif in notifs.notifications:
notifications.append(
Notification(
uri=notif.uri,
cid=notif.cid,
author=notif.author.handle,
reason=notif.reason,
is_read=notif.is_read,
indexed_at=notif.indexed_at,
)
)
return NotificationsResult(
success=True,
notifications=notifications,
count=len(notifications),
error=None,
)
except Exception as e:
return NotificationsResult(
success=False,
notifications=[],
count=0,
error=str(e),
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/_atproto/_read.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/_atproto/_social.py | """Social actions like follow, like, and repost."""
from atproto_mcp.types import FollowResult, LikeResult, RepostResult
from ._client import get_client
def follow_user_by_handle(handle: str) -> FollowResult:
    """Follow the user whose handle matches *handle*.

    The handle is first resolved to a DID via actor search (follows are
    keyed by DID, not handle), then the follow record is created.

    Args:
        handle: Bluesky handle, e.g. ``user.bsky.social``.

    Returns:
        A FollowResult; on failure ``success`` is False and ``error`` holds
        the reason.
    """
    try:
        client = get_client()
        # Resolve the handle to an actor record to obtain the DID.
        search = client.app.bsky.actor.search_actors(params={"q": handle, "limit": 1})
        if not search.actors:
            return FollowResult(
                success=False,
                did=None,
                handle=None,
                uri=None,
                error=f"User @{handle} not found",
            )
        match = search.actors[0]
        record = client.follow(match.did)
        return FollowResult(
            success=True,
            did=match.did,
            handle=match.handle,
            uri=record.uri,
            error=None,
        )
    except Exception as exc:
        return FollowResult(
            success=False,
            did=None,
            handle=None,
            uri=None,
            error=str(exc),
        )
def like_post_by_uri(uri: str) -> LikeResult:
    """Like the post identified by *uri* (an AT URI).

    The post is fetched first because a like record must reference both
    the post's URI and its CID.

    Args:
        uri: AT URI of the post, ``at://did:plc:xxx/app.bsky.feed.post/yyy``.

    Returns:
        A LikeResult; on failure ``success`` is False and ``error`` holds
        the reason.
    """
    try:
        client = get_client()
        # Validate the URI shape: at://did:plc:xxx/app.bsky.feed.post/yyy
        segments = uri.replace("at://", "").split("/")
        if len(segments) != 3 or segments[1] != "app.bsky.feed.post":
            raise ValueError("Invalid post URI format")
        # Look up the post to obtain its CID.
        lookup = client.app.bsky.feed.get_posts(params={"uris": [uri]})
        if not lookup.posts:
            raise ValueError("Post not found")
        record = client.like(uri, lookup.posts[0].cid)
        return LikeResult(
            success=True,
            liked_uri=uri,
            like_uri=record.uri,
            error=None,
        )
    except Exception as exc:
        return LikeResult(
            success=False,
            liked_uri=None,
            like_uri=None,
            error=str(exc),
        )
def repost_by_uri(uri: str) -> RepostResult:
    """Repost the post identified by *uri* (an AT URI).

    The post is fetched first because a repost record must reference both
    the post's URI and its CID.

    Args:
        uri: AT URI of the post, ``at://did:plc:xxx/app.bsky.feed.post/yyy``.

    Returns:
        A RepostResult; on failure ``success`` is False and ``error`` holds
        the reason.
    """
    try:
        client = get_client()
        # Validate the URI shape: at://did:plc:xxx/app.bsky.feed.post/yyy
        segments = uri.replace("at://", "").split("/")
        if len(segments) != 3 or segments[1] != "app.bsky.feed.post":
            raise ValueError("Invalid post URI format")
        # Look up the post to obtain its CID.
        lookup = client.app.bsky.feed.get_posts(params={"uris": [uri]})
        if not lookup.posts:
            raise ValueError("Post not found")
        record = client.repost(uri, lookup.posts[0].cid)
        return RepostResult(
            success=True,
            reposted_uri=uri,
            repost_uri=record.uri,
            error=None,
        )
    except Exception as exc:
        return RepostResult(
            success=False,
            reposted_uri=None,
            repost_uri=None,
            error=str(exc),
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/_atproto/_social.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/server.py | """ATProto MCP Server - Public API exposing Bluesky tools and resources."""
from typing import Annotated
from pydantic import Field
from atproto_mcp import _atproto
from atproto_mcp.settings import settings
from atproto_mcp.types import (
FollowResult,
LikeResult,
NotificationsResult,
PostResult,
ProfileInfo,
RepostResult,
RichTextLink,
RichTextMention,
SearchResult,
ThreadPost,
ThreadResult,
TimelineResult,
)
from fastmcp import FastMCP
# Single shared FastMCP server instance; every resource and tool below
# registers against it.
atproto_mcp = FastMCP("ATProto MCP Server")
# Resources - read-only operations
@atproto_mcp.resource("atproto://profile/status")
def atproto_status() -> ProfileInfo:
    """Report connection health plus the authenticated user's profile details."""
    profile = _atproto.get_profile_info()
    return profile
@atproto_mcp.resource("atproto://timeline")
def get_timeline() -> TimelineResult:
    """Return the authenticated user's home timeline (configured default limit)."""
    default_limit = settings.atproto_timeline_default_limit
    return _atproto.fetch_timeline(default_limit)
@atproto_mcp.resource("atproto://notifications")
def get_notifications() -> NotificationsResult:
    """Return recent notifications for the authenticated user (configured default limit)."""
    default_limit = settings.atproto_notifications_default_limit
    return _atproto.fetch_notifications(default_limit)
# Tools - actions that modify state
@atproto_mcp.tool
def post(
    text: Annotated[
        str, Field(max_length=300, description="The text content of the post")
    ],
    images: Annotated[
        list[str] | None,
        Field(max_length=4, description="URLs of images to attach (max 4)"),
    ] = None,
    image_alts: Annotated[
        list[str] | None, Field(description="Alt text for each image")
    ] = None,
    links: Annotated[
        list[RichTextLink] | None, Field(description="Links to embed in the text")
    ] = None,
    mentions: Annotated[
        list[RichTextMention] | None, Field(description="User mentions to embed")
    ] = None,
    reply_to: Annotated[
        str | None, Field(description="AT URI of post to reply to")
    ] = None,
    reply_root: Annotated[
        str | None, Field(description="AT URI of thread root (defaults to reply_to)")
    ] = None,
    quote: Annotated[str | None, Field(description="AT URI of post to quote")] = None,
) -> PostResult:
    """Publish a Bluesky post, optionally with media, rich text, replies, or quotes.

    Usage:
        - Plain text: post("Hello world!")
        - Attach an image: post("Check this out!", images=["https://example.com/img.jpg"])
        - Reply to a post: post("I agree!", reply_to="at://did/app.bsky.feed.post/123")
        - Quote a post: post("Great point!", quote="at://did/app.bsky.feed.post/456")
        - Inline link: post("Check out example.com", links=[{"text": "example.com", "url": "https://example.com"}])
    """
    # Thin wrapper: all posting logic lives in the private _atproto package.
    result = _atproto.create_post(
        text, images, image_alts, links, mentions, reply_to, reply_root, quote
    )
    return result
@atproto_mcp.tool
def follow(
    handle: Annotated[
        str,
        Field(
            description="The handle of the user to follow (e.g., 'user.bsky.social')"
        ),
    ],
) -> FollowResult:
    """Create a follow relationship with the user identified by *handle*."""
    result = _atproto.follow_user_by_handle(handle)
    return result
@atproto_mcp.tool
def like(
    uri: Annotated[str, Field(description="The AT URI of the post to like")],
) -> LikeResult:
    """Like the post identified by the given AT URI."""
    result = _atproto.like_post_by_uri(uri)
    return result
@atproto_mcp.tool
def repost(
    uri: Annotated[str, Field(description="The AT URI of the post to repost")],
) -> RepostResult:
    """Repost the post identified by the given AT URI."""
    result = _atproto.repost_by_uri(uri)
    return result
@atproto_mcp.tool
def search(
    query: Annotated[str, Field(description="Search query for posts")],
    limit: Annotated[
        int, Field(ge=1, le=100, description="Number of results to return")
    ] = settings.atproto_search_default_limit,
) -> SearchResult:
    """Find posts whose text matches *query*, returning at most *limit* results."""
    matches = _atproto.search_for_posts(query, limit)
    return matches
@atproto_mcp.tool
def create_thread(
    posts: Annotated[
        list[ThreadPost],
        Field(
            description="List of posts to create as a thread. Each post can have text, images, links, mentions, and quotes."
        ),
    ],
) -> ThreadResult:
    """Publish a list of posts as a single linked thread.

    The first entry becomes the thread root; each subsequent entry is
    posted as a reply to the previous one, so the chain renders as one
    continuous thread.

    Example:
        create_thread([
            {"text": "Starting a thread about Python"},
            {"text": "Python is great for rapid development"},
            {"text": "And the ecosystem is amazing!", "images": ["https://example.com/python.jpg"]}
        ])
    """
    result = _atproto.create_thread(posts)
    return result
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/server.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/settings.py | from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """ATProto connection settings, read from environment variables or a .env file."""
    model_config = SettingsConfigDict(env_file=[".env"], extra="ignore")
    # Required credentials: Field(default=...) means "no default", so startup
    # fails fast with a validation error if they are not provided.
    atproto_handle: str = Field(default=...)
    atproto_password: str = Field(default=...)
    # Personal Data Server endpoint; the main Bluesky instance by default.
    atproto_pds_url: str = Field(default="https://bsky.social")
    # Default page sizes for the read operations exposed as MCP resources/tools.
    atproto_notifications_default_limit: int = Field(default=10)
    atproto_timeline_default_limit: int = Field(default=10)
    atproto_search_default_limit: int = Field(default=10)
# Module-level singleton imported by the server and tool implementations.
settings = Settings()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/settings.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/atproto_mcp/src/atproto_mcp/types.py | """Type definitions for ATProto MCP server."""
from typing import TypedDict
class ProfileInfo(TypedDict):
    """Profile information response."""
    # False (with `error` set) when authentication/connection failed.
    connected: bool
    handle: str | None
    display_name: str | None
    did: str | None
    followers: int | None
    following: int | None
    posts: int | None
    # None on success; failure message otherwise.
    error: str | None
class PostResult(TypedDict):
    """Result of creating a post."""
    success: bool
    # AT URI and CID of the newly created post record (None on failure).
    uri: str | None
    cid: str | None
    text: str | None
    created_at: str | None
    error: str | None
class Post(TypedDict):
    """A single post."""
    # Author's handle, e.g. "user.bsky.social".
    author: str
    text: str | None
    created_at: str | None
    likes: int
    reposts: int
    replies: int
    # AT URI and CID uniquely identify the post record.
    uri: str
    cid: str
class TimelineResult(TypedDict):
    """Timeline fetch result."""
    success: bool
    # Number of posts returned (== len(posts)).
    count: int
    posts: list[Post]
    error: str | None
class SearchResult(TypedDict):
    """Search result."""
    success: bool
    # The query string that produced these results (echoed back).
    query: str
    count: int
    posts: list[Post]
    error: str | None
class Notification(TypedDict):
    """A single notification."""
    # Notification kind, e.g. "like", "repost", "follow", "mention".
    reason: str
    author: str | None
    is_read: bool
    indexed_at: str
    uri: str
    cid: str
class NotificationsResult(TypedDict):
    """Notifications fetch result."""
    success: bool
    count: int
    notifications: list[Notification]
    error: str | None
class FollowResult(TypedDict):
    """Result of following a user."""
    success: bool
    handle: str | None
    did: str | None
    # URI of the created follow record (None on failure).
    uri: str | None
    error: str | None
class LikeResult(TypedDict):
    """Result of liking a post."""
    success: bool
    # URI of the post that was liked.
    liked_uri: str | None
    # URI of the created like record.
    like_uri: str | None
    error: str | None
class RepostResult(TypedDict):
    """Result of reposting."""
    success: bool
    # URI of the post that was reposted.
    reposted_uri: str | None
    # URI of the created repost record.
    repost_uri: str | None
    error: str | None
class RichTextLink(TypedDict):
    """A link in rich text."""
    # Substring of the post text that becomes the clickable link.
    text: str
    url: str
class RichTextMention(TypedDict):
    """A mention in rich text."""
    handle: str
    # Optional display form of the mention in the post text.
    display_text: str | None
class ThreadPost(TypedDict, total=False):
    """A post in a thread."""
    text: str  # Required
    images: list[str] | None
    image_alts: list[str] | None
    links: list[RichTextLink] | None
    mentions: list[RichTextMention] | None
    quote: str | None
class ThreadResult(TypedDict):
    """Result of creating a thread."""
    success: bool
    thread_uri: str | None  # URI of the first post
    # URIs of every post in the thread, in posting order.
    post_uris: list[str]
    post_count: int
    error: str | None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/atproto_mcp/src/atproto_mcp/types.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/client/messages.py | from typing import TypeAlias
import mcp.types
from mcp.client.session import MessageHandlerFnT
from mcp.shared.session import RequestResponder
# Union of everything the server can push at a client: a request that must be
# answered (wrapped in a RequestResponder), a fire-and-forget notification, or
# a transport-level exception surfaced to the handler.
Message: TypeAlias = (
    RequestResponder[mcp.types.ServerRequest, mcp.types.ClientResult]
    | mcp.types.ServerNotification
    | Exception
)
# Alias for the SDK's message-handler callable type, re-exported for users.
MessageHandlerT: TypeAlias = MessageHandlerFnT
class MessageHandler:
    """
    This class is used to handle MCP messages sent to the client. It is used to handle all messages,
    requests, notifications, and exceptions. Users can override any of the hooks
    """
    async def __call__(
        self,
        message: RequestResponder[mcp.types.ServerRequest, mcp.types.ClientResult]
        | mcp.types.ServerNotification
        | Exception,
    ) -> None:
        # Instances are callable so they satisfy the SDK's handler signature;
        # all work happens in dispatch().
        return await self.dispatch(message)
    async def dispatch(self, message: Message) -> None:
        """Route *message* to the generic hook, then to its type-specific hooks."""
        # handle all messages
        await self.on_message(message)
        match message:
            # requests
            case RequestResponder():
                # handle all requests
                # TODO(ty): remove when ty supports match statement narrowing
                await self.on_request(message)  # type: ignore[arg-type]
                # handle specific requests
                # TODO(ty): remove type ignores when ty supports match statement narrowing
                match message.request.root:  # type: ignore[union-attr]
                    case mcp.types.PingRequest():
                        await self.on_ping(message.request.root)  # type: ignore[union-attr]
                    case mcp.types.ListRootsRequest():
                        await self.on_list_roots(message.request.root)  # type: ignore[union-attr]
                    case mcp.types.CreateMessageRequest():
                        await self.on_create_message(message.request.root)  # type: ignore[union-attr]
            # notifications
            case mcp.types.ServerNotification():
                # handle all notifications
                await self.on_notification(message)
                # handle specific notifications
                match message.root:
                    case mcp.types.CancelledNotification():
                        await self.on_cancelled(message.root)
                    case mcp.types.ProgressNotification():
                        await self.on_progress(message.root)
                    case mcp.types.LoggingMessageNotification():
                        await self.on_logging_message(message.root)
                    case mcp.types.ToolListChangedNotification():
                        await self.on_tool_list_changed(message.root)
                    case mcp.types.ResourceListChangedNotification():
                        await self.on_resource_list_changed(message.root)
                    case mcp.types.PromptListChangedNotification():
                        await self.on_prompt_list_changed(message.root)
                    case mcp.types.ResourceUpdatedNotification():
                        await self.on_resource_updated(message.root)
            case Exception():
                await self.on_exception(message)
    # --- overridable hooks; the defaults below are all no-ops ---
    async def on_message(self, message: Message) -> None:
        """Called for every message, before any type-specific hook."""
        pass
    async def on_request(
        self, message: RequestResponder[mcp.types.ServerRequest, mcp.types.ClientResult]
    ) -> None:
        """Called for every server request, before the request-specific hook."""
        pass
    async def on_ping(self, message: mcp.types.PingRequest) -> None:
        pass
    async def on_list_roots(self, message: mcp.types.ListRootsRequest) -> None:
        pass
    async def on_create_message(self, message: mcp.types.CreateMessageRequest) -> None:
        pass
    async def on_notification(self, message: mcp.types.ServerNotification) -> None:
        """Called for every notification, before the notification-specific hook."""
        pass
    async def on_exception(self, message: Exception) -> None:
        """Called when the transport surfaces an exception instead of a message."""
        pass
    async def on_progress(self, message: mcp.types.ProgressNotification) -> None:
        pass
    async def on_logging_message(
        self, message: mcp.types.LoggingMessageNotification
    ) -> None:
        pass
    async def on_tool_list_changed(
        self, message: mcp.types.ToolListChangedNotification
    ) -> None:
        pass
    async def on_resource_list_changed(
        self, message: mcp.types.ResourceListChangedNotification
    ) -> None:
        pass
    async def on_prompt_list_changed(
        self, message: mcp.types.PromptListChangedNotification
    ) -> None:
        pass
    async def on_resource_updated(
        self, message: mcp.types.ResourceUpdatedNotification
    ) -> None:
        pass
    async def on_cancelled(self, message: mcp.types.CancelledNotification) -> None:
        pass
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/messages.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/server/low_level.py | from __future__ import annotations
import weakref
from collections.abc import Awaitable, Callable
from contextlib import AsyncExitStack
from typing import TYPE_CHECKING, Any
import anyio
import mcp.types
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from mcp import McpError
from mcp.server.lowlevel.server import (
LifespanResultT,
NotificationOptions,
RequestT,
)
from mcp.server.lowlevel.server import (
Server as _Server,
)
from mcp.server.models import InitializationOptions
from mcp.server.session import ServerSession
from mcp.server.stdio import stdio_server as stdio_server
from mcp.shared.message import SessionMessage
from mcp.shared.session import RequestResponder
from pydantic import AnyUrl
from fastmcp.server.apps import UI_EXTENSION_ID
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from fastmcp.server.server import FastMCP
logger = get_logger(__name__)
class MiddlewareServerSession(ServerSession):
    """ServerSession that routes initialization requests through FastMCP middleware."""
    def __init__(self, fastmcp: FastMCP, *args, **kwargs):
        # Positional/keyword args are forwarded unchanged to ServerSession.
        super().__init__(*args, **kwargs)
        # Weak reference avoids a FastMCP <-> session reference cycle.
        self._fastmcp_ref: weakref.ref[FastMCP] = weakref.ref(fastmcp)
        # Task group for subscription tasks (set during session run)
        self._subscription_task_group: anyio.TaskGroup | None = None  # type: ignore[valid-type]
    @property
    def fastmcp(self) -> FastMCP:
        """Get the FastMCP instance.

        Raises:
            RuntimeError: If the weakly-referenced server has been collected.
        """
        fastmcp = self._fastmcp_ref()
        if fastmcp is None:
            raise RuntimeError("FastMCP instance is no longer available")
        return fastmcp
    def client_supports_extension(self, extension_id: str) -> bool:
        """Check if the connected client supports a given MCP extension.
        Inspects the ``extensions`` extra field on ``ClientCapabilities``
        sent by the client during initialization.
        """
        # _client_params is only populated after the initialize handshake.
        client_params = self._client_params
        if client_params is None:
            return False
        caps = client_params.capabilities
        if caps is None:
            return False
        # ClientCapabilities uses extra="allow" -- extensions is an extra field
        extras = caps.model_extra or {}
        extensions: dict[str, Any] | None = extras.get("extensions")
        if not extensions:
            return False
        return extension_id in extensions
    async def _received_request(
        self,
        responder: RequestResponder[mcp.types.ClientRequest, mcp.types.ServerResult],
    ):
        """
        Override the _received_request method to route special requests
        through FastMCP middleware.
        Handles initialization requests and SEP-1686 task methods.
        """
        # Imported lazily to avoid circular imports at module load time.
        import fastmcp.server.context
        from fastmcp.server.middleware.middleware import MiddlewareContext
        if isinstance(responder.request.root, mcp.types.InitializeRequest):
            # The MCP SDK's ServerSession._received_request() handles the
            # initialize request internally by calling responder.respond()
            # to send the InitializeResult directly to the write stream, then
            # returning None. This bypasses the middleware return path entirely,
            # so middleware would only see the request, never the response.
            #
            # To expose the response to middleware (e.g., for logging server
            # capabilities), we wrap responder.respond() to capture the
            # InitializeResult before it's sent, then return it from
            # call_original_handler so it flows back through the middleware chain.
            captured_response: mcp.types.ServerResult | None = None
            original_respond = responder.respond
            async def capturing_respond(
                response: mcp.types.ServerResult,
            ) -> None:
                nonlocal captured_response
                captured_response = response
                return await original_respond(response)
            responder.respond = capturing_respond  # type: ignore[method-assign]
            async def call_original_handler(
                ctx: MiddlewareContext,
            ) -> mcp.types.InitializeResult | None:
                # Delegate to the SDK's handler (which now goes through the
                # wrapped respond), then hand the captured result back.
                await super(MiddlewareServerSession, self)._received_request(responder)
                if captured_response is not None and isinstance(
                    captured_response.root, mcp.types.InitializeResult
                ):
                    return captured_response.root
                return None
            async with fastmcp.server.context.Context(
                fastmcp=self.fastmcp, session=self
            ) as fastmcp_ctx:
                # Create the middleware context.
                mw_context = MiddlewareContext(
                    message=responder.request.root,
                    source="client",
                    type="request",
                    method="initialize",
                    fastmcp_context=fastmcp_ctx,
                )
                try:
                    return await self.fastmcp._run_middleware(
                        mw_context, call_original_handler
                    )
                except McpError as e:
                    # McpError can be thrown from middleware in `on_initialize`
                    # send the error to responder.
                    if not responder._completed:
                        with responder:
                            await responder.respond(e.error)
                    else:
                        # Don't re-raise: prevents responding to initialize request twice
                        logger.warning(
                            "Received McpError but responder is already completed. "
                            "Cannot send error response as response was already sent.",
                            exc_info=e,
                        )
                    return None
        # Fall through to default handling (task methods now handled via registered handlers)
        return await super()._received_request(responder)
class LowLevelServer(_Server[LifespanResultT, RequestT]):
    """Low-level MCP server subclass wired to a FastMCP instance.

    Extends the SDK server with FastMCP-specific notification defaults,
    SEP-1686 task capabilities, and middleware-aware session handling.
    """
    def __init__(self, fastmcp: FastMCP, *args: Any, **kwargs: Any):
        super().__init__(*args, **kwargs)
        # Store a weak reference to FastMCP to avoid circular references
        self._fastmcp_ref: weakref.ref[FastMCP] = weakref.ref(fastmcp)
        # FastMCP servers support notifications for all components
        self.notification_options = NotificationOptions(
            prompts_changed=True,
            resources_changed=True,
            tools_changed=True,
        )
    @property
    def fastmcp(self) -> FastMCP:
        """Get the FastMCP instance.

        Raises:
            RuntimeError: If the weakly-referenced server has been collected.
        """
        fastmcp = self._fastmcp_ref()
        if fastmcp is None:
            raise RuntimeError("FastMCP instance is no longer available")
        return fastmcp
    def create_initialization_options(
        self,
        notification_options: NotificationOptions | None = None,
        experimental_capabilities: dict[str, dict[str, Any]] | None = None,
        **kwargs: Any,
    ) -> InitializationOptions:
        """Build initialization options, defaulting to FastMCP's notification settings."""
        # ensure we use the FastMCP notification options
        if notification_options is None:
            notification_options = self.notification_options
        return super().create_initialization_options(
            notification_options=notification_options,
            experimental_capabilities=experimental_capabilities,
            **kwargs,
        )
    def get_capabilities(
        self,
        notification_options: NotificationOptions,
        experimental_capabilities: dict[str, dict[str, Any]],
    ) -> mcp.types.ServerCapabilities:
        """Override to set capabilities.tasks as a first-class field per SEP-1686.
        This ensures task capabilities appear in capabilities.tasks instead of
        capabilities.experimental.tasks, which is required by the MCP spec and
        enables proper task detection by clients like VS Code Copilot 1.107+.
        """
        from fastmcp.server.tasks.capabilities import get_task_capabilities
        # Get base capabilities from SDK (pass empty dict for experimental)
        # since we'll set tasks as a first-class field instead
        capabilities = super().get_capabilities(
            notification_options,
            experimental_capabilities or {},
        )
        # Set tasks as a first-class field (not experimental) per SEP-1686
        capabilities.tasks = get_task_capabilities()
        # Advertise MCP Apps extension support (io.modelcontextprotocol/ui)
        # Uses the same extra-field pattern as tasks above -- ServerCapabilities
        # has extra="allow" so this survives serialization.
        # Merge with any existing extensions to avoid clobbering other features.
        existing_extensions: dict[str, Any] = (
            getattr(capabilities, "extensions", None) or {}
        )
        capabilities.extensions = {**existing_extensions, UI_EXTENSION_ID: {}}
        return capabilities
    async def run(
        self,
        read_stream: MemoryObjectReceiveStream[SessionMessage | Exception],
        write_stream: MemoryObjectSendStream[SessionMessage],
        initialization_options: InitializationOptions,
        raise_exceptions: bool = False,
        stateless: bool = False,
    ):
        """
        Overrides the run method to use the MiddlewareServerSession.
        """
        async with AsyncExitStack() as stack:
            lifespan_context = await stack.enter_async_context(self.lifespan(self))
            session = await stack.enter_async_context(
                MiddlewareServerSession(
                    self.fastmcp,
                    read_stream,
                    write_stream,
                    initialization_options,
                    stateless=stateless,
                )
            )
            async with anyio.create_task_group() as tg:
                # Store task group on session for subscription tasks (SEP-1686)
                session._subscription_task_group = tg
                # Each incoming message is handled concurrently in the group.
                async for message in session.incoming_messages:
                    tg.start_soon(
                        self._handle_message,
                        message,
                        session,
                        lifespan_context,
                        raise_exceptions,
                    )
    def read_resource(
        self,
    ) -> Callable[
        [
            Callable[
                [AnyUrl],
                Awaitable[mcp.types.ReadResourceResult | mcp.types.CreateTaskResult],
            ]
        ],
        Callable[
            [AnyUrl],
            Awaitable[mcp.types.ReadResourceResult | mcp.types.CreateTaskResult],
        ],
    ]:
        """
        Decorator for registering a read_resource handler with CreateTaskResult support.
        The MCP SDK's read_resource decorator does not support returning CreateTaskResult
        for background task execution. This decorator wraps the result in ServerResult.
        This decorator can be removed once the MCP SDK adds native CreateTaskResult support
        for resources.
        """
        def decorator(
            func: Callable[
                [AnyUrl],
                Awaitable[mcp.types.ReadResourceResult | mcp.types.CreateTaskResult],
            ],
        ) -> Callable[
            [AnyUrl],
            Awaitable[mcp.types.ReadResourceResult | mcp.types.CreateTaskResult],
        ]:
            async def handler(
                req: mcp.types.ReadResourceRequest,
            ) -> mcp.types.ServerResult:
                result = await func(req.params.uri)
                return mcp.types.ServerResult(result)
            # Registering directly replaces any handler the SDK installed.
            self.request_handlers[mcp.types.ReadResourceRequest] = handler
            return func
        return decorator
    def get_prompt(
        self,
    ) -> Callable[
        [
            Callable[
                [str, dict[str, Any] | None],
                Awaitable[mcp.types.GetPromptResult | mcp.types.CreateTaskResult],
            ]
        ],
        Callable[
            [str, dict[str, Any] | None],
            Awaitable[mcp.types.GetPromptResult | mcp.types.CreateTaskResult],
        ],
    ]:
        """
        Decorator for registering a get_prompt handler with CreateTaskResult support.
        The MCP SDK's get_prompt decorator does not support returning CreateTaskResult
        for background task execution. This decorator wraps the result in ServerResult.
        This decorator can be removed once the MCP SDK adds native CreateTaskResult support
        for prompts.
        """
        def decorator(
            func: Callable[
                [str, dict[str, Any] | None],
                Awaitable[mcp.types.GetPromptResult | mcp.types.CreateTaskResult],
            ],
        ) -> Callable[
            [str, dict[str, Any] | None],
            Awaitable[mcp.types.GetPromptResult | mcp.types.CreateTaskResult],
        ]:
            async def handler(
                req: mcp.types.GetPromptRequest,
            ) -> mcp.types.ServerResult:
                result = await func(req.params.name, req.params.arguments)
                return mcp.types.ServerResult(result)
            # Registering directly replaces any handler the SDK installed.
            self.request_handlers[mcp.types.GetPromptRequest] = handler
            return func
        return decorator
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/low_level.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/error_handling.py | """Error handling middleware for consistent error responses and tracking."""
import asyncio
import logging
import traceback
from collections.abc import Callable
from typing import Any
import anyio
from mcp import McpError
from mcp.types import ErrorData
from fastmcp.exceptions import NotFoundError
from .middleware import CallNext, Middleware, MiddlewareContext
class ErrorHandlingMiddleware(Middleware):
    """Middleware that provides consistent error handling and logging.
    Catches exceptions, logs them appropriately, and converts them to
    proper MCP error responses. Also tracks error patterns for monitoring.
    Example:
        ```python
        from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
        import logging
        # Configure logging to see error details
        logging.basicConfig(level=logging.ERROR)
        mcp = FastMCP("MyServer")
        mcp.add_middleware(ErrorHandlingMiddleware())
        ```
    """
    def __init__(
        self,
        logger: logging.Logger | None = None,
        include_traceback: bool = False,
        error_callback: Callable[[Exception, MiddlewareContext], None] | None = None,
        transform_errors: bool = True,
    ):
        """Initialize error handling middleware.
        Args:
            logger: Logger instance for error logging. If None, uses 'fastmcp.errors'
            include_traceback: Whether to include full traceback in error logs
            error_callback: Optional callback function called for each error
            transform_errors: Whether to transform non-MCP errors to McpError
        """
        self.logger = logger or logging.getLogger("fastmcp.errors")
        self.include_traceback = include_traceback
        self.error_callback = error_callback
        self.transform_errors = transform_errors
        # Error frequencies keyed by "ExceptionType:method", for monitoring.
        self.error_counts: dict[str, int] = {}
    def _log_error(self, error: Exception, context: MiddlewareContext) -> None:
        """Log error with appropriate detail level."""
        error_type = type(error).__name__
        method = context.method or "unknown"
        # Track error counts
        error_key = f"{error_type}:{method}"
        self.error_counts[error_key] = self.error_counts.get(error_key, 0) + 1
        base_message = f"Error in {method}: {error_type}: {error!s}"
        if self.include_traceback:
            self.logger.error(f"{base_message}\n{traceback.format_exc()}")
        else:
            self.logger.error(base_message)
        # Call custom error callback if provided; a failing callback must not
        # mask the original error, so it is caught and logged.
        if self.error_callback:
            try:
                self.error_callback(error, context)
            except Exception as callback_error:
                self.logger.error(f"Error in error callback: {callback_error}")
    def _transform_error(
        self, error: Exception, context: MiddlewareContext
    ) -> Exception:
        """Transform non-MCP errors to proper MCP errors.
        Args:
            error: The raised exception.
            context: The middleware context in which it occurred.
        Returns:
            An McpError with an appropriate JSON-RPC code, or the original
            exception if it is already an McpError or transformation is off.
        """
        if isinstance(error, McpError):
            return error
        if not self.transform_errors:
            return error
        # Prefer the wrapped cause, so the mapping reflects the underlying failure.
        error_type = type(error.__cause__) if error.__cause__ else type(error)
        # Use issubclass (not exact-type membership) so subclasses - e.g.
        # UnicodeDecodeError (ValueError) or project subclasses of
        # NotFoundError - map to the intended code instead of falling through
        # to the generic "internal error" branch.
        if issubclass(error_type, (ValueError, TypeError)):
            return McpError(
                ErrorData(code=-32602, message=f"Invalid params: {error!s}")
            )
        elif issubclass(error_type, (FileNotFoundError, KeyError, NotFoundError)):
            # MCP spec defines -32002 specifically for resource not found
            method = context.method or ""
            if method.startswith("resources/"):
                return McpError(
                    ErrorData(code=-32002, message=f"Resource not found: {error!s}")
                )
            return McpError(ErrorData(code=-32001, message=f"Not found: {error!s}"))
        elif issubclass(error_type, PermissionError):
            return McpError(
                ErrorData(code=-32000, message=f"Permission denied: {error!s}")
            )
        # asyncio.TimeoutError is a subclass of TimeoutError in Python 3.10, alias in 3.11+
        elif issubclass(error_type, (TimeoutError, asyncio.TimeoutError)):
            return McpError(
                ErrorData(code=-32000, message=f"Request timeout: {error!s}")
            )
        else:
            return McpError(
                ErrorData(code=-32603, message=f"Internal error: {error!s}")
            )
    async def on_message(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Handle errors for all messages."""
        try:
            return await call_next(context)
        except Exception as error:
            self._log_error(error, context)
            # Transform and re-raise, preserving the original as the cause.
            transformed_error = self._transform_error(error, context)
            raise transformed_error from error
    def get_error_stats(self) -> dict[str, int]:
        """Get error statistics for monitoring (a copy; safe to mutate)."""
        return self.error_counts.copy()
class RetryMiddleware(Middleware):
    """Middleware that transparently retries failed requests.
    Requests failing with transient errors are re-attempted with
    exponential backoff so the server and its dependencies are not
    hammered by immediate retries.
    Example:
        ```python
        from fastmcp.server.middleware.error_handling import RetryMiddleware
        # Retry up to 3 times with exponential backoff
        retry_middleware = RetryMiddleware(
            max_retries=3,
            retry_exceptions=(ConnectionError, TimeoutError)
        )
        mcp = FastMCP("MyServer")
        mcp.add_middleware(retry_middleware)
        ```
    """
    def __init__(
        self,
        max_retries: int = 3,
        base_delay: float = 1.0,
        max_delay: float = 60.0,
        backoff_multiplier: float = 2.0,
        retry_exceptions: tuple[type[Exception], ...] = (ConnectionError, TimeoutError),
        logger: logging.Logger | None = None,
    ):
        """Initialize retry middleware.
        Args:
            max_retries: Maximum number of retry attempts
            base_delay: Initial delay between retries in seconds
            max_delay: Maximum delay between retries in seconds
            backoff_multiplier: Multiplier for exponential backoff
            retry_exceptions: Tuple of exception types that should trigger retries
            logger: Logger for retry attempts
        """
        self.max_retries = max_retries
        self.base_delay = base_delay
        self.max_delay = max_delay
        self.backoff_multiplier = backoff_multiplier
        self.retry_exceptions = retry_exceptions
        self.logger = logger or logging.getLogger("fastmcp.retry")
    def _should_retry(self, error: Exception) -> bool:
        """Return True when *error* is one of the configured retryable types."""
        return isinstance(error, self.retry_exceptions)
    def _calculate_delay(self, attempt: int) -> float:
        """Return the exponential backoff delay for *attempt*, capped at max_delay."""
        return min(self.base_delay * (self.backoff_multiplier**attempt), self.max_delay)
    async def on_request(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Run the request, retrying retryable failures with backoff."""
        final_error: Exception | None = None
        # One initial attempt plus up to max_retries re-attempts.
        for attempt in range(self.max_retries + 1):
            try:
                return await call_next(context)
            except Exception as error:
                final_error = error
                # Stop when out of attempts or the error is not retryable.
                if attempt == self.max_retries or not self._should_retry(error):
                    break
                delay = self._calculate_delay(attempt)
                self.logger.warning(
                    f"Request {context.method} failed (attempt {attempt + 1}/{self.max_retries + 1}): "
                    f"{type(error).__name__}: {error!s}. Retrying in {delay:.1f}s..."
                )
                await anyio.sleep(delay)
        # Every attempt failed: surface the most recent error to the caller.
        if final_error:
            raise final_error
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/error_handling.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/logging.py | """Comprehensive logging middleware for FastMCP servers."""
import json
import logging
import time
from collections.abc import Callable
from logging import Logger
from typing import Any
import pydantic_core
from .middleware import CallNext, Middleware, MiddlewareContext
def default_serializer(data: Any) -> str:
    """Serialize *data* to a JSON string, stringifying unserializable objects."""
    serialized = pydantic_core.to_json(data, fallback=str)
    return serialized.decode()
class BaseLoggingMiddleware(Middleware):
    """Base class for logging middleware.

    Subclasses set the attributes below in their ``__init__``; this base
    class supplies the shared record-building and emission logic used by
    ``on_message``.
    """

    # Configured by subclasses:
    logger: Logger  # destination logger for all records
    log_level: int  # level used for start/success records (errors use ERROR)
    include_payloads: bool  # include the serialized payload text
    include_payload_length: bool  # include serialized payload length (characters)
    estimate_payload_tokens: bool  # include a rough token estimate
    max_payload_length: int | None  # truncate logged payloads beyond this length
    methods: list[str] | None  # only log these methods (None = all)
    structured_logging: bool  # True → JSON records, False → key=value text
    payload_serializer: Callable[[Any], str] | None  # optional custom serializer

    def _serialize_payload(self, context: MiddlewareContext[Any]) -> str:
        """Serialize the message payload, falling back to the default serializer.

        A failing custom serializer is logged as a warning and replaced by
        the default serializer, so logging never breaks request handling.
        """
        payload: str
        if not self.payload_serializer:
            payload = default_serializer(context.message)
        else:
            try:
                payload = self.payload_serializer(context.message)
            except Exception as e:
                self.logger.warning(
                    f"Failed to serialize payload due to {e}: {context.type} {context.method} {context.source}."
                )
                payload = default_serializer(context.message)
        return payload

    def _format_message(self, message: dict[str, str | int | float]) -> str:
        """Format a message for logging: JSON when structured, else key=value pairs."""
        if self.structured_logging:
            return json.dumps(message)
        else:
            return " ".join([f"{k}={v}" for k, v in message.items()])

    def _create_before_message(
        self, context: MiddlewareContext[Any]
    ) -> dict[str, str | int | float]:
        """Build the "<type>_start" record emitted before dispatching."""
        message = {
            "event": context.type + "_start",
            "method": context.method or "unknown",
            "source": context.source,
        }
        if (
            self.include_payloads
            or self.include_payload_length
            or self.estimate_payload_tokens
        ):
            payload = self._serialize_payload(context)
            if self.include_payload_length or self.estimate_payload_tokens:
                # Stats are measured on the full serialized payload,
                # before any truncation below.
                payload_length = len(payload)
                # Rough heuristic: ~4 characters per token.
                payload_tokens = payload_length // 4
                if self.estimate_payload_tokens:
                    message["payload_tokens"] = payload_tokens
                if self.include_payload_length:
                    message["payload_length"] = payload_length
            # Truncation only affects the logged payload text, not the stats.
            if self.max_payload_length and len(payload) > self.max_payload_length:
                payload = payload[: self.max_payload_length] + "..."
            if self.include_payloads:
                message["payload"] = payload
                message["payload_type"] = type(context.message).__name__
        return message

    def _create_error_message(
        self,
        context: MiddlewareContext[Any],
        start_time: float,
        error: Exception,
    ) -> dict[str, str | int | float]:
        """Build the "<type>_error" record emitted when dispatch raises."""
        duration_ms: float = _get_duration_ms(start_time)
        message = {
            "event": context.type + "_error",
            "method": context.method or "unknown",
            "source": context.source,
            "duration_ms": duration_ms,
            "error": str(object=error),
        }
        return message

    def _create_after_message(
        self,
        context: MiddlewareContext[Any],
        start_time: float,
    ) -> dict[str, str | int | float]:
        """Build the "<type>_success" record emitted after dispatch returns."""
        duration_ms: float = _get_duration_ms(start_time)
        message = {
            "event": context.type + "_success",
            "method": context.method or "unknown",
            "source": context.source,
            "duration_ms": duration_ms,
        }
        return message

    def _log_message(
        self, message: dict[str, str | int | float], log_level: int | None = None
    ):
        """Emit *message* at *log_level*, defaulting to the configured level.

        NOTE(review): a passed log_level of 0 (NOTSET) is falsy and falls
        back to the configured level — presumably intentional; confirm.
        """
        self.logger.log(log_level or self.log_level, self._format_message(message))

    async def on_message(
        self, context: MiddlewareContext[Any], call_next: CallNext[Any, Any]
    ) -> Any:
        """Log messages for configured methods."""
        if self.methods and context.method not in self.methods:
            return await call_next(context)
        self._log_message(self._create_before_message(context))
        start_time = time.perf_counter()
        try:
            result = await call_next(context)
            self._log_message(self._create_after_message(context, start_time))
            return result
        except Exception as e:
            # Errors are always emitted at ERROR, regardless of the
            # configured level for start/success records.
            self._log_message(
                self._create_error_message(context, start_time, e), logging.ERROR
            )
            raise
class LoggingMiddleware(BaseLoggingMiddleware):
    """Human-readable request and response logging middleware.

    Emits one key=value log line per MCP message event, with optional
    payload details. Useful for debugging, monitoring, and understanding
    server usage patterns.

    Example:
        ```python
        from fastmcp.server.middleware.logging import LoggingMiddleware
        import logging

        # Configure logging
        logging.basicConfig(level=logging.INFO)

        mcp = FastMCP("MyServer")
        mcp.add_middleware(LoggingMiddleware())
        ```
    """

    def __init__(
        self,
        *,
        logger: logging.Logger | None = None,
        log_level: int = logging.INFO,
        include_payloads: bool = False,
        include_payload_length: bool = False,
        estimate_payload_tokens: bool = False,
        max_payload_length: int = 1000,
        methods: list[str] | None = None,
        payload_serializer: Callable[[Any], str] | None = None,
    ):
        """Set up unstructured (key=value) logging.

        Args:
            logger: Logger to emit to; defaults to one named
                'fastmcp.middleware.logging'.
            log_level: Level for start/success records (default: INFO).
            include_payloads: Whether to log the serialized message payload.
            include_payload_length: Whether to log the payload length.
            estimate_payload_tokens: Whether to log a rough token estimate.
            max_payload_length: Truncate logged payloads beyond this length.
            methods: Only log these methods; None logs everything.
            payload_serializer: Callable converting a message to a JSON
                string; the default serializer is used when omitted.
        """
        # This subclass always produces plain key=value output.
        self.structured_logging: bool = False
        self.logger: Logger = logger or logging.getLogger("fastmcp.middleware.logging")
        self.log_level = log_level
        self.methods: list[str] | None = methods
        self.payload_serializer: Callable[[Any], str] | None = payload_serializer
        self.include_payloads: bool = include_payloads
        self.include_payload_length: bool = include_payload_length
        self.estimate_payload_tokens: bool = estimate_payload_tokens
        self.max_payload_length: int = max_payload_length
class StructuredLoggingMiddleware(BaseLoggingMiddleware):
    """JSON-formatted logging middleware for log-analysis pipelines.

    Emits one JSON object per log record, which is straightforward to
    ingest with aggregation tools such as the ELK stack, Splunk, or cloud
    logging services.

    Example:
        ```python
        from fastmcp.server.middleware.logging import StructuredLoggingMiddleware

        mcp = FastMCP("MyServer")
        mcp.add_middleware(StructuredLoggingMiddleware())
        ```
    """

    def __init__(
        self,
        *,
        logger: logging.Logger | None = None,
        log_level: int = logging.INFO,
        include_payloads: bool = False,
        include_payload_length: bool = False,
        estimate_payload_tokens: bool = False,
        methods: list[str] | None = None,
        payload_serializer: Callable[[Any], str] | None = None,
    ):
        """Set up structured (JSON) logging.

        Args:
            logger: Logger to emit to; defaults to one named
                'fastmcp.middleware.structured_logging'.
            log_level: Level for start/success records (default: INFO).
            include_payloads: Whether to log the serialized message payload.
            include_payload_length: Whether to log the payload length.
            estimate_payload_tokens: Whether to log a rough token estimate
                (length // 4).
            methods: Only log these methods; None logs everything.
            payload_serializer: Callable converting a message to a JSON
                string; the default serializer is used when omitted.
        """
        # This subclass always emits JSON and never truncates payloads.
        self.structured_logging: bool = True
        self.max_payload_length: int | None = None
        self.logger: Logger = logger or logging.getLogger(
            "fastmcp.middleware.structured_logging"
        )
        self.log_level: int = log_level
        self.methods: list[str] | None = methods
        self.payload_serializer: Callable[[Any], str] | None = payload_serializer
        self.include_payloads: bool = include_payloads
        self.include_payload_length: bool = include_payload_length
        self.estimate_payload_tokens: bool = estimate_payload_tokens
def _get_duration_ms(start_time: float, /) -> float:
return round(number=(time.perf_counter() - start_time) * 1000, ndigits=2)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/logging.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/rate_limiting.py | """Rate limiting middleware for protecting FastMCP servers from abuse."""
import time
from collections import defaultdict, deque
from collections.abc import Callable
from typing import Any
import anyio
from mcp import McpError
from mcp.types import ErrorData
from .middleware import CallNext, Middleware, MiddlewareContext
class RateLimitError(McpError):
    """Error raised when a client exceeds the configured rate limit."""

    def __init__(self, message: str = "Rate limit exceeded"):
        # -32000 is the server-defined JSON-RPC error code used for limits.
        error_data = ErrorData(code=-32000, message=message)
        super().__init__(error_data)
class TokenBucketRateLimiter:
    """Token bucket implementation for rate limiting."""

    def __init__(self, capacity: int, refill_rate: float):
        """Initialize token bucket.

        Args:
            capacity: Maximum number of tokens in the bucket
            refill_rate: Tokens added per second
        """
        self.capacity = capacity
        self.refill_rate = refill_rate
        self.tokens: float = capacity
        # Use a monotonic clock for refill accounting: wall-clock jumps
        # (NTP corrections, DST, manual changes) with time.time() could
        # stall refills or grant an unintended burst of tokens.
        self.last_refill = time.monotonic()
        self._lock = anyio.Lock()

    async def consume(self, tokens: int = 1) -> bool:
        """Try to consume tokens from the bucket.

        Args:
            tokens: Number of tokens to consume

        Returns:
            True if tokens were available and consumed, False otherwise
        """
        async with self._lock:
            now = time.monotonic()
            elapsed = now - self.last_refill
            # Refill proportionally to elapsed time, never above capacity.
            self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
            self.last_refill = now

            if self.tokens >= tokens:
                self.tokens -= tokens
                return True
            return False
class SlidingWindowRateLimiter:
    """Sliding window rate limiter implementation."""

    def __init__(self, max_requests: int, window_seconds: int):
        """Initialize sliding window rate limiter.

        Args:
            max_requests: Maximum requests allowed in the time window
            window_seconds: Time window in seconds
        """
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        # Monotonic timestamps of accepted requests, oldest first.
        self.requests: deque[float] = deque()
        self._lock = anyio.Lock()

    async def is_allowed(self) -> bool:
        """Check if a request is allowed, recording it when accepted."""
        async with self._lock:
            # Monotonic clock: a wall-clock jump must not evict requests
            # early or keep expired entries pinned in the window.
            now = time.monotonic()
            cutoff = now - self.window_seconds

            # Drop timestamps that have aged out of the window.
            while self.requests and self.requests[0] < cutoff:
                self.requests.popleft()

            if len(self.requests) < self.max_requests:
                self.requests.append(now)
                return True
            return False
class RateLimitingMiddleware(Middleware):
    """Middleware that implements rate limiting to prevent server abuse.

    Uses a token bucket algorithm by default, allowing for burst traffic
    while maintaining a sustainable long-term rate.

    Example:
        ```python
        from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware

        # Allow 10 requests per second with bursts up to 20
        rate_limiter = RateLimitingMiddleware(
            max_requests_per_second=10,
            burst_capacity=20
        )

        mcp = FastMCP("MyServer")
        mcp.add_middleware(rate_limiter)
        ```
    """

    def __init__(
        self,
        max_requests_per_second: float = 10.0,
        burst_capacity: int | None = None,
        get_client_id: Callable[[MiddlewareContext], str] | None = None,
        global_limit: bool = False,
    ):
        """Initialize rate limiting middleware.

        Args:
            max_requests_per_second: Sustained requests per second allowed
            burst_capacity: Maximum burst capacity. If None, defaults to 2x max_requests_per_second
            get_client_id: Function to extract client ID from context. If None, all
                requests share the single "global" bucket
            global_limit: If True, apply limit globally; if False, per-client
        """
        self.max_requests_per_second = max_requests_per_second
        # Compare against None explicitly: `burst_capacity or default` would
        # silently replace an explicit burst_capacity of 0 with the default.
        if burst_capacity is None:
            self.burst_capacity = int(max_requests_per_second * 2)
        else:
            self.burst_capacity = burst_capacity
        self.get_client_id = get_client_id
        self.global_limit = global_limit

        # Lazily-created per-client token buckets. NOTE: grows with the
        # number of distinct client IDs and is never pruned.
        self.limiters: dict[str, TokenBucketRateLimiter] = defaultdict(
            lambda: TokenBucketRateLimiter(
                self.burst_capacity, self.max_requests_per_second
            )
        )

        # Single shared bucket; only created (and only accessed) when
        # global_limit is enabled.
        if self.global_limit:
            self.global_limiter = TokenBucketRateLimiter(
                self.burst_capacity, self.max_requests_per_second
            )

    def _get_client_identifier(self, context: MiddlewareContext) -> str:
        """Get client identifier for rate limiting (falls back to "global")."""
        if self.get_client_id:
            return self.get_client_id(context)
        return "global"

    async def on_request(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Apply rate limiting to requests.

        Raises:
            RateLimitError: If the applicable token bucket is exhausted.
        """
        if self.global_limit:
            # Global rate limiting
            allowed = await self.global_limiter.consume()
            if not allowed:
                raise RateLimitError("Global rate limit exceeded")
        else:
            # Per-client rate limiting
            client_id = self._get_client_identifier(context)
            limiter = self.limiters[client_id]
            allowed = await limiter.consume()
            if not allowed:
                raise RateLimitError(f"Rate limit exceeded for client: {client_id}")

        return await call_next(context)
class SlidingWindowRateLimitingMiddleware(Middleware):
    """Rate limiting middleware based on a sliding time window.

    Tracks individual request timestamps per client, giving more precise
    limiting than a token bucket at the cost of extra memory per request.

    Example:
        ```python
        from fastmcp.server.middleware.rate_limiting import SlidingWindowRateLimitingMiddleware

        # Allow 100 requests per minute
        rate_limiter = SlidingWindowRateLimitingMiddleware(
            max_requests=100,
            window_minutes=1
        )

        mcp = FastMCP("MyServer")
        mcp.add_middleware(rate_limiter)
        ```
    """

    def __init__(
        self,
        max_requests: int,
        window_minutes: int = 1,
        get_client_id: Callable[[MiddlewareContext], str] | None = None,
    ):
        """Configure the sliding-window limiter.

        Args:
            max_requests: Maximum requests allowed in the time window
            window_minutes: Time window in minutes
            get_client_id: Function to extract client ID from context
        """
        self.max_requests = max_requests
        self.window_seconds = window_minutes * 60
        self.get_client_id = get_client_id

        # One window per client, created on first use.
        self.limiters: dict[str, SlidingWindowRateLimiter] = defaultdict(
            lambda: SlidingWindowRateLimiter(self.max_requests, self.window_seconds)
        )

    def _get_client_identifier(self, context: MiddlewareContext) -> str:
        """Resolve the rate-limiting key for this request."""
        extractor = self.get_client_id
        if extractor:
            return extractor(context)
        return "global"

    async def on_request(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Reject the request when the client's window is already full."""
        client_id = self._get_client_identifier(context)
        window = self.limiters[client_id]
        if not await window.is_allowed():
            raise RateLimitError(
                f"Rate limit exceeded: {self.max_requests} requests per "
                f"{self.window_seconds // 60} minutes for client: {client_id}"
            )
        return await call_next(context)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/rate_limiting.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/timing.py | """Timing middleware for measuring and logging request performance."""
import logging
import time
from typing import Any
from .middleware import CallNext, Middleware, MiddlewareContext
class TimingMiddleware(Middleware):
    """Middleware that logs how long each request takes to execute.

    Only request messages are timed (notifications are not), giving a
    simple view of the server's performance characteristics.

    Example:
        ```python
        from fastmcp.server.middleware.timing import TimingMiddleware

        mcp = FastMCP("MyServer")
        mcp.add_middleware(TimingMiddleware())
        # Now all requests will be timed and logged
        ```
    """

    def __init__(
        self, logger: logging.Logger | None = None, log_level: int = logging.INFO
    ):
        """Initialize timing middleware.

        Args:
            logger: Destination logger; defaults to one named 'fastmcp.timing'.
            log_level: Level used for the timing records (default: INFO).
        """
        self.logger = logger or logging.getLogger("fastmcp.timing")
        self.log_level = log_level

    async def on_request(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Time request execution and log the results."""
        method = context.method or "unknown"
        started = time.perf_counter()

        try:
            result = await call_next(context)
            elapsed_ms = (time.perf_counter() - started) * 1000
            self.logger.log(
                self.log_level, f"Request {method} completed in {elapsed_ms:.2f}ms"
            )
            return result
        except Exception as exc:
            elapsed_ms = (time.perf_counter() - started) * 1000
            self.logger.log(
                self.log_level,
                f"Request {method} failed after {elapsed_ms:.2f}ms: {exc}",
            )
            raise
class DetailedTimingMiddleware(Middleware):
    """Timing middleware with a per-operation breakdown.

    Logs execution time separately for tool calls, resource reads, prompt
    retrieval, and the list operations, making it easy to spot which
    specific operation is a bottleneck.

    Example:
        ```python
        from fastmcp.server.middleware.timing import DetailedTimingMiddleware
        import logging

        # Configure logging to see the output
        logging.basicConfig(level=logging.INFO)

        mcp = FastMCP("MyServer")
        mcp.add_middleware(DetailedTimingMiddleware())
        ```
    """

    def __init__(
        self, logger: logging.Logger | None = None, log_level: int = logging.INFO
    ):
        """Initialize detailed timing middleware.

        Args:
            logger: Destination logger; defaults to one named
                'fastmcp.timing.detailed'.
            log_level: Level used for the timing records (default: INFO).
        """
        self.logger = logger or logging.getLogger("fastmcp.timing.detailed")
        self.log_level = log_level

    async def _time_operation(
        self, context: MiddlewareContext, call_next: CallNext, operation_name: str
    ) -> Any:
        """Run the rest of the chain and log how long *operation_name* took."""
        started = time.perf_counter()
        try:
            result = await call_next(context)
            elapsed_ms = (time.perf_counter() - started) * 1000
            self.logger.log(
                self.log_level, f"{operation_name} completed in {elapsed_ms:.2f}ms"
            )
            return result
        except Exception as exc:
            elapsed_ms = (time.perf_counter() - started) * 1000
            self.logger.log(
                self.log_level,
                f"{operation_name} failed after {elapsed_ms:.2f}ms: {exc}",
            )
            raise

    async def on_call_tool(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time tool execution."""
        name = getattr(context.message, "name", "unknown")
        return await self._time_operation(context, call_next, f"Tool '{name}'")

    async def on_read_resource(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time resource reading."""
        uri = getattr(context.message, "uri", "unknown")
        return await self._time_operation(context, call_next, f"Resource '{uri}'")

    async def on_get_prompt(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time prompt retrieval."""
        name = getattr(context.message, "name", "unknown")
        return await self._time_operation(context, call_next, f"Prompt '{name}'")

    async def on_list_tools(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time tool listing."""
        return await self._time_operation(context, call_next, "List tools")

    async def on_list_resources(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time resource listing."""
        return await self._time_operation(context, call_next, "List resources")

    async def on_list_resource_templates(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time resource template listing."""
        return await self._time_operation(context, call_next, "List resource templates")

    async def on_list_prompts(
        self, context: MiddlewareContext, call_next: CallNext
    ) -> Any:
        """Time prompt listing."""
        return await self._time_operation(context, call_next, "List prompts")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/timing.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/client/test_notifications.py | from dataclasses import dataclass, field
from datetime import datetime
import mcp.types
import pytest
from fastmcp import Client, FastMCP
from fastmcp.client.messages import MessageHandler
from fastmcp.server.context import Context
@dataclass
class NotificationRecording:
    """Record of a notification that was received."""

    # MCP notification method name, e.g. "notifications/tools/list_changed".
    method: str
    # The notification object exactly as delivered to the message handler.
    notification: mcp.types.ServerNotification
    # Client-side receive time, captured when the record is created.
    timestamp: datetime = field(default_factory=datetime.now)
class RecordingMessageHandler(MessageHandler):
    """A message handler that records every notification it receives."""

    def __init__(self, name: str | None = None):
        super().__init__()
        self.notifications: list[NotificationRecording] = []
        self.name = name

    async def on_notification(self, message: mcp.types.ServerNotification) -> None:
        """Record all notifications with timestamp."""
        record = NotificationRecording(
            method=message.root.method, notification=message
        )
        self.notifications.append(record)

    def get_notifications(
        self, method: str | None = None
    ) -> list[NotificationRecording]:
        """Get all recorded notifications, optionally filtered by method."""
        if method is None:
            return self.notifications
        return [
            recording
            for recording in self.notifications
            if recording.method == method
        ]

    def assert_notification_sent(self, method: str, times: int = 1) -> bool:
        """Assert that a notification was sent a specific number of times."""
        actual_times = len(self.get_notifications(method))
        assert actual_times == times, (
            f"Expected {times} notifications for {method}, "
            f"but received {actual_times} notifications"
        )
        return True

    def assert_notification_not_sent(self, method: str) -> bool:
        """Assert that a notification was not sent."""
        notifications = self.get_notifications(method)
        assert len(notifications) == 0, (
            f"Expected no notifications for {method}, but received {len(notifications)}"
        )
        return True

    def reset(self):
        """Clear all recorded notifications."""
        self.notifications.clear()
@pytest.fixture
def recording_message_handler():
    """Fixture that provides a recording message handler instance."""
    yield RecordingMessageHandler(name="recording_message_handler")
class TestNotificationAPI:
    """Test the notification API."""

    async def test_send_notification_async(
        self,
        recording_message_handler: RecordingMessageHandler,
    ):
        """Test that send_notification sends immediately in async context."""
        server = FastMCP(name="NotificationAPITestServer")

        @server.tool
        async def trigger_notification(ctx: Context) -> str:
            """Send a notification using the async API."""
            await ctx.send_notification(mcp.types.ToolListChangedNotification())
            return "Notification sent"

        async with Client(server, message_handler=recording_message_handler) as client:
            # Clear anything recorded during client setup so the assertion
            # below only counts the tool-triggered notification.
            recording_message_handler.reset()
            await client.call_tool("trigger_notification", {})

            recording_message_handler.assert_notification_sent(
                "notifications/tools/list_changed", times=1
            )

    async def test_send_multiple_notifications(
        self,
        recording_message_handler: RecordingMessageHandler,
    ):
        """Test sending multiple different notification types."""
        server = FastMCP(name="NotificationAPITestServer")

        @server.tool
        async def trigger_all_notifications(ctx: Context) -> str:
            """Send all notification types."""
            await ctx.send_notification(mcp.types.ToolListChangedNotification())
            await ctx.send_notification(mcp.types.ResourceListChangedNotification())
            await ctx.send_notification(mcp.types.PromptListChangedNotification())
            return "All notifications sent"

        async with Client(server, message_handler=recording_message_handler) as client:
            # Reset after connection setup; each notification type should
            # then be observed exactly once.
            recording_message_handler.reset()
            await client.call_tool("trigger_all_notifications", {})

            recording_message_handler.assert_notification_sent(
                "notifications/tools/list_changed", times=1
            )
            recording_message_handler.assert_notification_sent(
                "notifications/resources/list_changed", times=1
            )
            recording_message_handler.assert_notification_sent(
                "notifications/prompts/list_changed", times=1
            )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_notifications.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/http/test_bearer_auth_backend.py | """Tests for BearerAuthBackend integration with TokenVerifier."""
import pytest
from mcp.server.auth.middleware.bearer_auth import BearerAuthBackend
from starlette.requests import HTTPConnection
from fastmcp.server.auth import AccessToken, TokenVerifier
from fastmcp.server.auth.providers.jwt import JWTVerifier, RSAKeyPair
class TestBearerAuthBackendTokenVerifierIntegration:
    """Test BearerAuthBackend works with TokenVerifier protocol."""

    @pytest.fixture
    def rsa_key_pair(self) -> RSAKeyPair:
        """Generate RSA key pair for testing."""
        return RSAKeyPair.generate()

    @pytest.fixture
    def jwt_verifier(self, rsa_key_pair: RSAKeyPair) -> JWTVerifier:
        """Create JWTVerifier for testing."""
        return JWTVerifier(
            public_key=rsa_key_pair.public_key,
            issuer="https://test.example.com",
            audience="https://api.example.com",
        )

    @pytest.fixture
    def valid_token(self, rsa_key_pair: RSAKeyPair) -> str:
        """Create a valid test token."""
        # Issuer and audience must match the jwt_verifier fixture above for
        # verification to succeed.
        return rsa_key_pair.create_token(
            subject="test-user",
            issuer="https://test.example.com",
            audience="https://api.example.com",
            scopes=["read", "write"],
        )

    def test_bearer_auth_backend_constructor_accepts_token_verifier(
        self, jwt_verifier: JWTVerifier
    ):
        """Test that BearerAuthBackend constructor accepts TokenVerifier."""
        # This should not raise an error
        backend = BearerAuthBackend(jwt_verifier)
        assert isinstance(backend.token_verifier, TokenVerifier)
        assert backend.token_verifier is jwt_verifier

    async def test_bearer_auth_backend_authenticate_with_valid_token(
        self, jwt_verifier: JWTVerifier, valid_token: str
    ):
        """Test BearerAuthBackend authentication with valid token."""
        backend = BearerAuthBackend(jwt_verifier)

        # Create mock HTTPConnection with Authorization header
        scope = {
            "type": "http",
            "headers": [(b"authorization", f"Bearer {valid_token}".encode())],
        }
        conn = HTTPConnection(scope)

        result = await backend.authenticate(conn)

        assert result is not None
        credentials, user = result
        assert credentials.scopes == ["read", "write"]
        assert user.username == "test-user"
        assert hasattr(user, "access_token")
        assert user.access_token.token == valid_token

    async def test_bearer_auth_backend_authenticate_with_invalid_token(
        self, jwt_verifier: JWTVerifier
    ):
        """Test BearerAuthBackend authentication with invalid token."""
        backend = BearerAuthBackend(jwt_verifier)

        # Create mock HTTPConnection with invalid Authorization header
        scope = {
            "type": "http",
            "headers": [(b"authorization", b"Bearer invalid-token")],
        }
        conn = HTTPConnection(scope)

        result = await backend.authenticate(conn)

        # None signals authentication failure to the middleware.
        assert result is None

    async def test_bearer_auth_backend_authenticate_with_no_header(
        self, jwt_verifier: JWTVerifier
    ):
        """Test BearerAuthBackend authentication with no Authorization header."""
        backend = BearerAuthBackend(jwt_verifier)

        # Create mock HTTPConnection without Authorization header
        scope = {
            "type": "http",
            "headers": [],
        }
        conn = HTTPConnection(scope)

        result = await backend.authenticate(conn)

        assert result is None

    async def test_bearer_auth_backend_authenticate_with_non_bearer_token(
        self, jwt_verifier: JWTVerifier
    ):
        """Test BearerAuthBackend authentication with non-Bearer token."""
        backend = BearerAuthBackend(jwt_verifier)

        # Create mock HTTPConnection with Basic auth header
        scope = {
            "type": "http",
            "headers": [(b"authorization", b"Basic dXNlcjpwYXNz")],
        }
        conn = HTTPConnection(scope)

        result = await backend.authenticate(conn)

        # Only the Bearer scheme is accepted; Basic credentials are ignored.
        assert result is None
class MockTokenVerifier:
    """Mock TokenVerifier that records calls and returns a canned result."""

    def __init__(self, return_value: AccessToken | None = None):
        self.return_value = return_value
        self.verify_token_calls: list[str] = []

    async def verify_token(self, token: str) -> AccessToken | None:
        """Record the token that was presented and return the configured result."""
        self.verify_token_calls.append(token)
        return self.return_value
class TestBearerAuthBackendWithMockVerifier:
    """Test BearerAuthBackend with mock TokenVerifier."""

    async def test_backend_calls_verify_token_method(self):
        """Test that BearerAuthBackend calls verify_token on the verifier."""
        access_token = AccessToken(
            token="test-token",
            client_id="test-client",
            scopes=["read"],
            expires_at=None,
        )
        verifier = MockTokenVerifier(return_value=access_token)
        backend = BearerAuthBackend(verifier)

        asgi_scope = {
            "type": "http",
            "headers": [(b"authorization", b"Bearer test-token")],
        }
        conn = HTTPConnection(asgi_scope)

        result = await backend.authenticate(conn)

        # The raw token must be forwarded to verify_token
        assert verifier.verify_token_calls == ["test-token"]

        # A successful verification yields (credentials, user)
        assert result is not None
        credentials, user = result
        assert credentials.scopes == ["read"]
        assert user.username == "test-client"

    async def test_backend_handles_verify_token_none_result(self):
        """Test that BearerAuthBackend handles None result from verify_token."""
        verifier = MockTokenVerifier(return_value=None)
        backend = BearerAuthBackend(verifier)

        asgi_scope = {
            "type": "http",
            "headers": [(b"authorization", b"Bearer invalid-token")],
        }
        conn = HTTPConnection(asgi_scope)

        result = await backend.authenticate(conn)

        # verify_token must still be consulted for the bad token
        assert verifier.verify_token_calls == ["invalid-token"]

        # Authentication failure is signalled by returning None
        assert result is None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/http/test_bearer_auth_backend.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/middleware/test_error_handling.py | """Tests for error handling middleware."""
import logging
from unittest.mock import AsyncMock, MagicMock
import pytest
from mcp import McpError
from fastmcp.exceptions import NotFoundError, ToolError
from fastmcp.server.middleware.error_handling import (
ErrorHandlingMiddleware,
RetryMiddleware,
)
from fastmcp.server.middleware.middleware import MiddlewareContext
@pytest.fixture
def mock_context():
    """Create a mock middleware context with a fixed method name."""
    ctx = MagicMock(spec=MiddlewareContext)
    ctx.method = "test_method"
    return ctx
@pytest.fixture
def mock_call_next():
    """Create a mock call_next function that resolves to 'test_result'."""
    call_next = AsyncMock(return_value="test_result")
    return call_next
class TestErrorHandlingMiddleware:
    """Test error handling middleware functionality.

    Unit tests that drive ErrorHandlingMiddleware directly through its private
    helpers (``_log_error``/``_transform_error``) and the ``on_message`` hook,
    using the mock_context/mock_call_next fixtures (presumably defined earlier
    in this file).
    """
    def test_init_default(self):
        """Test default initialization."""
        middleware = ErrorHandlingMiddleware()
        assert middleware.logger.name == "fastmcp.errors"
        assert middleware.include_traceback is False
        assert middleware.error_callback is None
        assert middleware.transform_errors is True
        assert middleware.error_counts == {}
    def test_init_custom(self):
        """Test custom initialization: every constructor argument is stored."""
        logger = logging.getLogger("custom")
        callback = MagicMock()
        middleware = ErrorHandlingMiddleware(
            logger=logger,
            include_traceback=True,
            error_callback=callback,
            transform_errors=False,
        )
        assert middleware.logger is logger
        assert middleware.include_traceback is True
        assert middleware.error_callback is callback
        assert middleware.transform_errors is False
    def test_log_error_basic(self, mock_context, caplog):
        """Test basic error logging."""
        middleware = ErrorHandlingMiddleware()
        error = ValueError("test error")
        with caplog.at_level(logging.ERROR):
            middleware._log_error(error, mock_context)
        assert "Error in test_method: ValueError: test error" in caplog.text
        # Error counts are keyed as "<ExceptionType>:<method>".
        assert "ValueError:test_method" in middleware.error_counts
        assert middleware.error_counts["ValueError:test_method"] == 1
    def test_log_error_with_traceback(self, mock_context, caplog):
        """Test error logging with traceback."""
        middleware = ErrorHandlingMiddleware(include_traceback=True)
        error = ValueError("test error")
        with caplog.at_level(logging.ERROR):
            middleware._log_error(error, mock_context)
        assert "Error in test_method: ValueError: test error" in caplog.text
        # The traceback is added to the log message
        # NOTE(review): this repeats the assertion above and never checks for
        # traceback text, so include_traceback is not actually verified here.
        assert "Error in test_method: ValueError: test error" in caplog.text
    def test_log_error_with_callback(self, mock_context):
        """Test error logging with callback."""
        callback = MagicMock()
        middleware = ErrorHandlingMiddleware(error_callback=callback)
        error = ValueError("test error")
        middleware._log_error(error, mock_context)
        callback.assert_called_once_with(error, mock_context)
    def test_log_error_callback_exception(self, mock_context, caplog):
        """Test error logging when callback raises exception."""
        callback = MagicMock(side_effect=RuntimeError("callback error"))
        middleware = ErrorHandlingMiddleware(error_callback=callback)
        error = ValueError("test error")
        with caplog.at_level(logging.ERROR):
            middleware._log_error(error, mock_context)
        # A failing callback must be logged, not propagated.
        assert "Error in error callback: callback error" in caplog.text
    def test_transform_error_mcp_error(self, mock_context):
        """Test that MCP errors are not transformed."""
        middleware = ErrorHandlingMiddleware()
        from mcp.types import ErrorData
        error = McpError(ErrorData(code=-32001, message="test error"))
        result = middleware._transform_error(error, mock_context)
        # Already an McpError: passed through unchanged (same instance).
        assert result is error
    def test_transform_error_disabled(self, mock_context):
        """Test error transformation when disabled: errors pass through as-is."""
        middleware = ErrorHandlingMiddleware(transform_errors=False)
        error = ValueError("test error")
        result = middleware._transform_error(error, mock_context)
        assert result is error
    def test_transform_error_value_error(self, mock_context):
        """Test transforming ValueError."""
        middleware = ErrorHandlingMiddleware()
        error = ValueError("test error")
        result = middleware._transform_error(error, mock_context)
        assert isinstance(result, McpError)
        # -32602 is the JSON-RPC "invalid params" code.
        assert result.error.code == -32602
        assert "Invalid params: test error" in result.error.message
    def test_transform_error_not_found_for_resource_method(self):
        """Test that not-found errors use -32002 for resource methods."""
        middleware = ErrorHandlingMiddleware()
        resource_context = MagicMock(spec=MiddlewareContext)
        resource_context.method = "resources/read"
        for error in [
            FileNotFoundError("test error"),
            NotFoundError("test error"),
        ]:
            result = middleware._transform_error(error, resource_context)
            assert isinstance(result, McpError)
            # -32002 is MCP's "resource not found" error code.
            assert result.error.code == -32002
            assert "Resource not found: test error" in result.error.message
    def test_transform_error_not_found_for_non_resource_method(self, mock_context):
        """Test that not-found errors use -32001 for non-resource methods."""
        middleware = ErrorHandlingMiddleware()
        for error in [
            FileNotFoundError("test error"),
            NotFoundError("test error"),
        ]:
            result = middleware._transform_error(error, mock_context)
            assert isinstance(result, McpError)
            assert result.error.code == -32001
            assert "Not found: test error" in result.error.message
    def test_transform_error_permission_error(self, mock_context):
        """Test transforming PermissionError."""
        middleware = ErrorHandlingMiddleware()
        error = PermissionError("test error")
        result = middleware._transform_error(error, mock_context)
        assert isinstance(result, McpError)
        assert result.error.code == -32000
        assert "Permission denied: test error" in result.error.message
    def test_transform_error_timeout_error(self, mock_context):
        """Test transforming TimeoutError."""
        middleware = ErrorHandlingMiddleware()
        error = TimeoutError("test error")
        result = middleware._transform_error(error, mock_context)
        assert isinstance(result, McpError)
        assert result.error.code == -32000
        assert "Request timeout: test error" in result.error.message
    def test_transform_error_generic(self, mock_context):
        """Test transforming generic error."""
        middleware = ErrorHandlingMiddleware()
        error = RuntimeError("test error")
        result = middleware._transform_error(error, mock_context)
        assert isinstance(result, McpError)
        # -32603 is the JSON-RPC "internal error" code.
        assert result.error.code == -32603
        assert "Internal error: test error" in result.error.message
    async def test_on_message_success(self, mock_context, mock_call_next):
        """Test successful message handling: result is forwarded untouched."""
        middleware = ErrorHandlingMiddleware()
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert mock_call_next.called
    async def test_on_message_error_transform(self, mock_context, caplog):
        """Test error handling with transformation."""
        middleware = ErrorHandlingMiddleware()
        mock_call_next = AsyncMock(side_effect=ValueError("test error"))
        with caplog.at_level(logging.ERROR):
            with pytest.raises(McpError) as exc_info:
                await middleware.on_message(mock_context, mock_call_next)
        assert isinstance(exc_info.value, McpError)
        assert exc_info.value.error.code == -32602
        assert "Invalid params: test error" in exc_info.value.error.message
        assert "Error in test_method: ValueError: test error" in caplog.text
    async def test_on_message_error_transform_tool_error(self, mock_context, caplog):
        """Test error handling with transformation and cause type."""
        middleware = ErrorHandlingMiddleware()
        tool_error = ToolError("test error")
        # The middleware classifies a ToolError by its __cause__ type
        # (ValueError -> invalid params, per the assertions below).
        tool_error.__cause__ = ValueError()
        mock_call_next = AsyncMock(side_effect=tool_error)
        with caplog.at_level(logging.ERROR):
            with pytest.raises(McpError) as exc_info:
                await middleware.on_message(mock_context, mock_call_next)
        assert isinstance(exc_info.value, McpError)
        assert exc_info.value.error.code == -32602
        assert "Invalid params: test error" in exc_info.value.error.message
        assert "Error in test_method: ToolError: test error" in caplog.text
    def test_get_error_stats(self, mock_context):
        """Test getting error statistics."""
        middleware = ErrorHandlingMiddleware()
        error1 = ValueError("error1")
        error2 = ValueError("error2")
        error3 = RuntimeError("error3")
        middleware._log_error(error1, mock_context)
        middleware._log_error(error2, mock_context)
        middleware._log_error(error3, mock_context)
        stats = middleware.get_error_stats()
        # Counts aggregate by exception type + method, not by message text.
        assert stats["ValueError:test_method"] == 2
        assert stats["RuntimeError:test_method"] == 1
class TestRetryMiddleware:
    """Unit tests for RetryMiddleware's configuration and retry loop."""

    def test_init_default(self):
        """A freshly constructed middleware exposes the documented defaults."""
        mw = RetryMiddleware()
        assert mw.max_retries == 3
        assert mw.base_delay == 1.0
        assert mw.max_delay == 60.0
        assert mw.backoff_multiplier == 2.0
        assert mw.retry_exceptions == (ConnectionError, TimeoutError)
        assert mw.logger.name == "fastmcp.retry"

    def test_init_custom(self):
        """Every constructor argument is stored verbatim."""
        custom_logger = logging.getLogger("custom")
        mw = RetryMiddleware(
            max_retries=5,
            base_delay=2.0,
            max_delay=120.0,
            backoff_multiplier=3.0,
            retry_exceptions=(ValueError, RuntimeError),
            logger=custom_logger,
        )
        assert mw.max_retries == 5
        assert mw.base_delay == 2.0
        assert mw.max_delay == 120.0
        assert mw.backoff_multiplier == 3.0
        assert mw.retry_exceptions == (ValueError, RuntimeError)
        assert mw.logger is custom_logger

    def test_should_retry_true(self):
        """Errors listed in retry_exceptions are retryable."""
        mw = RetryMiddleware()
        for err in (ConnectionError(), TimeoutError()):
            assert mw._should_retry(err) is True

    def test_should_retry_false(self):
        """Errors outside retry_exceptions are not retried."""
        mw = RetryMiddleware()
        for err in (ValueError(), RuntimeError()):
            assert mw._should_retry(err) is False

    def test_calculate_delay(self):
        """Delays grow geometrically and are capped at max_delay."""
        mw = RetryMiddleware(base_delay=1.0, backoff_multiplier=2.0, max_delay=10.0)
        expected = {0: 1.0, 1: 2.0, 2: 4.0, 3: 8.0, 4: 10.0}  # 4 hits the cap
        for attempt, delay in expected.items():
            assert mw._calculate_delay(attempt) == delay

    async def test_on_request_success_first_try(self, mock_context, mock_call_next):
        """A request that succeeds immediately is invoked exactly once."""
        mw = RetryMiddleware()
        outcome = await mw.on_request(mock_context, mock_call_next)
        assert outcome == "test_result"
        assert mock_call_next.call_count == 1

    async def test_on_request_success_after_retries(self, mock_context, caplog):
        """Transient retryable failures are retried until the call succeeds."""
        mw = RetryMiddleware(base_delay=0.01)  # short delays keep the test fast
        # First two attempts fail, the third succeeds.
        call_next = AsyncMock(
            side_effect=[
                ConnectionError("connection failed"),
                ConnectionError("connection failed"),
                "test_result",
            ]
        )
        with caplog.at_level(logging.WARNING):
            outcome = await mw.on_request(mock_context, call_next)
        assert outcome == "test_result"
        assert call_next.call_count == 3
        assert "Retrying in" in caplog.text

    async def test_on_request_max_retries_exceeded(self, mock_context, caplog):
        """The original error propagates once the retry budget is exhausted."""
        mw = RetryMiddleware(max_retries=2, base_delay=0.01)
        call_next = AsyncMock(side_effect=ConnectionError("connection failed"))
        with caplog.at_level(logging.WARNING):
            with pytest.raises(ConnectionError):
                await mw.on_request(mock_context, call_next)
        assert call_next.call_count == 3  # one initial attempt + two retries
        assert "Retrying in" in caplog.text

    async def test_on_request_non_retryable_error(self, mock_context):
        """A non-retryable exception fails immediately with no retries."""
        mw = RetryMiddleware()
        call_next = AsyncMock(side_effect=ValueError("non-retryable"))
        with pytest.raises(ValueError):
            await mw.on_request(mock_context, call_next)
        assert call_next.call_count == 1
@pytest.fixture
def error_handling_server():
    """Build a FastMCP server whose tools exercise error-handling middleware."""
    from fastmcp import FastMCP

    server = FastMCP("ErrorHandlingTestServer")

    @server.tool
    def reliable_operation(data: str) -> str:
        """A reliable operation that always succeeds."""
        return f"Success: {data}"

    @server.tool
    def failing_operation(error_type: str = "value") -> str:
        """An operation that fails with different error types."""
        # Map each requested failure mode to the exception it should raise.
        failures = {
            "value": ValueError("Value error occurred"),
            "file": FileNotFoundError("File not found"),
            "permission": PermissionError("Permission denied"),
            "timeout": TimeoutError("Operation timed out"),
            "generic": RuntimeError("Generic runtime error"),
        }
        if error_type in failures:
            raise failures[error_type]
        return "Operation completed"

    @server.tool
    def intermittent_operation(fail_rate: float = 0.5) -> str:
        """An operation that fails intermittently."""
        import random

        if random.random() < fail_rate:
            raise ConnectionError("Random connection failure")
        return "Operation succeeded"

    @server.tool
    def retryable_operation(attempt_count: int = 0) -> str:
        """An operation that succeeds after a few attempts."""
        # Simple stand-in for retry behavior; real retry state would live
        # outside the tool (callers pass attempt_count explicitly).
        if attempt_count < 2:
            raise ConnectionError("Temporary connection error")
        return "Operation succeeded after retries"

    return server
class TestErrorHandlingMiddlewareIntegration:
    """Integration tests for error handling middleware with real FastMCP server.

    Tool-raised exceptions reach the middleware wrapped in ToolError (per the
    assertions below), so counts and callbacks observe "ToolError:tools/call".
    """
    async def test_error_handling_middleware_logs_real_errors(
        self, error_handling_server, caplog
    ):
        """Test that error handling middleware logs real errors from tools."""
        from fastmcp.client import Client
        error_handling_server.add_middleware(ErrorHandlingMiddleware())
        with caplog.at_level(logging.ERROR):
            async with Client(error_handling_server) as client:
                # Test different types of errors
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "value"})
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "file"})
        log_text = caplog.text
        # Should have error logs for both failures
        assert "Error in tools/call: ToolError:" in log_text
        # Should have captured both error instances
        error_count = log_text.count("Error in tools/call:")
        assert error_count == 2
    async def test_error_handling_middleware_tracks_error_statistics(
        self, error_handling_server
    ):
        """Test that error handling middleware accurately tracks error statistics."""
        from fastmcp.client import Client
        error_middleware = ErrorHandlingMiddleware()
        error_handling_server.add_middleware(error_middleware)
        async with Client(error_handling_server) as client:
            # Generate different types of errors
            for _ in range(3):
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "value"})
            for _ in range(2):
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "file"})
            # Try some intermittent operations (some may succeed)
            for _ in range(5):
                try:
                    await client.call_tool("intermittent_operation", {"fail_rate": 0.8})
                except Exception:
                    pass  # Expected failures
        # Check error statistics
        stats = error_middleware.get_error_stats()
        # Should have tracked the ToolError wrapper
        assert "ToolError:tools/call" in stats
        # ">=" because the intermittent calls may add more failures on top of
        # the 5 deterministic ones.
        assert stats["ToolError:tools/call"] >= 5  # At least the 5 deliberate failures
    async def test_error_handling_middleware_with_success_and_failure(
        self, error_handling_server, caplog
    ):
        """Test error handling middleware with mix of successful and failed operations."""
        from fastmcp.client import Client
        error_handling_server.add_middleware(ErrorHandlingMiddleware())
        with caplog.at_level(logging.ERROR):
            async with Client(error_handling_server) as client:
                # Successful operation (should not generate error logs)
                await client.call_tool("reliable_operation", {"data": "test"})
                # Failed operation (should generate error log)
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "value"})
                # Another successful operation
                await client.call_tool("reliable_operation", {"data": "test2"})
        log_text = caplog.text
        # Should only have one error log (for the failed operation)
        error_count = log_text.count("Error in tools/call:")
        assert error_count == 1
    async def test_error_handling_middleware_custom_callback(
        self, error_handling_server
    ):
        """Test error handling middleware with custom error callback."""
        from fastmcp.client import Client
        captured_errors = []
        # Record (type, message, method) for every error the middleware sees.
        def error_callback(error, context):
            captured_errors.append(
                {
                    "error_type": type(error).__name__,
                    "message": str(error),
                    "method": context.method,
                }
            )
        error_handling_server.add_middleware(
            ErrorHandlingMiddleware(error_callback=error_callback)
        )
        async with Client(error_handling_server) as client:
            # Generate some errors
            with pytest.raises(Exception):
                await client.call_tool("failing_operation", {"error_type": "value"})
            with pytest.raises(Exception):
                await client.call_tool("failing_operation", {"error_type": "timeout"})
        # Check that callback was called
        assert len(captured_errors) == 2
        assert captured_errors[0]["error_type"] == "ToolError"
        assert captured_errors[1]["error_type"] == "ToolError"
        assert all(error["method"] == "tools/call" for error in captured_errors)
    async def test_error_handling_middleware_transform_errors(
        self, error_handling_server
    ):
        """Test error transformation functionality."""
        from fastmcp.client import Client
        error_handling_server.add_middleware(
            ErrorHandlingMiddleware(transform_errors=True)
        )
        async with Client(error_handling_server) as client:
            # All errors should still be raised, but potentially transformed
            with pytest.raises(Exception) as exc_info:
                await client.call_tool("failing_operation", {"error_type": "value"})
            # Error should still exist (may be wrapped by FastMCP)
            assert exc_info.value is not None
class TestRetryMiddlewareIntegration:
    """Integration tests for retry middleware with real FastMCP server."""
    async def test_retry_middleware_with_transient_failures(
        self, error_handling_server, caplog
    ):
        """Test retry middleware with operations that have transient failures.

        NOTE(review): this test contains no assertions (success_count is
        unused) — it is effectively a smoke test that the retry path does not
        raise unexpectedly. Consider asserting on success_count or caplog.
        """
        from fastmcp.client import Client
        # Configure retry middleware to retry connection errors
        error_handling_server.add_middleware(
            RetryMiddleware(
                max_retries=3,
                base_delay=0.01,  # Very short delay for testing
                retry_exceptions=(ConnectionError,),
            )
        )
        with caplog.at_level(logging.WARNING):
            async with Client(error_handling_server) as client:
                # This operation fails intermittently - try several times
                success_count = 0
                for _ in range(5):
                    try:
                        await client.call_tool(
                            "intermittent_operation", {"fail_rate": 0.7}
                        )
                        success_count += 1
                    except Exception:
                        pass  # Some failures expected even with retries
        # Should have some retry log messages
        # Note: Retry logs might not appear if the underlying errors are wrapped by FastMCP
        # The key is that some operations should succeed due to retries
    async def test_retry_middleware_with_permanent_failures(
        self, error_handling_server
    ):
        """Test that retry middleware doesn't retry non-retryable errors."""
        from fastmcp.client import Client
        # Configure retry middleware for connection errors only
        error_handling_server.add_middleware(
            RetryMiddleware(
                max_retries=3, base_delay=0.01, retry_exceptions=(ConnectionError,)
            )
        )
        async with Client(error_handling_server) as client:
            # Value errors should not be retried
            with pytest.raises(Exception):
                await client.call_tool("failing_operation", {"error_type": "value"})
            # Should fail immediately without retries
    async def test_combined_error_handling_and_retry_middleware(
        self, error_handling_server, caplog
    ):
        """Test error handling and retry middleware working together."""
        from fastmcp.client import Client
        # Add both middleware
        error_handling_server.add_middleware(ErrorHandlingMiddleware())
        error_handling_server.add_middleware(
            RetryMiddleware(
                max_retries=2, base_delay=0.01, retry_exceptions=(ConnectionError,)
            )
        )
        with caplog.at_level(logging.ERROR):
            async with Client(error_handling_server) as client:
                # Try intermittent operation
                try:
                    await client.call_tool("intermittent_operation", {"fail_rate": 0.9})
                except Exception:
                    pass  # May still fail even with retries
                # Try permanent failure
                with pytest.raises(Exception):
                    await client.call_tool("failing_operation", {"error_type": "value"})
        log_text = caplog.text
        # Should have error logs from error handling middleware
        assert "Error in tools/call:" in log_text
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_error_handling.py",
"license": "Apache License 2.0",
"lines": 495,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/middleware/test_logging.py | """Tests for logging middleware."""
import datetime
import logging
from collections.abc import Generator
from typing import Any, Literal, TypeVar
from unittest.mock import AsyncMock, MagicMock, patch
import mcp
import mcp.types
import pytest
from inline_snapshot import snapshot
from pydantic import AnyUrl
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.resources.template import ResourceTemplate
from fastmcp.server.middleware.logging import (
LoggingMiddleware,
StructuredLoggingMiddleware,
)
from fastmcp.server.middleware.middleware import CallNext, MiddlewareContext
# Deterministic timestamp assigned to mock contexts so snapshot logs are stable.
FIXED_DATE = datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)
# Message payload type parameter used by the context helpers below.
T = TypeVar("T")
def get_log_lines(
    caplog: pytest.LogCaptureFixture, module: str | None = None
) -> list[str]:
    """Return captured log messages whose logger name contains *module*.

    When *module* is falsy, records from any logger whose name contains
    "logging" are returned instead.
    """
    needle = module or "logging"
    lines: list[str] = []
    for record in caplog.records:
        if needle in record.name:
            lines.append(record.message)
    return lines
def new_mock_context(
    message: T,
    method: str | None = None,
    source: Literal["server", "client"] | None = None,
    type: Literal["request", "notification"] | None = None,
) -> MiddlewareContext[T]:
    """Build a MagicMock standing in for a MiddlewareContext.

    Unspecified fields fall back to "test_method"/"client"/"request"; the
    timestamp is pinned to FIXED_DATE so log snapshots stay deterministic.
    """
    ctx = MagicMock(spec=MiddlewareContext[T])
    ctx.method = method or "test_method"
    ctx.source = source or "client"
    ctx.type = type or "request"
    ctx.message = message
    ctx.timestamp = FIXED_DATE
    return ctx
@pytest.fixture(autouse=True)
def mock_duration_ms() -> Generator[None, None, None]:
    """Pin the measured request duration to 0.02 ms for every test.

    Patches ``_get_duration_ms`` in the logging middleware module so that
    snapshot assertions on ``duration_ms`` are deterministic.

    Fixed: the previous version annotated the yield type as ``float`` while
    yielding ``None``, and used manual ``patch.start()``/``stop()`` — the
    context-manager form guarantees the patch is undone at teardown.
    """
    with patch(
        "fastmcp.server.middleware.logging._get_duration_ms", return_value=0.02
    ):
        yield
@pytest.fixture
def mock_context():
    """A mock middleware context wrapping a tools/call request."""
    request = mcp.types.CallToolRequest(
        method="tools/call",
        params=mcp.types.CallToolRequestParams(
            name="test_method",
            arguments={"param": "value"},
        ),
    )
    return new_mock_context(message=request)
@pytest.fixture
def mock_call_next() -> AsyncMock:
    """An async continuation that resolves to the fixed sentinel "test_result"."""
    continuation = AsyncMock(return_value="test_result")
    return continuation
class TestStructuredLoggingMiddleware:
    """Test structured logging middleware functionality (JSON log lines)."""
    def test_init_default(self):
        """Test default initialization."""
        middleware = StructuredLoggingMiddleware()
        assert middleware.logger.name == "fastmcp.middleware.structured_logging"
        assert middleware.log_level == logging.INFO
        assert middleware.include_payloads is False
        assert middleware.include_payload_length is False
        assert middleware.estimate_payload_tokens is False
        assert middleware.structured_logging is True
    def test_init_custom(self):
        """Test custom initialization: every constructor argument is stored."""
        logger = logging.getLogger("custom")
        middleware = StructuredLoggingMiddleware(
            logger=logger,
            log_level=logging.DEBUG,
            include_payloads=True,
            include_payload_length=False,
            estimate_payload_tokens=True,
        )
        assert middleware.logger is logger
        assert middleware.log_level == logging.DEBUG
        assert middleware.include_payloads is True
        assert middleware.include_payload_length is False
        assert middleware.estimate_payload_tokens is True
    class TestHelperMethods:
        # Tests for the private _create_before_message helper in isolation.
        def test_create_before_message(self, mock_context: MiddlewareContext[Any]):
            """Test message formatting without payloads."""
            middleware = StructuredLoggingMiddleware()
            message = middleware._create_before_message(mock_context)
            assert message == snapshot(
                {
                    "event": "request_start",
                    "source": "client",
                    "method": "test_method",
                }
            )
        def test_create_message_with_payloads(
            self, mock_context: MiddlewareContext[Any]
        ):
            """Test message formatting with payloads."""
            middleware = StructuredLoggingMiddleware(include_payloads=True)
            message = middleware._create_before_message(mock_context)
            assert message == snapshot(
                {
                    "event": "request_start",
                    "source": "client",
                    "method": "test_method",
                    "payload": '{"method":"tools/call","params":{"task":null,"_meta":null,"name":"test_method","arguments":{"param":"value"}}}',
                    "payload_type": "CallToolRequest",
                }
            )
        def test_calculate_response_size(self, mock_context: MiddlewareContext[Any]):
            """Test response size calculation."""
            middleware = StructuredLoggingMiddleware(include_payload_length=True)
            message = middleware._create_before_message(mock_context)
            assert message == snapshot(
                {
                    "event": "request_start",
                    "source": "client",
                    "method": "test_method",
                    "payload_length": 110,
                }
            )
        def test_calculate_response_size_with_token_estimation(
            self, mock_context: MiddlewareContext[Any]
        ):
            """Test response size calculation with token estimation."""
            middleware = StructuredLoggingMiddleware(
                include_payload_length=True, estimate_payload_tokens=True
            )
            message = middleware._create_before_message(mock_context)
            assert message == snapshot(
                {
                    "event": "request_start",
                    "source": "client",
                    "method": "test_method",
                    "payload_tokens": 27,
                    "payload_length": 110,
                }
            )
    async def test_on_message_success(
        self,
        mock_context: MiddlewareContext[Any],
        caplog: pytest.LogCaptureFixture,
    ):
        """Test logging successful messages."""
        middleware = StructuredLoggingMiddleware()
        mock_call_next = AsyncMock(return_value="test_result")
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert mock_call_next.called
        # duration_ms is pinned to 0.02 by the autouse mock_duration_ms fixture.
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client"}',
                '{"event": "request_success", "method": "test_method", "source": "client", "duration_ms": 0.02}',
            ]
        )
    async def test_on_message_failure(
        self, mock_context: MiddlewareContext[Any], caplog: pytest.LogCaptureFixture
    ):
        """Test logging failed messages."""
        middleware = StructuredLoggingMiddleware()
        mock_call_next = AsyncMock(side_effect=ValueError("test error"))
        with pytest.raises(ValueError):
            await middleware.on_message(mock_context, mock_call_next)
        # The original exception propagates; the middleware only logs it.
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client"}',
                '{"event": "request_error", "method": "test_method", "source": "client", "duration_ms": 0.02, "error": "test error"}',
            ]
        )
class TestLoggingMiddleware:
    """Test logging middleware functionality (human-readable key=value lines).

    Fixed: the class docstring was a copy-paste of the structured-logging
    class's, and test_on_message_failure duplicated the structured test
    verbatim instead of exercising LoggingMiddleware's output format.
    """
    def test_init_default(self):
        """Test default initialization."""
        middleware = LoggingMiddleware()
        assert middleware.logger.name == "fastmcp.middleware.logging"
        assert middleware.log_level == logging.INFO
        assert middleware.include_payloads is False
        assert middleware.include_payload_length is False
        assert middleware.estimate_payload_tokens is False
    def test_format_message(self, mock_context: MiddlewareContext[Any]):
        """Test message formatting."""
        middleware = LoggingMiddleware()
        message = middleware._create_before_message(mock_context)
        formatted = middleware._format_message(message)
        assert formatted == snapshot(
            "event=request_start method=test_method source=client"
        )
    def test_create_before_message_long_payload(
        self, mock_context: MiddlewareContext[Any]
    ):
        """Test message formatting with long payload truncation."""
        middleware = LoggingMiddleware(include_payloads=True, max_payload_length=10)
        message = middleware._create_before_message(mock_context)
        formatted = middleware._format_message(message)
        # Payload is cut at max_payload_length with a "..." marker.
        assert formatted == snapshot(
            'event=request_start method=test_method source=client payload={"method":... payload_type=CallToolRequest'
        )
    async def test_on_message_failure(
        self, mock_context: MiddlewareContext[Any], caplog: pytest.LogCaptureFixture
    ):
        """Test human-readable logging of failed messages."""
        middleware = LoggingMiddleware()
        mock_call_next = AsyncMock(side_effect=ValueError("test error"))
        with pytest.raises(ValueError):
            await middleware.on_message(mock_context, mock_call_next)
        # duration_ms is pinned to 0.02 by the autouse mock_duration_ms fixture.
        assert get_log_lines(caplog) == snapshot(
            [
                "event=request_start method=test_method source=client",
                "event=request_error method=test_method source=client duration_ms=0.02 error=test error",
            ]
        )
    # NOTE(review): the payload-serialization tests below exercise
    # StructuredLoggingMiddleware (the shared serialization path); consider
    # relocating them to TestStructuredLoggingMiddleware.
    async def test_on_message_with_pydantic_types_in_payload(
        self,
        mock_call_next: CallNext[Any, Any],
        caplog: pytest.LogCaptureFixture,
    ):
        """Ensure Pydantic AnyUrl in payload serializes correctly when include_payloads=True."""
        mock_context = new_mock_context(
            message=mcp.types.ReadResourceRequest(
                method="resources/read",
                params=mcp.types.ReadResourceRequestParams(
                    uri=AnyUrl("test://example/1"),
                ),
            )
        )
        middleware = StructuredLoggingMiddleware(include_payloads=True)
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client", "payload": "{\\"method\\":\\"resources/read\\",\\"params\\":{\\"task\\":null,\\"_meta\\":null,\\"uri\\":\\"test://example/1\\"}}", "payload_type": "ReadResourceRequest"}',
                '{"event": "request_success", "method": "test_method", "source": "client", "duration_ms": 0.02}',
            ]
        )
    async def test_on_message_with_resource_template_in_payload(
        self,
        mock_call_next: CallNext[Any, Any],
        caplog: pytest.LogCaptureFixture,
    ):
        """Ensure ResourceTemplate in payload serializes via pydantic conversion without errors."""
        mock_context = new_mock_context(
            message=ResourceTemplate(
                name="tmpl",
                uri_template="tmpl://{id}",
                parameters={"id": {"type": "string"}},
            )
        )
        middleware = StructuredLoggingMiddleware(include_payloads=True)
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client", "payload": "{\\"name\\":\\"tmpl\\",\\"version\\":null,\\"title\\":null,\\"description\\":null,\\"icons\\":null,\\"tags\\":[],\\"meta\\":null,\\"task_config\\":{\\"mode\\":\\"forbidden\\",\\"poll_interval\\":\\"PT5S\\"},\\"uri_template\\":\\"tmpl://{id}\\",\\"mime_type\\":\\"text/plain\\",\\"parameters\\":{\\"id\\":{\\"type\\":\\"string\\"}},\\"annotations\\":null}", "payload_type": "ResourceTemplate"}',
                '{"event": "request_success", "method": "test_method", "source": "client", "duration_ms": 0.02}',
            ]
        )
    async def test_on_message_with_nonserializable_payload_falls_back_to_str(
        self, mock_call_next: CallNext[Any, Any], caplog: pytest.LogCaptureFixture
    ):
        """Ensure non-JSONable objects fall back to string serialization in payload."""
        class NonSerializable:
            def __str__(self) -> str:
                return "NON_SERIALIZABLE"
        mock_context = new_mock_context(
            message=mcp.types.CallToolRequest(
                method="tools/call",
                params=mcp.types.CallToolRequestParams(
                    name="test_method",
                    arguments={"obj": NonSerializable()},
                ),
            )
        )
        middleware = StructuredLoggingMiddleware(include_payloads=True)
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client", "payload": "{\\"method\\":\\"tools/call\\",\\"params\\":{\\"task\\":null,\\"_meta\\":null,\\"name\\":\\"test_method\\",\\"arguments\\":{\\"obj\\":\\"NON_SERIALIZABLE\\"}}}", "payload_type": "CallToolRequest"}',
                '{"event": "request_success", "method": "test_method", "source": "client", "duration_ms": 0.02}',
            ]
        )
    async def test_on_message_with_custom_serializer_applied(
        self, mock_call_next: CallNext[Any, Any], caplog: pytest.LogCaptureFixture
    ):
        """Ensure a custom serializer is used for non-JSONable payloads."""
        # Provide a serializer that replaces entire payload with a fixed string
        def custom_serializer(_: Any) -> str:
            return "CUSTOM_PAYLOAD"
        mock_context = new_mock_context(
            message=mcp.types.CallToolRequest(
                method="tools/call",
                params=mcp.types.CallToolRequestParams(
                    name="test_method",
                    arguments={"obj": "OBJECT"},
                ),
            )
        )
        middleware = StructuredLoggingMiddleware(
            include_payloads=True, payload_serializer=custom_serializer
        )
        result = await middleware.on_message(mock_context, mock_call_next)
        assert result == "test_result"
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "test_method", "source": "client", "payload": "CUSTOM_PAYLOAD", "payload_type": "CallToolRequest"}',
                '{"event": "request_success", "method": "test_method", "source": "client", "duration_ms": 0.02}',
            ]
        )
@pytest.fixture
def logging_server():
    """Create a FastMCP server specifically for logging middleware tests.

    NOTE(review): this module-level fixture duplicates the one defined inside
    TestLoggingMiddlewareIntegration, whose class-level copy shadows this one
    for that class's tests — this copy appears unused; confirm and remove.
    """
    from fastmcp import FastMCP
    mcp = FastMCP("LoggingTestServer")
    @mcp.tool
    def simple_operation(data: str) -> str:
        """A simple operation for testing logging."""
        return f"Processed: {data}"
    @mcp.tool
    def complex_operation(items: list[str], mode: str = "default") -> dict:
        """A complex operation with structured data."""
        return {"processed_items": len(items), "mode": mode, "result": "success"}
    @mcp.tool
    def operation_with_error(should_fail: bool = False) -> str:
        """An operation that can be made to fail."""
        if should_fail:
            raise ValueError("Operation failed intentionally")
        return "Operation completed successfully"
    @mcp.resource("log://test")
    def test_resource() -> str:
        """A test resource for logging."""
        return "Test resource content"
    @mcp.prompt
    def test_prompt() -> str:
        """A test prompt for logging."""
        return "Test prompt content"
    return mcp
class TestLoggingMiddlewareIntegration:
    """Integration tests for logging middleware with real FastMCP server."""
    # NOTE(review): every snapshot below shows duration_ms=0.02 — presumably the
    # get_log_lines helper normalizes durations for determinism; confirm before
    # editing any expected log line.
    @pytest.fixture
    def logging_server(self):
        """Create a FastMCP server specifically for logging middleware tests."""
        # One tool of each flavor (simple, structured output, failing) plus a
        # resource and a prompt, so every MCP method type can be logged.
        mcp = FastMCP("LoggingTestServer")
        @mcp.tool
        def simple_operation(data: str) -> str:
            """A simple operation for testing logging."""
            return f"Processed: {data}"
        @mcp.tool
        def complex_operation(items: list[str], mode: str = "default") -> dict:
            """A complex operation with structured data."""
            return {"processed_items": len(items), "mode": mode, "result": "success"}
        @mcp.tool
        def operation_with_error(should_fail: bool = False) -> str:
            """An operation that can be made to fail."""
            if should_fail:
                raise ValueError("Operation failed intentionally")
            return "Operation completed successfully"
        @mcp.resource("log://test")
        def test_resource() -> str:
            """A test resource for logging."""
            return "Test resource content"
        @mcp.prompt
        def test_prompt() -> str:
            """A test prompt for logging."""
            return "Test prompt content"
        return mcp
    async def test_logging_middleware_logs_successful_operations(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test that logging middleware captures successful operations."""
        # methods=["tools/call"] filters out handshake traffic (initialize,
        # list_tools) so the snapshot only contains the two tool calls.
        logging_middleware = LoggingMiddleware(methods=["tools/call"])
        logging_server.add_middleware(logging_middleware)
        with caplog.at_level(logging.INFO):
            async with Client(logging_server) as client:
                await client.call_tool(
                    name="simple_operation", arguments={"data": "test_data"}
                )
                await client.call_tool(
                    name="complex_operation",
                    arguments={"items": ["a", "b", "c"], "mode": "batch"},
                )
        # Should have processing and completion logs for both operations
        assert get_log_lines(caplog) == snapshot(
            [
                "event=request_start method=tools/call source=client",
                "event=request_success method=tools/call source=client duration_ms=0.02",
                "event=request_start method=tools/call source=client",
                "event=request_success method=tools/call source=client duration_ms=0.02",
            ]
        )
    async def test_logging_middleware_logs_failures(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test that logging middleware captures failed operations."""
        logging_server.add_middleware(LoggingMiddleware(methods=["tools/call"]))
        async with Client(logging_server) as client:
            # This should fail and be logged
            with pytest.raises(Exception):
                await client.call_tool("operation_with_error", {"should_fail": True})
        log_text = caplog.text
        # Should have processing and failure logs
        # NOTE(review): the expected line pins a source location (logging.py:122)
        # and so breaks whenever the middleware module shifts lines — fragile.
        assert log_text.splitlines()[-1] == snapshot(
            "ERROR fastmcp.middleware.logging:logging.py:122 event=request_error method=tools/call source=client duration_ms=0.02 error=Error calling tool 'operation_with_error': Operation failed intentionally"
        )
    async def test_logging_middleware_with_payloads(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test logging middleware when configured to include payloads."""
        middleware = LoggingMiddleware(
            include_payloads=True, max_payload_length=500, methods=["tools/call"]
        )
        logging_server.add_middleware(middleware)
        async with Client(logging_server) as client:
            await client.call_tool("simple_operation", {"data": "payload_test"})
        assert get_log_lines(caplog) == snapshot(
            [
                'event=request_start method=tools/call source=client payload={"task":null,"_meta":null,"name":"simple_operation","arguments":{"data":"payload_test"}} payload_type=CallToolRequestParams',
                "event=request_success method=tools/call source=client duration_ms=0.02",
            ]
        )
    async def test_structured_logging_middleware_produces_json(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test that structured logging middleware produces parseable JSON logs."""
        logging_middleware = StructuredLoggingMiddleware(
            include_payloads=True, methods=["tools/call"]
        )
        logging_server.add_middleware(logging_middleware)
        async with Client(logging_server) as client:
            await client.call_tool(
                name="simple_operation", arguments={"data": "json_test"}
            )
        # The payload is a JSON string embedded inside a JSON log record, hence
        # the doubled escaping in the expected value.
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "tools/call", "source": "client", "payload": "{\\"task\\":null,\\"_meta\\":null,\\"name\\":\\"simple_operation\\",\\"arguments\\":{\\"data\\":\\"json_test\\"}}", "payload_type": "CallToolRequestParams"}',
                '{"event": "request_success", "method": "tools/call", "source": "client", "duration_ms": 0.02}',
            ]
        )
    async def test_structured_logging_middleware_handles_errors(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test structured logging of errors with JSON format."""
        logging_middleware = StructuredLoggingMiddleware(methods=["tools/call"])
        logging_server.add_middleware(logging_middleware)
        with caplog.at_level(logging.INFO):
            async with Client(logging_server) as client:
                with pytest.raises(Exception):
                    await client.call_tool(
                        "operation_with_error", {"should_fail": True}
                    )
        assert get_log_lines(caplog) == snapshot(
            [
                '{"event": "request_start", "method": "tools/call", "source": "client"}',
                '{"event": "request_error", "method": "tools/call", "source": "client", "duration_ms": 0.02, "error": "Error calling tool \'operation_with_error\': Operation failed intentionally"}',
            ]
        )
    async def test_logging_middleware_with_different_operations(
        self, logging_server: FastMCP, caplog: pytest.LogCaptureFixture
    ):
        """Test logging middleware with various MCP operations."""
        logging_server.add_middleware(
            LoggingMiddleware(
                methods=[
                    "tools/call",
                    "resources/list",
                    "prompts/get",
                    "resources/read",
                ]
            )
        )
        async with Client(logging_server) as client:
            # Test different operation types
            await client.call_tool("simple_operation", {"data": "test"})
            await client.read_resource("log://test")
            await client.get_prompt("test_prompt")
            await client.list_resources()
        # Log order mirrors call order above.
        assert get_log_lines(caplog) == snapshot(
            [
                "event=request_start method=tools/call source=client",
                "event=request_success method=tools/call source=client duration_ms=0.02",
                "event=request_start method=resources/read source=client",
                "event=request_success method=resources/read source=client duration_ms=0.02",
                "event=request_start method=prompts/get source=client",
                "event=request_success method=prompts/get source=client duration_ms=0.02",
                "event=request_start method=resources/list source=client",
                "event=request_success method=resources/list source=client duration_ms=0.02",
            ]
        )
    async def test_logging_middleware_custom_configuration(
        self, logging_server: FastMCP
    ):
        """Test logging middleware with custom logger configuration."""
        import io
        import logging
        # Create custom logger
        log_buffer = io.StringIO()
        handler = logging.StreamHandler(log_buffer)
        custom_logger = logging.getLogger("custom_logging_test")
        custom_logger.addHandler(handler)
        custom_logger.setLevel(logging.DEBUG)
        logging_server.add_middleware(
            LoggingMiddleware(
                logger=custom_logger,
                log_level=logging.DEBUG,
                include_payloads=True,
                methods=["tools/call"],
            )
        )
        async with Client(logging_server) as client:
            await client.call_tool("simple_operation", {"data": "custom_test"})
        # Check that our custom logger captured the logs
        log_output = log_buffer.getvalue()
        assert log_output == snapshot("""\
event=request_start method=tools/call source=client payload={"task":null,"_meta":null,"name":"simple_operation","arguments":{"data":"custom_test"}} payload_type=CallToolRequestParams
event=request_success method=tools/call source=client duration_ms=0.02
""")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_logging.py",
"license": "Apache License 2.0",
"lines": 529,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/middleware/test_rate_limiting.py | """Tests for rate limiting middleware."""
import asyncio
from unittest.mock import AsyncMock, MagicMock
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.exceptions import ToolError
from fastmcp.server.middleware.middleware import MiddlewareContext
from fastmcp.server.middleware.rate_limiting import (
RateLimitError,
RateLimitingMiddleware,
SlidingWindowRateLimiter,
SlidingWindowRateLimitingMiddleware,
TokenBucketRateLimiter,
)
@pytest.fixture
def mock_context():
    """Build a MiddlewareContext stand-in whose method name is pre-set."""
    fake = MagicMock(spec=MiddlewareContext)
    fake.method = "test_method"
    return fake
@pytest.fixture
def mock_call_next():
    """Build an async call_next stub that always resolves to "test_result"."""
    stub = AsyncMock(return_value="test_result")
    return stub
class TestTokenBucketRateLimiter:
    """Unit tests for the token-bucket rate limiter."""

    def test_init(self):
        """A fresh bucket starts full with the configured parameters."""
        bucket = TokenBucketRateLimiter(capacity=10, refill_rate=5.0)
        assert bucket.capacity == 10
        assert bucket.refill_rate == 5.0
        assert bucket.tokens == 10

    async def test_consume_success(self):
        """Tokens can be consumed while the bucket still has capacity."""
        bucket = TokenBucketRateLimiter(capacity=10, refill_rate=5.0)
        for amount in (5, 3):
            assert await bucket.consume(amount) is True

    async def test_consume_failure(self):
        """Consumption fails once the bucket has been drained."""
        bucket = TokenBucketRateLimiter(capacity=5, refill_rate=1.0)
        assert await bucket.consume(5) is True
        assert await bucket.consume(1) is False

    async def test_refill(self):
        """Tokens are replenished over time at the configured rate."""
        # 10 tokens/second, so 0.2s of waiting should yield ~2 tokens.
        bucket = TokenBucketRateLimiter(capacity=10, refill_rate=10.0)
        assert await bucket.consume(10) is True
        assert await bucket.consume(1) is False
        await asyncio.sleep(0.2)
        assert await bucket.consume(2) is True
class TestSlidingWindowRateLimiter:
    """Unit tests for the sliding-window rate limiter."""

    def test_init(self):
        """A new limiter records its configuration and holds no requests."""
        window = SlidingWindowRateLimiter(max_requests=10, window_seconds=60)
        assert window.max_requests == 10
        assert window.window_seconds == 60
        assert len(window.requests) == 0

    async def test_is_allowed_success(self):
        """Requests within the configured limit are admitted."""
        window = SlidingWindowRateLimiter(max_requests=3, window_seconds=60)
        for _ in range(3):
            assert await window.is_allowed() is True

    async def test_is_allowed_failure(self):
        """The first request beyond the configured limit is rejected."""
        window = SlidingWindowRateLimiter(max_requests=2, window_seconds=60)
        assert await window.is_allowed() is True
        assert await window.is_allowed() is True
        assert await window.is_allowed() is False

    async def test_sliding_window(self):
        """Capacity is restored once old requests age out of the window."""
        window = SlidingWindowRateLimiter(max_requests=2, window_seconds=1)
        assert await window.is_allowed() is True
        assert await window.is_allowed() is True
        assert await window.is_allowed() is False
        # Sleep past the 1-second window so the earlier requests expire.
        await asyncio.sleep(1.1)
        assert await window.is_allowed() is True
class TestRateLimitingMiddleware:
    """Unit tests for the token-bucket rate limiting middleware."""

    def test_init_default(self):
        """Defaults: 10 req/s, burst of 20, per-client, no identity hook."""
        middleware = RateLimitingMiddleware()
        assert middleware.max_requests_per_second == 10.0
        assert middleware.burst_capacity == 20
        assert middleware.get_client_id is None
        assert middleware.global_limit is False

    def test_init_custom(self):
        """Constructor arguments are stored verbatim."""
        def identify(ctx):
            return "test_client"

        middleware = RateLimitingMiddleware(
            max_requests_per_second=5.0,
            burst_capacity=10,
            get_client_id=identify,
            global_limit=True,
        )
        assert middleware.max_requests_per_second == 5.0
        assert middleware.burst_capacity == 10
        assert middleware.get_client_id is identify
        assert middleware.global_limit is True

    def test_get_client_identifier_default(self, mock_context):
        """Without a hook, every request shares the "global" identity."""
        middleware = RateLimitingMiddleware()
        assert middleware._get_client_identifier(mock_context) == "global"

    def test_get_client_identifier_custom(self, mock_context):
        """A supplied hook decides the client identity."""
        middleware = RateLimitingMiddleware(get_client_id=lambda ctx: "custom_client")
        assert middleware._get_client_identifier(mock_context) == "custom_client"

    async def test_on_request_success(self, mock_context, mock_call_next):
        """A request under the limit is forwarded to call_next."""
        # High limit so the single request cannot possibly be throttled.
        middleware = RateLimitingMiddleware(max_requests_per_second=100.0)
        outcome = await middleware.on_request(mock_context, mock_call_next)
        assert outcome == "test_result"
        assert mock_call_next.called

    async def test_on_request_rate_limited(self, mock_context, mock_call_next):
        """Exhausting the burst capacity raises RateLimitError."""
        middleware = RateLimitingMiddleware(
            max_requests_per_second=1.0, burst_capacity=1
        )
        await middleware.on_request(mock_context, mock_call_next)
        with pytest.raises(RateLimitError, match="Rate limit exceeded"):
            await middleware.on_request(mock_context, mock_call_next)

    async def test_global_rate_limiting(self, mock_context, mock_call_next):
        """With global_limit=True the shared bucket throttles everyone."""
        middleware = RateLimitingMiddleware(
            max_requests_per_second=1.0, burst_capacity=1, global_limit=True
        )
        await middleware.on_request(mock_context, mock_call_next)
        with pytest.raises(RateLimitError, match="Global rate limit exceeded"):
            await middleware.on_request(mock_context, mock_call_next)
class TestSlidingWindowRateLimitingMiddleware:
    """Unit tests for the sliding-window rate limiting middleware."""

    def test_init_default(self):
        """Defaults: 60-second window and no client identity hook."""
        middleware = SlidingWindowRateLimitingMiddleware(max_requests=100)
        assert middleware.max_requests == 100
        assert middleware.window_seconds == 60
        assert middleware.get_client_id is None

    def test_init_custom(self):
        """window_minutes is converted to seconds; the hook is kept as-is."""
        def identify(ctx):
            return "test_client"

        middleware = SlidingWindowRateLimitingMiddleware(
            max_requests=50, window_minutes=5, get_client_id=identify
        )
        assert middleware.max_requests == 50
        assert middleware.window_seconds == 300  # 5 minutes * 60
        assert middleware.get_client_id is identify

    async def test_on_request_success(self, mock_context, mock_call_next):
        """A request under the limit is forwarded to call_next."""
        middleware = SlidingWindowRateLimitingMiddleware(max_requests=100)
        outcome = await middleware.on_request(mock_context, mock_call_next)
        assert outcome == "test_result"
        assert mock_call_next.called

    async def test_on_request_rate_limited(self, mock_context, mock_call_next):
        """The first request after the window fills raises RateLimitError."""
        middleware = SlidingWindowRateLimitingMiddleware(max_requests=1)
        await middleware.on_request(mock_context, mock_call_next)
        with pytest.raises(RateLimitError, match="Rate limit exceeded"):
            await middleware.on_request(mock_context, mock_call_next)
class TestRateLimitError:
    """Unit tests for the RateLimitError exception."""

    def test_init_default(self):
        """The default error carries code -32000 and the standard message."""
        err = RateLimitError()
        assert err.error.code == -32000
        assert err.error.message == "Rate limit exceeded"

    def test_init_custom(self):
        """A custom message is preserved alongside the fixed error code."""
        err = RateLimitError("Custom message")
        assert err.error.code == -32000
        assert err.error.message == "Custom message"
@pytest.fixture
def rate_limit_server():
    """Create a FastMCP server specifically for rate limiting tests."""
    # Three tools with different cost profiles. The tool names are referenced
    # by the integration tests below — keep them stable.
    mcp = FastMCP("RateLimitTestServer")
    @mcp.tool
    def quick_action(message: str) -> str:
        """A quick action for testing rate limits."""
        return f"Processed: {message}"
    @mcp.tool
    def batch_process(items: list[str]) -> str:
        """Process multiple items."""
        return f"Processed {len(items)} items"
    @mcp.tool
    def heavy_computation() -> str:
        """A heavy computation that might need rate limiting."""
        # Simulate some work
        import time
        time.sleep(0.01)  # Very short delay
        return "Heavy computation complete"
    return mcp
class TestRateLimitingMiddlewareIntegration:
    """Integration tests for rate limiting middleware with real FastMCP server."""
    # These tests drive a real client/server pair, so the request budgets below
    # account for MCP handshake traffic (initialize, list_tools, notifications)
    # in addition to the explicit tool calls.
    async def test_rate_limiting_allows_normal_usage(self, rate_limit_server):
        """Test that normal usage patterns are allowed through rate limiting."""
        # Generous rate limit
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(max_requests_per_second=50.0, burst_capacity=10)
        )
        async with Client(rate_limit_server) as client:
            # Normal usage should be fine
            for i in range(5):
                result = await client.call_tool(
                    "quick_action", {"message": f"task_{i}"}
                )
                assert f"Processed: task_{i}" in str(result)
    async def test_rate_limiting_blocks_rapid_requests(self, rate_limit_server):
        """Test that rate limiting blocks rapid successive requests."""
        # Use a generous burst but near-zero refill rate so tokens never
        # replenish. The MCP SDK sends internal messages (initialize,
        # notifications, list_tools for validation) whose exact count
        # varies, so we give enough burst for init + several calls, then
        # keep firing until we hit the limit.
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(max_requests_per_second=0.001, burst_capacity=20)
        )
        async with Client(rate_limit_server) as client:
            # Fire enough calls to exhaust the burst. With near-zero
            # refill, we must eventually hit the limit.
            hit_limit = False
            for i in range(30):
                try:
                    await client.call_tool("quick_action", {"message": str(i)})
                except ToolError as exc:
                    assert "Rate limit exceeded" in str(exc)
                    hit_limit = True
                    break
            assert hit_limit, "Rate limit was never triggered"
    async def test_rate_limiting_with_concurrent_requests(self, rate_limit_server):
        """Test rate limiting behavior with concurrent requests."""
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(max_requests_per_second=15.0, burst_capacity=8)
        )
        async with Client(rate_limit_server) as client:
            # Fire off many concurrent requests
            tasks = []
            for i in range(8):
                task = asyncio.create_task(
                    client.call_tool("quick_action", {"message": f"concurrent_{i}"})
                )
                tasks.append(task)
            # Gather results, allowing exceptions
            results = await asyncio.gather(*tasks, return_exceptions=True)
            # With extra list_tools calls, the exact behavior is unpredictable
            # Just verify that rate limiting is working (not all succeed)
            successes = [r for r in results if not isinstance(r, Exception)]
            failures = [r for r in results if isinstance(r, Exception)]
            total_results = len(successes) + len(failures)
            assert total_results == 8, f"Expected 8 results, got {total_results}"
            # With the unpredictable list_tools calls, we just verify that the system
            # is working (all requests should either succeed or fail with some exception)
            assert 0 <= len(successes) <= 8, "Should have between 0-8 successes"
            assert 0 <= len(failures) <= 8, "Should have between 0-8 failures"
    async def test_sliding_window_rate_limiting(self, rate_limit_server):
        """Test sliding window rate limiting implementation."""
        rate_limit_server.add_middleware(
            SlidingWindowRateLimitingMiddleware(
                max_requests=6,  # 1 init + 1 list_tools + 3 calls + 1 to fail
                window_minutes=1,  # 1-minute window
            )
        )
        async with Client(rate_limit_server) as client:
            # Should allow up to the limit
            await client.call_tool("quick_action", {"message": "1"})
            await client.call_tool("quick_action", {"message": "2"})
            await client.call_tool("quick_action", {"message": "3"})
            # Fourth should be blocked
            with pytest.raises(ToolError, match="Rate limit exceeded"):
                await client.call_tool("quick_action", {"message": "4"})
    async def test_rate_limiting_with_different_operations(self, rate_limit_server):
        """Test that rate limiting applies to all types of operations."""
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(max_requests_per_second=9.0, burst_capacity=5)
        )
        async with Client(rate_limit_server) as client:
            # Mix different operations
            await client.call_tool("quick_action", {"message": "test"})
            await client.call_tool("heavy_computation")
            # Should be rate limited regardless of operation type
            with pytest.raises(ToolError, match="Rate limit exceeded"):
                await client.call_tool("batch_process", {"items": ["a", "b", "c"]})
    async def test_custom_client_identification(self, rate_limit_server):
        """Test rate limiting with custom client identification."""
        def get_client_id(context):
            # In a real scenario, this might extract from headers or context
            return "test_client_123"
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(
                max_requests_per_second=1.0,  # Very slow refill to ensure rate limiting triggers
                burst_capacity=4,  # init + list_tools + call + list_tools = 4, so 2nd call fails
                get_client_id=get_client_id,
            )
        )
        async with Client(rate_limit_server) as client:
            # First request should succeed
            await client.call_tool("quick_action", {"message": "first"})
            # Second should be rate limited for this specific client
            with pytest.raises(ToolError) as exc_info:
                await client.call_tool("quick_action", {"message": "second"})
            assert "Rate limit exceeded for client: test_client_123" in str(
                exc_info.value
            )
    async def test_global_rate_limiting(self, rate_limit_server):
        """Test global rate limiting across all clients."""
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(
                max_requests_per_second=6.0,
                burst_capacity=5,  # 1 init + 2 list_tools + 2 calls before limit
                global_limit=True,  # Accounting for initialization and list_tools calls
            )
        )
        async with Client(rate_limit_server) as client:
            # Use up the global capacity
            await client.call_tool("quick_action", {"message": "1"})
            await client.call_tool("quick_action", {"message": "2"})
            # Should be globally rate limited
            with pytest.raises(ToolError, match="Global rate limit exceeded"):
                await client.call_tool("quick_action", {"message": "3"})
    async def test_rate_limiting_recovery_over_time(self, rate_limit_server):
        """Test that rate limiting allows requests again after time passes."""
        rate_limit_server.add_middleware(
            RateLimitingMiddleware(
                max_requests_per_second=10.0,  # 10 per second = 1 every 100ms
                burst_capacity=4,
            )
        )
        async with Client(rate_limit_server) as client:
            # Use up capacity
            await client.call_tool("quick_action", {"message": "first"})
            # Should be rate limited immediately
            with pytest.raises(ToolError):
                await client.call_tool("quick_action", {"message": "second"})
            # Wait for token bucket to refill (150ms should be enough for ~1.5 tokens)
            await asyncio.sleep(0.15)
            # Should be able to make another request
            result = await client.call_tool("quick_action", {"message": "after_wait"})
            assert "after_wait" in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_rate_limiting.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/middleware/test_timing.py | """Tests for timing middleware."""
import asyncio
import logging
import time
from unittest.mock import AsyncMock, MagicMock
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.middleware.middleware import MiddlewareContext
from fastmcp.server.middleware.timing import DetailedTimingMiddleware, TimingMiddleware
@pytest.fixture
def mock_context():
    """Build a MiddlewareContext stand-in whose method name is pre-set."""
    fake = MagicMock(spec=MiddlewareContext)
    fake.method = "test_method"
    return fake
@pytest.fixture
def mock_call_next():
    """Build an async call_next stub that always resolves to "test_result"."""
    stub = AsyncMock(return_value="test_result")
    return stub
class TestTimingMiddleware:
    """Unit tests for the request-level timing middleware."""

    def test_init_default(self):
        """Defaults: the fastmcp.timing logger at INFO level."""
        middleware = TimingMiddleware()
        assert middleware.logger.name == "fastmcp.timing"
        assert middleware.log_level == logging.INFO

    def test_init_custom(self):
        """A caller-provided logger and level are stored as-is."""
        custom = logging.getLogger("custom")
        middleware = TimingMiddleware(logger=custom, log_level=logging.DEBUG)
        assert middleware.logger is custom
        assert middleware.log_level == logging.DEBUG

    async def test_on_request_success(self, mock_context, mock_call_next, caplog):
        """A successful request is forwarded and its duration is logged."""
        middleware = TimingMiddleware()
        outcome = await middleware.on_request(mock_context, mock_call_next)
        assert outcome == "test_result"
        assert mock_call_next.called
        assert "Request test_method completed in" in caplog.text
        assert "ms" in caplog.text

    async def test_on_request_failure(self, mock_context, caplog):
        """A failing request is re-raised after its duration is logged."""
        middleware = TimingMiddleware()
        failing_next = AsyncMock(side_effect=ValueError("test error"))
        with pytest.raises(ValueError):
            await middleware.on_request(mock_context, failing_next)
        assert "Request test_method failed after" in caplog.text
        assert "ms: test error" in caplog.text
class TestDetailedTimingMiddleware:
    """Unit tests for the per-operation timing middleware."""

    def test_init_default(self):
        """Defaults: the fastmcp.timing.detailed logger at INFO level."""
        middleware = DetailedTimingMiddleware()
        assert middleware.logger.name == "fastmcp.timing.detailed"
        assert middleware.log_level == logging.INFO

    async def test_on_call_tool(self, caplog):
        """Tool calls are timed and logged under the tool's name."""
        middleware = DetailedTimingMiddleware()
        ctx = MagicMock()
        ctx.message.name = "test_tool"
        forward = AsyncMock(return_value="tool_result")
        assert await middleware.on_call_tool(ctx, forward) == "tool_result"
        assert "Tool 'test_tool' completed in" in caplog.text

    async def test_on_read_resource(self, caplog):
        """Resource reads are timed and logged under the resource URI."""
        middleware = DetailedTimingMiddleware()
        ctx = MagicMock()
        ctx.message.uri = "test://resource"
        forward = AsyncMock(return_value="resource_result")
        assert await middleware.on_read_resource(ctx, forward) == "resource_result"
        assert "Resource 'test://resource' completed in" in caplog.text

    async def test_on_get_prompt(self, caplog):
        """Prompt retrievals are timed and logged under the prompt name."""
        middleware = DetailedTimingMiddleware()
        ctx = MagicMock()
        ctx.message.name = "test_prompt"
        forward = AsyncMock(return_value="prompt_result")
        assert await middleware.on_get_prompt(ctx, forward) == "prompt_result"
        assert "Prompt 'test_prompt' completed in" in caplog.text

    async def test_on_list_tools(self, caplog):
        """Tool listing is timed and logged."""
        middleware = DetailedTimingMiddleware()
        forward = AsyncMock(return_value="tools_result")
        assert await middleware.on_list_tools(MagicMock(), forward) == "tools_result"
        assert "List tools completed in" in caplog.text

    async def test_operation_failure(self, caplog):
        """A failing operation is re-raised after its duration is logged."""
        middleware = DetailedTimingMiddleware()
        ctx = MagicMock()
        ctx.message.name = "failing_tool"
        forward = AsyncMock(side_effect=RuntimeError("operation failed"))
        with pytest.raises(RuntimeError):
            await middleware.on_call_tool(ctx, forward)
        assert "Tool 'failing_tool' failed after" in caplog.text
        assert "ms: operation failed" in caplog.text
@pytest.fixture
def timing_server():
    """Create a FastMCP server specifically for timing middleware tests."""
    # Components sleep for fixed, distinct durations so each one has a
    # predictable, nonzero runtime for the timing middleware to measure.
    mcp = FastMCP("TimingTestServer")
    @mcp.tool
    def instant_task() -> str:
        """A task that completes instantly."""
        return "Done instantly"
    @mcp.tool
    def short_task() -> str:
        """A task that takes 0.01 seconds."""
        time.sleep(0.01)
        return "Done after 0.01 seconds"
    @mcp.tool
    def medium_task() -> str:
        """A task that takes 0.02 seconds."""
        time.sleep(0.02)
        return "Done after 0.02 seconds"
    @mcp.tool
    def failing_task() -> str:
        """A task that always fails."""
        raise ValueError("Task failed as expected")
    @mcp.resource("timer://test")
    def test_resource() -> str:
        """A resource that takes time to read."""
        time.sleep(0.005)
        return "Resource content after 0.005 seconds"
    @mcp.prompt
    def test_prompt() -> str:
        """A prompt that takes time to generate."""
        time.sleep(0.008)
        return "Prompt content after 0.008 seconds"
    return mcp
class TestTimingMiddlewareIntegration:
    """Integration tests for timing middleware with real FastMCP server."""
    async def test_timing_middleware_measures_tool_execution(
        self, timing_server, caplog
    ):
        """Test that timing middleware accurately measures tool execution times."""
        timing_server.add_middleware(TimingMiddleware())
        async with Client(timing_server) as client:
            # Test instant task
            await client.call_tool("instant_task")
            # Test short task (0.01s sleep in the fixture)
            await client.call_tool("short_task")
            # Test medium task (0.02s sleep in the fixture)
            await client.call_tool("medium_task")
        log_text = caplog.text
        # Should have timing logs for all three calls (plus any extra list_tools calls)
        timing_logs = [
            line
            for line in log_text.split("\n")
            if "completed in" in line and "ms" in line
        ]
        assert (
            len(timing_logs) >= 3
        )  # At least 3 tool calls, may have additional list_tools calls
        # Verify that longer tasks show longer timing (roughly)
        assert "tools/call completed in" in log_text
        assert "ms" in log_text
    async def test_timing_middleware_handles_failures(self, timing_server, caplog):
        """Test that timing middleware measures time even for failed operations."""
        timing_server.add_middleware(TimingMiddleware())
        async with Client(timing_server) as client:
            # This should fail but still be timed
            with pytest.raises(Exception):
                await client.call_tool("failing_task")
        # Should log the failure with timing
        assert "tools/call failed after" in caplog.text
        assert "ms:" in caplog.text
    async def test_detailed_timing_middleware_per_operation(
        self, timing_server, caplog
    ):
        """Test that detailed timing middleware provides operation-specific timing."""
        timing_server.add_middleware(DetailedTimingMiddleware())
        async with Client(timing_server) as client:
            # Test tool call
            await client.call_tool("short_task")
            # Test resource read
            await client.read_resource("timer://test")
            # Test prompt
            await client.get_prompt("test_prompt")
            # Test listing operations
            await client.list_tools()
            await client.list_resources()
            await client.list_prompts()
        log_text = caplog.text
        # Should have specific timing logs for each operation type
        assert "Tool 'short_task' completed in" in log_text
        assert "Resource 'timer://test' completed in" in log_text
        assert "Prompt 'test_prompt' completed in" in log_text
        assert "List tools completed in" in log_text
        assert "List resources completed in" in log_text
        assert "List prompts completed in" in log_text
    async def test_timing_middleware_concurrent_operations(self, timing_server, caplog):
        """Test timing middleware with concurrent operations."""
        timing_server.add_middleware(TimingMiddleware())
        async with Client(timing_server) as client:
            # Run multiple operations concurrently
            tasks = [
                client.call_tool("instant_task"),
                client.call_tool("short_task"),
                client.call_tool("instant_task"),
            ]
            await asyncio.gather(*tasks)
        log_text = caplog.text
        # Should have timing logs for all concurrent operations (including extra list_tools calls)
        timing_logs = [line for line in log_text.split("\n") if "completed in" in line]
        assert (
            len(timing_logs) >= 3
        )  # At least 3 tool calls, may have additional list_tools calls
    async def test_timing_middleware_custom_logger(self, timing_server, caplog):
        """Test timing middleware with custom logger configuration."""
        import io
        import logging
        # Create a custom logger that writes to a string buffer
        log_buffer = io.StringIO()
        handler = logging.StreamHandler(log_buffer)
        custom_logger = logging.getLogger("custom_timing")
        custom_logger.addHandler(handler)
        custom_logger.setLevel(logging.DEBUG)
        # Use custom logger and log level
        timing_server.add_middleware(
            TimingMiddleware(logger=custom_logger, log_level=logging.DEBUG)
        )
        async with Client(timing_server) as client:
            await client.call_tool("instant_task")
        # Check that our custom logger was used
        log_output = log_buffer.getvalue()
        assert "tools/call completed in" in log_output
        assert "ms" in log_output
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_timing.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/utilities/inspect.py | """Utilities for inspecting FastMCP instances."""
from __future__ import annotations
import importlib.metadata
from dataclasses import dataclass
from enum import Enum
from typing import Any, Literal, cast
import pydantic_core
from mcp.server.fastmcp import FastMCP as FastMCP1x
import fastmcp
from fastmcp import Client
from fastmcp.server.server import FastMCP
@dataclass
class ToolInfo:
    """Information about a tool.

    Plain-data record of a tool's metadata, as assembled by
    ``inspect_fastmcp_v2`` from the server's tool registry.
    """

    key: str  # component key; ``name`` falls back to this when unset
    name: str
    description: str | None
    input_schema: dict[str, Any]  # tool input schema; {} when the tool declares none
    output_schema: dict[str, Any] | None = None
    annotations: dict[str, Any] | None = None  # dumped annotations model, if any
    tags: list[str] | None = None
    title: str | None = None
    icons: list[dict[str, Any]] | None = None  # dumped icon models, if any
    meta: dict[str, Any] | None = None
@dataclass
class PromptInfo:
    """Information about a prompt.

    Plain-data record of a prompt's metadata; all optional fields default
    to None when the prompt does not provide them.
    """

    key: str  # component key
    name: str
    description: str | None
    arguments: list[dict[str, Any]] | None = None  # declared prompt arguments, if any
    tags: list[str] | None = None
    title: str | None = None
    icons: list[dict[str, Any]] | None = None
    meta: dict[str, Any] | None = None
@dataclass
class ResourceInfo:
    """Information about a resource.

    Plain-data record of a concrete (non-templated) resource's metadata.
    """

    key: str  # component key
    uri: str  # the resource's address
    name: str | None
    description: str | None
    mime_type: str | None = None
    annotations: dict[str, Any] | None = None
    tags: list[str] | None = None
    title: str | None = None
    icons: list[dict[str, Any]] | None = None
    meta: dict[str, Any] | None = None
@dataclass
class TemplateInfo:
    """Information about a resource template exposed by an MCP server."""
    # Registration key (the URI template string for v1 servers).
    key: str
    # URI template with placeholders, e.g. "resource://item/{id}".
    uri_template: str
    name: str | None
    description: str | None
    mime_type: str | None = None
    # Template parameter schema (v2 only; v1 does not expose this).
    parameters: dict[str, Any] | None = None
    # Serialized annotations, if any (v2 only).
    annotations: dict[str, Any] | None = None
    # FastMCP tags attached to the template (v2 only).
    tags: list[str] | None = None
    title: str | None = None
    # Serialized icon objects, if the template declares any.
    icons: list[dict[str, Any]] | None = None
    # Arbitrary metadata attached to the template (v2 only).
    meta: dict[str, Any] | None = None
@dataclass
class FastMCPInfo:
    """Information extracted from a FastMCP instance (v1 or v2)."""
    # Server display name.
    name: str
    # Server instructions text, when configured.
    instructions: str | None
    version: str | None  # The server's own version string (if specified)
    # Server-level website URL, when the underlying server exposes one.
    website_url: str | None
    # Server-level serialized icon objects, when exposed.
    icons: list[dict[str, Any]] | None
    fastmcp_version: str  # Version of FastMCP generating this manifest
    mcp_version: str  # Version of MCP protocol library
    server_generation: int  # Server generation: 1 (mcp package) or 2 (fastmcp)
    tools: list[ToolInfo]
    prompts: list[PromptInfo]
    resources: list[ResourceInfo]
    templates: list[TemplateInfo]
    # Advertised MCP capability flags, keyed by capability name.
    capabilities: dict[str, Any]
async def inspect_fastmcp_v2(mcp: FastMCP[Any]) -> FastMCPInfo:
    """Extract information from a FastMCP v2.x instance.

    Args:
        mcp: The FastMCP v2.x instance to inspect

    Returns:
        FastMCPInfo dataclass containing the extracted information
    """

    def _icons_of(component: Any) -> list[dict[str, Any]] | None:
        # Serialize icon models to plain dicts; None when none are declared.
        return (
            [icon.model_dump() for icon in component.icons]
            if component.icons
            else None
        )

    def _tags_of(component: Any) -> list[str] | None:
        # Tags are stored as a set; expose them as a list (None when empty).
        return list(component.tags) if component.tags else None

    # The list_* methods run middleware and enabled/auth filtering, so the
    # inventory below matches what a connected client would actually see.
    tools = await mcp.list_tools()
    prompts = await mcp.list_prompts()
    resources = await mcp.list_resources()
    templates = await mcp.list_resource_templates()

    tool_infos = [
        ToolInfo(
            key=tool.key,
            name=tool.name or tool.key,
            description=tool.description,
            # to_mcp_tool() produces the canonical JSON schema for arguments.
            input_schema=tool.to_mcp_tool(name=tool.name).inputSchema or {},
            output_schema=tool.output_schema,
            annotations=tool.annotations.model_dump() if tool.annotations else None,
            tags=_tags_of(tool),
            title=tool.title,
            icons=_icons_of(tool),
            meta=tool.meta,
        )
        for tool in tools
    ]

    prompt_infos = [
        PromptInfo(
            key=prompt.key,
            name=prompt.name or prompt.key,
            description=prompt.description,
            arguments=(
                [arg.model_dump() for arg in prompt.arguments]
                if prompt.arguments
                else None
            ),
            tags=_tags_of(prompt),
            title=prompt.title,
            icons=_icons_of(prompt),
            meta=prompt.meta,
        )
        for prompt in prompts
    ]

    resource_infos = [
        ResourceInfo(
            key=resource.key,
            uri=str(resource.uri),
            name=resource.name,
            description=resource.description,
            mime_type=resource.mime_type,
            annotations=(
                resource.annotations.model_dump() if resource.annotations else None
            ),
            tags=_tags_of(resource),
            title=resource.title,
            icons=_icons_of(resource),
            meta=resource.meta,
        )
        for resource in resources
    ]

    template_infos = [
        TemplateInfo(
            key=template.key,
            uri_template=template.uri_template,
            name=template.name,
            description=template.description,
            mime_type=template.mime_type,
            parameters=template.parameters,
            annotations=(
                template.annotations.model_dump() if template.annotations else None
            ),
            tags=_tags_of(template),
            title=template.title,
            icons=_icons_of(template),
            meta=template.meta,
        )
        for template in templates
    ]

    # Server-level icons / website URL are newer fields; guard against older
    # low-level server objects that predate them.
    low_level = mcp._mcp_server
    server_icons = (
        [icon.model_dump() for icon in low_level.icons]
        if hasattr(low_level, "icons") and low_level.icons
        else None
    )
    server_website_url = getattr(low_level, "website_url", None)

    # Prefer the FastMCP-level version attribute when it exists.
    if hasattr(mcp, "version"):
        server_version = mcp.version
    else:
        server_version = low_level.version

    return FastMCPInfo(
        name=mcp.name,
        instructions=mcp.instructions,
        version=server_version,
        website_url=server_website_url,
        icons=server_icons,
        fastmcp_version=fastmcp.__version__,
        mcp_version=importlib.metadata.version("mcp"),
        server_generation=2,  # FastMCP v2
        tools=tool_infos,
        prompts=prompt_infos,
        resources=resource_infos,
        templates=template_infos,
        capabilities={
            # Baseline MCP capabilities that FastMCP advertises.
            "tools": {"listChanged": True},
            "resources": {"subscribe": False, "listChanged": False},
            "prompts": {"listChanged": False},
            "logging": {},
        },
    )
async def inspect_fastmcp_v1(mcp: FastMCP1x) -> FastMCPInfo:
    """Extract information from a FastMCP v1.x instance using a Client.

    Args:
        mcp: The FastMCP v1.x instance to inspect

    Returns:
        FastMCPInfo dataclass containing the extracted information
    """

    def _icons_of(obj: Any) -> list[dict[str, Any]] | None:
        # v1 protocol objects may predate the icons field entirely, so treat
        # "attribute missing" the same as "no icons".
        icons = getattr(obj, "icons", None)
        return [icon.model_dump() for icon in icons] if icons else None

    # v1 servers are inspected over a real client session so we only see what
    # the protocol actually exposes.
    async with Client(mcp) as client:
        mcp_tools = await client.list_tools()
        mcp_prompts = await client.list_prompts()
        mcp_resources = await client.list_resources()
        try:
            # FastMCP 1.x does support templates, but guard just in case.
            mcp_templates = await client.list_resource_templates()
        except Exception:
            mcp_templates = []

    # v1 objects lack output_schema/annotations/tags/title/meta; those slots
    # are filled with None below.
    tool_infos = [
        ToolInfo(
            key=tool.name,
            name=tool.name,
            description=tool.description,
            input_schema=tool.inputSchema or {},
            output_schema=None,
            annotations=None,
            tags=None,
            title=None,
            icons=_icons_of(tool),
            meta=None,
        )
        for tool in mcp_tools
    ]

    prompt_infos = [
        PromptInfo(
            key=prompt.name,
            name=prompt.name,
            description=prompt.description,
            arguments=(
                [arg.model_dump() for arg in prompt.arguments]
                if getattr(prompt, "arguments", None)
                else None
            ),
            tags=None,
            title=None,
            icons=_icons_of(prompt),
            meta=None,
        )
        for prompt in mcp_prompts
    ]

    resource_infos = [
        ResourceInfo(
            key=str(resource.uri),
            uri=str(resource.uri),
            name=resource.name,
            description=resource.description,
            mime_type=resource.mimeType,
            annotations=None,
            tags=None,
            title=None,
            icons=_icons_of(resource),
            meta=None,
        )
        for resource in mcp_resources
    ]

    template_infos = [
        TemplateInfo(
            key=str(template.uriTemplate),
            uri_template=str(template.uriTemplate),
            name=template.name,
            description=template.description,
            mime_type=template.mimeType,
            parameters=None,  # v1 doesn't expose template parameters
            annotations=None,
            tags=None,
            title=None,
            icons=_icons_of(template),
            meta=None,
        )
        for template in mcp_templates
    ]

    # Server-level metadata comes from the initialize handshake (the result
    # stays available on the client after the session closes).
    server_info = client.initialize_result.serverInfo
    server_icons = _icons_of(server_info)
    server_website_url = getattr(server_info, "websiteUrl", None)

    return FastMCPInfo(
        name=mcp._mcp_server.name,
        instructions=mcp._mcp_server.instructions,
        version=mcp._mcp_server.version,
        website_url=server_website_url,
        icons=server_icons,
        fastmcp_version=fastmcp.__version__,  # Version generating this manifest
        mcp_version=importlib.metadata.version("mcp"),
        server_generation=1,  # MCP v1
        tools=tool_infos,
        prompts=prompt_infos,
        resources=resource_infos,
        templates=template_infos,
        capabilities={
            "tools": {"listChanged": True},
            "resources": {"subscribe": False, "listChanged": False},
            "prompts": {"listChanged": False},
            "logging": {},
        },
    )
async def inspect_fastmcp(mcp: FastMCP[Any] | FastMCP1x) -> FastMCPInfo:
    """Extract information from a FastMCP instance into a dataclass.

    Automatically detects whether the instance is FastMCP v1.x or v2.x and
    delegates to the matching extraction helper.

    Args:
        mcp: The FastMCP instance to inspect (v1.x or v2.x)

    Returns:
        FastMCPInfo dataclass containing the extracted information
    """
    # v1 servers must be inspected over a client session; anything else is
    # assumed to be a v2 server with direct list_* access.
    if not isinstance(mcp, FastMCP1x):
        return await inspect_fastmcp_v2(cast(FastMCP[Any], mcp))
    return await inspect_fastmcp_v1(mcp)
class InspectFormat(str, Enum):
    """Output format for inspect command."""
    # FastMCP-specific JSON including tags, annotations, and other v2 fields.
    FASTMCP = "fastmcp"
    # Standard MCP protocol JSON with camelCase field names.
    MCP = "mcp"
def format_fastmcp_info(info: FastMCPInfo) -> bytes:
    """Format FastMCPInfo as FastMCP-specific JSON.

    This includes FastMCP-specific fields like tags, enabled, annotations, etc.
    """
    # Server-level metadata, nested under a single "server" key.
    server_section = {
        "name": info.name,
        "instructions": info.instructions,
        "version": info.version,
        "website_url": info.website_url,
        "icons": info.icons,
        "generation": info.server_generation,
        "capabilities": info.capabilities,
    }
    # Versions of the libraries that produced this manifest.
    environment_section = {
        "fastmcp": info.fastmcp_version,
        "mcp": info.mcp_version,
    }
    payload = {
        "server": server_section,
        "environment": environment_section,
        "tools": info.tools,
        "prompts": info.prompts,
        "resources": info.resources,
        "templates": info.templates,
    }
    # pydantic_core serializes the dataclass lists directly.
    return pydantic_core.to_json(payload, indent=2)
async def format_mcp_info(mcp: FastMCP[Any] | FastMCP1x) -> bytes:
    """Format server info as standard MCP protocol JSON.

    Uses Client to get the standard MCP protocol format with camelCase fields.
    Includes version metadata at the top level.
    """
    async with Client(mcp) as client:
        # Raw protocol result objects (camelCase fields preserved).
        tools = await client.list_tools_mcp()
        prompts = await client.list_prompts_mcp()
        resources = await client.list_resources_mcp()
        templates = await client.list_resource_templates_mcp()
        # Server identity comes from the initialize handshake.
        server_info = client.initialize_result.serverInfo
        payload = {
            "environment": {
                "fastmcp": fastmcp.__version__,  # Version generating this manifest
                "mcp": importlib.metadata.version("mcp"),  # MCP protocol version
            },
            "serverInfo": server_info,
            "capabilities": {},  # MCP format doesn't include capabilities at top level
            "tools": tools.tools,
            "prompts": prompts.prompts,
            "resources": resources.resources,
            "resourceTemplates": templates.resourceTemplates,
        }
        return pydantic_core.to_json(payload, indent=2)
async def format_info(
    mcp: FastMCP[Any] | FastMCP1x,
    format: InspectFormat | Literal["fastmcp", "mcp"],
    info: FastMCPInfo | None = None,
) -> bytes:
    """Format server information according to the specified format.

    Args:
        mcp: The FastMCP instance
        format: Output format ("fastmcp" or "mcp")
        info: Pre-extracted FastMCPInfo (optional, will be extracted if not provided)

    Returns:
        JSON bytes in the requested format
    """
    # Normalize strings to the enum (raises ValueError on unknown strings).
    resolved = InspectFormat(format) if isinstance(format, str) else format
    if resolved == InspectFormat.MCP:
        # MCP format doesn't need FastMCPInfo, it uses Client directly.
        return await format_mcp_info(mcp)
    if resolved == InspectFormat.FASTMCP:
        # FastMCP format needs the extracted info; works for v1 and v2 servers.
        if info is None:
            info = await inspect_fastmcp(mcp)
        return format_fastmcp_info(info)
    raise ValueError(f"Unknown format: {resolved}")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/inspect.py",
"license": "Apache License 2.0",
"lines": 428,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/utilities/test_inspect.py | """Tests for the inspect.py module."""
import importlib.metadata
from mcp.server.fastmcp import FastMCP as FastMCP1x
import fastmcp
from fastmcp import Client, FastMCP
from fastmcp.utilities.inspect import (
FastMCPInfo,
ToolInfo,
inspect_fastmcp,
inspect_fastmcp_v1,
)
class TestFastMCPInfo:
    """Tests for the FastMCPInfo dataclass."""

    def test_fastmcp_info_creation(self):
        """FastMCPInfo accepts all required fields and exposes them back."""
        sample_tool = ToolInfo(
            key="tool1",
            name="tool1",
            description="Test tool",
            input_schema={},
            output_schema={
                "type": "object",
                "properties": {"result": {"type": "string"}},
            },
        )
        server_info = FastMCPInfo(
            name="TestServer",
            instructions="Test instructions",
            version="1.0.0",
            website_url=None,
            icons=None,
            fastmcp_version="1.0.0",
            mcp_version="1.0.0",
            server_generation=2,
            tools=[sample_tool],
            prompts=[],
            resources=[],
            templates=[],
            capabilities={"tools": {"listChanged": True}},
        )
        assert server_info.name == "TestServer"
        assert server_info.instructions == "Test instructions"
        assert server_info.fastmcp_version == "1.0.0"
        assert server_info.mcp_version == "1.0.0"
        assert server_info.server_generation == 2
        assert server_info.version == "1.0.0"
        assert len(server_info.tools) == 1
        assert server_info.tools[0].name == "tool1"
        assert server_info.capabilities == {"tools": {"listChanged": True}}

    def test_fastmcp_info_with_none_instructions(self):
        """FastMCPInfo tolerates instructions=None."""
        server_info = FastMCPInfo(
            name="TestServer",
            instructions=None,
            version="1.0.0",
            website_url=None,
            icons=None,
            fastmcp_version="1.0.0",
            mcp_version="1.0.0",
            server_generation=2,
            tools=[],
            prompts=[],
            resources=[],
            templates=[],
            capabilities={},
        )
        assert server_info.instructions is None
class TestGetFastMCPInfo:
    """Tests for the inspect_fastmcp function (historically named get_fastmcp_info)."""
    async def test_empty_server(self):
        """Test inspect_fastmcp with an empty server."""
        mcp = FastMCP("EmptyServer")
        info = await inspect_fastmcp(mcp)
        assert info.name == "EmptyServer"
        assert info.instructions is None
        assert info.fastmcp_version == fastmcp.__version__
        assert info.mcp_version == importlib.metadata.version("mcp")
        assert info.server_generation == 2  # v2 server
        assert info.version == fastmcp.__version__
        assert info.tools == []
        assert info.prompts == []
        assert info.resources == []
        assert info.templates == []
        # Even an empty server advertises the baseline capability keys.
        assert "tools" in info.capabilities
        assert "resources" in info.capabilities
        assert "prompts" in info.capabilities
        assert "logging" in info.capabilities
    async def test_server_with_instructions(self):
        """Test inspect_fastmcp with a server that has instructions."""
        mcp = FastMCP("InstructionsServer", instructions="Test instructions")
        info = await inspect_fastmcp(mcp)
        assert info.instructions == "Test instructions"
    async def test_server_with_version(self):
        """Test inspect_fastmcp with a server that has a version."""
        mcp = FastMCP("VersionServer", version="1.2.3")
        info = await inspect_fastmcp(mcp)
        assert info.version == "1.2.3"
    async def test_server_with_tools(self):
        """Test inspect_fastmcp with a server that has tools."""
        mcp = FastMCP("ToolServer")
        @mcp.tool
        def add_numbers(a: int, b: int) -> int:
            return a + b
        @mcp.tool
        def greet(name: str) -> str:
            return f"Hello, {name}!"
        info = await inspect_fastmcp(mcp)
        assert info.name == "ToolServer"
        assert len(info.tools) == 2
        tool_names = [tool.name for tool in info.tools]
        assert "add_numbers" in tool_names
        assert "greet" in tool_names
    async def test_server_with_resources(self):
        """Test inspect_fastmcp with a server that has resources."""
        mcp = FastMCP("ResourceServer")
        @mcp.resource("resource://static")
        def get_static_data() -> str:
            return "Static data"
        @mcp.resource("resource://dynamic/{param}")
        def get_dynamic_data(param: str) -> str:
            return f"Dynamic data: {param}"
        info = await inspect_fastmcp(mcp)
        assert info.name == "ResourceServer"
        assert len(info.resources) == 1  # Static resource
        assert len(info.templates) == 1  # Dynamic resource becomes template
        resource_uris = [res.uri for res in info.resources]
        template_uris = [tmpl.uri_template for tmpl in info.templates]
        assert "resource://static" in resource_uris
        assert "resource://dynamic/{param}" in template_uris
    async def test_server_with_prompts(self):
        """Test inspect_fastmcp with a server that has prompts."""
        mcp = FastMCP("PromptServer")
        @mcp.prompt
        def analyze_data(data: str) -> list:
            return [{"role": "user", "content": f"Analyze: {data}"}]
        @mcp.prompt("custom_prompt")
        def custom_analysis(text: str) -> list:
            return [{"role": "user", "content": f"Custom: {text}"}]
        info = await inspect_fastmcp(mcp)
        assert info.name == "PromptServer"
        assert len(info.prompts) == 2
        prompt_names = [prompt.name for prompt in info.prompts]
        assert "analyze_data" in prompt_names
        assert "custom_prompt" in prompt_names
    async def test_comprehensive_server(self):
        """Test inspect_fastmcp with a server that has all component types."""
        mcp = FastMCP("ComprehensiveServer", instructions="A server with everything")
        # Add a tool
        @mcp.tool
        def calculate(x: int, y: int) -> int:
            return x * y
        # Add a resource
        @mcp.resource("resource://data")
        def get_data() -> str:
            return "Some data"
        # Add a template
        @mcp.resource("resource://item/{id}")
        def get_item(id: str) -> str:
            return f"Item {id}"
        # Add a prompt
        @mcp.prompt
        def analyze(content: str) -> list:
            return [{"role": "user", "content": content}]
        info = await inspect_fastmcp(mcp)
        assert info.name == "ComprehensiveServer"
        assert info.instructions == "A server with everything"
        assert info.fastmcp_version == fastmcp.__version__
        # Check all components are present
        assert len(info.tools) == 1
        tool_names = [tool.name for tool in info.tools]
        assert "calculate" in tool_names
        assert len(info.resources) == 1
        resource_uris = [res.uri for res in info.resources]
        assert "resource://data" in resource_uris
        assert len(info.templates) == 1
        template_uris = [tmpl.uri_template for tmpl in info.templates]
        assert "resource://item/{id}" in template_uris
        assert len(info.prompts) == 1
        prompt_names = [prompt.name for prompt in info.prompts]
        assert "analyze" in prompt_names
        # Check capabilities
        assert "tools" in info.capabilities
        assert "resources" in info.capabilities
        assert "prompts" in info.capabilities
        assert "logging" in info.capabilities
    async def test_server_no_instructions(self):
        """Test inspect_fastmcp with a server that has no instructions."""
        mcp = FastMCP("NoInstructionsServer")
        info = await inspect_fastmcp(mcp)
        assert info.name == "NoInstructionsServer"
        assert info.instructions is None
    async def test_server_with_client_integration(self):
        """Test that the extracted info matches what a client would see."""
        mcp = FastMCP("IntegrationServer")
        @mcp.tool
        def test_tool() -> str:
            return "test"
        @mcp.resource("resource://test")
        def test_resource() -> str:
            return "test resource"
        @mcp.prompt
        def test_prompt() -> list:
            return [{"role": "user", "content": "test"}]
        # Get info using our function
        info = await inspect_fastmcp(mcp)
        # Verify using client
        async with Client(mcp) as client:
            tools = await client.list_tools()
            resources = await client.list_resources()
            prompts = await client.list_prompts()
            assert len(info.tools) == len(tools)
            assert len(info.resources) == len(resources)
            assert len(info.prompts) == len(prompts)
            assert info.tools[0].name == tools[0].name
            assert info.resources[0].uri == str(resources[0].uri)
            assert info.prompts[0].name == prompts[0].name
    async def test_inspect_respects_tag_filtering(self):
        """Test that inspect omits components filtered out by include_tags/exclude_tags.

        Regression test for Issue #2032: inspect command was showing components
        that were filtered out by tag rules, causing confusion when those
        components weren't actually available to clients.
        """
        # Create server with include_tags that will filter out untagged components
        mcp = FastMCP("FilteredServer")
        mcp.enable(tags={"fetch", "analyze", "create"}, only=True)
        # Add tools with and without matching tags
        @mcp.tool(tags={"fetch"})
        def tagged_tool() -> str:
            """Tool with matching tag - should be visible."""
            return "visible"
        @mcp.tool
        def untagged_tool() -> str:
            """Tool without tags - should be filtered out."""
            return "hidden"
        # Add resources with and without matching tags
        @mcp.resource("resource://tagged", tags={"analyze"})
        def tagged_resource() -> str:
            """Resource with matching tag - should be visible."""
            return "visible resource"
        @mcp.resource("resource://untagged")
        def untagged_resource() -> str:
            """Resource without tags - should be filtered out."""
            return "hidden resource"
        # Add templates with and without matching tags
        @mcp.resource("resource://tagged/{id}", tags={"create"})
        def tagged_template(id: str) -> str:
            """Template with matching tag - should be visible."""
            return f"visible template {id}"
        @mcp.resource("resource://untagged/{id}")
        def untagged_template(id: str) -> str:
            """Template without tags - should be filtered out."""
            return f"hidden template {id}"
        # Add prompts with and without matching tags
        @mcp.prompt(tags={"fetch"})
        def tagged_prompt() -> list:
            """Prompt with matching tag - should be visible."""
            return [{"role": "user", "content": "visible prompt"}]
        @mcp.prompt
        def untagged_prompt() -> list:
            """Prompt without tags - should be filtered out."""
            return [{"role": "user", "content": "hidden prompt"}]
        # Get inspect info
        info = await inspect_fastmcp(mcp)
        # Verify only tagged components are visible
        assert len(info.tools) == 1
        assert info.tools[0].name == "tagged_tool"
        assert len(info.resources) == 1
        assert info.resources[0].uri == "resource://tagged"
        assert len(info.templates) == 1
        assert info.templates[0].uri_template == "resource://tagged/{id}"
        assert len(info.prompts) == 1
        assert info.prompts[0].name == "tagged_prompt"
        # Verify this matches what a client would see
        async with Client(mcp) as client:
            tools = await client.list_tools()
            resources = await client.list_resources()
            templates = await client.list_resource_templates()
            prompts = await client.list_prompts()
            assert len(info.tools) == len(tools)
            assert len(info.resources) == len(resources)
            assert len(info.templates) == len(templates)
            assert len(info.prompts) == len(prompts)
    async def test_inspect_respects_tag_filtering_with_mounted_servers(self):
        """Test that inspect applies tag filtering to mounted servers.

        Verifies that when a parent server has tag filters, those filters
        are respected when inspecting components from mounted servers.
        """
        # Create a mounted server with various tagged and untagged components
        mounted = FastMCP("MountedServer")
        @mounted.tool(tags={"allowed"})
        def allowed_tool() -> str:
            return "allowed"
        @mounted.tool(tags={"blocked"})
        def blocked_tool() -> str:
            return "blocked"
        @mounted.tool
        def untagged_tool() -> str:
            return "untagged"
        @mounted.resource("resource://allowed", tags={"allowed"})
        def allowed_resource() -> str:
            return "allowed resource"
        @mounted.resource("resource://blocked", tags={"blocked"})
        def blocked_resource() -> str:
            return "blocked resource"
        @mounted.prompt(tags={"allowed"})
        def allowed_prompt() -> list:
            return [{"role": "user", "content": "allowed"}]
        @mounted.prompt(tags={"blocked"})
        def blocked_prompt() -> list:
            return [{"role": "user", "content": "blocked"}]
        # Create parent server with tag filtering
        parent = FastMCP("ParentServer")
        parent.enable(tags={"allowed"}, only=True)
        parent.mount(mounted)
        # Get inspect info
        info = await inspect_fastmcp(parent)
        # Only components with "allowed" tag should be visible
        tool_names = [t.name for t in info.tools]
        assert "allowed_tool" in tool_names
        assert "blocked_tool" not in tool_names
        assert "untagged_tool" not in tool_names
        resource_uris = [r.uri for r in info.resources]
        assert "resource://allowed" in resource_uris
        assert "resource://blocked" not in resource_uris
        prompt_names = [p.name for p in info.prompts]
        assert "allowed_prompt" in prompt_names
        assert "blocked_prompt" not in prompt_names
        # Verify this matches what a client would see
        async with Client(parent) as client:
            tools = await client.list_tools()
            resources = await client.list_resources()
            prompts = await client.list_prompts()
            assert len(info.tools) == len(tools)
            assert len(info.resources) == len(resources)
            assert len(info.prompts) == len(prompts)
    async def test_inspect_parent_filters_override_mounted_server_filters(self):
        """Test that parent server tag filters apply to mounted servers.

        Even if a mounted server has no tag filters of its own,
        the parent server's filters should still apply.
        """
        # Create mounted server with NO tag filters (allows everything)
        mounted = FastMCP("MountedServer")
        @mounted.tool(tags={"production"})
        def production_tool() -> str:
            return "production"
        @mounted.tool(tags={"development"})
        def development_tool() -> str:
            return "development"
        @mounted.tool
        def untagged_tool() -> str:
            return "untagged"
        # Create parent with exclude_tags - should filter mounted components
        parent = FastMCP("ParentServer")
        parent.disable(tags={"development"})
        parent.mount(mounted)
        # Get inspect info
        info = await inspect_fastmcp(parent)
        # Only production and untagged should be visible
        tool_names = [t.name for t in info.tools]
        assert "production_tool" in tool_names
        assert "untagged_tool" in tool_names
        assert "development_tool" not in tool_names
        # Verify this matches what a client would see
        async with Client(parent) as client:
            tools = await client.list_tools()
            assert len(info.tools) == len(tools)
class TestFastMCP1xCompatibility:
    """Tests for FastMCP 1.x compatibility."""
    async def test_fastmcp1x_empty_server(self):
        """Test inspect_fastmcp_v1 with an empty FastMCP1x server."""
        mcp = FastMCP1x("Test1x")
        info = await inspect_fastmcp_v1(mcp)
        assert info.name == "Test1x"
        assert info.instructions is None
        assert info.fastmcp_version == fastmcp.__version__  # CLI version
        assert info.mcp_version == importlib.metadata.version("mcp")
        assert info.server_generation == 1  # v1 server
        assert info.version is None
        assert info.tools == []
        assert info.prompts == []
        assert info.resources == []
        assert info.templates == []  # No templates added in this test
        assert "tools" in info.capabilities
    async def test_fastmcp1x_with_tools(self):
        """Test inspect_fastmcp_v1 with a FastMCP1x server that has tools."""
        mcp = FastMCP1x("Test1x")
        @mcp.tool()
        def add_numbers(a: int, b: int) -> int:
            return a + b
        @mcp.tool()
        def greet(name: str) -> str:
            return f"Hello, {name}!"
        info = await inspect_fastmcp_v1(mcp)
        assert info.name == "Test1x"
        assert len(info.tools) == 2
        tool_names = [tool.name for tool in info.tools]
        assert "add_numbers" in tool_names
        assert "greet" in tool_names
    async def test_fastmcp1x_with_resources(self):
        """Test inspect_fastmcp_v1 with a FastMCP1x server that has resources."""
        mcp = FastMCP1x("Test1x")
        @mcp.resource("resource://data")
        def get_data() -> str:
            return "Some data"
        info = await inspect_fastmcp_v1(mcp)
        assert info.name == "Test1x"
        assert len(info.resources) == 1
        resource_uris = [res.uri for res in info.resources]
        assert "resource://data" in resource_uris
        assert len(info.templates) == 0  # No templates added in this test
        assert info.server_generation == 1  # v1 server
    async def test_fastmcp1x_with_prompts(self):
        """Test inspect_fastmcp_v1 with a FastMCP1x server that has prompts."""
        mcp = FastMCP1x("Test1x")
        @mcp.prompt("analyze")
        def analyze_data(data: str) -> list:
            return [{"role": "user", "content": f"Analyze: {data}"}]
        info = await inspect_fastmcp_v1(mcp)
        assert info.name == "Test1x"
        assert len(info.prompts) == 1
        prompt_names = [prompt.name for prompt in info.prompts]
        assert "analyze" in prompt_names
    async def test_dispatcher_with_fastmcp1x(self):
        """Test that the main inspect_fastmcp function correctly dispatches to v1."""
        mcp = FastMCP1x("Test1x")
        @mcp.tool()
        def test_tool() -> str:
            return "test"
        info = await inspect_fastmcp(mcp)
        assert info.name == "Test1x"
        assert len(info.tools) == 1
        tool_names = [tool.name for tool in info.tools]
        assert "test_tool" in tool_names
        assert len(info.templates) == 0  # No templates added in this test
        assert info.server_generation == 1  # v1 server
    async def test_dispatcher_with_fastmcp2x(self):
        """Test that the main inspect_fastmcp function correctly dispatches to v2."""
        mcp = FastMCP("Test2x")
        @mcp.tool
        def test_tool() -> str:
            return "test"
        info = await inspect_fastmcp(mcp)
        assert info.name == "Test2x"
        assert len(info.tools) == 1
        tool_names = [tool.name for tool in info.tools]
        assert "test_tool" in tool_names
    async def test_fastmcp1x_vs_fastmcp2x_comparison(self):
        """Test that both versions can be inspected and compared."""
        mcp1x = FastMCP1x("Test1x")
        mcp2x = FastMCP("Test2x")
        @mcp1x.tool()
        def tool1x() -> str:
            return "1x"
        @mcp2x.tool
        def tool2x() -> str:
            return "2x"
        info1x = await inspect_fastmcp(mcp1x)
        info2x = await inspect_fastmcp(mcp2x)
        assert info1x.name == "Test1x"
        assert info2x.name == "Test2x"
        assert len(info1x.tools) == 1
        assert len(info2x.tools) == 1
        tool1x_names = [tool.name for tool in info1x.tools]
        tool2x_names = [tool.name for tool in info2x.tools]
        assert "tool1x" in tool1x_names
        assert "tool2x" in tool2x_names
        # Check server versions
        assert info1x.server_generation == 1  # v1
        assert info2x.server_generation == 2  # v2
        # v1 servers have no FastMCP-level version; v2 defaults to the library version
        assert info1x.version is None
        assert info2x.version == fastmcp.__version__
        # No templates added in these tests
        assert len(info1x.templates) == 0
        assert len(info2x.templates) == 0
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/test_inspect.py",
"license": "Apache License 2.0",
"lines": 474,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/middleware/test_middleware.py | from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import mcp.types
import pytest
from fastmcp import Client, FastMCP
from fastmcp.server.context import Context
from fastmcp.server.middleware import CallNext, Middleware, MiddlewareContext
from fastmcp.tools.tool import ToolResult
@dataclass
class Recording:
    """A single recorded middleware hook invocation."""
    # the hook is the name of the hook that was called, e.g. "on_list_tools"
    hook: str
    # The MiddlewareContext the hook received.
    context: MiddlewareContext
    # The value returned by call_next for this invocation (None for hooks
    # with no result).
    result: mcp.types.ServerResult | None
class RecordingMiddleware(Middleware):
    """A middleware that automatically records all method calls."""
    def __init__(self, name: str | None = None):
        super().__init__()
        # One Recording per intercepted hook invocation, in call order.
        self.calls: list[Recording] = []
        self.name = name
    def __getattribute__(self, name: str) -> Callable:
        """Dynamically create recording methods for any on_* method."""
        # Intercept every attribute access: any "on_*" lookup returns a fresh
        # async wrapper instead of a real method, so every hook is recorded
        # without defining them all individually.
        if name.startswith("on_"):
            async def record_and_call(
                context: MiddlewareContext, call_next: Callable
            ) -> Any:
                # Record AFTER call_next so inner results are captured and
                # recordings appear in completion (inner-first) order.
                result = await call_next(context)
                self.calls.append(Recording(hook=name, context=context, result=result))
                return result
            return record_and_call
        return super().__getattribute__(name)
    def get_calls(
        self, method: str | None = None, hook: str | None = None
    ) -> list[Recording]:
        """
        Get all recorded calls for a specific method or hook.

        Args:
            method: The method to filter by (e.g. "tools/list")
            hook: The hook to filter by (e.g. "on_list_tools")

        Returns:
            A list of recorded calls.
        """
        calls = []
        for recording in self.calls:
            # Filters are ANDed when both are given; no filters returns everything.
            if method and hook:
                if recording.context.method == method and recording.hook == hook:
                    calls.append(recording)
            elif method:
                if recording.context.method == method:
                    calls.append(recording)
            elif hook:
                if recording.hook == hook:
                    calls.append(recording)
            else:
                calls.append(recording)
        return calls
    def assert_called(
        self,
        hook: str | None = None,
        method: str | None = None,
        times: int | None = None,
        at_least: int | None = None,
    ) -> bool:
        """Assert that a hook was called a specific number of times."""
        # Default to expecting exactly one call when no count is given.
        if times is not None and at_least is not None:
            raise ValueError("Cannot specify both times and at_least")
        elif times is None and at_least is None:
            times = 1
        calls = self.get_calls(hook=hook, method=method)
        actual_times = len(calls)
        identifier = dict(hook=hook, method=method)
        if times is not None:
            assert actual_times == times, (
                f"Expected {times} calls for {identifier}, "
                f"but was called {actual_times} times"
            )
        elif at_least is not None:
            assert actual_times >= at_least, (
                f"Expected at least {at_least} calls for {identifier}, "
                f"but was called {actual_times} times"
            )
        return True
    def assert_not_called(self, hook: str | None = None, method: str | None = None):
        """Assert that a hook was not called."""
        calls = self.get_calls(hook=hook, method=method)
        assert len(calls) == 0, f"Expected {hook!r} to not be called"
        return True
    def reset(self):
        """Clear all recorded calls."""
        self.calls.clear()
@pytest.fixture
def recording_middleware():
    """Yield a fresh RecordingMiddleware instance for each test."""
    yield RecordingMiddleware(name="recording_middleware")
@pytest.fixture
def mcp_server(recording_middleware):
    """Build a FastMCP server with one component of each kind (tool,
    resource, template, prompt) plus context-using tools, and install the
    recording middleware so tests can observe every hook invocation."""
    mcp = FastMCP()

    # Tagged tool, exercised by tool-call tests.
    @mcp.tool(tags={"add-tool"})
    def add(a: int, b: int) -> int:
        return a + b

    # Static resource.
    @mcp.resource("resource://test")
    def test_resource() -> str:
        return "test resource"

    # Templated resource (parameterized URI).
    @mcp.resource("resource://test-template/{x}")
    def test_resource_with_path(x: int) -> str:
        return f"test resource with {x}"

    @mcp.prompt
    def test_prompt(x: str) -> str:
        return f"test prompt with {x}"

    # Tools that exercise Context features (progress, logging, sampling).
    @mcp.tool
    async def progress_tool(context: Context) -> None:
        await context.report_progress(progress=1, total=10, message="test")

    @mcp.tool
    async def log_tool(context: Context) -> None:
        await context.info(message="test log")

    @mcp.tool
    async def sample_tool(context: Context) -> None:
        await context.sample("hello")

    mcp.add_middleware(recording_middleware)

    # Register progress handler
    # NOTE(review): reaches into the private low-level server API — verify
    # this stays valid across mcp SDK upgrades.
    @mcp._mcp_server.progress_notification()
    async def handle_progress(
        progress_token: str | int,
        progress: float,
        total: float | None,
        message: str | None,
    ):
        print("HI")

    return mcp
class TestMiddlewareHooks:
    """End-to-end checks that each MCP operation triggers the expected
    middleware hooks, and that hooks can filter or rewrite payloads.

    Call-count assertions use ``at_least`` because the client session
    itself (initialize, notifications) also flows through the middleware.
    """

    async def test_call_tool(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.call_tool("add", {"a": 1, "b": 2})

        assert recording_middleware.assert_called(at_least=9)
        assert recording_middleware.assert_called(method="tools/call", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_call_tool", at_least=1)

    async def test_read_resource(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.read_resource("resource://test")

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="resources/read", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_read_resource", at_least=1)

    async def test_read_resource_template(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        # Templated reads go through the same resources/read hook chain.
        async with Client(mcp_server) as client:
            await client.read_resource("resource://test-template/1")

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="resources/read", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_read_resource", at_least=1)

    async def test_get_prompt(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.get_prompt("test_prompt", {"x": "test"})

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="prompts/get", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_get_prompt", at_least=1)

    async def test_list_tools(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.list_tools()

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="tools/list", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_list_tools", at_least=1)

        # Verify the middleware receives a list of tools
        list_tools_calls = recording_middleware.get_calls(hook="on_list_tools")
        assert len(list_tools_calls) > 0
        result = list_tools_calls[0].result
        assert isinstance(result, list)

    async def test_list_resources(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.list_resources()

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="resources/list", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_list_resources", at_least=1)

        # Verify the middleware receives a list of resources
        list_resources_calls = recording_middleware.get_calls(hook="on_list_resources")
        assert len(list_resources_calls) > 0
        result = list_resources_calls[0].result
        assert isinstance(result, list)

    async def test_list_resource_templates(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.list_resource_templates()

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(
            method="resources/templates/list", at_least=3
        )
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(
            hook="on_list_resource_templates", at_least=1
        )

        # Verify the middleware receives a list of resource templates
        list_templates_calls = recording_middleware.get_calls(
            hook="on_list_resource_templates"
        )
        assert len(list_templates_calls) > 0
        result = list_templates_calls[0].result
        assert isinstance(result, list)

    async def test_list_prompts(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        async with Client(mcp_server) as client:
            await client.list_prompts()

        assert recording_middleware.assert_called(at_least=3)
        assert recording_middleware.assert_called(method="prompts/list", at_least=3)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_list_prompts", at_least=1)

        # Verify the middleware receives a list of prompts
        list_prompts_calls = recording_middleware.get_calls(hook="on_list_prompts")
        assert len(list_prompts_calls) > 0
        result = list_prompts_calls[0].result
        assert isinstance(result, list)

    async def test_initialize(
        self, mcp_server: FastMCP, recording_middleware: RecordingMiddleware
    ):
        # Opening the client session performs initialize; ping just forces it.
        async with Client(mcp_server) as client:
            await client.ping()

        assert recording_middleware.assert_called(at_least=1)
        assert recording_middleware.assert_called(hook="on_message", at_least=1)
        assert recording_middleware.assert_called(hook="on_request", at_least=1)
        assert recording_middleware.assert_called(hook="on_initialize", at_least=1)

    async def test_list_tools_filtering_middleware(self):
        """Test that middleware can filter tools."""

        class FilteringMiddleware(Middleware):
            async def on_list_tools(self, context: MiddlewareContext, call_next):
                result = await call_next(context)
                # Filter out tools with "private" tag - simple list filtering
                filtered_tools = [tool for tool in result if "private" not in tool.tags]
                return filtered_tools

        server = FastMCP("TestServer")

        @server.tool
        def public_tool(name: str) -> str:
            return f"Hello {name}"

        @server.tool(tags={"private"})
        def private_tool(secret: str) -> str:
            return f"Secret: {secret}"

        server.add_middleware(FilteringMiddleware())

        async with Client(server) as client:
            tools = await client.list_tools()
            assert len(tools) == 1
            assert tools[0].name == "public_tool"

    async def test_list_resources_filtering_middleware(self):
        """Test that middleware can filter resources."""

        class FilteringMiddleware(Middleware):
            async def on_list_resources(self, context: MiddlewareContext, call_next):
                result = await call_next(context)
                # Filter out resources with "private" tag
                filtered_resources = [
                    resource for resource in result if "private" not in resource.tags
                ]
                return filtered_resources

        server = FastMCP("TestServer")

        @server.resource("resource://public")
        def public_resource() -> str:
            return "public data"

        @server.resource("resource://private", tags={"private"})
        def private_resource() -> str:
            return "private data"

        server.add_middleware(FilteringMiddleware())

        async with Client(server) as client:
            resources = await client.list_resources()
            assert len(resources) == 1
            assert str(resources[0].uri) == "resource://public"

    async def test_list_resource_templates_filtering_middleware(self):
        """Test that middleware can filter resource templates."""

        class FilteringMiddleware(Middleware):
            async def on_list_resource_templates(
                self, context: MiddlewareContext, call_next
            ):
                result = await call_next(context)
                # Filter out templates with "private" tag
                filtered_templates = [
                    template for template in result if "private" not in template.tags
                ]
                return filtered_templates

        server = FastMCP("TestServer")

        @server.resource("resource://public/{x}")
        def public_template(x: str) -> str:
            return f"public {x}"

        @server.resource("resource://private/{x}", tags={"private"})
        def private_template(x: str) -> str:
            return f"private {x}"

        server.add_middleware(FilteringMiddleware())

        async with Client(server) as client:
            templates = await client.list_resource_templates()
            assert len(templates) == 1
            assert str(templates[0].uriTemplate) == "resource://public/{x}"

    async def test_list_prompts_filtering_middleware(self):
        """Test that middleware can filter prompts."""

        class FilteringMiddleware(Middleware):
            async def on_list_prompts(self, context: MiddlewareContext, call_next):
                result = await call_next(context)
                # Filter out prompts with "private" tag
                filtered_prompts = [
                    prompt for prompt in result if "private" not in prompt.tags
                ]
                return filtered_prompts

        server = FastMCP("TestServer")

        @server.prompt
        def public_prompt(name: str) -> str:
            return f"Hello {name}"

        @server.prompt(tags={"private"})
        def private_prompt(secret: str) -> str:
            return f"Secret: {secret}"

        server.add_middleware(FilteringMiddleware())

        async with Client(server) as client:
            prompts = await client.list_prompts()
            assert len(prompts) == 1
            assert prompts[0].name == "public_prompt"

    async def test_call_tool_middleware(self):
        # Middleware can mutate both the inbound arguments and the result:
        # a=1 -> 101, so 101 + 2 = 103, then result 103 + 5 = 108.
        server = FastMCP()

        @server.tool
        def add(a: int, b: int) -> int:
            return a + b

        class CallToolMiddleware(Middleware):
            async def on_call_tool(
                self,
                context: MiddlewareContext[mcp.types.CallToolRequestParams],
                call_next: CallNext[mcp.types.CallToolRequestParams, ToolResult],
            ):
                # modify argument
                if context.message.name == "add":
                    assert context.message.arguments is not None
                    args = context.message.arguments
                    assert isinstance(args["a"], int)
                    args["a"] += 100
                result = await call_next(context)
                # modify result
                if context.message.name == "add":
                    assert result.structured_content is not None
                    content = result.structured_content
                    assert isinstance(content["result"], int)
                    content["result"] += 5
                return result

        server.add_middleware(CallToolMiddleware())

        async with Client(server) as client:
            result = await client.call_tool("add", {"a": 1, "b": 2})
            assert isinstance(result.structured_content["result"], int)
            assert result.structured_content["result"] == 108
class TestApplyMiddlewareParameter:
    """Tests for run_middleware parameter on execution methods."""

    async def test_call_tool_with_run_middleware_true(self):
        """Middleware is applied when run_middleware=True (default)."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.tool
        def add(a: int, b: int) -> int:
            return a + b

        server.add_middleware(recording)
        result = await server.call_tool("add", {"a": 1, "b": 2})
        assert result.structured_content["result"] == 3  # type: ignore[union-attr,index]
        assert recording.assert_called(hook="on_call_tool", times=1)

    async def test_call_tool_with_run_middleware_false(self):
        """Middleware is NOT applied when run_middleware=False."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.tool
        def add(a: int, b: int) -> int:
            return a + b

        server.add_middleware(recording)
        result = await server.call_tool("add", {"a": 1, "b": 2}, run_middleware=False)
        assert result.structured_content["result"] == 3  # type: ignore[union-attr,index]
        # Middleware should not have been called
        assert len(recording.calls) == 0

    async def test_read_resource_with_run_middleware_true(self):
        """Middleware is applied when run_middleware=True (default)."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.resource("resource://test")
        def test_resource() -> str:
            return "test content"

        server.add_middleware(recording)
        result = await server.read_resource("resource://test")
        assert len(result.contents) == 1
        assert result.contents[0].content == "test content"
        assert recording.assert_called(hook="on_read_resource", times=1)

    async def test_read_resource_with_run_middleware_false(self):
        """Middleware is NOT applied when run_middleware=False."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.resource("resource://test")
        def test_resource() -> str:
            return "test content"

        server.add_middleware(recording)
        result = await server.read_resource("resource://test", run_middleware=False)
        assert len(result.contents) == 1
        assert result.contents[0].content == "test content"
        # Middleware should not have been called
        assert len(recording.calls) == 0

    async def test_read_resource_template_with_run_middleware_false(self):
        """Templates also skip middleware when run_middleware=False."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.resource("resource://items/{item_id}")
        def get_item(item_id: int) -> str:
            return f"item {item_id}"

        server.add_middleware(recording)
        result = await server.read_resource("resource://items/42", run_middleware=False)
        assert len(result.contents) == 1
        assert result.contents[0].content == "item 42"
        assert len(recording.calls) == 0

    async def test_render_prompt_with_run_middleware_true(self):
        """Middleware is applied when run_middleware=True (default)."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.prompt
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        server.add_middleware(recording)
        result = await server.render_prompt("greet", {"name": "World"})
        assert len(result.messages) == 1
        # content is TextContent | EmbeddedResource, but we know it's TextContent from the test
        assert isinstance(result.messages[0].content, mcp.types.TextContent)
        assert result.messages[0].content.text == "Hello, World!"
        assert recording.assert_called(hook="on_get_prompt", times=1)

    async def test_render_prompt_with_run_middleware_false(self):
        """Middleware is NOT applied when run_middleware=False."""
        recording = RecordingMiddleware()
        server = FastMCP()

        @server.prompt
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        server.add_middleware(recording)
        result = await server.render_prompt(
            "greet", {"name": "World"}, run_middleware=False
        )
        assert len(result.messages) == 1
        # content is TextContent | EmbeddedResource, but we know it's TextContent from the test
        assert isinstance(result.messages[0].content, mcp.types.TextContent)
        assert result.messages[0].content.text == "Hello, World!"
        # Middleware should not have been called
        assert len(recording.calls) == 0

    async def test_middleware_modification_skipped_when_run_middleware_false(self):
        """Middleware that modifies args/results is skipped."""

        class ModifyingMiddleware(Middleware):
            async def on_call_tool(self, context: MiddlewareContext, call_next):
                # Double the 'a' argument
                assert context.message.arguments is not None
                context.message.arguments["a"] *= 2
                return await call_next(context)

        server = FastMCP()

        @server.tool
        def add(a: int, b: int) -> int:
            return a + b

        server.add_middleware(ModifyingMiddleware())

        # With middleware: a=5 becomes a=10, result = 10 + 3 = 13
        result_with = await server.call_tool("add", {"a": 5, "b": 3})
        assert result_with.structured_content["result"] == 13  # type: ignore[union-attr,index]

        # Without middleware: a=5 stays a=5, result = 5 + 3 = 8
        result_without = await server.call_tool(
            "add", {"a": 5, "b": 3}, run_middleware=False
        )
        assert result_without.structured_content["result"] == 8  # type: ignore[union-attr,index]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_middleware.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/middleware.py | from __future__ import annotations
import logging
from collections.abc import Awaitable, Sequence
from dataclasses import dataclass, field, replace
from datetime import datetime, timezone
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Generic,
Literal,
Protocol,
runtime_checkable,
)
import mcp.types as mt
from typing_extensions import TypeVar
from fastmcp.prompts.prompt import Prompt, PromptResult
from fastmcp.resources.resource import Resource, ResourceResult
from fastmcp.resources.template import ResourceTemplate
from fastmcp.tools.tool import Tool, ToolResult
if TYPE_CHECKING:
from fastmcp.server.context import Context
__all__ = [
"CallNext",
"Middleware",
"MiddlewareContext",
]
logger = logging.getLogger(__name__)
T = TypeVar("T", default=Any)
R = TypeVar("R", covariant=True, default=Any)
@runtime_checkable
class CallNext(Protocol[T, R]):
    """Protocol for the continuation passed to middleware hooks.

    Invoking it forwards the (possibly modified) context to the next
    handler in the chain and awaits its result.
    """

    def __call__(self, context: MiddlewareContext[T]) -> Awaitable[R]: ...
@dataclass(kw_only=True, frozen=True)
class MiddlewareContext(Generic[T]):
    """
    Unified context for all middleware operations.
    """

    # The raw MCP message (request params, notification payload, etc.).
    message: T
    # Server-side FastMCP context, when one is available.
    fastmcp_context: Context | None = None

    # Common metadata
    source: Literal["client", "server"] = "client"
    type: Literal["request", "notification"] = "request"
    method: str | None = None
    # Creation time, always timezone-aware UTC.
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def copy(self, **kwargs: Any) -> MiddlewareContext[T]:
        # Frozen dataclass: produce a new instance with the given overrides.
        return replace(self, **kwargs)
def make_middleware_wrapper(
    middleware: Middleware, call_next: CallNext[T, R]
) -> CallNext[T, R]:
    """Bind one middleware and its continuation into a single callable.

    The returned coroutine function has the CallNext shape, so it can be
    handed to the next middleware in the chain as *its* continuation.
    """

    async def call_with_middleware(context: MiddlewareContext[T]) -> R:
        return await middleware(context, call_next)

    return call_with_middleware
class Middleware:
    """Base class for FastMCP middleware.

    Subclasses override whichever ``on_*`` hooks they care about; each hook
    receives the middleware context plus a ``call_next`` continuation and may
    act before and/or after awaiting it. Dispatch layers hooks from most
    specific (per MCP method) to most general (``on_message``).
    """

    async def __call__(
        self,
        context: MiddlewareContext[T],
        call_next: CallNext[T, Any],
    ) -> Any:
        """Main entry point: assemble the hook chain and run it."""
        handler_chain = await self._dispatch_handler(
            context,
            call_next=call_next,
        )
        return await handler_chain(context)

    async def _dispatch_handler(
        self, context: MiddlewareContext[Any], call_next: CallNext[Any, Any]
    ) -> CallNext[Any, Any]:
        """Layer the applicable hooks around ``call_next``, innermost first."""
        handler = call_next

        # Innermost layer: the method-specific hook, when one applies.
        method_hooks = {
            "initialize": self.on_initialize,
            "tools/call": self.on_call_tool,
            "resources/read": self.on_read_resource,
            "prompts/get": self.on_get_prompt,
            "tools/list": self.on_list_tools,
            "resources/list": self.on_list_resources,
            "resources/templates/list": self.on_list_resource_templates,
            "prompts/list": self.on_list_prompts,
        }
        specific_hook = method_hooks.get(context.method)
        if specific_hook is not None:
            handler = partial(specific_hook, call_next=handler)

        # Middle layer: request vs. notification.
        if context.type == "request":
            handler = partial(self.on_request, call_next=handler)
        elif context.type == "notification":
            handler = partial(self.on_notification, call_next=handler)

        # Outermost layer: every message passes through on_message.
        return partial(self.on_message, call_next=handler)

    async def on_message(
        self,
        context: MiddlewareContext[Any],
        call_next: CallNext[Any, Any],
    ) -> Any:
        """Called for every message; default passes straight through."""
        return await call_next(context)

    async def on_request(
        self,
        context: MiddlewareContext[mt.Request[Any, Any]],
        call_next: CallNext[mt.Request[Any, Any], Any],
    ) -> Any:
        """Called for every request; default passes straight through."""
        return await call_next(context)

    async def on_notification(
        self,
        context: MiddlewareContext[mt.Notification[Any, Any]],
        call_next: CallNext[mt.Notification[Any, Any], Any],
    ) -> Any:
        """Called for every notification; default passes straight through."""
        return await call_next(context)

    async def on_initialize(
        self,
        context: MiddlewareContext[mt.InitializeRequest],
        call_next: CallNext[mt.InitializeRequest, mt.InitializeResult | None],
    ) -> mt.InitializeResult | None:
        """Hook for the ``initialize`` handshake."""
        return await call_next(context)

    async def on_call_tool(
        self,
        context: MiddlewareContext[mt.CallToolRequestParams],
        call_next: CallNext[mt.CallToolRequestParams, ToolResult],
    ) -> ToolResult:
        """Hook for ``tools/call``."""
        return await call_next(context)

    async def on_read_resource(
        self,
        context: MiddlewareContext[mt.ReadResourceRequestParams],
        call_next: CallNext[mt.ReadResourceRequestParams, ResourceResult],
    ) -> ResourceResult:
        """Hook for ``resources/read``."""
        return await call_next(context)

    async def on_get_prompt(
        self,
        context: MiddlewareContext[mt.GetPromptRequestParams],
        call_next: CallNext[mt.GetPromptRequestParams, PromptResult],
    ) -> PromptResult:
        """Hook for ``prompts/get``."""
        return await call_next(context)

    async def on_list_tools(
        self,
        context: MiddlewareContext[mt.ListToolsRequest],
        call_next: CallNext[mt.ListToolsRequest, Sequence[Tool]],
    ) -> Sequence[Tool]:
        """Hook for ``tools/list``; receives and returns the tool list."""
        return await call_next(context)

    async def on_list_resources(
        self,
        context: MiddlewareContext[mt.ListResourcesRequest],
        call_next: CallNext[mt.ListResourcesRequest, Sequence[Resource]],
    ) -> Sequence[Resource]:
        """Hook for ``resources/list``; receives and returns the resource list."""
        return await call_next(context)

    async def on_list_resource_templates(
        self,
        context: MiddlewareContext[mt.ListResourceTemplatesRequest],
        call_next: CallNext[
            mt.ListResourceTemplatesRequest, Sequence[ResourceTemplate]
        ],
    ) -> Sequence[ResourceTemplate]:
        """Hook for ``resources/templates/list``."""
        return await call_next(context)

    async def on_list_prompts(
        self,
        context: MiddlewareContext[mt.ListPromptsRequest],
        call_next: CallNext[mt.ListPromptsRequest, Sequence[Prompt]],
    ) -> Sequence[Prompt]:
        """Hook for ``prompts/list``; receives and returns the prompt list."""
        return await call_next(context)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/middleware.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/get_file.py | # /// script
# dependencies = ["aiohttp", "fastmcp"]
# ///
# uv pip install aiohttp fastmcp
import aiohttp
from fastmcp.server import FastMCP
from fastmcp.utilities.types import File
def create_server():
    """Build the demo FastMCP server exposing two file-returning tools.

    Returns:
        The configured FastMCP instance (not yet running).
    """
    mcp = FastMCP(name="File Demo", instructions="Get files from the server or URL.")

    # NOTE: the tool docstrings below are surfaced to clients as the tool
    # descriptions, so they are user-facing text.
    @mcp.tool()
    async def get_test_file_from_server(path: str = "requirements.txt") -> File:
        """
        Get a test file from the server. If the path is not provided, it defaults to 'requirements.txt'.
        """
        return File(path=path)

    @mcp.tool()
    async def get_test_pdf_from_url(
        url: str = "https://mozilla.github.io/pdf.js/web/compressed.tracemonkey-pldi-09.pdf",
    ) -> File:
        """
        Get a test PDF file from a URL. If the URL is not provided, it defaults to a sample PDF.
        """
        # Download the PDF bytes, then wrap them so FastMCP serves them as a file.
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                pdf_data = await response.read()
        return File(data=pdf_data, format="pdf")

    return mcp
if __name__ == "__main__":
    # Serve over SSE on all interfaces; clients connect at http://<host>:8001/sse
    create_server().run(transport="sse", host="0.0.0.0", port=8001, path="/sse")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/get_file.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/tools/tool_transform.py | from __future__ import annotations
import inspect
import warnings
from collections.abc import Callable
from contextvars import ContextVar
from copy import deepcopy
from dataclasses import dataclass
from typing import Annotated, Any, Literal, cast
import pydantic_core
from mcp.types import ToolAnnotations
from pydantic import ConfigDict
from pydantic.fields import Field
from pydantic.functional_validators import BeforeValidator
from pydantic.json_schema import SkipJsonSchema
import fastmcp
from fastmcp.tools.function_parsing import ParsedFunction
from fastmcp.tools.tool import Tool, ToolResult, _convert_to_content
from fastmcp.utilities.components import _convert_set_default_none
from fastmcp.utilities.json_schema import compress_schema
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.types import (
FastMCPBaseModel,
NotSet,
NotSetT,
get_cached_typeadapter,
)
logger = get_logger(__name__)
# Context variable to store current transformed tool.
# Set for the duration of TransformedTool.run() so that forward() and
# forward_raw() can locate the active tool from inside custom functions;
# a ContextVar keeps this safe under concurrent async tool calls.
_current_tool: ContextVar[TransformedTool | None] = ContextVar(
    "_current_tool", default=None
)
async def forward(**kwargs: Any) -> ToolResult:
    """Call the parent tool, applying this tool's argument transformation.

    Only valid inside a transformed tool's custom function. Transformed
    argument names are mapped back to the parent's original names (and
    validated) before the parent tool runs. For example, if a transform maps
    parent args ``x``/``y`` to ``a``/``b``, then ``forward(a=1, b=2)`` invokes
    the parent with ``x=1`` and ``y=2``.

    Args:
        **kwargs: Arguments for the parent tool, using the transformed names.

    Returns:
        The ToolResult from the parent tool execution.

    Raises:
        RuntimeError: If called outside a transformed tool context.
        TypeError: If provided arguments don't match the transformed schema.
    """
    active_tool = _current_tool.get()
    if active_tool is None:
        raise RuntimeError("forward() can only be called within a transformed tool")
    # forwarding_fn performs the rename/validation before hitting the parent.
    return await active_tool.forwarding_fn(**kwargs)
async def forward_raw(**kwargs: Any) -> ToolResult:
    """Call the parent tool directly, bypassing all transformation.

    Only valid inside a transformed tool's custom function. No renaming or
    validation is applied: ``kwargs`` must use the parent tool's original
    parameter names. For example, if the parent declares ``x`` and ``y``,
    call ``forward_raw(x=1, y=2)``.

    Args:
        **kwargs: Arguments passed straight through to the parent tool.

    Returns:
        The ToolResult from the parent tool execution.

    Raises:
        RuntimeError: If called outside a transformed tool context.
    """
    active_tool = _current_tool.get()
    if active_tool is None:
        raise RuntimeError("forward_raw() can only be called within a transformed tool")
    return await active_tool.parent_tool.run(kwargs)
@dataclass(kw_only=True)
class ArgTransform:
    """Configuration for transforming a parent tool's argument.

    This class allows fine-grained control over how individual arguments are transformed
    when creating a new tool from an existing one. You can rename arguments, change their
    descriptions, add default values, or hide them from clients while passing constants.

    Attributes:
        name: New name for the argument. Use None to keep original name, or ... for no change.
        description: New description for the argument. Use None to remove description, or ... for no change.
        default: New default value for the argument. Use ... for no change.
        default_factory: Callable that returns a default value. Cannot be used with default.
        type: New type for the argument. Use ... for no change.
        hide: If True, hide this argument from clients but pass a constant value to parent.
        required: If True, make argument required (remove default). Use ... for no change.
        examples: Examples for the argument. Use ... for no change.

    Examples:
        Rename argument 'old_name' to 'new_name'
        ```python
        ArgTransform(name="new_name")
        ```

        Change description only
        ```python
        ArgTransform(description="Updated description")
        ```

        Add a default value (makes argument optional)
        ```python
        ArgTransform(default=42)
        ```

        Add a default factory (makes argument optional)
        ```python
        ArgTransform(default_factory=lambda: time.time())
        ```

        Change the type
        ```python
        ArgTransform(type=str)
        ```

        Hide the argument entirely from clients
        ```python
        ArgTransform(hide=True)
        ```

        Hide argument but pass a constant value to parent
        ```python
        ArgTransform(hide=True, default="constant_value")
        ```

        Hide argument but pass a factory-generated value to parent
        ```python
        ArgTransform(hide=True, default_factory=lambda: uuid.uuid4().hex)
        ```

        Make an optional parameter required (removes any default)
        ```python
        ArgTransform(required=True)
        ```

        Combine multiple transformations
        ```python
        ArgTransform(name="new_name", description="New desc", default=None, type=int)
        ```
    """

    # NotSet is a sentinel meaning "leave this aspect of the argument unchanged";
    # it is distinct from None, which is a real value for several fields.
    name: str | NotSetT = NotSet
    description: str | NotSetT = NotSet
    default: Any | NotSetT = NotSet
    default_factory: Callable[[], Any] | NotSetT = NotSet
    type: Any | NotSetT = NotSet
    hide: bool = False
    required: Literal[True] | NotSetT = NotSet
    examples: Any | NotSetT = NotSet

    def __post_init__(self):
        """Validate that only one of default or default_factory is provided."""
        has_default = self.default is not NotSet
        has_factory = self.default_factory is not NotSet

        # A static default and a factory are mutually exclusive.
        if has_default and has_factory:
            raise ValueError(
                "Cannot specify both 'default' and 'default_factory' in ArgTransform. "
                "Use either 'default' for a static value or 'default_factory' for a callable."
            )
        # Factories are only usable on hidden params: JSON schema cannot
        # describe a dynamic default to clients.
        if has_factory and not self.hide:
            raise ValueError(
                "default_factory can only be used with hide=True. "
                "Visible parameters must use static 'default' values since JSON schema "
                "cannot represent dynamic factories."
            )
        # Required params by definition have no default of either kind.
        if self.required is True and (has_default or has_factory):
            raise ValueError(
                "Cannot specify 'required=True' with 'default' or 'default_factory'. "
                "Required parameters cannot have defaults."
            )
        # A hidden param can never be required: clients cannot supply it.
        if self.hide and self.required is True:
            raise ValueError(
                "Cannot specify both 'hide=True' and 'required=True'. "
                "Hidden parameters cannot be required since clients cannot provide them."
            )
        # required=False is expressed by giving the argument a default instead.
        if self.required is False:
            raise ValueError(
                "Cannot specify 'required=False'. Set a default value instead."
            )
class ArgTransformConfig(FastMCPBaseModel):
    """A model for requesting a single argument transform.

    Serializable (e.g. config-file) counterpart of ArgTransform; unset fields
    are excluded when converting, so they mean "no change".
    """

    name: str | None = Field(default=None, description="The new name for the argument.")
    description: str | None = Field(
        default=None, description="The new description for the argument."
    )
    default: str | int | float | bool | None = Field(
        default=None, description="The new default value for the argument."
    )
    hide: bool = Field(
        default=False, description="Whether to hide the argument from the tool."
    )
    required: Literal[True] | None = Field(
        default=None, description="Whether the argument is required."
    )
    examples: Any | None = Field(default=None, description="Examples of the argument.")

    def to_arg_transform(self) -> ArgTransform:
        """Convert the argument transform to a FastMCP argument transform."""
        # exclude_unset keeps ArgTransform's NotSet sentinels for fields the
        # user never provided.
        return ArgTransform(**self.model_dump(exclude_unset=True))  # pyright: ignore[reportAny]
class TransformedTool(Tool):
    """A tool that is transformed from another tool.

    This class represents a tool that has been created by transforming another tool.
    It supports argument renaming, schema modification, custom function injection,
    structured output control, and provides context for the forward() and forward_raw() functions.

    The transformation can be purely schema-based (argument renaming, dropping, etc.)
    or can include a custom function that uses forward() to call the parent tool
    with transformed arguments. Output schemas and structured outputs are automatically
    inherited from the parent tool but can be overridden or disabled.

    Attributes:
        parent_tool: The original tool that this tool was transformed from.
        fn: The function to execute when this tool is called (either the forwarding
            function for pure transformations or a custom user function).
        forwarding_fn: Internal function that handles argument transformation and
            validation when forward() is called from custom functions.
    """

    # arbitrary_types_allowed is required because the stored callables are not
    # pydantic-validatable types.
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

    parent_tool: SkipJsonSchema[Tool]
    fn: SkipJsonSchema[Callable[..., Any]]
    forwarding_fn: SkipJsonSchema[
        Callable[..., Any]
    ]  # Always present, handles arg transformation
    # Maps original parent argument names to their ArgTransform specs.
    transform_args: dict[str, ArgTransform]

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        """Run the tool with context set for forward() functions.

        This method executes the tool's function while setting up the context
        that allows forward() and forward_raw() to work correctly within custom
        functions.

        Args:
            arguments: Dictionary of arguments to pass to the tool's function.

        Returns:
            ToolResult object containing content and optional structured output.
        """
        # Fill in missing arguments with schema defaults to ensure
        # ArgTransform defaults take precedence over function defaults
        arguments = arguments.copy()
        properties = self.parameters.get("properties", {})
        for param_name, param_schema in properties.items():
            if param_name not in arguments and "default" in param_schema:
                # Check if this parameter has a default_factory from transform_args
                # We need to call the factory for each run, not use the cached schema value
                has_factory_default = False
                if self.transform_args:
                    # Find the original parameter name that maps to this param_name
                    for orig_name, transform in self.transform_args.items():
                        transform_name = (
                            transform.name
                            if transform.name is not NotSet
                            else orig_name
                        )
                        if (
                            transform_name == param_name
                            and transform.default_factory is not NotSet
                        ):
                            # Type check to ensure default_factory is callable
                            if callable(transform.default_factory):
                                arguments[param_name] = transform.default_factory()
                                has_factory_default = True
                            break
                if not has_factory_default:
                    # Fall back to the static default captured in the schema.
                    arguments[param_name] = param_schema["default"]
        # Publish this tool via the context variable so forward()/forward_raw()
        # called from a custom function can locate it.
        token = _current_tool.set(self)
        try:
            result = await self.fn(**arguments)

            # If transform function returns ToolResult, respect our output_schema setting
            if isinstance(result, ToolResult):
                if self.output_schema is None:
                    # Check if this is from a custom function that returns ToolResult
                    return_annotation = inspect.signature(self.fn).return_annotation
                    if return_annotation is ToolResult:
                        # Custom function returns ToolResult - preserve its content
                        return result
                    else:
                        # Forwarded call with no explicit schema - preserve parent's structured content
                        # The parent tool may have generated structured content via its own fallback logic
                        return result
                elif self.output_schema.get(
                    "type"
                ) != "object" and not self.output_schema.get("x-fastmcp-wrap-result"):
                    # Non-object explicit schemas disable structured content
                    return ToolResult(
                        content=result.content,
                        structured_content=None,
                    )
                else:
                    return result

            # Otherwise convert to content and create ToolResult with proper structured content
            unstructured_result = _convert_to_content(
                result, serializer=self.serializer
            )

            structured_output = None
            # First handle structured content based on output schema, if any
            if self.output_schema is not None:
                if self.output_schema.get("x-fastmcp-wrap-result"):
                    # Schema says wrap - always wrap in result key
                    structured_output = {"result": result}
                else:
                    structured_output = result
            # If no output schema, try to serialize the result. If it is a dict, use
            # it as structured content. If it is not a dict, ignore it.
            if structured_output is None:
                try:
                    structured_output = pydantic_core.to_jsonable_python(result)
                    if not isinstance(structured_output, dict):
                        structured_output = None
                except Exception:
                    # Best-effort serialization only; non-serializable results
                    # simply get no structured content.
                    pass

            return ToolResult(
                content=unstructured_result,
                structured_content=structured_output,
            )
        finally:
            # Always restore the previous context, even if the tool raised.
            _current_tool.reset(token)

    @classmethod
    def from_tool(
        cls,
        tool: Tool | Callable[..., Any],
        name: str | None = None,
        version: str | NotSetT | None = NotSet,
        title: str | NotSetT | None = NotSet,
        description: str | NotSetT | None = NotSet,
        tags: set[str] | None = None,
        transform_fn: Callable[..., Any] | None = None,
        transform_args: dict[str, ArgTransform] | None = None,
        annotations: ToolAnnotations | NotSetT | None = NotSet,
        output_schema: dict[str, Any] | NotSetT | None = NotSet,
        serializer: Callable[[Any], str] | NotSetT | None = NotSet,  # Deprecated
        meta: dict[str, Any] | NotSetT | None = NotSet,
    ) -> TransformedTool:
        """Create a transformed tool from a parent tool.

        Args:
            tool: The parent tool to transform.
            transform_fn: Optional custom function. Can use forward() and forward_raw()
                to call the parent tool. Functions with **kwargs receive transformed
                argument names.
            name: New name for the tool. Defaults to parent tool's name.
            version: New version for the tool. Defaults to parent tool's version.
            title: New title for the tool. Defaults to parent tool's title.
            transform_args: Optional transformations for parent tool arguments.
                Only specified arguments are transformed, others pass through unchanged:
                - Simple rename (str)
                - Complex transformation (rename/description/default/drop) (ArgTransform)
                - Drop the argument (None)
            description: New description. Defaults to parent's description.
            tags: New tags. Defaults to parent's tags.
            annotations: New annotations. Defaults to parent's annotations.
            output_schema: Control output schema for structured outputs:
                - None (default): Inherit from transform_fn if available, then parent tool
                - dict: Use custom output schema
                - False: Disable output schema and structured outputs
            serializer: Deprecated. Return ToolResult from your tools for full control over serialization.
            meta: Control meta information:
                - NotSet (default): Inherit from parent tool
                - dict: Use custom meta information
                - None: Remove meta information

        Returns:
            TransformedTool with the specified transformations.

        Examples:
            # Transform specific arguments only
            ```python
            Tool.from_tool(parent, transform_args={"old": "new"})  # Others unchanged
            ```

            # Custom function with partial transforms
            ```python
            async def custom(x: int, y: int) -> str:
                result = await forward(x=x, y=y)
                return f"Custom: {result}"

            Tool.from_tool(parent, transform_fn=custom, transform_args={"a": "x", "b": "y"})
            ```

            # Using **kwargs (gets all args, transformed and untransformed)
            ```python
            async def flexible(**kwargs) -> str:
                result = await forward(**kwargs)
                return f"Got: {kwargs}"

            Tool.from_tool(parent, transform_fn=flexible, transform_args={"a": "x"})
            ```

            # Control structured outputs and schemas
            ```python
            # Custom output schema
            Tool.from_tool(parent, output_schema={
                "type": "object",
                "properties": {"status": {"type": "string"}}
            })

            # Disable structured outputs
            Tool.from_tool(parent, output_schema=None)

            # Return ToolResult for full control
            async def custom_output(**kwargs) -> ToolResult:
                result = await forward(**kwargs)
                return ToolResult(
                    content=[TextContent(text="Summary")],
                    structured_content={"processed": True}
                )
            ```
        """
        tool = Tool._ensure_tool(tool)

        if (
            serializer is not NotSet
            and serializer is not None
            and fastmcp.settings.deprecation_warnings
        ):
            warnings.warn(
                "The `serializer` parameter is deprecated. "
                "Return ToolResult from your tools for full control over serialization. "
                "See https://gofastmcp.com/servers/tools#custom-serialization for migration examples.",
                DeprecationWarning,
                stacklevel=2,
            )

        transform_args = transform_args or {}
        if transform_fn is not None:
            parsed_fn = ParsedFunction.from_function(transform_fn, validate=False)
        else:
            parsed_fn = None

        # Validate transform_args: every key must name a real parent parameter.
        parent_params = set(tool.parameters.get("properties", {}).keys())
        unknown_args = set(transform_args.keys()) - parent_params
        if unknown_args:
            raise ValueError(
                f"Unknown arguments in transform_args: {', '.join(sorted(unknown_args))}. "
                f"Parent tool `{tool.name}` has: {', '.join(sorted(parent_params))}"
            )

        # Always create the forwarding transform
        schema, forwarding_fn = cls._create_forwarding_transform(tool, transform_args)

        # Handle output schema
        if output_schema is NotSet:
            # Use smart fallback: try custom function, then parent
            if transform_fn is not None:
                # parsed fn is not none here
                final_output_schema = cast(ParsedFunction, parsed_fn).output_schema
                if final_output_schema is None:
                    # Check if function returns ToolResult - if so, don't fall back to parent
                    return_annotation = inspect.signature(
                        transform_fn
                    ).return_annotation
                    if return_annotation is ToolResult:
                        final_output_schema = None
                    else:
                        final_output_schema = tool.output_schema
            else:
                final_output_schema = tool.output_schema
        else:
            final_output_schema = cast(dict | None, output_schema)

        if transform_fn is None:
            # User wants pure transformation - use forwarding_fn as the main function
            final_fn = forwarding_fn
            final_schema = schema
        else:
            # parsed fn is not none here
            parsed_fn = cast(ParsedFunction, parsed_fn)
            # User provided custom function - merge schemas
            final_fn = transform_fn
            has_kwargs = cls._function_has_kwargs(transform_fn)

            # Validate function parameters against transformed schema
            fn_params = set(parsed_fn.input_schema.get("properties", {}).keys())
            transformed_params = set(schema.get("properties", {}).keys())

            if not has_kwargs:
                # Without **kwargs, function must declare all transformed params
                # Check if function is missing any parameters required after transformation
                missing_params = transformed_params - fn_params
                if missing_params:
                    raise ValueError(
                        f"Function missing parameters required after transformation: "
                        f"{', '.join(sorted(missing_params))}. "
                        f"Function declares: {', '.join(sorted(fn_params))}"
                    )

                # ArgTransform takes precedence over function signature
                # Start with function schema as base, then override with transformed schema
                final_schema = cls._merge_schema_with_precedence(
                    parsed_fn.input_schema, schema
                )
            else:
                # With **kwargs, function can access all transformed params
                # ArgTransform takes precedence over function signature
                # No validation needed - kwargs makes everything accessible
                # Start with function schema as base, then override with transformed schema
                final_schema = cls._merge_schema_with_precedence(
                    parsed_fn.input_schema, schema
                )

        # Additional validation: check for naming conflicts after transformation
        if transform_args:
            new_names = []
            for old_name in parent_params:
                transform = transform_args.get(old_name, ArgTransform())
                if transform.hide:
                    # Hidden params are not exposed, so they cannot collide.
                    continue
                if transform.name is not NotSet:
                    new_names.append(transform.name)
                else:
                    new_names.append(old_name)

            # Check for duplicate names after transformation
            name_counts = {}
            for arg_name in new_names:
                name_counts[arg_name] = name_counts.get(arg_name, 0) + 1
            duplicates = [
                arg_name for arg_name, count in name_counts.items() if count > 1
            ]
            if duplicates:
                raise ValueError(
                    f"Multiple arguments would be mapped to the same names: "
                    f"{', '.join(sorted(duplicates))}"
                )

        # NotSetT sentinels distinguish "not provided" from an explicit None,
        # so None can be used to clear inherited values.
        final_name = name or tool.name
        final_version = version if not isinstance(version, NotSetT) else tool.version
        final_description = (
            description if not isinstance(description, NotSetT) else tool.description
        )
        final_title = title if not isinstance(title, NotSetT) else tool.title
        final_meta = meta if not isinstance(meta, NotSetT) else tool.meta
        final_annotations = (
            annotations if not isinstance(annotations, NotSetT) else tool.annotations
        )
        final_serializer = (
            serializer if not isinstance(serializer, NotSetT) else tool.serializer
        )

        transformed_tool = cls(
            fn=final_fn,
            forwarding_fn=forwarding_fn,
            parent_tool=tool,
            name=final_name,
            version=final_version,
            title=final_title,
            description=final_description,
            parameters=final_schema,
            output_schema=final_output_schema,
            tags=tags or tool.tags,
            annotations=final_annotations,
            serializer=final_serializer,
            meta=final_meta,
            transform_args=transform_args,
            auth=tool.auth,
        )

        return transformed_tool

    @classmethod
    def _create_forwarding_transform(
        cls,
        parent_tool: Tool,
        transform_args: dict[str, ArgTransform] | None,
    ) -> tuple[dict[str, Any], Callable[..., Any]]:
        """Create schema and forwarding function that encapsulates all transformation logic.

        This method builds a new JSON schema for the transformed tool and creates a
        forwarding function that validates arguments against the new schema and maps
        them back to the parent tool's expected arguments.

        Args:
            parent_tool: The original tool to transform.
            transform_args: Dictionary defining how to transform each argument.

        Returns:
            A tuple containing:
            - The new JSON schema for the transformed tool as a dictionary
            - Async function that validates and forwards calls to the parent tool
        """
        # Build transformed schema and mapping
        # Deep copy to prevent compress_schema from mutating parent tool's $defs
        parent_defs = deepcopy(parent_tool.parameters.get("$defs", {}))
        parent_props = parent_tool.parameters.get("properties", {}).copy()
        parent_required = set(parent_tool.parameters.get("required", []))

        new_props = {}
        new_required = set()
        new_to_old = {}  # Reverse map: transformed name -> parent name.
        hidden_defaults = {}  # Track hidden parameters with constant values

        for old_name, old_schema in parent_props.items():
            # Check if parameter is in transform_args
            if transform_args and old_name in transform_args:
                transform = transform_args[old_name]
            else:
                # Default behavior - pass through (no transformation)
                transform = ArgTransform()  # Default ArgTransform with no changes

            # Handle hidden parameters with defaults
            if transform.hide:
                # Validate that hidden parameters without user defaults have parent defaults
                has_user_default = (
                    transform.default is not NotSet
                    or transform.default_factory is not NotSet
                )
                if not has_user_default and old_name in parent_required:
                    raise ValueError(
                        f"Hidden parameter '{old_name}' has no default value in parent tool "
                        f"and no default or default_factory provided in ArgTransform. Either provide a default "
                        f"or default_factory in ArgTransform or don't hide required parameters."
                    )
                if has_user_default:
                    # Store info for later factory calling or direct value
                    hidden_defaults[old_name] = transform
                # Skip adding to schema (not exposed to clients)
                continue

            transform_result = cls._apply_single_transform(
                old_name,
                old_schema,
                transform,
                old_name in parent_required,
            )
            if transform_result:
                new_name, new_schema, is_required = transform_result
                new_props[new_name] = new_schema
                new_to_old[new_name] = old_name
                if is_required:
                    new_required.add(new_name)

        schema = {
            "type": "object",
            "properties": new_props,
            "required": list(new_required),
            "additionalProperties": False,
        }
        if parent_defs:
            schema["$defs"] = parent_defs
        schema = compress_schema(schema)

        # Create forwarding function that closes over everything it needs
        async def _forward(**kwargs: Any):
            # Validate arguments
            valid_args = set(new_props.keys())
            provided_args = set(kwargs.keys())
            unknown_args = provided_args - valid_args
            if unknown_args:
                raise TypeError(
                    f"Got unexpected keyword argument(s): {', '.join(sorted(unknown_args))}"
                )

            # Check required arguments
            missing_args = new_required - provided_args
            if missing_args:
                raise TypeError(
                    f"Missing required argument(s): {', '.join(sorted(missing_args))}"
                )

            # Map arguments to parent names
            parent_args = {}
            for new_name, value in kwargs.items():
                old_name = new_to_old.get(new_name, new_name)
                parent_args[old_name] = value

            # Add hidden defaults (constant values for hidden parameters)
            for old_name, transform in hidden_defaults.items():
                if transform.default is not NotSet:
                    parent_args[old_name] = transform.default
                elif transform.default_factory is not NotSet:
                    # Type check to ensure default_factory is callable
                    if callable(transform.default_factory):
                        parent_args[old_name] = transform.default_factory()

            return await parent_tool.run(parent_args)

        return schema, _forward

    @staticmethod
    def _apply_single_transform(
        old_name: str,
        old_schema: dict[str, Any],
        transform: ArgTransform,
        is_required: bool,
    ) -> tuple[str, dict[str, Any], bool] | None:
        """Apply transformation to a single parameter.

        This method handles the transformation of a single argument according to
        the specified transformation rules.

        Args:
            old_name: Original name of the parameter.
            old_schema: Original JSON schema for the parameter.
            transform: ArgTransform object specifying how to transform the parameter.
            is_required: Whether the original parameter was required.

        Returns:
            Tuple of (new_name, new_schema, new_is_required) if parameter should be kept,
            None if parameter should be dropped.
        """
        if transform.hide:
            return None

        # Handle name transformation - ensure we always have a string
        if transform.name is not NotSet:
            new_name = transform.name if transform.name is not None else old_name
        else:
            new_name = old_name

        # Ensure new_name is always a string
        if not isinstance(new_name, str):
            new_name = old_name

        new_schema = old_schema.copy()

        # Handle description transformation
        if transform.description is not NotSet:
            if transform.description is None:
                new_schema.pop("description", None)  # Remove description
            else:
                new_schema["description"] = transform.description

        # Handle required transformation first
        if transform.required is not NotSet:
            is_required = bool(transform.required)
            if transform.required is True:
                # Remove any existing default when making required
                new_schema.pop("default", None)

        # Handle default value transformation (only if not making required)
        if transform.default is not NotSet and transform.required is not True:
            new_schema["default"] = transform.default
            is_required = False

        # Handle type transformation
        if transform.type is not NotSet:
            # Use TypeAdapter to get proper JSON schema for the type
            type_schema = get_cached_typeadapter(transform.type).json_schema()
            # Update the schema with the type information from TypeAdapter
            new_schema.update(type_schema)

        # Handle examples transformation
        if transform.examples is not NotSet:
            new_schema["examples"] = transform.examples

        return new_name, new_schema, is_required

    @staticmethod
    def _merge_schema_with_precedence(
        base_schema: dict[str, Any], override_schema: dict[str, Any]
    ) -> dict[str, Any]:
        """Merge two schemas, with the override schema taking precedence.

        Args:
            base_schema: Base schema to start with
            override_schema: Schema that takes precedence for overlapping properties

        Returns:
            Merged schema with override taking precedence
        """
        merged_props = base_schema.get("properties", {}).copy()
        merged_required = set(base_schema.get("required", []))

        override_props = override_schema.get("properties", {})
        override_required = set(override_schema.get("required", []))

        # Override properties
        for param_name, param_schema in override_props.items():
            if param_name in merged_props:
                # Merge the schemas, with override taking precedence
                base_param = merged_props[param_name].copy()
                base_param.update(param_schema)
                merged_props[param_name] = base_param
            else:
                merged_props[param_name] = param_schema.copy()

        # Handle required parameters - override takes complete precedence
        # Start with override's required set
        final_required = override_required.copy()

        # For parameters not in override, inherit base requirement status
        # but only if they don't have a default in the final merged properties
        for param_name in merged_required:
            if param_name not in override_props:
                # Parameter not mentioned in override, keep base requirement status
                final_required.add(param_name)
            elif (
                param_name in override_props
                and "default" not in merged_props[param_name]
            ):
                # Parameter in override but no default, keep required if it was required in base
                if param_name not in override_required:
                    # Override doesn't specify it as required, and it has no default,
                    # so inherit from base
                    final_required.add(param_name)

        # Remove any parameters that have defaults (they become optional)
        for param_name, param_schema in merged_props.items():
            if "default" in param_schema:
                final_required.discard(param_name)

        # Merge $defs from both schemas, with override taking precedence
        merged_defs = base_schema.get("$defs", {}).copy()
        override_defs = override_schema.get("$defs", {})
        for def_name, def_schema in override_defs.items():
            if def_name in merged_defs:
                base_def = merged_defs[def_name].copy()
                base_def.update(def_schema)
                merged_defs[def_name] = base_def
            else:
                merged_defs[def_name] = def_schema.copy()

        result = {
            "type": "object",
            "properties": merged_props,
            "required": list(final_required),
            "additionalProperties": False,
        }
        if merged_defs:
            result["$defs"] = merged_defs
        result = compress_schema(result)

        return result

    @staticmethod
    def _function_has_kwargs(fn: Callable[..., Any]) -> bool:
        """Check if function accepts **kwargs.

        This determines whether a custom function can accept arbitrary keyword arguments,
        which affects how schemas are merged during tool transformation.

        Args:
            fn: Function to inspect.

        Returns:
            True if the function has a **kwargs parameter, False otherwise.
        """
        sig = inspect.signature(fn)
        return any(
            p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
        )
def _set_visibility_metadata(tool: Tool, *, enabled: bool) -> None:
    """Record a tool's visibility state in its metadata.

    Writes the same metadata keys as the Visibility transform, so tools
    marked here are honored by the standard visibility filtering.

    Args:
        tool: Tool whose metadata is updated in place.
        enabled: Whether the tool should be visible to clients.
    """
    # Deferred import avoids a circular dependency with the server package.
    from fastmcp.server.transforms.visibility import _FASTMCP_KEY, _INTERNAL_KEY

    if tool.meta is None:
        tool.meta = {_FASTMCP_KEY: {_INTERNAL_KEY: {"visibility": enabled}}}
        return

    fastmcp_ns = tool.meta.get(_FASTMCP_KEY, {})
    internal_ns = fastmcp_ns.get(_INTERNAL_KEY, {})
    # Rebuild the nested dicts immutably so shared references are not mutated.
    tool.meta = {
        **tool.meta,
        _FASTMCP_KEY: {
            **fastmcp_ns,
            _INTERNAL_KEY: {**internal_ns, "visibility": enabled},
        },
    }
class ToolTransformConfig(FastMCPBaseModel):
    """Provides a way to transform a tool.

    Declarative, serializable description of a tool transformation. Fields
    that are left unset are not forwarded to ``TransformedTool.from_tool``,
    so the parent tool's values are inherited for them.
    """

    # Replacement tool name (unset = keep the parent tool's name).
    name: str | None = Field(default=None, description="The new name for the tool.")
    # Replacement version identifier.
    version: str | None = Field(
        default=None, description="The new version for the tool."
    )
    # Replacement display title.
    title: str | None = Field(
        default=None,
        description="The new title of the tool.",
    )
    # Replacement description.
    description: str | None = Field(
        default=None,
        description="The new description of the tool.",
    )
    # Replacement tags; None input is coerced to an empty set.
    tags: Annotated[set[str], BeforeValidator(_convert_set_default_none)] = Field(
        default_factory=set,
        description="The new tags for the tool.",
    )
    # Replacement meta dict.
    meta: dict[str, Any] | None = Field(
        default=None,
        description="The new meta information for the tool.",
    )
    # Visibility toggle; handled separately from the other fields in apply().
    enabled: bool = Field(
        default=True,
        description="Whether the tool is enabled. If False, the tool will be hidden from clients.",
    )
    # Per-argument transforms keyed by the parent tool's argument names.
    arguments: dict[str, ArgTransformConfig] = Field(
        default_factory=dict,
        description="A dictionary of argument transforms to apply to the tool.",
    )

    def apply(self, tool: Tool) -> TransformedTool:
        """Create a TransformedTool from a provided tool and this transformation configuration."""
        # exclude_unset so only explicitly-configured fields override the parent;
        # arguments/enabled are handled separately below.
        tool_changes: dict[str, Any] = self.model_dump(
            exclude_unset=True, exclude={"arguments", "enabled"}
        )
        transformed = TransformedTool.from_tool(
            tool=tool,
            **tool_changes,
            transform_args={k: v.to_arg_transform() for k, v in self.arguments.items()},
        )

        # Set visibility metadata if enabled was explicitly provided.
        # This allows enabled=True to override an earlier disable (later transforms win).
        if "enabled" in self.model_fields_set:
            _set_visibility_metadata(transformed, enabled=self.enabled)

        return transformed
def apply_transformations_to_tools(
    tools: dict[str, Tool],
    transformations: dict[str, ToolTransformConfig],
) -> dict[str, Tool]:
    """Apply a set of transformations to a collection of tools.

    Tools without a matching transformation pass through unchanged.

    Note: the tools dict is keyed by prefixed key (e.g., "tool:my_tool"),
    while transformations are keyed by plain tool name (e.g., "my_tool").
    """
    result: dict[str, Tool] = {}
    for prefixed_key, tool in tools.items():
        # Transformations are looked up by the tool's name, not its dict key.
        config = transformations.get(tool.name)
        if config:
            replacement = config.apply(tool)
            # A transformed tool is stored under its own (possibly new) key.
            result[replacement.key] = replacement
        else:
            result[prefixed_key] = tool
    return result
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/tools/tool_transform.py",
"license": "Apache License 2.0",
"lines": 836,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/utilities/components.py | from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Annotated, Any, ClassVar, TypedDict, cast
from mcp.types import Icon
from pydantic import BeforeValidator, Field
from typing_extensions import Self, TypeVar
from fastmcp.server.tasks.config import TaskConfig
from fastmcp.utilities.types import FastMCPBaseModel
if TYPE_CHECKING:
    # Docket types are only needed for annotations, so they are imported
    # under TYPE_CHECKING to avoid a hard runtime dependency.
    from docket import Docket
    from docket.execution import Execution

# Generic type variable used by the collection-coercion helper below.
T = TypeVar("T", default=Any)
class FastMCPMeta(TypedDict, total=False):
    """Shape of the `fastmcp` namespace inside a component's meta dict.

    All keys are optional (``total=False``); see ``get_fastmcp_metadata``
    and ``FastMCPComponent.get_meta`` for how this dict is produced.
    """

    # Sorted list of component tags.
    tags: list[str]
    # The component's version, present only when set.
    version: str
    # NOTE(review): not populated in this module — presumably the set of
    # available versions for a component; confirm against callers.
    versions: list[str]
def get_fastmcp_metadata(meta: dict[str, Any] | None) -> FastMCPMeta:
    """Extract FastMCP metadata from a component's meta dict.

    Handles both the current `fastmcp` namespace and the legacy `_fastmcp`
    namespace for compatibility with older FastMCP servers.
    """
    if meta:
        # Current namespace first, legacy namespace as fallback; the first
        # dict-valued entry wins.
        for namespace in ("fastmcp", "_fastmcp"):
            candidate = meta.get(namespace)
            if isinstance(candidate, dict):
                return cast("FastMCPMeta", candidate)
    return {}
def _convert_set_default_none(maybe_set: set[T] | Sequence[T] | None) -> set[T]:
"""Convert a sequence to a set, defaulting to an empty set if None."""
if maybe_set is None:
return set()
if isinstance(maybe_set, set):
return maybe_set
return set(maybe_set)
def _coerce_version(v: str | int | float | None) -> str | None:
"""Coerce version to string, accepting int, float, or str.
Raises TypeError for non-scalar types (list, dict, set, etc.).
Raises ValueError if version contains '@' (used as key delimiter).
"""
if v is None:
return None
if isinstance(v, bool):
raise TypeError(f"Version must be a string, int, or float, got bool: {v!r}")
if not isinstance(v, (str, int, float)):
raise TypeError(
f"Version must be a string, int, or float, got {type(v).__name__}: {v!r}"
)
version = str(v)
if "@" in version:
raise ValueError(
f"Version string cannot contain '@' (used as key delimiter): {version!r}"
)
return version
class FastMCPComponent(FastMCPBaseModel):
    """Base class for FastMCP tools, prompts, resources, and resource templates.

    Provides shared metadata fields (name, version, title, description, icons,
    tags, meta), key construction/parsing helpers, and the background-task
    hooks that concrete component types override.
    """

    # Short type tag (e.g. "tool") prepended to keys so identifiers from
    # different component types cannot collide; subclasses must set it.
    KEY_PREFIX: ClassVar[str] = ""

    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        # Warn if a subclass doesn't define KEY_PREFIX (inherited or its own)
        if not cls.KEY_PREFIX:
            import warnings

            warnings.warn(
                f"{cls.__name__} does not define KEY_PREFIX. "
                f"Component keys will not be type-prefixed, which may cause collisions.",
                UserWarning,
                stacklevel=2,
            )

    name: str = Field(
        description="The name of the component.",
    )
    # _coerce_version normalizes int/float versions to str and rejects '@'.
    version: Annotated[str | None, BeforeValidator(_coerce_version)] = Field(
        default=None,
        description="Optional version identifier for this component. "
        "Multiple versions of the same component (same name) can coexist.",
    )
    title: str | None = Field(
        default=None,
        description="The title of the component for display purposes.",
    )
    description: str | None = Field(
        default=None,
        description="The description of the component.",
    )
    icons: list[Icon] | None = Field(
        default=None,
        description="Optional list of icons for this component to display in user interfaces.",
    )
    # None input is coerced to an empty set by the validator.
    tags: Annotated[set[str], BeforeValidator(_convert_set_default_none)] = Field(
        default_factory=set,
        description="Tags for the component.",
    )
    meta: dict[str, Any] | None = Field(
        default=None, description="Meta information about the component"
    )
    # NOTE(review): Field appears both inside Annotated and as the assigned
    # default here; the assigned Field (with default_factory) is the one that
    # takes effect — confirm the Annotated Field is intentional.
    task_config: Annotated[
        TaskConfig,
        Field(description="Background task execution configuration (SEP-1686)."),
    ] = Field(default_factory=lambda: TaskConfig(mode="forbidden"))

    @classmethod
    def make_key(cls, identifier: str) -> str:
        """Construct the lookup key for this component type.

        Args:
            identifier: The raw identifier (name for tools/prompts, uri for resources)

        Returns:
            A prefixed key like "tool:name" or "resource:uri"
        """
        if cls.KEY_PREFIX:
            return f"{cls.KEY_PREFIX}:{identifier}"
        return identifier

    @property
    def key(self) -> str:
        """The globally unique lookup key for this component.

        Format: "{key_prefix}:{identifier}@{version}" or "{key_prefix}:{identifier}@"
        e.g. "tool:my_tool@v2", "tool:my_tool@", "resource:file://x.txt@"

        The @ suffix is ALWAYS present to enable unambiguous parsing of keys
        (URIs may contain @ characters, so we always include the delimiter).

        Subclasses should override this to use their specific identifier.
        Base implementation uses name.
        """
        base_key = self.make_key(self.name)
        return f"{base_key}@{self.version or ''}"

    def get_meta(self) -> dict[str, Any]:
        """Get the meta information about the component.

        Returns a dict that always includes a `fastmcp` key containing:
        - `tags`: sorted list of component tags
        - `version`: component version (only if set)

        Internal keys (prefixed with `_`) are stripped from the fastmcp namespace.
        """
        meta = dict(self.meta) if self.meta else {}
        fastmcp_meta: FastMCPMeta = {"tags": sorted(self.tags)}
        if self.version is not None:
            fastmcp_meta["version"] = self.version

        # Merge with upstream fastmcp meta, stripping internal keys
        if (upstream_meta := meta.get("fastmcp")) is not None:
            if not isinstance(upstream_meta, dict):
                raise TypeError("meta['fastmcp'] must be a dict")
            # Filter out internal keys (e.g., _internal used for enabled state)
            public_upstream = {
                k: v for k, v in upstream_meta.items() if not k.startswith("_")
            }
            # This component's own fields win over upstream values.
            fastmcp_meta = cast(FastMCPMeta, public_upstream | fastmcp_meta)

        meta["fastmcp"] = fastmcp_meta

        return meta

    def __eq__(self, other: object) -> bool:
        # Equality requires the exact same class and identical field values.
        if type(self) is not type(other):
            return False
        # NOTE(review): redundant — the identity type check above already
        # guarantees isinstance; kept for safety.
        if not isinstance(other, type(self)):
            return False
        return self.model_dump() == other.model_dump()

    def __repr__(self) -> str:
        parts = [f"name={self.name!r}"]
        if self.version:
            parts.append(f"version={self.version!r}")
        parts.extend(
            [
                f"title={self.title!r}",
                f"description={self.description!r}",
                f"tags={self.tags}",
            ]
        )
        return f"{self.__class__.__name__}({', '.join(parts)})"

    def enable(self) -> None:
        """Removed in 3.0. Use server.enable(keys=[...]) instead."""
        raise NotImplementedError(
            f"Component.enable() was removed in FastMCP 3.0. "
            f"Use server.enable(keys=['{self.key}']) instead."
        )

    def disable(self) -> None:
        """Removed in 3.0. Use server.disable(keys=[...]) instead."""
        raise NotImplementedError(
            f"Component.disable() was removed in FastMCP 3.0. "
            f"Use server.disable(keys=['{self.key}']) instead."
        )

    def copy(self) -> Self:  # type: ignore[override]
        """Create a copy of the component."""
        return self.model_copy()

    def register_with_docket(self, docket: Docket) -> None:
        """Register this component with docket for background execution.

        No-ops if task_config.mode is "forbidden". Subclasses override to
        register their callable (self.run, self.read, self.render, or self.fn).
        """
        # Base implementation: no-op (subclasses override)

    async def add_to_docket(
        self, docket: Docket, *args: Any, **kwargs: Any
    ) -> Execution:
        """Schedule this component for background execution via docket.

        Subclasses override this to handle their specific calling conventions:
        - Tool: add_to_docket(docket, arguments: dict, **kwargs)
        - Resource: add_to_docket(docket, **kwargs)
        - ResourceTemplate: add_to_docket(docket, params: dict, **kwargs)
        - Prompt: add_to_docket(docket, arguments: dict | None, **kwargs)

        The **kwargs are passed through to docket.add() (e.g., key=task_key).
        """
        if not self.task_config.supports_tasks():
            raise RuntimeError(
                f"Cannot add {self.__class__.__name__} '{self.name}' to docket: "
                f"task execution not supported"
            )
        raise NotImplementedError(
            f"{self.__class__.__name__} does not implement add_to_docket()"
        )

    def get_span_attributes(self) -> dict[str, Any]:
        """Return span attributes for telemetry.

        Subclasses should call super() and merge their specific attributes.
        """
        return {"fastmcp.component.key": self.key}
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/components.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/deprecated/test_settings.py | import pytest
from fastmcp import FastMCP
class TestRemovedServerInitKwargs:
    """Verify that server kwargs removed in 3.0 fail fast with TypeError."""

    @pytest.mark.parametrize(
        "kwarg, value, expected_message",
        [
            ("host", "0.0.0.0", "run_http_async"),
            ("port", 8080, "run_http_async"),
            ("sse_path", "/custom-sse", "FASTMCP_SSE_PATH"),
            ("message_path", "/custom-message", "FASTMCP_MESSAGE_PATH"),
            ("streamable_http_path", "/custom-http", "run_http_async"),
            ("json_response", True, "run_http_async"),
            ("stateless_http", True, "run_http_async"),
            ("debug", True, "FASTMCP_DEBUG"),
            ("log_level", "DEBUG", "run_http_async"),
            ("on_duplicate_tools", "warn", "on_duplicate="),
            ("on_duplicate_resources", "error", "on_duplicate="),
            ("on_duplicate_prompts", "replace", "on_duplicate="),
            ("tool_serializer", lambda x: str(x), "ToolResult"),
            ("include_tags", {"public"}, "server.enable"),
            ("exclude_tags", {"internal"}, "server.disable"),
            (
                "tool_transformations",
                {"my_tool": {"name": "renamed"}},
                "server.add_transform",
            ),
        ],
    )
    def test_removed_kwarg_raises_type_error(self, kwarg, value, expected_message):
        """Each removed kwarg is rejected by name."""
        init_kwargs = {kwarg: value}
        with pytest.raises(TypeError, match=f"no longer accepts `{kwarg}`"):
            FastMCP("TestServer", **init_kwargs)

    @pytest.mark.parametrize(
        "kwarg, value, expected_message",
        [
            ("host", "0.0.0.0", "run_http_async"),
            ("on_duplicate_tools", "warn", "on_duplicate="),
            ("include_tags", {"public"}, "server.enable"),
        ],
    )
    def test_removed_kwarg_error_includes_migration_hint(
        self, kwarg, value, expected_message
    ):
        """The error message points the user at the 3.0 replacement."""
        init_kwargs = {kwarg: value}
        with pytest.raises(TypeError, match=expected_message):
            FastMCP("TestServer", **init_kwargs)

    def test_unknown_kwarg_raises_standard_type_error(self):
        """Unknown kwargs still raise Python's regular TypeError."""
        with pytest.raises(TypeError, match="unexpected keyword argument"):
            FastMCP("TestServer", **{"totally_fake_param": True})  # ty: ignore[invalid-argument-type]

    def test_valid_kwargs_still_work(self):
        """The supported constructor kwargs are unaffected."""
        valid_kwargs = dict(
            name="TestServer",
            instructions="Test instructions",
            on_duplicate="warn",
            mask_error_details=True,
        )
        server = FastMCP(**valid_kwargs)
        assert server.name == "TestServer"
        assert server.instructions == "Test instructions"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/deprecated/test_settings.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/config_server.py | """
Simple example showing FastMCP server with command line argument support.
Usage:
fastmcp run examples/config_server.py -- --name MyServer --debug
"""
import argparse
from fastmcp import FastMCP
# Parse CLI options up front so the server name can reflect the chosen flags.
parser = argparse.ArgumentParser(description="Simple configurable MCP server")
parser.add_argument(
    "--name", type=str, default="ConfigurableServer", help="Server name"
)
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
args = parser.parse_args()

# Tag the display name in debug mode so it is visible to clients.
server_name = f"{args.name} (Debug)" if args.debug else args.name

mcp = FastMCP(server_name)
@mcp.tool
def get_status() -> dict[str, str | bool]:
    """Get the current server configuration and status."""
    status: dict[str, str | bool] = {
        "server_name": server_name,
        "debug_mode": args.debug,
        "original_name": args.name,
    }
    return status
@mcp.tool
def echo_message(message: str) -> str:
    """Echo a message, with debug info if debug mode is enabled."""
    # Prefix the echo when the server was started with --debug.
    return f"[DEBUG] Echoing: {message}" if args.debug else message
if __name__ == "__main__":
    # Run with the default transport when executed directly.
    mcp.run()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/config_server.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/client/auth/bearer.py | import httpx
from pydantic import SecretStr
from fastmcp.utilities.logging import get_logger
__all__ = ["BearerAuth"]
logger = get_logger(__name__)
class BearerAuth(httpx.Auth):
    """httpx auth hook that attaches a static bearer token to every request."""

    def __init__(self, token: str):
        # Wrap the raw token so it is not exposed in reprs or logs.
        self.token = SecretStr(token)

    def auth_flow(self, request):
        """Add the Authorization header and forward the request unchanged."""
        secret = self.token.get_secret_value()
        request.headers["Authorization"] = f"Bearer {secret}"
        yield request
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/auth/bearer.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/utilities/http.py | import socket
def find_available_port() -> int:
    """Find an available port by letting the OS assign one."""
    # Binding to port 0 asks the kernel for any free ephemeral port.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(("127.0.0.1", 0))
        _, port = sock.getsockname()
    finally:
        sock.close()
    return port
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/http.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/client/oauth_callback.py | """
OAuth callback server for handling authorization code flows.
This module provides a reusable callback server that can handle OAuth redirects
and display styled responses to users.
"""
from __future__ import annotations
from dataclasses import dataclass
import anyio
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.routing import Route
from uvicorn import Config, Server
from fastmcp.utilities.http import find_available_port
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.ui import (
HELPER_TEXT_STYLES,
INFO_BOX_STYLES,
STATUS_MESSAGE_STYLES,
create_info_box,
create_logo,
create_page,
create_secure_html_response,
create_status_message,
)
logger = get_logger(__name__)
def create_callback_html(
    message: str,
    is_success: bool = True,
    title: str = "FastMCP OAuth",
    server_url: str | None = None,
) -> str:
    """Create a styled HTML response for OAuth callbacks.

    Args:
        message: Detail text shown in the error info box (unused on success).
        is_success: Whether to render the success or failure layout.
        title: HTML document title.
        server_url: On success, the server URL displayed in the info box.

    Returns:
        A complete HTML page as a string.
    """
    # Build the main status message
    status_title = (
        "Authentication successful" if is_success else "Authentication failed"
    )

    # Add detail info box for both success and error cases
    detail_info = ""
    if is_success and server_url:
        detail_info = create_info_box(
            f"Connected to: {server_url}", centered=True, monospace=True
        )
    elif not is_success:
        detail_info = create_info_box(
            message, is_error=True, centered=True, monospace=True
        )

    # Build the page content
    content = f"""
    <div class="container">
        {create_logo()}
        {create_status_message(status_title, is_success=is_success)}
        {detail_info}
        <div class="close-instruction">
            You can safely close this tab now.
        </div>
    </div>
    """

    # Additional styles needed for this page
    additional_styles = STATUS_MESSAGE_STYLES + INFO_BOX_STYLES + HELPER_TEXT_STYLES

    return create_page(
        content=content,
        title=title,
        additional_styles=additional_styles,
    )
@dataclass
class CallbackResponse:
    """Query parameters of interest extracted from an OAuth redirect."""

    code: str | None = None
    state: str | None = None
    error: str | None = None
    error_description: str | None = None

    @classmethod
    def from_dict(cls, data: dict[str, str]) -> CallbackResponse:
        """Build a response from raw query params, ignoring unknown keys."""
        known = {
            key: value for key, value in data.items() if key in cls.__annotations__
        }
        return cls(**known)

    def to_dict(self) -> dict[str, str]:
        """Serialize back to a dict, dropping unset (None) fields."""
        return {
            key: value for key, value in self.__dict__.items() if value is not None
        }
@dataclass
class OAuthCallbackResult:
    """Container for OAuth callback results, used with anyio.Event for async coordination."""

    # Authorization code returned by the provider (None until received).
    code: str | None = None
    # Opaque state value echoed back by the provider for CSRF protection.
    state: str | None = None
    # Populated instead of code/state when the callback reported a failure.
    error: Exception | None = None
def create_oauth_callback_server(
    port: int,
    callback_path: str = "/callback",
    server_url: str | None = None,
    result_container: OAuthCallbackResult | None = None,
    result_ready: anyio.Event | None = None,
) -> Server:
    """
    Create an OAuth callback server.

    Args:
        port: The port to run the server on
        callback_path: The path to listen for OAuth redirects on
        server_url: Optional server URL to display in success messages
        result_container: Optional container to store callback results
        result_ready: Optional event to signal when callback is received

    Returns:
        Configured uvicorn Server instance (not yet running)
    """

    def record_first_result(
        *,
        code: str | None = None,
        state: str | None = None,
        error: Exception | None = None,
    ) -> None:
        """Store the first callback result; later requests are ignored."""
        if result_container is None or result_ready is None:
            return
        if result_ready.is_set():
            return
        result_container.code = code
        result_container.state = state
        result_container.error = error
        result_ready.set()

    def failure_response(user_message: str):
        """Record the failure (if tracking is enabled) and render a 400 page."""
        record_first_result(error=RuntimeError(user_message))
        return create_secure_html_response(
            create_callback_html(
                user_message,
                is_success=False,
            ),
            status_code=400,
        )

    async def callback_handler(request: Request):
        """Handle OAuth callback requests with proper HTML responses."""
        callback_response = CallbackResponse.from_dict(dict(request.query_params))

        # Provider reported an explicit OAuth error.
        if callback_response.error:
            error_desc = callback_response.error_description or "Unknown error"
            if callback_response.error == "access_denied":
                user_message = "Access was denied by the authorization server."
            else:
                user_message = f"Authorization failed: {error_desc}"
            return failure_response(user_message)

        # No code means the flow cannot be completed.
        if not callback_response.code:
            return failure_response(
                "No authorization code was received from the server."
            )

        # Missing state indicates an OAuth flow issue (CSRF check impossible).
        if callback_response.state is None:
            return failure_response(
                "The OAuth server did not return the expected state parameter."
            )

        # Success: hand the code/state pair back to the waiting flow.
        record_first_result(
            code=callback_response.code,
            state=callback_response.state,
        )
        return create_secure_html_response(
            create_callback_html("", is_success=True, server_url=server_url)
        )

    app = Starlette(routes=[Route(callback_path, callback_handler)])
    config = Config(
        app=app,
        host="127.0.0.1",
        port=port,
        lifespan="off",
        log_level="warning",
        ws="websockets-sansio",
    )
    return Server(config)
if __name__ == "__main__":
    """Run a test server when executed directly."""
    import webbrowser

    import uvicorn

    # Pick a free local port so the demo never collides with a running server.
    port = find_available_port()

    print("π OAuth Callback Test Server")
    print("π Test URLs:")
    print(f" Success: http://localhost:{port}/callback?code=test123&state=xyz")
    print(
        f" Error: http://localhost:{port}/callback?error=access_denied&error_description=User%20denied"
    )
    print(f" Missing: http://localhost:{port}/callback")
    print("π Press Ctrl+C to stop")
    print()

    # Create test server without future (just for testing HTML responses)
    server = create_oauth_callback_server(
        port=port, server_url="https://fastmcp-test-server.example.com"
    )

    # Open browser to success example
    webbrowser.open(f"http://localhost:{port}/callback?code=test123&state=xyz")

    # Run with uvicorn directly
    uvicorn.run(
        server.config.app,
        host="127.0.0.1",
        port=port,
        log_level="warning",
        access_log=False,
    )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/oauth_callback.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/auth/auth.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, cast
from urllib.parse import urlparse
from mcp.server.auth.handlers.token import TokenErrorResponse
from mcp.server.auth.handlers.token import TokenHandler as _SDKTokenHandler
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.server.auth.middleware.auth_context import AuthContextMiddleware
from mcp.server.auth.middleware.bearer_auth import BearerAuthBackend
from mcp.server.auth.middleware.client_auth import (
AuthenticationError,
ClientAuthenticator,
)
from mcp.server.auth.middleware.client_auth import (
ClientAuthenticator as _SDKClientAuthenticator,
)
from mcp.server.auth.provider import (
AccessToken as _SDKAccessToken,
)
from mcp.server.auth.provider import (
AuthorizationCode,
OAuthAuthorizationServerProvider,
RefreshToken,
)
from mcp.server.auth.provider import (
TokenVerifier as TokenVerifierProtocol,
)
from mcp.server.auth.routes import (
cors_middleware,
create_auth_routes,
create_protected_resource_routes,
)
from mcp.server.auth.settings import (
ClientRegistrationOptions,
RevocationOptions,
)
from mcp.shared.auth import OAuthClientInformationFull
from pydantic import AnyHttpUrl, Field
from starlette.middleware import Middleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.requests import Request
from starlette.routing import Route
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from fastmcp.server.auth.cimd import CIMDClientManager
logger = get_logger(__name__)
class AccessToken(_SDKAccessToken):
    """AccessToken that includes all JWT claims."""

    # Full decoded claim set from the token, beyond the standard fields the
    # SDK's AccessToken keeps. Defaults to empty for non-JWT tokens.
    claims: dict[str, Any] = Field(default_factory=dict)
class TokenHandler(_SDKTokenHandler):
    """TokenHandler that returns MCP-compliant error responses.

    This handler addresses two SDK issues:

    1. Error code: The SDK returns `unauthorized_client` for client authentication
       failures, but RFC 6749 Section 5.2 requires `invalid_client` with HTTP 401.
       This distinction matters for client re-registration behavior.

    2. Status code: The SDK returns HTTP 400 for all token errors including
       `invalid_grant` (expired/invalid tokens). However, the MCP spec requires:
       "Invalid or expired tokens MUST receive a HTTP 401 response."

    This handler transforms responses to be compliant with both OAuth 2.1 and MCP specs.
    """

    # Maps (SDK status code, SDK error code) -> spec-compliant error code that
    # must be re-emitted with HTTP 401. Consolidates the two previously
    # duplicated transform branches into one table.
    _ERROR_TRANSFORMS: dict[tuple[int, str], str] = {
        # RFC 6749 Section 5.2: client auth failure is `invalid_client`.
        (401, "unauthorized_client"): "invalid_client",
        # MCP spec: invalid/expired tokens must receive 401, not the SDK's 400.
        (400, "invalid_grant"): "invalid_grant",
    }

    @staticmethod
    def _compliant_error_response(
        error: str, error_description: str | None
    ) -> PydanticJSONResponse:
        """Build a 401 token-error response with the required no-cache headers."""
        return PydanticJSONResponse(
            content=TokenErrorResponse(
                error=error,  # type: ignore[arg-type]
                error_description=error_description,
            ),
            status_code=401,
            headers={
                "Cache-Control": "no-store",
                "Pragma": "no-cache",
            },
        )

    async def handle(self, request: Any):
        """Wrap SDK handle() and transform auth error responses.

        Non-JSON bodies and unrecognized error codes are passed through
        unchanged.
        """
        response = await super().handle(request)

        if response.status_code in (400, 401):
            try:
                body = json.loads(response.body)
                error_code = body.get("error")
                error_description = body.get("error_description")
            except (json.JSONDecodeError, AttributeError):
                # Not JSON or unexpected format, return as-is
                return response
            replacement = self._ERROR_TRANSFORMS.get(
                (response.status_code, error_code)
            )
            if replacement is not None:
                return self._compliant_error_response(replacement, error_description)

        return response
# Expected client_assertion_type for private_key_jwt authentication (RFC 7523).
JWT_BEARER_ASSERTION_TYPE = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
class PrivateKeyJWTClientAuthenticator(_SDKClientAuthenticator):
    """Client authenticator with private_key_jwt support for CIMD clients.

    Extends the SDK's ClientAuthenticator to add support for the `private_key_jwt`
    authentication method per RFC 7523. This is required for CIMD (Client ID Metadata
    Document) clients that use asymmetric keys for authentication.

    The authenticator:
    1. Delegates to SDK for standard methods (client_secret_basic, client_secret_post, none)
    2. Adds private_key_jwt handling for CIMD clients
    3. Validates JWT assertions against client's JWKS
    """

    def __init__(
        self,
        provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        cimd_manager: CIMDClientManager,
        token_endpoint_url: str,
    ):
        """Initialize the authenticator.

        Args:
            provider: OAuth provider for client lookups
            cimd_manager: CIMD manager for private_key_jwt validation
            token_endpoint_url: Token endpoint URL for audience validation
        """
        super().__init__(provider)
        self._cimd_manager = cimd_manager
        # Used as the expected JWT audience when validating assertions.
        self._token_endpoint_url = token_endpoint_url

    async def authenticate_request(
        self, request: Request
    ) -> OAuthClientInformationFull:
        """Authenticate a client from an HTTP request.

        Extends SDK authentication to support private_key_jwt for CIMD clients.
        Delegates to SDK for client_secret_basic (Authorization header) and
        client_secret_post (form body) authentication.

        Raises:
            AuthenticationError: If the client is unknown or its credentials
                / JWT assertion fail validation.
        """
        form_data = await request.form()
        client_id = form_data.get("client_id")

        # If client_id is not in form data, delegate to SDK
        # This handles client_secret_basic which sends credentials in Authorization header
        if not client_id:
            return await super().authenticate_request(request)

        client = await self.provider.get_client(str(client_id))
        if not client:
            raise AuthenticationError("Invalid client_id")

        # Handle private_key_jwt authentication for CIMD clients
        if client.token_endpoint_auth_method == "private_key_jwt":
            # Validate assertion parameters
            assertion_type = form_data.get("client_assertion_type")
            assertion = form_data.get("client_assertion")

            if assertion_type != JWT_BEARER_ASSERTION_TYPE:
                raise AuthenticationError(
                    f"Invalid client_assertion_type: expected {JWT_BEARER_ASSERTION_TYPE}"
                )

            # Form values may be UploadFile objects; only a str is acceptable here.
            if not assertion or not isinstance(assertion, str):
                raise AuthenticationError("Missing client_assertion")

            # Validate the JWT assertion using CIMD manager
            try:
                await self._cimd_manager.validate_private_key_jwt(
                    assertion=assertion,
                    client=client,
                    token_endpoint=self._token_endpoint_url,
                )
            except ValueError as e:
                raise AuthenticationError(f"Invalid client assertion: {e}") from e

            return client

        # Delegate to SDK for other authentication methods
        return await super().authenticate_request(request)
class AuthProvider(TokenVerifierProtocol):
    """Base class for all FastMCP authentication providers.

    Provides a unified interface for every provider, from simple token
    verifiers up to full OAuth authorization servers. Every provider must be
    able to verify tokens and may optionally contribute auth routes.
    """

    def __init__(
        self,
        base_url: AnyHttpUrl | str | None = None,
        required_scopes: list[str] | None = None,
    ):
        """
        Initialize the auth provider.

        Args:
            base_url: The base URL of this server (e.g., http://localhost:8000).
                Used for constructing .well-known endpoints and OAuth metadata.
            required_scopes: List of OAuth scopes required for all requests.
        """
        # Normalize string URLs into pydantic AnyHttpUrl instances.
        self.base_url = AnyHttpUrl(base_url) if isinstance(base_url, str) else base_url
        self.required_scopes = required_scopes or []
        self._mcp_path: str | None = None
        self._resource_url: AnyHttpUrl | None = None

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify a bearer token and return access info if valid.

        All auth providers must implement token verification.

        Args:
            token: The token string to validate

        Returns:
            AccessToken object if valid, None if invalid or expired
        """
        raise NotImplementedError("Subclasses must implement verify_token")

    def set_mcp_path(self, mcp_path: str | None) -> None:
        """Record the MCP endpoint path and derive the protected resource URL.

        Called by get_routes() before route creation. Subclasses can override
        to perform additional initialization that depends on the MCP path.

        Args:
            mcp_path: The path where the MCP endpoint is mounted (e.g., "/mcp")
        """
        self._mcp_path = mcp_path
        self._resource_url = self._get_resource_url(mcp_path)

    def get_routes(
        self,
        mcp_path: str | None = None,
    ) -> list[Route]:
        """Get all routes for this authentication provider.

        Includes both well-known discovery routes and operational routes.
        Each provider creates whatever it needs:

        - TokenVerifier: typically no routes (this default implementation)
        - RemoteAuthProvider: protected resource metadata routes
        - OAuthProvider: full OAuth authorization server routes
        - Custom providers: whatever routes they need

        Args:
            mcp_path: The path where the MCP endpoint is mounted (e.g., "/mcp").
                Used to advertise the resource URL in metadata; the provider
                does not create the MCP endpoint route itself.

        Returns:
            List of all routes for this provider (excluding the MCP endpoint itself)
        """
        return []

    def get_well_known_routes(
        self,
        mcp_path: str | None = None,
    ) -> list[Route]:
        """Get well-known discovery routes for this authentication provider.

        Filters get_routes() down to the /.well-known/ discovery routes, which
        should be mounted at the application root per RFC 8414 and RFC 9728.

        Common examples:
        - /.well-known/oauth-authorization-server (authorization server metadata)
        - /.well-known/oauth-protected-resource/* (protected resource metadata)

        Args:
            mcp_path: The path where the MCP endpoint is mounted (e.g., "/mcp").
                Used to construct path-scoped well-known URLs.

        Returns:
            List of well-known discovery routes (typically mounted at root level)
        """
        well_known: list[Route] = []
        for route in self.get_routes(mcp_path):
            if isinstance(route, Route) and route.path.startswith("/.well-known/"):
                well_known.append(route)
        return well_known

    def get_middleware(self) -> list:
        """Get HTTP application-level middleware for this auth provider.

        Returns:
            List of Starlette Middleware instances to apply to the HTTP app
        """
        # TODO(ty): remove type ignores when ty supports Starlette Middleware typing
        bearer_backend = BearerAuthBackend(self)
        return [
            Middleware(
                AuthenticationMiddleware,  # type: ignore[arg-type]
                backend=bearer_backend,
            ),
            Middleware(AuthContextMiddleware),  # type: ignore[arg-type]
        ]

    def _get_resource_url(self, path: str | None = None) -> AnyHttpUrl | None:
        """Get the actual resource URL being protected.

        Args:
            path: The path where the resource endpoint is mounted (e.g., "/mcp")

        Returns:
            The full URL of the protected resource
        """
        if self.base_url is None:
            return None
        if not path:
            return self.base_url
        prefix = str(self.base_url).rstrip("/")
        suffix = path.lstrip("/")
        return AnyHttpUrl("/".join((prefix, suffix)))
class TokenVerifier(AuthProvider):
    """Base class for token verifiers (Resource Servers).

    Offers token verification without any OAuth authorization-server
    functionality; token verifiers typically expose no auth routes.
    """

    def __init__(
        self,
        base_url: AnyHttpUrl | str | None = None,
        required_scopes: list[str] | None = None,
    ):
        """
        Initialize the token verifier.

        Args:
            base_url: The base URL of this server
            required_scopes: Scopes that are required for all requests
        """
        super().__init__(base_url=base_url, required_scopes=required_scopes)

    @property
    def scopes_supported(self) -> list[str]:
        """Scopes to advertise in OAuth metadata.

        Defaults to required_scopes. Override in subclasses when the
        advertised scopes differ from the validation scopes (e.g., Azure AD
        where tokens contain short-form scopes but clients request full URI
        scopes).
        """
        if self.required_scopes:
            return self.required_scopes
        return []

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify a bearer token and return access info if valid."""
        raise NotImplementedError("Subclasses must implement verify_token")
class RemoteAuthProvider(AuthProvider):
    """Authentication provider for resource servers that verify tokens from known authorization servers.

    Composes a TokenVerifier with authorization server metadata to expose
    standardized OAuth 2.0 Protected Resource endpoints (RFC 9728). Good fits:

    - JWT verification with known issuers
    - Remote token introspection services
    - Any resource server that knows where its tokens come from

    Use this when you have token verification logic and want to advertise
    the authorization servers that issue valid tokens.
    """

    base_url: AnyHttpUrl

    def __init__(
        self,
        token_verifier: TokenVerifier,
        authorization_servers: list[AnyHttpUrl],
        base_url: AnyHttpUrl | str,
        scopes_supported: list[str] | None = None,
        resource_name: str | None = None,
        resource_documentation: AnyHttpUrl | None = None,
    ):
        """Initialize the remote auth provider.

        Args:
            token_verifier: TokenVerifier instance for token validation
            authorization_servers: List of authorization servers that issue valid tokens
            base_url: The base URL of this server
            scopes_supported: Scopes to advertise in OAuth metadata. If None,
                uses the token verifier's scopes_supported property. Use this
                when the scopes clients request differ from the scopes that
                appear in tokens (e.g., Azure AD full URI scopes vs short-form).
            resource_name: Optional name for the protected resource
            resource_documentation: Optional documentation URL for the protected resource
        """
        super().__init__(
            base_url=base_url,
            required_scopes=token_verifier.required_scopes,
        )
        self.token_verifier = token_verifier
        self.authorization_servers = authorization_servers
        self._scopes_supported = scopes_supported
        self.resource_name = resource_name
        self.resource_documentation = resource_documentation

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify token using the configured token verifier."""
        result = await self.token_verifier.verify_token(token)
        return result

    def get_routes(
        self,
        mcp_path: str | None = None,
    ) -> list[Route]:
        """Get routes for this provider.

        Creates protected resource metadata routes (RFC 9728).
        """
        resource_url = self._get_resource_url(mcp_path)
        if not resource_url:
            return []

        # Explicit override wins; otherwise fall back to the verifier's scopes.
        if self._scopes_supported is not None:
            advertised_scopes = self._scopes_supported
        else:
            advertised_scopes = self.token_verifier.scopes_supported

        return list(
            create_protected_resource_routes(
                resource_url=resource_url,
                authorization_servers=self.authorization_servers,
                scopes_supported=advertised_scopes,
                resource_name=self.resource_name,
                resource_documentation=self.resource_documentation,
            )
        )
class MultiAuth(AuthProvider):
    """Composes an optional auth server with additional token verifiers.

    Use this when a single server needs to accept tokens from multiple sources.
    For example, an OAuth proxy for interactive clients combined with a JWT
    verifier for machine-to-machine tokens.

    Token verification tries the server first (if present), then each verifier
    in order, returning the first successful result. Routes and OAuth metadata
    come from the server; verifiers contribute only token verification.

    Example:
        ```python
        from fastmcp.server.auth import MultiAuth, JWTVerifier, OAuthProxy

        auth = MultiAuth(
            server=OAuthProxy(issuer_url="https://login.example.com/..."),
            verifiers=[JWTVerifier(jwks_uri="https://example.com/.well-known/jwks.json")],
        )
        mcp = FastMCP("my-server", auth=auth)
        ```
    """

    def __init__(
        self,
        *,
        server: AuthProvider | None = None,
        verifiers: list[TokenVerifier] | TokenVerifier | None = None,
        base_url: AnyHttpUrl | str | None = None,
        required_scopes: list[str] | None = None,
    ):
        """Initialize the multi-auth provider.

        Args:
            server: Optional auth provider (e.g., OAuthProxy) that owns routes
                and OAuth metadata. Also participates in token verification as
                the first verifier tried.
            verifiers: One or more token verifiers to try after the server.
            base_url: Override the base URL. Defaults to the server's base_url.
            required_scopes: Override required scopes. Defaults to the server's.
        """
        # Normalize the verifiers argument to a plain list.
        if verifiers is None:
            verifier_list: list[TokenVerifier] = []
        elif isinstance(verifiers, TokenVerifier):
            verifier_list = [verifiers]
        else:
            verifier_list = list(verifiers)

        if server is None and not verifier_list:
            raise ValueError("MultiAuth requires at least a server or one verifier")

        effective_base_url = base_url or (server.base_url if server else None)
        if required_scopes is not None:
            effective_scopes = required_scopes
        else:
            effective_scopes = server.required_scopes if server else None

        super().__init__(base_url=effective_base_url, required_scopes=effective_scopes)
        self.server = server
        self.verifiers = verifier_list
        # Verification order: server first (if any), then verifiers.
        sources: list[AuthProvider] = []
        if self.server is not None:
            sources.append(self.server)
        sources.extend(self.verifiers)
        self._sources = sources

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify a token by trying the server, then each verifier in order.

        Each source is tried independently. A source that raises is logged and
        treated as a non-match so the remaining sources still get a chance.
        """
        for source in self._sources:
            try:
                verified = await source.verify_token(token)
            except Exception:
                logger.debug(
                    "Token verification failed for %s, trying next source",
                    type(source).__name__,
                    exc_info=True,
                )
                continue
            if verified is not None:
                return verified
        return None

    def set_mcp_path(self, mcp_path: str | None) -> None:
        """Propagate MCP path to the server and all verifiers."""
        super().set_mcp_path(mcp_path)
        if self.server is not None:
            self.server.set_mcp_path(mcp_path)
        for verifier in self.verifiers:
            verifier.set_mcp_path(mcp_path)

    def get_routes(self, mcp_path: str | None = None) -> list[Route]:
        """Delegate route creation to the server."""
        if self.server is None:
            return []
        return self.server.get_routes(mcp_path)

    def get_well_known_routes(self, mcp_path: str | None = None) -> list[Route]:
        """Delegate well-known route creation to the server.

        Preserves server-specific well-known logic (e.g., OAuthProvider's
        RFC 8414 path-aware discovery).
        """
        if self.server is None:
            return []
        return self.server.get_well_known_routes(mcp_path)
class OAuthProvider(
AuthProvider,
OAuthAuthorizationServerProvider[AuthorizationCode, RefreshToken, AccessToken],
):
"""OAuth Authorization Server provider.
This class provides full OAuth server functionality including client registration,
authorization flows, token issuance, and token verification.
"""
def __init__(
self,
*,
base_url: AnyHttpUrl | str,
issuer_url: AnyHttpUrl | str | None = None,
service_documentation_url: AnyHttpUrl | str | None = None,
client_registration_options: ClientRegistrationOptions | None = None,
revocation_options: RevocationOptions | None = None,
required_scopes: list[str] | None = None,
):
"""
Initialize the OAuth provider.
Args:
base_url: The public URL of this FastMCP server
issuer_url: The issuer URL for OAuth metadata (defaults to base_url)
service_documentation_url: The URL of the service documentation.
client_registration_options: The client registration options.
revocation_options: The revocation options.
required_scopes: Scopes that are required for all requests.
"""
super().__init__(base_url=base_url, required_scopes=required_scopes)
if issuer_url is None:
self.issuer_url = self.base_url
elif isinstance(issuer_url, str):
self.issuer_url = AnyHttpUrl(issuer_url)
else:
self.issuer_url = issuer_url
# Log if issuer_url and base_url differ (requires additional setup)
if (
self.base_url is not None
and self.issuer_url is not None
and str(self.base_url) != str(self.issuer_url)
):
logger.info(
f"OAuth endpoints at {self.base_url}, issuer at {self.issuer_url}. "
f"Ensure well-known routes are accessible at root ({self.issuer_url}/.well-known/). "
f"See: https://gofastmcp.com/deployment/http#mounting-authenticated-servers"
)
# Initialize OAuth Authorization Server Provider
OAuthAuthorizationServerProvider.__init__(self)
if isinstance(service_documentation_url, str):
service_documentation_url = AnyHttpUrl(service_documentation_url)
self.service_documentation_url = service_documentation_url
self.client_registration_options = client_registration_options
self.revocation_options = revocation_options
    async def verify_token(self, token: str) -> AccessToken | None:
        """
        Verify a bearer token and return access info if valid.

        This method implements the TokenVerifier protocol by delegating
        to our existing load_access_token method.

        Args:
            token: The token string to validate

        Returns:
            AccessToken object if valid, None if invalid or expired
        """
        # Delegate to the authorization server's own token store.
        return await self.load_access_token(token)
    def get_routes(
        self,
        mcp_path: str | None = None,
    ) -> list[Route]:
        """Get OAuth authorization server routes and optional protected resource routes.

        This method creates the full set of OAuth routes including:
        - Standard OAuth authorization server routes (/.well-known/oauth-authorization-server, /authorize, /token, etc.)
        - Optional protected resource routes

        Args:
            mcp_path: The path where the MCP endpoint is mounted (e.g., "/mcp")

        Returns:
            List of OAuth routes
        """
        # Configure resource URL before creating routes
        self.set_mcp_path(mcp_path)

        # Create standard OAuth authorization server routes
        # Pass base_url as issuer_url to ensure metadata declares endpoints where
        # they're actually accessible (operational routes are mounted at
        # base_url)
        assert self.base_url is not None  # typing check
        assert (
            self.issuer_url is not None
        )  # typing check (issuer_url defaults to base_url)
        sdk_routes = create_auth_routes(
            provider=self,
            issuer_url=self.base_url,
            service_documentation_url=self.service_documentation_url,
            client_registration_options=self.client_registration_options,
            revocation_options=self.revocation_options,
        )

        # Replace the token endpoint with our custom handler that returns
        # proper OAuth 2.1 error codes (invalid_client instead of unauthorized_client)
        oauth_routes: list[Route] = []
        for route in sdk_routes:
            # Only the POST /token route is swapped out; all other SDK
            # routes are kept as-is.
            if (
                isinstance(route, Route)
                and route.path == "/token"
                and route.methods is not None
                and "POST" in route.methods
            ):
                # Replace with our OAuth 2.1 compliant token handler
                token_handler = TokenHandler(
                    provider=self, client_authenticator=ClientAuthenticator(self)
                )
                oauth_routes.append(
                    Route(
                        path="/token",
                        endpoint=cors_middleware(
                            token_handler.handle, ["POST", "OPTIONS"]
                        ),
                        methods=["POST", "OPTIONS"],
                    )
                )
            else:
                oauth_routes.append(route)

        # Add protected resource routes if this server is also acting as a resource server
        if self._resource_url:
            # Advertise the registration-time valid scopes when configured,
            # otherwise fall back to the globally required scopes.
            supported_scopes = (
                self.client_registration_options.valid_scopes
                if self.client_registration_options
                and self.client_registration_options.valid_scopes
                else self.required_scopes
            )
            protected_routes = create_protected_resource_routes(
                resource_url=self._resource_url,
                authorization_servers=[cast(AnyHttpUrl, self.issuer_url)],
                scopes_supported=supported_scopes,
            )
            oauth_routes.extend(protected_routes)

        # Add base routes
        oauth_routes.extend(super().get_routes(mcp_path))

        return oauth_routes
def get_well_known_routes(
self,
mcp_path: str | None = None,
) -> list[Route]:
"""Get well-known discovery routes with RFC 8414 path-aware support.
Overrides the base implementation to support path-aware authorization
server metadata discovery per RFC 8414. If issuer_url has a path component,
the authorization server metadata route is adjusted to include that path.
For example, if issuer_url is "http://example.com/api", the discovery
endpoint will be at "/.well-known/oauth-authorization-server/api" instead
of just "/.well-known/oauth-authorization-server".
Args:
mcp_path: The path where the MCP endpoint is mounted (e.g., "/mcp")
Returns:
List of well-known discovery routes
"""
routes = super().get_well_known_routes(mcp_path)
# RFC 8414: If issuer_url has a path, use path-aware discovery
if self.issuer_url:
parsed = urlparse(str(self.issuer_url))
issuer_path = parsed.path.rstrip("/")
if issuer_path and issuer_path != "/":
# Replace /.well-known/oauth-authorization-server with path-aware version
new_routes = []
for route in routes:
if route.path == "/.well-known/oauth-authorization-server":
new_path = (
f"/.well-known/oauth-authorization-server{issuer_path}"
)
new_routes.append(
Route(
new_path,
endpoint=route.endpoint,
methods=route.methods,
)
)
else:
new_routes.append(route)
return new_routes
return routes
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/auth/auth.py",
"license": "Apache License 2.0",
"lines": 650,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/auth/providers/in_memory.py | import secrets
import time
from mcp.server.auth.provider import (
AccessToken,
AuthorizationCode,
AuthorizationParams,
AuthorizeError,
RefreshToken,
TokenError,
construct_redirect_uri,
)
from mcp.shared.auth import (
OAuthClientInformationFull,
OAuthToken,
)
from pydantic import AnyHttpUrl
from fastmcp.server.auth.auth import (
ClientRegistrationOptions,
OAuthProvider,
RevocationOptions,
)
# Default expiration times (in seconds) for the in-memory test provider.
DEFAULT_AUTH_CODE_EXPIRY_SECONDS = 5 * 60  # 5 minutes
DEFAULT_ACCESS_TOKEN_EXPIRY_SECONDS = 60 * 60  # 1 hour
DEFAULT_REFRESH_TOKEN_EXPIRY_SECONDS = None  # No expiry (refresh tokens live until revoked)
class InMemoryOAuthProvider(OAuthProvider):
    """
    An in-memory OAuth provider for testing purposes.
    It simulates the OAuth 2.1 flow locally without external calls.

    All clients, authorization codes, access tokens, and refresh tokens are
    kept in plain dictionaries, so state is lost when the process exits.
    """

    def __init__(
        self,
        base_url: AnyHttpUrl | str | None = None,
        service_documentation_url: AnyHttpUrl | str | None = None,
        client_registration_options: ClientRegistrationOptions | None = None,
        revocation_options: RevocationOptions | None = None,
        required_scopes: list[str] | None = None,
    ):
        """Initialize the provider with empty in-memory stores.

        Args:
            base_url: Public URL of this server (a placeholder default is used
                when omitted).
            service_documentation_url: The URL of the service documentation.
            client_registration_options: The client registration options.
            revocation_options: The revocation options.
            required_scopes: Scopes that are required for all requests.
        """
        super().__init__(
            base_url=base_url or "http://fastmcp.example.com",
            service_documentation_url=service_documentation_url,
            client_registration_options=client_registration_options,
            revocation_options=revocation_options,
            required_scopes=required_scopes,
        )
        # Primary stores, each keyed by the relevant id/token string.
        self.clients: dict[str, OAuthClientInformationFull] = {}
        self.auth_codes: dict[str, AuthorizationCode] = {}
        self.access_tokens: dict[str, AccessToken] = {}
        self.refresh_tokens: dict[str, RefreshToken] = {}

        # For revoking associated tokens
        self._access_to_refresh_map: dict[
            str, str
        ] = {}  # access_token_str -> refresh_token_str
        self._refresh_to_access_map: dict[
            str, str
        ] = {}  # refresh_token_str -> access_token_str

    async def get_client(self, client_id: str) -> OAuthClientInformationFull | None:
        """Return the registered client for *client_id*, or None if unknown."""
        return self.clients.get(client_id)

    async def register_client(self, client_info: OAuthClientInformationFull) -> None:
        """Register (or re-register) a client.

        Raises:
            ValueError: If requested scopes are outside the configured
                valid_scopes, or if client_id is missing.
        """
        # Validate scopes against valid_scopes if configured (matches MCP SDK behavior)
        if (
            client_info.scope is not None
            and self.client_registration_options is not None
            and self.client_registration_options.valid_scopes is not None
        ):
            requested_scopes = set(client_info.scope.split())
            valid_scopes = set(self.client_registration_options.valid_scopes)
            invalid_scopes = requested_scopes - valid_scopes
            if invalid_scopes:
                raise ValueError(
                    f"Requested scopes are not valid: {', '.join(invalid_scopes)}"
                )

        if client_info.client_id is None:
            raise ValueError("client_id is required for client registration")

        if client_info.client_id in self.clients:
            # As per RFC 7591, if client_id is already known, it's an update.
            # For this simple provider, we'll treat it as re-registration.
            # A real provider might handle updates or raise errors for conflicts.
            pass
        self.clients[client_info.client_id] = client_info

    async def authorize(
        self, client: OAuthClientInformationFull, params: AuthorizationParams
    ) -> str:
        """
        Simulates user authorization and generates an authorization code.
        Returns a redirect URI with the code and state.
        """
        if client.client_id not in self.clients:
            raise AuthorizeError(
                error="unauthorized_client",
                error_description=f"Client '{client.client_id}' not registered.",
            )

        # Validate redirect_uri (already validated by AuthorizationHandler, but good practice)
        try:
            # OAuthClientInformationFull should have a method like validate_redirect_uri
            # For this test provider, we assume it's valid if it matches one in client_info
            # The AuthorizationHandler already does robust validation using client.validate_redirect_uri
            if client.redirect_uris and params.redirect_uri not in client.redirect_uris:
                # This check might be too simplistic if redirect_uris can be patterns
                # or if params.redirect_uri is None and client has a default.
                # However, the AuthorizationHandler handles the primary validation.
                pass  # Let's assume AuthorizationHandler did its job.
        except Exception as e:  # Replace with specific validation error if client.validate_redirect_uri existed
            raise AuthorizeError(
                error="invalid_request", error_description="Invalid redirect_uri."
            ) from e

        auth_code_value = f"test_auth_code_{secrets.token_hex(16)}"
        expires_at = time.time() + DEFAULT_AUTH_CODE_EXPIRY_SECONDS

        # Ensure scopes are a list
        scopes_list = params.scopes if params.scopes is not None else []
        if client.scope:  # Filter params.scopes against client's registered scopes
            client_allowed_scopes = set(client.scope.split())
            scopes_list = [s for s in scopes_list if s in client_allowed_scopes]

        if client.client_id is None:
            raise AuthorizeError(
                error="invalid_client", error_description="Client ID is required"
            )

        auth_code = AuthorizationCode(
            code=auth_code_value,
            client_id=client.client_id,
            redirect_uri=params.redirect_uri,
            redirect_uri_provided_explicitly=params.redirect_uri_provided_explicitly,
            scopes=scopes_list,
            expires_at=expires_at,
            code_challenge=params.code_challenge,
            # code_challenge_method is assumed S256 by the framework
        )
        self.auth_codes[auth_code_value] = auth_code

        return construct_redirect_uri(
            str(params.redirect_uri), code=auth_code_value, state=params.state
        )

    async def load_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: str
    ) -> AuthorizationCode | None:
        """Return the stored code if it belongs to *client* and is unexpired.

        Expired codes are deleted as a side effect; a code issued to a
        different client returns None without being deleted.
        """
        auth_code_obj = self.auth_codes.get(authorization_code)
        if auth_code_obj:
            if auth_code_obj.client_id != client.client_id:
                return None  # Belongs to a different client
            if auth_code_obj.expires_at < time.time():
                del self.auth_codes[authorization_code]  # Expired
                return None
            return auth_code_obj
        return None

    async def exchange_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: AuthorizationCode
    ) -> OAuthToken:
        """Consume an authorization code and issue an access/refresh token pair.

        Raises:
            TokenError: If the code is unknown/already used, or client_id is
                missing.
        """
        # Authorization code should have been validated (existence, expiry, client_id match)
        # by the TokenHandler calling load_authorization_code before this.
        # We might want to re-verify or simply trust it's valid.
        if authorization_code.code not in self.auth_codes:
            raise TokenError(
                "invalid_grant", "Authorization code not found or already used."
            )

        # Consume the auth code
        del self.auth_codes[authorization_code.code]

        access_token_value = f"test_access_token_{secrets.token_hex(32)}"
        refresh_token_value = f"test_refresh_token_{secrets.token_hex(32)}"

        access_token_expires_at = int(time.time() + DEFAULT_ACCESS_TOKEN_EXPIRY_SECONDS)

        # Refresh token expiry
        refresh_token_expires_at = None
        if DEFAULT_REFRESH_TOKEN_EXPIRY_SECONDS is not None:
            refresh_token_expires_at = int(
                time.time() + DEFAULT_REFRESH_TOKEN_EXPIRY_SECONDS
            )

        if client.client_id is None:
            raise TokenError("invalid_client", "Client ID is required")

        self.access_tokens[access_token_value] = AccessToken(
            token=access_token_value,
            client_id=client.client_id,
            scopes=authorization_code.scopes,
            expires_at=access_token_expires_at,
        )
        self.refresh_tokens[refresh_token_value] = RefreshToken(
            token=refresh_token_value,
            client_id=client.client_id,
            scopes=authorization_code.scopes,  # Refresh token inherits scopes
            expires_at=refresh_token_expires_at,
        )

        # Record the pairing so revoking one token also revokes the other.
        self._access_to_refresh_map[access_token_value] = refresh_token_value
        self._refresh_to_access_map[refresh_token_value] = access_token_value

        return OAuthToken(
            access_token=access_token_value,
            token_type="Bearer",
            expires_in=DEFAULT_ACCESS_TOKEN_EXPIRY_SECONDS,
            refresh_token=refresh_token_value,
            scope=" ".join(authorization_code.scopes),
        )

    async def load_refresh_token(
        self, client: OAuthClientInformationFull, refresh_token: str
    ) -> RefreshToken | None:
        """Return the stored refresh token if owned by *client* and unexpired.

        An expired token (and its paired access token) is revoked as a side
        effect.
        """
        token_obj = self.refresh_tokens.get(refresh_token)
        if token_obj:
            if token_obj.client_id != client.client_id:
                return None  # Belongs to different client
            if token_obj.expires_at is not None and token_obj.expires_at < time.time():
                self._revoke_internal(
                    refresh_token_str=token_obj.token
                )  # Clean up expired
                return None
            return token_obj
        return None

    async def exchange_refresh_token(
        self,
        client: OAuthClientInformationFull,
        refresh_token: RefreshToken,  # This is the RefreshToken object, already loaded
        scopes: list[str],  # Requested scopes for the new access token
    ) -> OAuthToken:
        """Rotate a refresh token: revoke the old pair and issue a new one.

        Raises:
            TokenError: If requested scopes exceed the original grant, or
                client_id is missing.
        """
        # Validate scopes: requested scopes must be a subset of original scopes
        original_scopes = set(refresh_token.scopes)
        requested_scopes = set(scopes)
        if not requested_scopes.issubset(original_scopes):
            raise TokenError(
                "invalid_scope",
                "Requested scopes exceed those authorized by the refresh token.",
            )

        # Invalidate old refresh token and its associated access token (rotation)
        self._revoke_internal(refresh_token_str=refresh_token.token)

        # Issue new tokens
        new_access_token_value = f"test_access_token_{secrets.token_hex(32)}"
        new_refresh_token_value = f"test_refresh_token_{secrets.token_hex(32)}"

        access_token_expires_at = int(time.time() + DEFAULT_ACCESS_TOKEN_EXPIRY_SECONDS)

        # Refresh token expiry
        refresh_token_expires_at = None
        if DEFAULT_REFRESH_TOKEN_EXPIRY_SECONDS is not None:
            refresh_token_expires_at = int(
                time.time() + DEFAULT_REFRESH_TOKEN_EXPIRY_SECONDS
            )

        if client.client_id is None:
            raise TokenError("invalid_client", "Client ID is required")

        self.access_tokens[new_access_token_value] = AccessToken(
            token=new_access_token_value,
            client_id=client.client_id,
            scopes=scopes,  # Use newly requested (and validated) scopes
            expires_at=access_token_expires_at,
        )
        self.refresh_tokens[new_refresh_token_value] = RefreshToken(
            token=new_refresh_token_value,
            client_id=client.client_id,
            scopes=scopes,  # New refresh token also gets these scopes
            expires_at=refresh_token_expires_at,
        )

        self._access_to_refresh_map[new_access_token_value] = new_refresh_token_value
        self._refresh_to_access_map[new_refresh_token_value] = new_access_token_value

        return OAuthToken(
            access_token=new_access_token_value,
            token_type="Bearer",
            expires_in=DEFAULT_ACCESS_TOKEN_EXPIRY_SECONDS,
            refresh_token=new_refresh_token_value,
            scope=" ".join(scopes),
        )

    async def load_access_token(self, token: str) -> AccessToken | None:  # type: ignore[override]
        """Return the stored access token if present and unexpired.

        An expired token (and its paired refresh token) is revoked as a side
        effect.
        """
        token_obj = self.access_tokens.get(token)
        if token_obj:
            if token_obj.expires_at is not None and token_obj.expires_at < time.time():
                self._revoke_internal(
                    access_token_str=token_obj.token
                )  # Clean up expired
                return None
            return token_obj
        return None

    async def verify_token(self, token: str) -> AccessToken | None:  # type: ignore[override]
        """
        Verify a bearer token and return access info if valid.

        This method implements the TokenVerifier protocol by delegating
        to our existing load_access_token method.

        Args:
            token: The token string to validate

        Returns:
            AccessToken object if valid, None if invalid or expired
        """
        return await self.load_access_token(token)

    def _revoke_internal(
        self, access_token_str: str | None = None, refresh_token_str: str | None = None
    ):
        """Internal helper to remove tokens and their associations."""
        removed_access_token = None
        removed_refresh_token = None

        if access_token_str:
            if access_token_str in self.access_tokens:
                del self.access_tokens[access_token_str]
                removed_access_token = access_token_str
            # Get associated refresh token
            associated_refresh = self._access_to_refresh_map.pop(access_token_str, None)
            if associated_refresh:
                if associated_refresh in self.refresh_tokens:
                    del self.refresh_tokens[associated_refresh]
                    removed_refresh_token = associated_refresh
                self._refresh_to_access_map.pop(associated_refresh, None)

        if refresh_token_str:
            if refresh_token_str in self.refresh_tokens:
                del self.refresh_tokens[refresh_token_str]
                removed_refresh_token = refresh_token_str
            # Get associated access token
            associated_access = self._refresh_to_access_map.pop(refresh_token_str, None)
            if associated_access:
                if associated_access in self.access_tokens:
                    del self.access_tokens[associated_access]
                    removed_access_token = associated_access
                self._access_to_refresh_map.pop(associated_access, None)

        # Clean up any dangling references if one part of the pair was already gone
        if removed_access_token and removed_access_token in self._access_to_refresh_map:
            del self._access_to_refresh_map[removed_access_token]
        if (
            removed_refresh_token
            and removed_refresh_token in self._refresh_to_access_map
        ):
            del self._refresh_to_access_map[removed_refresh_token]

    async def revoke_token(
        self,
        token: AccessToken | RefreshToken,
    ) -> None:
        """Revokes an access or refresh token and its counterpart."""
        if isinstance(token, AccessToken):
            self._revoke_internal(access_token_str=token.token)
        elif isinstance(token, RefreshToken):
            self._revoke_internal(refresh_token_str=token.token)
        # If token is not found or already revoked, _revoke_internal does nothing, which is correct.
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/auth/providers/in_memory.py",
"license": "Apache License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/tags_example.py | """
Example demonstrating RouteMap tags functionality.
This example shows how to use the tags parameter in RouteMap
to selectively route OpenAPI endpoints based on their tags.
"""
import asyncio
from fastapi import FastAPI
from fastmcp import FastMCP
from fastmcp.server.openapi import MCPType, RouteMap
# Create a FastAPI app with tagged endpoints; the tags on each route drive
# the RouteMap examples in main().
app = FastAPI(title="Tagged API Example")
@app.get("/users", tags=["users", "public"])
async def get_users():
    """Get all users - public endpoint"""
    users = [(1, "Alice"), (2, "Bob")]
    return [{"id": user_id, "name": name} for user_id, name in users]
@app.post("/users", tags=["users", "admin"])
async def create_user(name: str):
    """Create a user - admin only"""
    new_user = {"id": 3, "name": name}
    return new_user
@app.get("/admin/stats", tags=["admin", "internal"])
async def get_admin_stats():
    """Get admin statistics - internal use"""
    stats = {"total_users": 100, "active_sessions": 25}
    return stats
@app.get("/health", tags=["public"])
async def health_check():
    """Public health check"""
    return dict(status="healthy")
@app.get("/metrics")
async def get_metrics():
    """Metrics endpoint with no tags"""
    metrics = {"requests": 1000, "errors": 5}
    return metrics
async def _print_summary(mcp: FastMCP) -> None:
    """Print the tools and resources exposed by a FastMCP server.

    Extracted helper: the four examples below previously duplicated this
    list/format/print sequence verbatim.
    """
    tools = await mcp.list_tools()
    resources = await mcp.list_resources()
    print(f"Tools ({len(tools)}): {', '.join(t.name for t in tools)}")
    print(f"Resources ({len(resources)}): {', '.join(str(r.uri) for r in resources)}")


async def main():
    """Demonstrate different tag-based routing strategies."""

    print("=== Example 1: Make admin-tagged routes tools ===")

    # Strategy 1: Convert admin-tagged routes to tools
    mcp1 = FastMCP.from_fastapi(
        app=app,
        route_maps=[
            RouteMap(methods="*", pattern=r".*", mcp_type=MCPType.TOOL, tags={"admin"}),
            RouteMap(methods=["GET"], pattern=r".*", mcp_type=MCPType.RESOURCE),
        ],
    )
    await _print_summary(mcp1)

    print("\n=== Example 2: Exclude internal routes ===")

    # Strategy 2: Exclude internal routes entirely
    mcp2 = FastMCP.from_fastapi(
        app=app,
        route_maps=[
            RouteMap(
                methods="*", pattern=r".*", mcp_type=MCPType.EXCLUDE, tags={"internal"}
            ),
            RouteMap(methods=["GET"], pattern=r".*", mcp_type=MCPType.RESOURCE),
            RouteMap(methods=["POST"], pattern=r".*", mcp_type=MCPType.TOOL),
        ],
    )
    await _print_summary(mcp2)

    print("\n=== Example 3: Pattern + Tags combination ===")

    # Strategy 3: Routes matching both pattern AND tags
    mcp3 = FastMCP.from_fastapi(
        app=app,
        route_maps=[
            # Admin routes under /admin path -> tools
            RouteMap(
                methods="*",
                pattern=r".*/admin/.*",
                mcp_type=MCPType.TOOL,
                tags={"admin"},
            ),
            # Public routes -> tools
            RouteMap(
                methods="*", pattern=r".*", mcp_type=MCPType.TOOL, tags={"public"}
            ),
            RouteMap(methods=["GET"], pattern=r".*", mcp_type=MCPType.RESOURCE),
        ],
    )
    await _print_summary(mcp3)

    print("\n=== Example 4: Multiple tag AND condition ===")

    # Strategy 4: Routes must have ALL specified tags
    mcp4 = FastMCP.from_fastapi(
        app=app,
        route_maps=[
            # Routes with BOTH "users" AND "admin" tags -> tools
            RouteMap(
                methods="*",
                pattern=r".*",
                mcp_type=MCPType.TOOL,
                tags={"users", "admin"},
            ),
            RouteMap(methods=["GET"], pattern=r".*", mcp_type=MCPType.RESOURCE),
        ],
    )
    await _print_summary(mcp4)
if __name__ == "__main__":
    # Run all four tag-routing demonstrations.
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/tags_example.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/cli/run.py | """FastMCP run command implementation with enhanced type hints."""
import asyncio
import contextlib
import json
import os
import re
import signal
import subprocess
import sys
from collections.abc import Callable
from pathlib import Path
from typing import Any, Literal
from mcp.server.fastmcp import FastMCP as FastMCP1x
from watchfiles import Change, awatch
from fastmcp.server.server import FastMCP, create_proxy
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.mcp_server_config import (
MCPServerConfig,
)
from fastmcp.utilities.mcp_server_config.v1.sources.filesystem import FileSystemSource
logger = get_logger("cli.run")

# Type aliases for better type safety
TransportType = Literal["stdio", "http", "sse", "streamable-http"]
LogLevelType = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]

# File extensions to watch for reload (see _watch_filter / run_with_reload).
WATCHED_EXTENSIONS: set[str] = {
    # Python
    ".py",
    # JavaScript/TypeScript
    ".js",
    ".ts",
    ".jsx",
    ".tsx",
    # Markup/Content
    ".html",
    ".md",
    ".mdx",
    ".txt",
    ".xml",
    # Styles
    ".css",
    ".scss",
    ".sass",
    ".less",
    # Data/Config
    ".json",
    ".yaml",
    ".yml",
    ".toml",
    # Framework-specific
    ".vue",
    ".svelte",
    # GraphQL
    ".graphql",
    ".gql",
    # Images
    ".svg",
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".ico",
    ".webp",
    # Media
    ".mp3",
    ".mp4",
    ".wav",
    ".webm",
    # Fonts
    ".woff",
    ".woff2",
    ".ttf",
    ".eot",
}
def is_url(path: str) -> bool:
    """Check if a string is an HTTP(S) URL.

    Args:
        path: The server spec string to test.

    Returns:
        True if *path* begins with ``http://`` or ``https://``.
    """
    # str.startswith with a tuple of prefixes matches exactly what the old
    # r"^https?://" regex matched, without recompiling a pattern per call.
    return path.startswith(("http://", "https://"))
def create_client_server(url: str) -> Any:
    """Create a FastMCP server from a client URL.

    Args:
        url: The URL to connect to

    Returns:
        A FastMCP server instance; exits the process on failure.
    """
    try:
        import fastmcp

        return create_proxy(fastmcp.Client(url))
    except Exception as e:
        logger.error(f"Failed to create client for URL {url}: {e}")
        sys.exit(1)
def create_mcp_config_server(mcp_config_path: Path) -> FastMCP[None]:
    """Create a FastMCP server from a MCPConfig."""
    with mcp_config_path.open() as config_file:
        config_data = json.load(config_file)
    return create_proxy(config_data)
def load_mcp_server_config(config_path: Path) -> MCPServerConfig:
    """Load a FastMCP configuration from a fastmcp.json file.

    Args:
        config_path: Path to fastmcp.json file

    Returns:
        MCPServerConfig object
    """
    server_config = MCPServerConfig.from_file(config_path)
    # Apply the runtime settings declared in the deployment section.
    server_config.deployment.apply_runtime_settings(config_path)
    return server_config
async def run_command(
    server_spec: str,
    transport: TransportType | None = None,
    host: str | None = None,
    port: int | None = None,
    path: str | None = None,
    log_level: LogLevelType | None = None,
    server_args: list[str] | None = None,
    show_banner: bool = True,
    use_direct_import: bool = False,
    skip_source: bool = False,
    stateless: bool = False,
) -> None:
    """Run a MCP server or connect to a remote one.

    Args:
        server_spec: Python file, object specification (file:obj), config file, or URL
        transport: Transport protocol to use
        host: Host to bind to when using http transport
        port: Port to bind to when using http transport
        path: Path to bind to when using http transport
        log_level: Log level
        server_args: Additional arguments to pass to the server
        show_banner: Whether to show the server banner
        use_direct_import: Whether to use direct import instead of subprocess
        skip_source: Whether to skip source preparation step
        stateless: Whether to run in stateless mode (no session)
    """
    # Special case: URLs
    if is_url(server_spec):
        # Handle URL case
        server = create_client_server(server_spec)
        logger.debug(f"Created client proxy server for {server_spec}")

    # Special case: MCPConfig files (legacy)
    elif server_spec.endswith(".json"):
        # Load JSON and check which type of config it is
        config_path = Path(server_spec)
        with open(config_path) as f:
            data = json.load(f)

        # Check if it's an MCPConfig first (has canonical mcpServers key)
        if "mcpServers" in data:
            # It's an MCP config
            server = create_mcp_config_server(config_path)
        else:
            # It's a FastMCP config - load it properly
            config = load_mcp_server_config(config_path)

            # Merge deployment config with CLI arguments (CLI takes precedence)
            transport = transport or config.deployment.transport
            host = host or config.deployment.host
            port = port or config.deployment.port
            path = path or config.deployment.path
            log_level = log_level or config.deployment.log_level
            server_args = (
                server_args if server_args is not None else config.deployment.args
            )

            # Prepare source only (environment is handled by uv run).
            # Fix: use a plain `if` instead of a conditional expression
            # evaluated solely for its side effect.
            if not skip_source:
                await config.prepare_source()

            # Load the server using the source
            from contextlib import nullcontext

            from fastmcp.cli.cli import with_argv

            # Use sys.argv context manager if deployment args specified
            argv_context = with_argv(server_args) if server_args else nullcontext()
            with argv_context:
                server = await config.source.load_server()
            logger.debug(f'Found server "{server.name}" from config {config_path}')

    else:
        # Regular file case - create a MCPServerConfig with FileSystemSource
        source = FileSystemSource(path=server_spec)
        config = MCPServerConfig(source=source)

        # Prepare source only (environment is handled by uv run).
        # Fix: plain `if` rather than a side-effecting conditional expression.
        if not skip_source:
            await config.prepare_source()

        # Load the server
        from contextlib import nullcontext

        from fastmcp.cli.cli import with_argv

        # Use sys.argv context manager if server_args specified
        argv_context = with_argv(server_args) if server_args else nullcontext()
        with argv_context:
            server = await config.source.load_server()
        logger.debug(f'Found server "{server.name}" in {source.path}')

    # Run the server
    # handle v1 servers
    if isinstance(server, FastMCP1x):
        await run_v1_server_async(server, host=host, port=port, transport=transport)
        return

    # Only forward the options the caller actually provided so server-side
    # defaults are preserved.
    kwargs = {}
    if transport:
        kwargs["transport"] = transport
    if host:
        kwargs["host"] = host
    if port:
        kwargs["port"] = port
    if path:
        kwargs["path"] = path
    if log_level:
        kwargs["log_level"] = log_level
    if stateless:
        kwargs["stateless"] = True
    if not show_banner:
        kwargs["show_banner"] = False

    try:
        await server.run_async(**kwargs)
    except Exception as e:
        logger.error(f"Failed to run server: {e}")
        sys.exit(1)
def run_module_command(
    module_name: str,
    *,
    env_command_builder: Callable[[list[str]], list[str]] | None = None,
    extra_args: list[str] | None = None,
) -> None:
    """Run a Python module directly using ``python -m <module>``.

    When ``-m`` is used, the module manages its own server startup.
    No server-object discovery or transport overrides are applied.

    Args:
        module_name: Dotted module name (e.g. ``my_package``).
        env_command_builder: An optional callable that wraps a command list
            with environment setup (e.g. ``UVEnvironment.build_command``).
        extra_args: Extra arguments forwarded after the module name.
    """
    # Use bare "python" when an env wrapper (e.g. uv run) is active so that
    # the wrapper can resolve the interpreter via --python / environment config.
    # Fall back to sys.executable for direct execution without a wrapper.
    interpreter = sys.executable if env_command_builder is None else "python"

    cmd: list[str] = [interpreter, "-m", module_name, *(extra_args or [])]

    # Wrap with environment (e.g. uv run) if configured
    if env_command_builder is not None:
        cmd = env_command_builder(cmd)

    logger.debug(f"Running module: {' '.join(cmd)}")

    try:
        completed = subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        logger.error(f"Module {module_name} exited with code {e.returncode}")
        sys.exit(e.returncode)
    sys.exit(completed.returncode)
async def run_v1_server_async(
    server: FastMCP1x,
    host: str | None = None,
    port: int | None = None,
    transport: TransportType | None = None,
) -> None:
    """Run a FastMCP 1.x server using async methods.

    Args:
        server: FastMCP 1.x server instance
        host: Host to bind to
        port: Port to bind to
        transport: Transport protocol to use
    """
    if host:
        server.settings.host = host
    if port:
        server.settings.port = port

    # Dispatch on transport; an unrecognized value is a no-op, mirroring
    # the Literal-typed TransportType contract.
    if transport == "stdio":
        await server.run_stdio_async()
    elif transport in ("http", "streamable-http", None):
        await server.run_streamable_http_async()
    elif transport == "sse":
        await server.run_sse_async()
def _watch_filter(_change: Change, path: str) -> bool:
    """Filter for files that should trigger reload."""
    for extension in WATCHED_EXTENSIONS:
        if path.endswith(extension):
            return True
    return False
async def _terminate_process(process: asyncio.subprocess.Process) -> None:
    """Terminate a subprocess and all its children.

    Sends SIGTERM to the process group first for graceful shutdown,
    then falls back to SIGKILL if the process doesn't exit in time.

    Args:
        process: The subprocess to terminate; a no-op if it already exited.
    """
    # Already exited — nothing to do.
    if process.returncode is not None:
        return

    pid = process.pid

    if sys.platform != "win32":
        # Send SIGTERM to the entire process group for graceful shutdown.
        # The process may have died between the returncode check and the
        # signal, so ProcessLookupError/OSError are safe to ignore.
        with contextlib.suppress(ProcessLookupError, OSError):
            os.killpg(os.getpgid(pid), signal.SIGTERM)

        # Wait briefly for graceful exit
        try:
            await asyncio.wait_for(process.wait(), timeout=3.0)
            return
        except asyncio.TimeoutError:
            pass

        # Force kill the entire process group
        with contextlib.suppress(ProcessLookupError, OSError):
            os.killpg(os.getpgid(pid), signal.SIGKILL)
    else:
        # Windows: no process groups via killpg here; kill() the process.
        process.kill()

    # Reap the process so it doesn't linger as a zombie.
    await process.wait()
async def run_with_reload(
    cmd: list[str],
    reload_dirs: list[Path] | None = None,
    is_stdio: bool = False,
) -> None:
    """Run a command with file watching and auto-reload.

    Supervises *cmd* as a subprocess and restarts it when a watched file
    changes. If the subprocess exits on its own, waits for the next file
    change before restarting to avoid a hot crash loop.

    Args:
        cmd: Command to run as subprocess (should include --no-reload)
        reload_dirs: Directories to watch for changes (default: cwd)
        is_stdio: Whether this is stdio transport
    """
    watch_paths = reload_dirs or [Path.cwd()]
    process: asyncio.subprocess.Process | None = None
    first_run = True

    if is_stdio:
        logger.info("Reload mode enabled (using stateless sessions)")
    else:
        logger.info(
            "Reload mode enabled (using stateless HTTP). "
            "Some features requiring bidirectional communication "
            "(like elicitation) are not available."
        )

    # Handle SIGTERM/SIGINT gracefully with proper asyncio integration
    shutdown_event = asyncio.Event()
    loop = asyncio.get_running_loop()

    def signal_handler() -> None:
        logger.info("Received shutdown signal, stopping...")
        shutdown_event.set()

    # Windows doesn't support add_signal_handler
    if sys.platform != "win32":
        loop.add_signal_handler(signal.SIGTERM, signal_handler)
        loop.add_signal_handler(signal.SIGINT, signal_handler)

    try:
        while not shutdown_event.is_set():
            # Build command - add --no-banner on restarts to reduce noise
            if first_run or "--no-banner" in cmd:
                run_cmd = cmd
            else:
                run_cmd = [*cmd, "--no-banner"]
            first_run = False

            process = await asyncio.create_subprocess_exec(
                *run_cmd,
                stdin=None,
                stdout=None,
                stderr=None,
                # Own process group so _terminate_process can kill the whole tree
                start_new_session=sys.platform != "win32",
            )

            # Watch for either: file changes OR process death
            watch_task = asyncio.create_task(
                anext(aiter(awatch(*watch_paths, watch_filter=_watch_filter)))  # ty: ignore[invalid-argument-type]
            )
            wait_task = asyncio.create_task(process.wait())
            shutdown_task = asyncio.create_task(shutdown_event.wait())

            done, pending = await asyncio.wait(
                [watch_task, wait_task, shutdown_task],
                return_when=asyncio.FIRST_COMPLETED,
            )

            # Cancel and reap whichever waiters did not fire.
            for task in pending:
                task.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await task

            if shutdown_task in done:
                # User requested shutdown
                break

            if wait_task in done:
                # Server died on its own - wait for file change before restart
                code = wait_task.result()
                if code != 0:
                    logger.error(
                        f"Server exited with code {code}, waiting for file change..."
                    )
                else:
                    logger.info("Server exited, waiting for file change...")

                # Wait for file change or shutdown (avoid hot loop on crash)
                watch_task = asyncio.create_task(
                    anext(aiter(awatch(*watch_paths, watch_filter=_watch_filter)))  # ty: ignore[invalid-argument-type]
                )
                shutdown_task = asyncio.create_task(shutdown_event.wait())
                done, pending = await asyncio.wait(
                    [watch_task, shutdown_task],
                    return_when=asyncio.FIRST_COMPLETED,
                )
                for task in pending:
                    task.cancel()
                    with contextlib.suppress(asyncio.CancelledError):
                        await task
                if shutdown_task in done:
                    break
                logger.info("Detected changes, restarting...")
            else:
                # File changed - restart server
                changes = watch_task.result()
                logger.info(
                    f"Detected changes in {len(changes)} file(s), restarting..."
                )

            # Stop the current server (no-op if it already exited) before the
            # loop spawns a fresh one.
            await _terminate_process(process)
    except KeyboardInterrupt:
        # Handle Ctrl+C on Windows (where add_signal_handler isn't available)
        logger.info("Received shutdown signal, stopping...")
    finally:
        # Clean up signal handlers
        if sys.platform != "win32":
            loop.remove_signal_handler(signal.SIGTERM)
            loop.remove_signal_handler(signal.SIGINT)
        if process and process.returncode is None:
            await _terminate_process(process)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/cli/run.py",
"license": "Apache License 2.0",
"lines": 403,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/client/progress.py | from typing import TypeAlias
from mcp.shared.session import ProgressFnT
from fastmcp.utilities.logging import get_logger
logger = get_logger(__name__)
ProgressHandler: TypeAlias = ProgressFnT
async def default_progress_handler(
    progress: float, total: float | None, message: str | None
) -> None:
    """Default handler for progress notifications.

    Logs progress updates at debug level, properly handling missing total or
    message values.

    Args:
        progress: Current progress value
        total: Optional total expected value
        message: Optional status message
    """
    if total is None:
        # No total reported - show the bare progress value.
        detail = f"{progress}"
    elif total == 0:
        # Avoid division by zero when a server reports an invalid total.
        detail = f"{progress}/{total}"
    else:
        detail = f"{progress}/{total} ({(progress / total) * 100:.1f}%)"

    # Append the status message when one was provided.
    suffix = f" - {message}" if message else ""
    logger.debug(f"Progress: {detail}{suffix}")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/progress.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/client/test_openapi.py | import json
import pytest
from fastapi import FastAPI, Request
from mcp.types import TextResourceContents
from fastmcp import Client, FastMCP
from fastmcp.client.transports import SSETransport, StreamableHttpTransport
from fastmcp.server.providers.openapi import MCPType, RouteMap
from fastmcp.utilities.tests import run_server_async
def create_fastmcp_server_for_headers() -> FastMCP:
    """Create a FastMCP server from FastAPI app with experimental parser."""
    app = FastAPI()

    @app.get("/headers")
    def get_headers(request: Request):
        # Echo all request headers back to the caller.
        return request.headers

    @app.get("/headers/{header_name}")
    def get_header_by_name(header_name: str, request: Request):
        # Echo a single header value, looked up by name.
        return request.headers[header_name]

    @app.post("/headers")
    def post_headers(request: Request):
        # POST variant: mapped to a tool (not a resource) by the route maps below.
        return request.headers

    mcp = FastMCP.from_fastapi(
        app,
        # Headers the MCP server attaches to every outbound request to the
        # FastAPI app; tests assert these arrive at the endpoints.
        httpx_client_kwargs={"headers": {"x-server-header": "test-abc"}},
        route_maps=[
            # GET requests with path parameters go to ResourceTemplate
            RouteMap(
                methods=["GET"],
                pattern=r".*\{.*\}.*",
                mcp_type=MCPType.RESOURCE_TEMPLATE,
            ),
            # GET requests without path parameters go to Resource
            RouteMap(methods=["GET"], pattern=r".*", mcp_type=MCPType.RESOURCE),
        ],
    )
    return mcp
@pytest.fixture
async def shttp_server():
    """Yield the URL of a header-echo server running over streamable HTTP."""
    async with run_server_async(
        create_fastmcp_server_for_headers(), transport="http"
    ) as url:
        yield url
@pytest.fixture
async def sse_server():
    """Yield the URL of a header-echo server running over SSE."""
    async with run_server_async(
        create_fastmcp_server_for_headers(), transport="sse"
    ) as url:
        yield url
@pytest.fixture
async def proxy_server(shttp_server: str):
    """Yield the URL of a FastMCP proxy that forwards to the streamable-HTTP server."""
    backend = StreamableHttpTransport(shttp_server)
    async with run_server_async(FastMCP.as_proxy(backend), transport="http") as url:
        yield url
async def test_fastapi_client_headers_streamable_http_resource(shttp_server: str):
    """Server-configured httpx headers appear when reading a resource over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server)
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text)["x-server-header"] == "test-abc"
async def test_fastapi_client_headers_sse_resource(sse_server: str):
    """Server-configured httpx headers appear when reading a resource over SSE."""
    transport = SSETransport(sse_server)
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text)["x-server-header"] == "test-abc"
async def test_fastapi_client_headers_streamable_http_tool(shttp_server: str):
    """Server-configured httpx headers appear when calling a tool over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server)
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("post_headers_headers_post")
        returned: dict[str, str] = outcome.data
        assert returned["x-server-header"] == "test-abc"
async def test_fastapi_client_headers_sse_tool(sse_server: str):
    """Server-configured httpx headers appear when calling a tool over SSE."""
    transport = SSETransport(sse_server)
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("post_headers_headers_post")
        returned: dict[str, str] = outcome.data
        assert returned["x-server-header"] == "test-abc"
async def test_client_headers_sse_resource(sse_server: str):
    """Client-supplied headers reach the backend when reading a resource over SSE."""
    transport = SSETransport(sse_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text)["x-test"] == "test-123"
async def test_client_headers_shttp_resource(shttp_server: str):
    """Client-supplied headers reach the backend when reading a resource over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text)["x-test"] == "test-123"
async def test_client_headers_sse_resource_template(sse_server: str):
    """A client header can be read back through a resource template over SSE."""
    transport = SSETransport(sse_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource(
            "resource://get_header_by_name_headers/x-test"
        )
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text) == "test-123"
async def test_client_headers_shttp_resource_template(shttp_server: str):
    """A client header can be read back through a resource template over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource(
            "resource://get_header_by_name_headers/x-test"
        )
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text) == "test-123"
async def test_client_headers_sse_tool(sse_server: str):
    """Client-supplied headers reach the backend when calling a tool over SSE."""
    transport = SSETransport(sse_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("post_headers_headers_post")
        returned: dict[str, str] = outcome.data
        assert returned["x-test"] == "test-123"
async def test_client_headers_shttp_tool(shttp_server: str):
    """Client-supplied headers reach the backend when calling a tool over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-TEST": "test-123"})
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("post_headers_headers_post")
        returned: dict[str, str] = outcome.data
        assert returned["x-test"] == "test-123"
async def test_client_overrides_server_headers(shttp_server: str):
    """A header set on the client wins over the same header configured server-side."""
    transport = StreamableHttpTransport(
        shttp_server, headers={"x-server-header": "test-client"}
    )
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        assert json.loads(first.text)["x-server-header"] == "test-client"
async def test_client_with_excluded_header_is_ignored(sse_server: str):
    """Protected headers like ``host`` are stripped while similar-looking ones pass through."""
    transport = SSETransport(
        sse_server,
        headers={
            "x-server-header": "test-client",
            "host": "1.2.3.4",
            "not-host": "1.2.3.4",
        },
    )
    async with Client(transport=transport) as client:
        contents = await client.read_resource("resource://get_headers_headers_get")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        received = json.loads(first.text)
        # The client-supplied "host" must not override the real one.
        assert received["not-host"] == "1.2.3.4"
        assert received["host"] == "fastapi"
@pytest.mark.flaky(retries=2, delay=1)
async def test_client_headers_proxy(proxy_server: str):
    """
    Test that client headers are passed through the proxy to the remote server.
    """
    async with Client(transport=StreamableHttpTransport(proxy_server)) as client:
        result = await client.read_resource("resource://get_headers_headers_get")
        assert isinstance(result[0], TextResourceContents)
        headers = json.loads(result[0].text)
        # The backend injects this header via httpx_client_kwargs; seeing it
        # here proves the request traversed proxy -> backend successfully.
        assert headers["x-server-header"] == "test-abc"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_openapi.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/test_progress.py | import pytest
from fastmcp import Client, Context, FastMCP
# Shared buffer that progress handlers append to; cleared around every test
# by the autouse fixture below.
PROGRESS_MESSAGES = []
@pytest.fixture(autouse=True)
def clear_progress_messages():
    # Empty the shared buffer before and after each test so assertions only
    # see notifications produced by the current test.
    PROGRESS_MESSAGES.clear()
    yield
    PROGRESS_MESSAGES.clear()
@pytest.fixture
def fastmcp_server():
    # Server with one tool that emits three progress notifications
    # (1/3, 2/3, 3/3) before returning.
    mcp = FastMCP()

    @mcp.tool
    async def progress_tool(context: Context) -> int:
        for i in range(3):
            await context.report_progress(
                progress=i + 1,
                total=3,
                message=f"{(i + 1) / 3 * 100:.2f}% complete",
            )
        return 100

    return mcp
# The exact notifications progress_tool emits, in order; compared against
# PROGRESS_MESSAGES after each tool call.
EXPECTED_PROGRESS_MESSAGES = [
    dict(progress=1, total=3, message="33.33% complete"),
    dict(progress=2, total=3, message="66.67% complete"),
    dict(progress=3, total=3, message="100.00% complete"),
]
async def progress_handler(
    progress: float, total: float | None, message: str | None
) -> None:
    """Record each progress notification into the shared module-level buffer."""
    PROGRESS_MESSAGES.append(
        {"progress": progress, "total": total, "message": message}
    )
async def test_progress_handler(fastmcp_server: FastMCP):
    """A client-level progress handler receives every notification from the tool."""
    client = Client(fastmcp_server, progress_handler=progress_handler)
    async with client:
        await client.call_tool("progress_tool", {})
    assert PROGRESS_MESSAGES == EXPECTED_PROGRESS_MESSAGES
async def test_progress_handler_can_be_supplied_on_tool_call(fastmcp_server: FastMCP):
    """A per-call progress handler works without one configured on the client."""
    async with Client(fastmcp_server) as client:
        await client.call_tool(
            "progress_tool", {}, progress_handler=progress_handler
        )
    assert PROGRESS_MESSAGES == EXPECTED_PROGRESS_MESSAGES
async def test_progress_handler_supplied_on_tool_call_overrides_default(
    fastmcp_server: FastMCP,
):
    """A per-call handler replaces the client default instead of running alongside it."""

    async def exploding_handler(
        progress: float, total: float | None, message: str | None
    ) -> None:
        raise Exception("This should not be called")

    client = Client(fastmcp_server, progress_handler=exploding_handler)
    async with client:
        await client.call_tool(
            "progress_tool", {}, progress_handler=progress_handler
        )
    assert PROGRESS_MESSAGES == EXPECTED_PROGRESS_MESSAGES
async def test_default_progress_handler_handles_zero_total() -> None:
    """The default handler must not divide by zero when total == 0."""
    from fastmcp.client.progress import default_progress_handler

    # Must complete without raising ZeroDivisionError.
    await default_progress_handler(progress=1, total=0, message="starting")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_progress.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/test_stdio.py | import asyncio
import gc
import inspect
import os
import weakref
import psutil
import pytest
from fastmcp import Client, FastMCP
from fastmcp.client.transports import PythonStdioTransport, StdioTransport
def running_under_debugger():
    """Return True when the test process was launched under debugpy."""
    return os.getenv("DEBUGPY_RUNNING") == "true"
def gc_collect_harder():
    """Run several collector passes so cyclic garbage is reliably reclaimed."""
    for _ in range(6):
        gc.collect()
class TestParallelCalls:
    @pytest.fixture
    def stdio_script(self, tmp_path):
        # Minimal stdio server whose single tool returns the subprocess PID.
        script = inspect.cleandoc('''
            import os
            from fastmcp import FastMCP

            mcp = FastMCP()

            @mcp.tool
            def pid() -> int:
                """Gets PID of server"""
                return os.getpid()

            if __name__ == "__main__":
                mcp.run()
            ''')
        script_file = tmp_path / "stdio.py"
        script_file.write_text(script)
        return script_file

    async def test_parallel_calls(self, stdio_script):
        # Fan ten concurrent list_tools() calls through a proxy backed by a
        # single stdio subprocess; none of them should raise.
        backend_transport = PythonStdioTransport(script_path=stdio_script)
        backend_client = Client(transport=backend_transport)
        proxy = FastMCP.as_proxy(backend=backend_client, name="PROXY")
        count = 10
        tasks = [proxy.list_tools() for _ in range(count)]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        assert len(results) == count
        errors = [result for result in results if isinstance(result, Exception)]
        assert len(errors) == 0
class TestKeepAlive:
    # https://github.com/PrefectHQ/fastmcp/issues/581

    @pytest.fixture
    def stdio_script(self, tmp_path):
        # Stdio server whose `pid` tool lets tests tell whether two calls hit
        # the same subprocess (session kept alive) or a freshly spawned one.
        script = inspect.cleandoc('''
            import os
            from fastmcp import FastMCP

            mcp = FastMCP()

            @mcp.tool
            def pid() -> int:
                """Gets PID of server"""
                return os.getpid()

            if __name__ == "__main__":
                mcp.run()
            ''')
        script_file = tmp_path / "stdio.py"
        script_file.write_text(script)
        return script_file

    async def test_keep_alive_default_true(self):
        # keep_alive defaults to True when not passed explicitly.
        client = Client(transport=StdioTransport(command="python", args=[""]))
        assert client.transport.keep_alive is True

    async def test_keep_alive_set_false(self):
        client = Client(
            transport=StdioTransport(command="python", args=[""], keep_alive=False)
        )
        assert client.transport.keep_alive is False

    async def test_keep_alive_maintains_session_across_multiple_calls(
        self, stdio_script
    ):
        # Equal PIDs across two separate connection scopes prove the
        # subprocess survived between them.
        client = Client(transport=PythonStdioTransport(script_path=stdio_script))
        assert client.transport.keep_alive is True

        async with client:
            result1 = await client.call_tool("pid")
            pid1: int = result1.data

        async with client:
            result2 = await client.call_tool("pid")
            pid2: int = result2.data

        assert pid1 == pid2

    @pytest.mark.skipif(
        running_under_debugger(), reason="Debugger holds a reference to the transport"
    )
    async def test_keep_alive_true_exit_scope_kills_transport(self, stdio_script):
        # Once the owning scope exits, nothing should keep the transport
        # object alive - verified via a weak reference after forced GC.
        transport_weak_ref: weakref.ref[PythonStdioTransport] | None = None

        async def test_server():
            transport = PythonStdioTransport(script_path=stdio_script, keep_alive=True)
            nonlocal transport_weak_ref
            transport_weak_ref = weakref.ref(transport)
            async with transport.connect_session():
                pass

        await test_server()

        gc_collect_harder()

        # This test will fail while debugging because the debugger holds a reference to the underlying transport
        assert transport_weak_ref
        transport = transport_weak_ref()
        assert transport is None

    @pytest.mark.skipif(
        running_under_debugger(), reason="Debugger holds a reference to the transport"
    )
    async def test_keep_alive_true_exit_scope_kills_client(self, stdio_script):
        # Even with keep_alive=True, the subprocess must die once the client
        # itself is garbage collected.
        pid: int | None = None

        async def test_server():
            transport = PythonStdioTransport(script_path=stdio_script, keep_alive=True)
            client = Client(transport=transport)
            assert client.transport.keep_alive is True

            async with client:
                result1 = await client.call_tool("pid")
                nonlocal pid
                pid = result1.data

        await test_server()

        gc_collect_harder()

        # This test may fail/hang while debugging because the debugger holds a reference to the underlying transport
        with pytest.raises(psutil.NoSuchProcess):
            # Poll until the OS reports the process gone; pytest.raises exits
            # the loop when psutil.Process finally raises.
            while True:
                psutil.Process(pid)
                await asyncio.sleep(0.1)

    async def test_keep_alive_false_exit_scope_kills_server(self, stdio_script):
        pid: int | None = None

        async def test_server():
            transport = PythonStdioTransport(script_path=stdio_script, keep_alive=False)
            client = Client(transport=transport)
            assert client.transport.keep_alive is False

            async with client:
                result1 = await client.call_tool("pid")
                nonlocal pid
                pid = result1.data

            del client

        await test_server()

        with pytest.raises(psutil.NoSuchProcess):
            while True:
                psutil.Process(pid)
                await asyncio.sleep(0.1)

    async def test_keep_alive_false_starts_new_session_across_multiple_calls(
        self, stdio_script
    ):
        # With keep_alive disabled, each connection scope spawns a fresh
        # subprocess, so the PIDs must differ.
        client = Client(
            transport=PythonStdioTransport(script_path=stdio_script, keep_alive=False)
        )
        assert client.transport.keep_alive is False

        async with client:
            result1 = await client.call_tool("pid")
            pid1: int = result1.data

        async with client:
            result2 = await client.call_tool("pid")
            pid2: int = result2.data

        assert pid1 != pid2

    async def test_keep_alive_starts_new_session_if_manually_closed(self, stdio_script):
        # close() discards the kept-alive session; the next scope gets a new
        # subprocess.
        client = Client(transport=PythonStdioTransport(script_path=stdio_script))
        assert client.transport.keep_alive is True

        async with client:
            result1 = await client.call_tool("pid")
            pid1: int = result1.data

        await client.close()

        async with client:
            result2 = await client.call_tool("pid")
            pid2: int = result2.data

        assert pid1 != pid2

    async def test_keep_alive_maintains_session_if_reentered(self, stdio_script):
        # Nested `async with` on the same client must reuse the session, and
        # the session must still work after the inner scope exits.
        client = Client(transport=PythonStdioTransport(script_path=stdio_script))
        assert client.transport.keep_alive is True

        async with client:
            result1 = await client.call_tool("pid")
            pid1: int = result1.data

            async with client:
                result2 = await client.call_tool("pid")
                pid2: int = result2.data

            result3 = await client.call_tool("pid")
            pid3: int = result3.data

        assert pid1 == pid2 == pid3

    async def test_close_session_and_try_to_use_client_raises_error(self, stdio_script):
        # Closing inside the connection scope leaves the client unusable.
        client = Client(transport=PythonStdioTransport(script_path=stdio_script))
        assert client.transport.keep_alive is True

        async with client:
            await client.close()
            with pytest.raises(RuntimeError, match="Client is not connected"):
                await client.call_tool("pid")

    async def test_session_task_failure_raises_immediately_on_enter(self):
        # Use a command that will fail to start
        client = Client(
            transport=StdioTransport(command="nonexistent_command", args=[])
        )

        # Should raise RuntimeError immediately, not defer until first use
        with pytest.raises(RuntimeError, match="Client failed to connect"):
            async with client:
                pass
class TestLogFile:
    @pytest.fixture
    def stdio_script_with_stderr(self, tmp_path):
        # Server whose tool writes its argument to stderr, so tests can check
        # where the subprocess's stderr output ends up.
        script = inspect.cleandoc('''
            import sys
            from fastmcp import FastMCP

            mcp = FastMCP()

            @mcp.tool
            def write_error(message: str) -> str:
                """Writes a message to stderr and returns it"""
                print(message, file=sys.stderr, flush=True)
                return message

            if __name__ == "__main__":
                mcp.run()
            ''')
        script_file = tmp_path / "stderr_script.py"
        script_file.write_text(script)
        return script_file

    async def test_log_file_parameter_accepted_by_stdio_transport(self, tmp_path):
        """Test that log_file parameter can be set on StdioTransport"""
        log_file_path = tmp_path / "errors.log"
        transport = StdioTransport(
            command="python", args=["script.py"], log_file=log_file_path
        )
        assert transport.log_file == log_file_path

    async def test_log_file_parameter_accepted_by_python_stdio_transport(
        self, tmp_path, stdio_script_with_stderr
    ):
        """Test that log_file parameter can be set on PythonStdioTransport"""
        log_file_path = tmp_path / "errors.log"
        transport = PythonStdioTransport(
            script_path=stdio_script_with_stderr, log_file=log_file_path
        )
        assert transport.log_file == log_file_path

    async def test_log_file_parameter_accepts_textio(self, tmp_path):
        """Test that log_file parameter can accept a TextIO object"""
        log_file_path = tmp_path / "errors.log"
        with open(log_file_path, "w") as log_file:
            transport = StdioTransport(
                command="python", args=["script.py"], log_file=log_file
            )
            assert transport.log_file == log_file

    async def test_log_file_captures_stderr_output_with_path(
        self, tmp_path, stdio_script_with_stderr
    ):
        """Test that stderr output is written to the log_file when using Path"""
        log_file_path = tmp_path / "errors.log"
        transport = PythonStdioTransport(
            script_path=stdio_script_with_stderr, log_file=log_file_path
        )
        client = Client(transport=transport)

        async with client:
            await client.call_tool("write_error", {"message": "Test error message"})
            # Need to wait a bit for stderr to flush
            await asyncio.sleep(0.1)

        content = log_file_path.read_text()
        assert "Test error message" in content

    async def test_log_file_captures_stderr_output_with_textio(
        self, tmp_path, stdio_script_with_stderr
    ):
        """Test that stderr output is written to the log_file when using TextIO"""
        log_file_path = tmp_path / "errors.log"
        with open(log_file_path, "w") as log_file:
            transport = PythonStdioTransport(
                script_path=stdio_script_with_stderr, log_file=log_file
            )
            client = Client(transport=transport)

            async with client:
                await client.call_tool(
                    "write_error", {"message": "Test error with TextIO"}
                )
                # Need to wait a bit for stderr to flush
                await asyncio.sleep(0.1)

        content = log_file_path.read_text()
        assert "Test error with TextIO" in content

    async def test_log_file_none_uses_default_behavior(
        self, tmp_path, stdio_script_with_stderr
    ):
        """Test that log_file=None uses default stderr handling"""
        transport = PythonStdioTransport(
            script_path=stdio_script_with_stderr, log_file=None
        )
        client = Client(transport=transport)

        async with client:
            # Should work without error even without explicit log_file
            result = await client.call_tool(
                "write_error", {"message": "Default stderr"}
            )
            assert result.data == "Default stderr"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_stdio.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/http/test_custom_routes.py | import pytest
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route
from fastmcp import FastMCP
from fastmcp.server.http import create_sse_app, create_streamable_http_app
class TestCustomRoutes:
    """Verify user-registered custom routes survive every app-construction path."""

    @staticmethod
    def _has_route(app, path: str) -> bool:
        """Return True when *app* exposes a plain Route at *path*."""
        return any(
            isinstance(route, Route) and route.path == path for route in app.routes
        )

    @pytest.fixture
    def server_with_custom_route(self):
        """Create a FastMCP server with a custom route."""
        server = FastMCP()

        @server.custom_route("/custom-route", methods=["GET"])
        async def custom_route(request: Request):
            return JSONResponse({"message": "custom route"})

        return server

    def test_custom_routes_apply_filtering_http_app(self, server_with_custom_route):
        """Test that custom routes are included when using server.http_app()."""
        app = server_with_custom_route.http_app()
        assert self._has_route(app, "/custom-route"), (
            "Custom route was not found in app routes"
        )

    def test_custom_routes_via_streamable_http_app_direct(
        self, server_with_custom_route
    ):
        """Test that custom routes are included when using create_streamable_http_app directly."""
        app = create_streamable_http_app(
            server=server_with_custom_route, streamable_http_path="/api"
        )
        assert self._has_route(app, "/custom-route"), (
            "Custom route was not found in app routes"
        )

    def test_custom_routes_via_sse_app_direct(self, server_with_custom_route):
        """Test that custom routes are included when using create_sse_app directly."""
        app = create_sse_app(
            server=server_with_custom_route, message_path="/message", sse_path="/sse/"
        )
        assert self._has_route(app, "/custom-route"), (
            "Custom route was not found in app routes"
        )

    def test_multiple_custom_routes(
        self,
    ):
        """Test that multiple custom routes are included in both methods."""
        server = FastMCP()
        custom_paths = ["/route1", "/route2", "/route3"]

        # Register one handler per path.
        for path in custom_paths:

            @server.custom_route(path, methods=["GET"])
            async def custom_route(request: Request):
                return JSONResponse({"message": f"route {path}"})

        app1 = server.http_app()
        app2 = create_streamable_http_app(server=server, streamable_http_path="/api")

        for path in custom_paths:
            assert self._has_route(app1, path), (
                f"Route {path} not found in server.http_app()"
            )
            assert self._has_route(app2, path), (
                f"Route {path} not found in create_streamable_http_app()"
            )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/http/test_custom_routes.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/http/test_http_dependencies.py | import json
import pytest
from mcp.types import TextContent, TextResourceContents
from fastmcp.client import Client
from fastmcp.client.transports import SSETransport, StreamableHttpTransport
from fastmcp.server.dependencies import get_http_request
from fastmcp.server.server import FastMCP
from fastmcp.utilities.tests import run_server_async
def fastmcp_server():
    """Build a FastMCP server whose tool, resource, and prompt each echo the
    incoming HTTP request headers, so tests can verify header propagation."""
    server = FastMCP()

    # Add a tool
    @server.tool
    def get_headers_tool() -> dict[str, str]:
        """Get the HTTP headers from the request."""
        request = get_http_request()
        return dict(request.headers)

    @server.resource(uri="request://headers")
    async def get_headers_resource() -> str:
        # json is already imported at module level; the previous function-local
        # `import json` was redundant shadowing.
        request = get_http_request()
        return json.dumps(dict(request.headers))

    # Add a prompt
    @server.prompt
    def get_headers_prompt() -> str:
        """Get the HTTP headers from the request."""
        request = get_http_request()
        return json.dumps(dict(request.headers))

    return server
@pytest.fixture
async def shttp_server():
    """Yield the URL of a header-echo server running over streamable HTTP."""
    async with run_server_async(fastmcp_server(), transport="http") as url:
        yield url
@pytest.fixture
async def sse_server():
    """Yield the URL of a header-echo server running over SSE."""
    async with run_server_async(fastmcp_server(), transport="sse") as url:
        yield url
async def test_http_headers_resource_shttp(shttp_server: str):
    """A custom client header is visible inside a resource over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource("request://headers")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        received = json.loads(first.text)
        assert "x-demo-header" in received
        assert received["x-demo-header"] == "ABC"
async def test_http_headers_resource_sse(sse_server: str):
    """A custom client header is visible inside a resource over SSE."""
    transport = SSETransport(sse_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource("request://headers")
        first = contents[0]
        assert isinstance(first, TextResourceContents)
        received = json.loads(first.text)
        assert "x-demo-header" in received
        assert received["x-demo-header"] == "ABC"
async def test_http_headers_tool_shttp(shttp_server: str):
    """A custom client header is visible inside a tool over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("get_headers_tool")
        assert "x-demo-header" in outcome.data
        assert outcome.data["x-demo-header"] == "ABC"
async def test_http_headers_tool_sse(sse_server: str):
    """A custom client header is visible inside a tool over SSE."""
    transport = SSETransport(sse_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("get_headers_tool")
        assert "x-demo-header" in outcome.data
        assert outcome.data["x-demo-header"] == "ABC"
async def test_http_headers_prompt_shttp(shttp_server: str):
    """A custom client header is visible inside a prompt over streamable HTTP."""
    transport = StreamableHttpTransport(shttp_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        prompt_result = await client.get_prompt("get_headers_prompt")
        content = prompt_result.messages[0].content
        assert isinstance(content, TextContent)
        received = json.loads(content.text)
        assert "x-demo-header" in received
        assert received["x-demo-header"] == "ABC"
async def test_http_headers_prompt_sse(sse_server: str):
    """A custom client header is visible inside a prompt over SSE."""
    transport = SSETransport(sse_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        prompt_result = await client.get_prompt("get_headers_prompt")
        content = prompt_result.messages[0].content
        assert isinstance(content, TextContent)
        received = json.loads(content.text)
        assert "x-demo-header" in received
        assert received["x-demo-header"] == "ABC"
async def test_get_http_headers_excludes_content_type(sse_server: str):
    """Test that get_http_headers() excludes content-type header (issue #3097).

    This prevents HTTP 415 errors when forwarding headers to downstream APIs
    that require specific Content-Type headers (e.g., application/vnd.api+json).
    """
    from fastmcp.server.dependencies import get_http_headers

    server = FastMCP()

    @server.tool
    def check_excluded_headers() -> dict[str, str]:
        """Check that problematic headers are excluded from get_http_headers()."""
        return get_http_headers()

    # NOTE: this test spins up its own server rather than using the sse_server
    # fixture because it needs a tool that calls get_http_headers().
    async with run_server_async(server, transport="sse") as url:
        async with Client(
            transport=SSETransport(
                url,
                headers={
                    "Content-Type": "application/json",
                    "Accept": "application/json",
                    "X-Custom-Header": "should-be-included",
                },
            )
        ) as client:
            result = await client.call_tool("check_excluded_headers")
            headers = result.data

            # These headers should be excluded
            assert "content-type" not in headers
            assert "accept" not in headers
            assert "host" not in headers
            assert "content-length" not in headers

            # Custom headers should be included
            assert "x-custom-header" in headers
            assert headers["x-custom-header"] == "should-be-included"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/http/test_http_dependencies.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_logging.py | import asyncio
import logging
from unittest.mock import AsyncMock, Mock, patch
import anyio
import pytest
from fastmcp.server.server import FastMCP
class CustomLogFormatterForTest(logging.Formatter):
    """Formatter with a recognizable prefix so tests can spot it in output."""

    def format(self, record: logging.LogRecord) -> str:
        fields = (record.levelname, record.name, record.getMessage())
        return "TEST_FORMAT::" + "::".join(fields)
@pytest.fixture
def mcp_server() -> FastMCP:
    # Fresh server per test; the name only aids log readability.
    return FastMCP(name="TestLogServer")
@patch("fastmcp.server.mixins.transport.uvicorn.Server")
@patch("fastmcp.server.mixins.transport.uvicorn.Config")
async def test_uvicorn_logging_default_level(
    mock_uvicorn_config_constructor: Mock,
    mock_uvicorn_server_constructor: Mock,
    mcp_server: FastMCP,
):
    """Tests that FastMCP passes log_level to uvicorn.Config if no log_config is given."""
    # NOTE: @patch decorators apply bottom-up, so the Config mock is the first
    # parameter and the Server mock the second.
    mock_server_instance = AsyncMock()
    mock_uvicorn_server_constructor.return_value = mock_server_instance
    # Keep the mocked serve() pending until we explicitly release it below.
    serve_finished_event = anyio.Event()
    mock_server_instance.serve.side_effect = serve_finished_event.wait

    test_log_level = "warning"
    server_task = asyncio.create_task(
        mcp_server.run_http_async(log_level=test_log_level, port=8003)
    )
    # Wait until the server has constructed uvicorn.Config before asserting.
    await mcp_server._started.wait()

    mock_uvicorn_config_constructor.assert_called_once()
    _, kwargs_config = mock_uvicorn_config_constructor.call_args
    assert kwargs_config.get("log_level") == test_log_level.lower()
    assert "log_config" not in kwargs_config
    mock_uvicorn_server_constructor.assert_called_once_with(
        mock_uvicorn_config_constructor.return_value
    )
    mock_server_instance.serve.assert_awaited_once()

    # Signal the mock to finish and cancel with timeout
    # Required for uvicorn 0.39+ due to context isolation
    serve_finished_event.set()
    server_task.cancel()
    try:
        await asyncio.wait_for(server_task, timeout=2.0)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        pass
@patch("fastmcp.server.mixins.transport.uvicorn.Server")
@patch("fastmcp.server.mixins.transport.uvicorn.Config")
async def test_uvicorn_logging_with_custom_log_config(
    mock_uvicorn_config_constructor: Mock,
    mock_uvicorn_server_constructor: Mock,
    mcp_server: FastMCP,
):
    """Tests that FastMCP passes log_config to uvicorn.Config and not log_level."""
    # @patch decorators apply bottom-up: Config mock arrives first, Server second.
    mock_server_instance = AsyncMock()
    mock_uvicorn_server_constructor.return_value = mock_server_instance
    # Keep the mocked serve() pending until released at the end of the test.
    serve_finished_event = anyio.Event()
    mock_server_instance.serve.side_effect = serve_finished_event.wait

    # Minimal dictConfig that routes uvicorn.error through the custom formatter.
    sample_log_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "test_formatter": {
                "()": "tests.server.test_logging.CustomLogFormatterForTest"
            }
        },
        "handlers": {
            "test_handler": {
                "formatter": "test_formatter",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
            }
        },
        "loggers": {
            "uvicorn.error": {
                "handlers": ["test_handler"],
                "level": "INFO",
                "propagate": False,
            }
        },
    }

    server_task = asyncio.create_task(
        mcp_server.run_http_async(
            uvicorn_config={"log_config": sample_log_config}, port=8004
        )
    )
    # Wait until uvicorn.Config has been constructed before asserting on it.
    await mcp_server._started.wait()

    mock_uvicorn_config_constructor.assert_called_once()
    _, kwargs_config = mock_uvicorn_config_constructor.call_args
    assert kwargs_config.get("log_config") == sample_log_config
    assert "log_level" not in kwargs_config
    mock_uvicorn_server_constructor.assert_called_once_with(
        mock_uvicorn_config_constructor.return_value
    )
    mock_server_instance.serve.assert_awaited_once()

    # Signal the mock to finish and cancel with timeout
    # Required for uvicorn 0.39+ due to context isolation
    serve_finished_event.set()
    server_task.cancel()
    try:
        await asyncio.wait_for(server_task, timeout=2.0)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        pass
@patch("fastmcp.server.mixins.transport.uvicorn.Server")
@patch("fastmcp.server.mixins.transport.uvicorn.Config")
async def test_uvicorn_logging_custom_log_config_overrides_log_level_param(
mock_uvicorn_config_constructor: Mock,
mock_uvicorn_server_constructor: Mock,
mcp_server: FastMCP,
):
"""Tests log_config precedence if log_level is also passed to run_http_async."""
mock_server_instance = AsyncMock()
mock_uvicorn_server_constructor.return_value = mock_server_instance
serve_finished_event = anyio.Event()
mock_server_instance.serve.side_effect = serve_finished_event.wait
sample_log_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"test_formatter": {
"()": "tests.server.test_logging.CustomLogFormatterForTest"
}
},
"handlers": {
"test_handler": {
"formatter": "test_formatter",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
}
},
"loggers": {
"uvicorn.error": {
"handlers": ["test_handler"],
"level": "INFO",
"propagate": False,
}
},
}
explicit_log_level = "debug"
server_task = asyncio.create_task(
mcp_server.run_http_async(
log_level=explicit_log_level,
uvicorn_config={"log_config": sample_log_config},
port=8005,
)
)
await mcp_server._started.wait()
mock_uvicorn_config_constructor.assert_called_once()
_, kwargs_config = mock_uvicorn_config_constructor.call_args
assert kwargs_config.get("log_config") == sample_log_config
assert "log_level" not in kwargs_config
mock_uvicorn_server_constructor.assert_called_once_with(
mock_uvicorn_config_constructor.return_value
)
mock_server_instance.serve.assert_awaited_once()
# Signal the mock to finish and cancel with timeout
# Required for uvicorn 0.39+ due to context isolation
serve_finished_event.set()
server_task.cancel()
try:
await asyncio.wait_for(server_task, timeout=2.0)
except (asyncio.CancelledError, asyncio.TimeoutError):
pass
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_logging.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_app_state.py | from fastmcp.server import FastMCP
from fastmcp.server.http import create_sse_app, create_streamable_http_app
def test_http_app_sets_mcp_server_state():
    """http_app() exposes its owning FastMCP instance via app.state."""
    mcp = FastMCP(name="StateTest")
    http_app = mcp.http_app()
    assert http_app.state.fastmcp_server is mcp
def test_http_app_sse_sets_mcp_server_state():
    """http_app(transport="sse") exposes its owning FastMCP instance via app.state."""
    mcp = FastMCP(name="StateTest")
    sse_app = mcp.http_app(transport="sse")
    assert sse_app.state.fastmcp_server is mcp
def test_create_streamable_http_app_sets_state():
    """create_streamable_http_app records the server on app.state."""
    mcp = FastMCP(name="StateTest")
    built = create_streamable_http_app(mcp, "/mcp/")
    assert built.state.fastmcp_server is mcp
def test_create_sse_app_sets_state():
    """create_sse_app records the server on app.state."""
    mcp = FastMCP(name="StateTest")
    built = create_sse_app(mcp, message_path="/message", sse_path="/sse/")
    assert built.state.fastmcp_server is mcp
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_app_state.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/in_memory_proxy_example.py | """
This example demonstrates how to set up and use an in-memory FastMCP proxy.
It illustrates the pattern:
1. Create an original FastMCP server with some tools.
2. Create a proxy FastMCP server using ``FastMCP.as_proxy(original_server)``.
3. Use another Client to connect to the proxy server (in-memory) and interact with the original server's tools through the proxy.
"""
import asyncio
from mcp.types import TextContent
from fastmcp import FastMCP
from fastmcp.client import Client
class EchoService:
    """A simple service to demonstrate with"""

    def echo(self, message: str) -> str:
        """Return *message* prefixed to show it passed through the original server."""
        return "Original server echoes: " + message
async def main():
    """Walk through the proxy pattern: build a server, wrap it, connect, call."""
    print("--- In-Memory FastMCP Proxy Example ---")
    print("This example will walk through setting up an in-memory proxy.")
    print("-----------------------------------------")

    # 1. Original Server Setup
    print(
        "\nStep 1: Setting up the Original Server (OriginalEchoServer) with an 'echo' tool..."
    )
    origin = FastMCP("OriginalEchoServer")
    origin.add_tool(EchoService().echo)
    print(f" -> Original Server '{origin.name}' created.")

    # 2. Proxy Server Creation
    print("\nStep 2: Creating the Proxy Server (InMemoryProxy)...")
    print(f" (Using FastMCP.as_proxy to wrap '{origin.name}' directly)")
    proxy = FastMCP.as_proxy(origin, name="InMemoryProxy")
    print(f" -> Proxy Server '{proxy.name}' created, proxying '{origin.name}'.")

    # 3. Interacting via Proxy
    print("\nStep 3: Using a new Client to connect to the Proxy Server and interact...")
    async with Client(proxy) as client:
        print(f" -> Successfully connected to proxy '{proxy.name}'.")

        print("\n Listing tools available via proxy...")
        tool_list = await client.list_tools()
        if not tool_list:
            print(" No tools found via proxy.")
        else:
            print(" Available Tools:")
            for tool in tool_list:
                print(f" - {tool.name} (Description: {tool.description or 'N/A'})")

        message = "Hello, simplified proxied world!"
        print(f"\n Calling 'echo' tool via proxy with message: '{message}'")
        try:
            response = await client.call_tool("echo", {"message": message})
            if response and isinstance(response[0], TextContent):
                print(f" Result from proxied 'echo' call: '{response[0].text}'")
            else:
                print(
                    f" Error: Unexpected result format from proxied 'echo' call: {response}"
                )
        except Exception as e:
            print(f" Error calling 'echo' tool via proxy: {e}")

    print("\n-----------------------------------------")
    print("--- In-Memory Proxy Example Finished ---")
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/in_memory_proxy_example.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/utilities/exceptions.py | from collections.abc import Callable, Iterable, Mapping
from typing import Any
import httpx
import mcp.types
from exceptiongroup import BaseExceptionGroup
from mcp import McpError
import fastmcp
def iter_exc(group: BaseExceptionGroup):
for exc in group.exceptions:
if isinstance(exc, BaseExceptionGroup):
yield from iter_exc(exc)
else:
yield exc
def _exception_handler(group: BaseExceptionGroup):
    """Re-raise the first leaf exception found in *group*.

    Connect timeouts are translated into an ``McpError`` carrying a
    REQUEST_TIMEOUT code; any other leaf is re-raised as-is.
    """
    for leaf in iter_exc(group):
        if not isinstance(leaf, httpx.ConnectTimeout):
            raise leaf
        raise McpError(
            error=mcp.types.ErrorData(
                code=httpx.codes.REQUEST_TIMEOUT,
                message="Timed out while waiting for response.",
            )
        )
# Catch handlers for TaskGroup ExceptionGroups: unwrap the group and re-raise a
# single leaf exception instead, which makes debugging much saner than working
# through a raw exception group.
_catch_handlers: Mapping[
    type[BaseException] | Iterable[type[BaseException]],
    Callable[[BaseExceptionGroup[Any]], Any],
] = {
    Exception: _exception_handler,
}
def get_catch_handlers() -> Mapping[
    type[BaseException] | Iterable[type[BaseException]],
    Callable[[BaseExceptionGroup[Any]], Any],
]:
    """Return the group-unwrapping handlers, or an empty mapping when disabled.

    The behavior is gated by the
    ``client_raise_first_exceptiongroup_error`` setting.
    """
    enabled = fastmcp.settings.client_raise_first_exceptiongroup_error
    return _catch_handlers if enabled else {}
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/exceptions.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/client/test_streamable_http.py | import asyncio
import json
import sys
from contextlib import suppress
from unittest.mock import AsyncMock, call
import pytest
from mcp import McpError
from mcp.types import TextResourceContents
from fastmcp import Context
from fastmcp.client import Client
from fastmcp.client.transports import StreamableHttpTransport
from fastmcp.server.dependencies import get_http_request
from fastmcp.server.server import FastMCP
from fastmcp.utilities.tests import run_server_async
def create_test_server() -> FastMCP:
    """Create a FastMCP server with tools, resources, and prompts.

    The registered names/URIs are relied on by the tests in this module:
    tools ``greet``, ``elicit``, ``add``, ``sleep``, ``greet_with_progress``;
    resources ``data://users``, ``data://user/{user_id}``, ``request://headers``;
    prompt ``welcome``.
    """
    server = FastMCP("TestServer")

    @server.tool
    def greet(name: str) -> str:
        """Greet someone by name."""
        return f"Hello, {name}!"

    @server.tool
    async def elicit(ctx: Context) -> str:
        """Elicit a response from the user."""
        result = await ctx.elicit("What is your name?", response_type=str)
        if result.action == "accept":
            return f"You said your name was: {result.data}!"  # ty: ignore[unresolved-attribute]
        else:
            return "No name provided"

    @server.tool
    def add(a: int, b: int) -> int:
        """Add two numbers together."""
        return a + b

    @server.tool
    async def sleep(seconds: float) -> str:
        """Sleep for a given number of seconds."""
        await asyncio.sleep(seconds)
        return f"Slept for {seconds} seconds"

    @server.tool
    async def greet_with_progress(name: str, ctx: Context) -> str:
        """Report progress for a greeting."""
        # Two notifications so the client-side progress handler can be
        # asserted on both content and ordering.
        await ctx.report_progress(0.5, 1.0, "Greeting in progress")
        await ctx.report_progress(0.75, 1.0, "Almost there!")
        return f"Hello, {name}!"

    @server.resource(uri="data://users")
    async def get_users() -> str:
        import json

        return json.dumps(["Alice", "Bob", "Charlie"])

    @server.resource(uri="data://user/{user_id}")
    async def get_user(user_id: str) -> str:
        import json

        return json.dumps({"id": user_id, "name": f"User {user_id}", "active": True})

    @server.resource(uri="request://headers")
    async def get_headers() -> str:
        import json

        # Echo the incoming HTTP headers back so tests can verify that
        # client-supplied headers reach the server.
        request = get_http_request()
        return json.dumps(dict(request.headers))

    @server.prompt
    def welcome(name: str) -> str:
        """Example greeting prompt."""
        return f"Welcome to FastMCP, {name}!"

    return server
@pytest.fixture
async def streamable_http_server(request):
    """Start a test server and return its URL.

    When parametrized with ``True`` (indirect), the server runs in stateless
    HTTP mode, toggled via the global ``fastmcp.settings`` object.

    Fix: the setting is now restored in a ``finally`` block — previously a
    failing test body would skip the reset and leak ``stateless_http=True``
    into every subsequent test.
    """
    import fastmcp

    stateless_http = getattr(request, "param", False)
    if stateless_http:
        fastmcp.settings.stateless_http = True
    try:
        server = create_test_server()
        async with run_server_async(server) as url:
            yield url
    finally:
        # Always restore the global setting, even when the test fails.
        if stateless_http:
            fastmcp.settings.stateless_http = False
@pytest.fixture
async def streamable_http_server_with_streamable_http_alias():
    """Test that the "streamable-http" transport alias works."""
    mcp = create_test_server()
    async with run_server_async(mcp, transport="streamable-http") as server_url:
        yield server_url
@pytest.fixture
async def nested_server():
    """Test nested server mounts with Starlette.

    Fix: the uvicorn shutdown now runs in a ``finally`` block. Code after a
    bare ``yield`` in an async-generator fixture is skipped when the test body
    raises (pytest throws into the generator), which previously leaked the
    running server task. This also matches the sibling ``nested_sse_server``
    fixture in tests/client/test_sse.py.
    """
    import uvicorn
    from starlette.applications import Starlette
    from starlette.routing import Mount

    from fastmcp.utilities.http import find_available_port

    mcp_server = create_test_server()
    mcp_app = mcp_server.http_app(path="/final/mcp")

    # Nest the app under multiple mounts to test URL resolution
    inner = Starlette(routes=[Mount("/nest-inner", app=mcp_app)])
    outer = Starlette(
        routes=[Mount("/nest-outer", app=inner)], lifespan=mcp_app.lifespan
    )

    # Run uvicorn with the nested ASGI app
    port = find_available_port()
    config = uvicorn.Config(
        app=outer,
        host="127.0.0.1",
        port=port,
        log_level="critical",
        ws="websockets-sansio",
        timeout_graceful_shutdown=0,
    )
    uvicorn_server = uvicorn.Server(config)
    server_task = asyncio.create_task(uvicorn_server.serve())
    await asyncio.sleep(0.1)
    try:
        yield f"http://127.0.0.1:{port}/nest-outer/nest-inner/final/mcp"
    finally:
        # Graceful shutdown - required for uvicorn 0.39+ due to context isolation
        uvicorn_server.should_exit = True
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            await asyncio.wait_for(server_task, timeout=2.0)
async def test_ping(streamable_http_server: str):
    """Test pinging the server."""
    transport = StreamableHttpTransport(streamable_http_server)
    async with Client(transport=transport) as client:
        assert await client.ping() is True
async def test_ping_with_streamable_http_alias(
    streamable_http_server_with_streamable_http_alias: str,
):
    """Test pinging the server."""
    transport = StreamableHttpTransport(
        streamable_http_server_with_streamable_http_alias
    )
    async with Client(transport=transport) as client:
        assert await client.ping() is True
async def test_http_headers(streamable_http_server: str):
    """Test getting HTTP headers from the server."""
    transport = StreamableHttpTransport(
        streamable_http_server, headers={"X-DEMO-HEADER": "ABC"}
    )
    async with Client(transport=transport) as client:
        contents = await client.read_resource("request://headers")
        assert isinstance(contents[0], TextResourceContents)
        # Header names are lowercased by the HTTP stack on the way through.
        received = json.loads(contents[0].text)
        assert received.get("x-demo-header") == "ABC"
async def test_session_id_callback(streamable_http_server: str):
    """Test getting mcp-session-id from the transport."""
    transport = StreamableHttpTransport(streamable_http_server)
    # No session exists before the client connects.
    assert transport.get_session_id() is None
    async with Client(transport=transport):
        assert transport.get_session_id() is not None
@pytest.mark.parametrize("streamable_http_server", [True, False], indirect=True)
async def test_greet_with_progress_tool(streamable_http_server: str):
    """Test calling the greet tool."""
    on_progress = AsyncMock(return_value=None)
    transport = StreamableHttpTransport(streamable_http_server)
    async with Client(transport=transport, progress_handler=on_progress) as client:
        outcome = await client.call_tool("greet_with_progress", {"name": "Alice"})
        assert outcome.data == "Hello, Alice!"
        # Both notifications must have arrived, in order.
        on_progress.assert_has_calls(
            [
                call(0.5, 1.0, "Greeting in progress"),
                call(0.75, 1.0, "Almost there!"),
            ]
        )
@pytest.mark.parametrize("streamable_http_server", [True, False], indirect=True)
async def test_elicitation_tool(streamable_http_server: str, request):
    """Test calling the elicitation tool in both stateless and stateful modes."""
    if request.node.callspec.params.get("streamable_http_server", False):
        pytest.xfail("Elicitation is not supported in stateless HTTP mode")

    async def elicitation_handler(message, response_type, params, ctx):
        return {"value": "Alice"}

    async with Client(
        transport=StreamableHttpTransport(streamable_http_server),
        elicitation_handler=elicitation_handler,
    ) as client:
        outcome = await client.call_tool("elicit")
        assert outcome.data == "You said your name was: Alice!"
@pytest.mark.parametrize("streamable_http_server", [True], indirect=True)
async def test_stateless_http_rejects_get_sse(streamable_http_server: str):
    """Stateless servers should reject GET SSE requests with 405."""
    import httpx

    async with httpx.AsyncClient() as raw_client:
        response = await raw_client.get(streamable_http_server)
    assert response.status_code == 405
@pytest.mark.parametrize("streamable_http_server", [True], indirect=True)
async def test_stateless_http_still_accepts_post(streamable_http_server: str):
    """Stateless servers should still handle POST requests normally."""
    transport = StreamableHttpTransport(streamable_http_server)
    async with Client(transport=transport) as client:
        outcome = await client.call_tool("greet", {"name": "World"})
        assert outcome.data == "Hello, World!"
async def test_nested_streamable_http_server_resolves_correctly(nested_server: str):
    """Test patch for https://github.com/modelcontextprotocol/python-sdk/pull/659"""
    transport = StreamableHttpTransport(nested_server)
    async with Client(transport=transport) as client:
        assert await client.ping() is True
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Timeout tests are flaky on Windows. Timeouts *are* supported but the tests are unreliable.",
)
class TestTimeout:
    """Client- and call-level timeout behavior over streamable HTTP."""

    async def test_timeout(self, streamable_http_server: str):
        # note this transport behaves differently than others and raises
        # McpError from the *client* context
        with pytest.raises(McpError, match="Timed out"):
            client = Client(
                transport=StreamableHttpTransport(streamable_http_server),
                timeout=0.02,
            )
            async with client:
                await client.call_tool("sleep", {"seconds": 0.05})

    async def test_timeout_tool_call(self, streamable_http_server: str):
        transport = StreamableHttpTransport(streamable_http_server)
        async with Client(transport=transport) as client:
            with pytest.raises(McpError):
                await client.call_tool("sleep", {"seconds": 0.2}, timeout=0.1)

    async def test_timeout_tool_call_overrides_client_timeout(
        self, streamable_http_server: str
    ):
        # Per-call timeout (0.1s) wins even though the client allows 2s.
        client = Client(
            transport=StreamableHttpTransport(streamable_http_server),
            timeout=2,
        )
        async with client:
            with pytest.raises(McpError):
                await client.call_tool("sleep", {"seconds": 0.2}, timeout=0.1)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_streamable_http.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/test_sse.py | import asyncio
import json
import sys
import pytest
from mcp import McpError
from mcp.types import TextResourceContents
from fastmcp.client import Client
from fastmcp.client.transports import SSETransport
from fastmcp.server.dependencies import get_http_request
from fastmcp.server.http import create_sse_app
from fastmcp.server.server import FastMCP
from fastmcp.utilities.tests import run_server_async
def create_test_server() -> FastMCP:
    """Create a FastMCP server with tools, resources, and prompts.

    The registered names/URIs are relied on by the tests in this module:
    tools ``greet``, ``add``, ``sleep``; resources ``data://users``,
    ``data://user/{user_id}``, ``request://headers``; prompt ``welcome``.
    """
    server = FastMCP("TestServer")

    @server.tool
    def greet(name: str) -> str:
        """Greet someone by name."""
        return f"Hello, {name}!"

    @server.tool
    def add(a: int, b: int) -> int:
        """Add two numbers together."""
        return a + b

    @server.tool
    async def sleep(seconds: float) -> str:
        """Sleep for a given number of seconds."""
        await asyncio.sleep(seconds)
        return f"Slept for {seconds} seconds"

    @server.resource(uri="data://users")
    async def get_users() -> str:
        import json

        return json.dumps(["Alice", "Bob", "Charlie"])

    @server.resource(uri="data://user/{user_id}")
    async def get_user(user_id: str) -> str:
        import json

        return json.dumps({"id": user_id, "name": f"User {user_id}", "active": True})

    @server.resource(uri="request://headers")
    async def get_headers() -> str:
        import json

        # Echo the incoming HTTP headers back so tests can verify that
        # client-supplied headers reach the server.
        request = get_http_request()
        return json.dumps(dict(request.headers))

    @server.prompt
    def welcome(name: str) -> str:
        """Example greeting prompt."""
        return f"Welcome to FastMCP, {name}!"

    return server
@pytest.fixture
async def sse_server():
    """Start a test server with SSE transport and return its URL."""
    mcp = create_test_server()
    async with run_server_async(mcp, transport="sse") as server_url:
        yield server_url
async def test_ping(sse_server: str):
    """Test pinging the server."""
    transport = SSETransport(sse_server)
    async with Client(transport=transport) as client:
        assert await client.ping() is True
async def test_http_headers(sse_server: str):
    """Test getting HTTP headers from the server."""
    transport = SSETransport(sse_server, headers={"X-DEMO-HEADER": "ABC"})
    async with Client(transport=transport) as client:
        contents = await client.read_resource("request://headers")
        assert isinstance(contents[0], TextResourceContents)
        # Header names are lowercased by the HTTP stack on the way through.
        received = json.loads(contents[0].text)
        assert received.get("x-demo-header") == "ABC"
@pytest.fixture
async def sse_server_custom_path():
    """Start a test server with SSE on a custom path."""
    mcp = create_test_server()
    async with run_server_async(mcp, transport="sse", path="/help") as server_url:
        yield server_url
@pytest.fixture
async def nested_sse_server():
    """Test nested server mounts with SSE.

    Fix: shutdown now bounds the wait with ``asyncio.wait_for`` (and sets
    ``timeout_graceful_shutdown=0``) so a stuck graceful shutdown cannot hang
    the whole suite — a bare ``await server_task`` has no timeout. This matches
    the sibling ``nested_server`` fixture in tests/client/test_streamable_http.py.
    """
    import uvicorn
    from starlette.applications import Starlette
    from starlette.routing import Mount

    from fastmcp.utilities.http import find_available_port

    server = create_test_server()
    sse_app = create_sse_app(
        server=server, message_path="/mcp/messages", sse_path="/mcp/sse/"
    )

    # Nest the app under multiple mounts to test URL resolution
    inner = Starlette(routes=[Mount("/nest-inner", app=sse_app)])
    outer = Starlette(routes=[Mount("/nest-outer", app=inner)])

    # Run uvicorn with the nested ASGI app
    port = find_available_port()
    config = uvicorn.Config(
        app=outer,
        host="127.0.0.1",
        port=port,
        log_level="critical",
        ws="websockets-sansio",
        timeout_graceful_shutdown=0,
    )
    uvicorn_server = uvicorn.Server(config)
    server_task = asyncio.create_task(uvicorn_server.serve())
    await asyncio.sleep(0.1)
    try:
        yield f"http://127.0.0.1:{port}/nest-outer/nest-inner/mcp/sse/"
    finally:
        # Graceful shutdown - required for uvicorn 0.39+ due to context isolation
        uvicorn_server.should_exit = True
        try:
            await asyncio.wait_for(server_task, timeout=2.0)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass
async def test_run_server_on_path(sse_server_custom_path: str):
    """Test running server on a custom path."""
    transport = SSETransport(sse_server_custom_path)
    async with Client(transport=transport) as client:
        assert await client.ping() is True
async def test_nested_sse_server_resolves_correctly(nested_sse_server: str):
    """Test patch for https://github.com/modelcontextprotocol/python-sdk/pull/659"""
    transport = SSETransport(nested_sse_server)
    async with Client(transport=transport) as client:
        assert await client.ping() is True
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Timeout tests are flaky on Windows. Timeouts *are* supported but the tests are unreliable.",
)
class TestTimeout:
    """Client- and call-level timeout behavior over SSE."""

    async def test_timeout(self, sse_server: str):
        expected = (
            "Timed out while waiting for response to ClientRequest. "
            "Waited 0.03 seconds"
        )
        with pytest.raises(McpError, match=expected):
            client = Client(transport=SSETransport(sse_server), timeout=0.03)
            async with client:
                await client.call_tool("sleep", {"seconds": 0.1})

    async def test_timeout_tool_call(self, sse_server: str):
        async with Client(transport=SSETransport(sse_server)) as client:
            with pytest.raises(McpError, match="Timed out"):
                await client.call_tool("sleep", {"seconds": 0.1}, timeout=0.03)

    async def test_timeout_tool_call_overrides_client_timeout_if_lower(
        self, sse_server: str
    ):
        # The lower per-call timeout (0.03s) beats the client timeout (2s).
        client = Client(transport=SSETransport(sse_server), timeout=2)
        async with client:
            with pytest.raises(McpError, match="Timed out"):
                await client.call_tool("sleep", {"seconds": 0.1}, timeout=0.03)

    async def test_timeout_client_timeout_does_not_override_tool_call_timeout_if_lower(
        self, sse_server: str
    ):
        """
        With SSE, the tool call timeout always takes precedence over the client.

        Note: on Windows, the behavior appears unpredictable.
        """
        client = Client(transport=SSETransport(sse_server), timeout=0.5)
        async with client:
            # Succeeds: the 2s call timeout wins over the 0.5s client timeout.
            await client.call_tool("sleep", {"seconds": 0.8}, timeout=2)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/test_sse.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_context.py | from typing import Any, cast
from unittest.mock import MagicMock
import pytest
from mcp.types import ModelPreferences
from fastmcp.server.context import (
Context,
reset_transport,
set_transport,
)
from fastmcp.server.sampling.run import _parse_model_preferences
from fastmcp.server.server import FastMCP
@pytest.fixture
def context():
    """A bare Context bound to a throwaway FastMCP server."""
    server = FastMCP()
    return Context(fastmcp=server)
class TestParseModelPreferences:
    """Normalization of model-preference inputs (str, list, object, invalid)."""

    def test_parse_model_preferences_string(self, context):
        prefs = _parse_model_preferences("claude-haiku-4-5")
        assert isinstance(prefs, ModelPreferences)
        assert prefs.hints is not None
        assert prefs.hints[0].name == "claude-haiku-4-5"

    def test_parse_model_preferences_list(self, context):
        prefs = _parse_model_preferences(["claude-haiku-4-5", "claude"])
        assert isinstance(prefs, ModelPreferences)
        assert prefs.hints is not None
        names = [hint.name for hint in prefs.hints]
        assert names == ["claude-haiku-4-5", "claude"]

    def test_parse_model_preferences_object(self, context):
        # An existing ModelPreferences instance is passed through unchanged.
        prefs = ModelPreferences(hints=[])
        assert _parse_model_preferences(prefs) is prefs

    def test_parse_model_preferences_invalid_type(self, context):
        with pytest.raises(ValueError):
            _parse_model_preferences(model_preferences=123)  # pyright: ignore[reportArgumentType] # type: ignore[invalid-argument-type]
class TestSessionId:
    """Behavior of ``Context.session_id`` with and without HTTP headers."""

    def test_session_id_with_http_headers(self, context):
        """Test that session_id returns the value from mcp-session-id header."""
        from mcp.server.lowlevel.server import request_ctx
        from mcp.shared.context import RequestContext

        mock_headers = {"mcp-session-id": "test-session-123"}
        # Install a fake request context carrying the header; the ContextVar
        # token is always reset in `finally` so other tests are unaffected.
        token = request_ctx.set(
            RequestContext(
                request_id=0,
                meta=None,
                session=MagicMock(wraps={}),
                lifespan_context=MagicMock(),
                request=MagicMock(headers=mock_headers),
            )
        )
        try:
            assert context.session_id == "test-session-123"
        finally:
            request_ctx.reset(token)

    def test_session_id_without_http_headers(self, context):
        """Test that session_id returns a UUID when no HTTP headers are available.

        For STDIO/SSE/in-memory transports, we generate a UUID and cache it
        on the session for consistency with state operations.
        """
        import uuid

        from mcp.server.lowlevel.server import request_ctx
        from mcp.shared.context import RequestContext

        mock_session = MagicMock(wraps={})
        # No `request=` here, so no headers are available to read a session id from.
        token = request_ctx.set(
            RequestContext(
                request_id=0,
                meta=None,
                session=mock_session,
                lifespan_context=MagicMock(),
            )
        )
        try:
            # session_id should be a valid UUID for non-HTTP transports
            session_id = context.session_id
            assert uuid.UUID(session_id)  # Valid UUID format
            # Should be cached on session
            assert mock_session._fastmcp_state_prefix == session_id
        finally:
            request_ctx.reset(token)
class TestContextState:
    """Test suite for Context state functionality.

    State is keyed by session: contexts sharing a session object share state,
    while distinct sessions are isolated from each other.
    """

    async def test_context_state_basic(self):
        """Test basic get/set/delete state operations."""
        server = FastMCP("test")
        mock_session = MagicMock()  # Use same session for consistent id()
        async with Context(fastmcp=server, session=mock_session) as context:
            # Initially empty
            assert await context.get_state("test1") is None
            assert await context.get_state("test2") is None

            # Set values
            await context.set_state("test1", "value")
            await context.set_state("test2", 2)

            # Retrieve values
            assert await context.get_state("test1") == "value"
            assert await context.get_state("test2") == 2

            # Update value
            await context.set_state("test1", "new_value")
            assert await context.get_state("test1") == "new_value"

            # Delete value
            await context.delete_state("test1")
            assert await context.get_state("test1") is None

    async def test_context_state_session_isolation(self):
        """Test that different sessions have isolated state."""
        server = FastMCP("test")
        session_a = MagicMock()
        session_b = MagicMock()

        async with Context(fastmcp=server, session=session_a) as context1:
            await context1.set_state("key", "value-from-A")

        async with Context(fastmcp=server, session=session_b) as context2:
            # Session B should not see session A's state
            assert await context2.get_state("key") is None
            await context2.set_state("key", "value-from-B")
            assert await context2.get_state("key") == "value-from-B"

        # Verify session A's state is still intact
        async with Context(fastmcp=server, session=session_a) as context3:
            assert await context3.get_state("key") == "value-from-A"

    async def test_context_state_persists_across_requests(self):
        """Test that state persists across multiple context instances (requests)."""
        server = FastMCP("test")
        mock_session = MagicMock()  # Same session = same id()

        # First request sets state
        async with Context(fastmcp=server, session=mock_session) as context1:
            await context1.set_state("counter", 1)

        # Second request in same session sees the state
        async with Context(fastmcp=server, session=mock_session) as context2:
            counter = await context2.get_state("counter")
            assert counter == 1
            await context2.set_state("counter", counter + 1)

        # Third request sees updated state
        async with Context(fastmcp=server, session=mock_session) as context3:
            assert await context3.get_state("counter") == 2

    async def test_context_state_nested_contexts_share_state(self):
        """Test that nested contexts within the same session share state."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context1:
            await context1.set_state("key", "outer-value")

            async with Context(fastmcp=server, session=mock_session) as context2:
                # Nested context sees same state (same session)
                assert await context2.get_state("key") == "outer-value"

                # Nested context can modify shared state
                await context2.set_state("key", "inner-value")

            # Outer context sees the modification
            assert await context1.get_state("key") == "inner-value"

    async def test_two_clients_same_key_isolated_by_session(self):
        """Test that two different clients can store the same key independently.

        Each client gets an auto-generated session ID, and their state is isolated.
        """
        import json

        from fastmcp import Client

        server = FastMCP("test")
        # One entry appended per tool invocation below.
        stored_session_ids: list[str] = []

        @server.tool
        async def store_and_read(value: str, ctx: Context) -> dict:
            """Store a value and return all state info."""
            stored_session_ids.append(ctx.session_id)
            existing = await ctx.get_state("shared_key")
            await ctx.set_state("shared_key", value)
            new_value = await ctx.get_state("shared_key")
            return {
                "session_id": ctx.session_id,
                "existing_value": existing,
                "new_value": new_value,
            }

        # Client 1 stores "value-from-client-1"
        async with Client(server) as client1:
            result1 = await client1.call_tool(
                "store_and_read", {"value": "value-from-client-1"}
            )
            data1 = json.loads(result1.content[0].text)
            assert data1["existing_value"] is None  # First write
            assert data1["new_value"] == "value-from-client-1"
            session_id_1 = data1["session_id"]

        # Client 2 stores "value-from-client-2" with the SAME key
        async with Client(server) as client2:
            result2 = await client2.call_tool(
                "store_and_read", {"value": "value-from-client-2"}
            )
            data2 = json.loads(result2.content[0].text)
            # Client 2 should NOT see client 1's value (different session)
            assert data2["existing_value"] is None
            assert data2["new_value"] == "value-from-client-2"
            session_id_2 = data2["session_id"]

        # Verify session IDs were auto-generated and are different
        assert session_id_1 is not None
        assert session_id_2 is not None
        assert session_id_1 != session_id_2

        # Client 1 reconnects and should still see their value
        async with Client(server) as client1_again:
            # But this is a NEW session (new connection = new session ID)
            result3 = await client1_again.call_tool(
                "store_and_read", {"value": "value-from-client-1-again"}
            )
            data3 = json.loads(result3.content[0].text)
            # New session, so existing value is None
            assert data3["existing_value"] is None
            assert data3["session_id"] != session_id_1  # Different session
class TestContextStateSerializable:
    """Tests for the serializable parameter on set_state.

    ``serializable=True`` (the default) stores session-scoped, persistent
    state; ``serializable=False`` stores request-scoped state that can hold
    arbitrary objects but does not outlive the context.
    """

    async def test_set_state_serializable_false_stores_arbitrary_objects(self):
        """Non-serializable objects can be stored with serializable=False."""
        server = FastMCP("test")
        mock_session = MagicMock()

        class MyClient:
            def __init__(self):
                self.connected = True

        client = MyClient()
        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("client", client, serializable=False)
            # The exact same object comes back — no copy/serialization round-trip.
            result = await context.get_state("client")
            assert result is client
            assert result.connected is True

    async def test_set_state_serializable_false_does_not_persist_across_requests(self):
        """Non-serializable state is request-scoped and gone in a new context."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("key", object(), serializable=False)
            assert await context.get_state("key") is not None

        async with Context(fastmcp=server, session=mock_session) as context:
            assert await context.get_state("key") is None

    async def test_set_state_serializable_true_rejects_non_serializable(self):
        """Default set_state raises TypeError for non-serializable values."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            # The error message points callers at the serializable=False escape hatch.
            with pytest.raises(TypeError, match="serializable=False"):
                await context.set_state("key", object())

    async def test_set_state_serializable_false_shadows_session_state(self):
        """Request-scoped state shadows session-scoped state for the same key."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("key", "session-value")
            assert await context.get_state("key") == "session-value"
            await context.set_state("key", "request-value", serializable=False)
            assert await context.get_state("key") == "request-value"

    async def test_delete_state_removes_from_both_stores(self):
        """delete_state clears both request-scoped and session-scoped values."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("key", "session-value")
            await context.set_state("key", "request-value", serializable=False)
            assert await context.get_state("key") == "request-value"
            await context.delete_state("key")
            assert await context.get_state("key") is None

    async def test_serializable_state_still_persists_across_requests(self):
        """Serializable state (default) still persists across requests."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("key", "persistent")

        async with Context(fastmcp=server, session=mock_session) as context:
            assert await context.get_state("key") == "persistent"

    async def test_serializable_write_clears_request_scoped_shadow(self):
        """Writing serializable state clears any request-scoped shadow for the same key."""
        server = FastMCP("test")
        mock_session = MagicMock()

        async with Context(fastmcp=server, session=mock_session) as context:
            await context.set_state("key", "request-value", serializable=False)
            assert await context.get_state("key") == "request-value"
            # Serializable write should clear the shadow
            await context.set_state("key", "session-value")
            assert await context.get_state("key") == "session-value"
class TestContextMeta:
    """Test suite for Context meta functionality."""

    def test_request_context_meta_access(self, context):
        """Test that meta can be accessed from request context."""
        from mcp.server.lowlevel.server import request_ctx
        from mcp.shared.context import RequestContext

        # Create a mock meta object with attributes
        class MockMeta:
            def __init__(self):
                self.user_id = "user-123"
                self.trace_id = "trace-456"
                self.custom_field = "custom-value"

        mock_meta = MockMeta()
        token = request_ctx.set(
            RequestContext(
                request_id=0,
                meta=cast(Any, mock_meta),  # Mock object for testing
                session=MagicMock(wraps={}),
                lifespan_context=MagicMock(),
            )
        )
        try:
            # Access meta through context
            retrieved_meta = context.request_context.meta
            assert retrieved_meta is not None
            assert retrieved_meta.user_id == "user-123"
            assert retrieved_meta.trace_id == "trace-456"
            assert retrieved_meta.custom_field == "custom-value"
        finally:
            # Always restore request_ctx so a failed assertion cannot leak
            # this RequestContext into subsequent tests.
            request_ctx.reset(token)

    def test_request_context_meta_none(self, context):
        """Test that context handles None meta gracefully."""
        from mcp.server.lowlevel.server import request_ctx
        from mcp.shared.context import RequestContext

        token = request_ctx.set(
            RequestContext(
                request_id=0,
                meta=None,
                session=MagicMock(wraps={}),
                lifespan_context=MagicMock(),
            )
        )
        try:
            # Access meta through context
            retrieved_meta = context.request_context.meta
            assert retrieved_meta is None
        finally:
            # Guarantee cleanup even if the assertion fails.
            request_ctx.reset(token)
class TestTransport:
    """Test suite for Context transport property."""

    def test_transport_returns_none_outside_server_context(self, context):
        """Test that transport returns None when not in a server context."""
        assert context.transport is None

    def test_transport_returns_stdio(self, context):
        """Test that transport returns 'stdio' when set."""
        token = set_transport("stdio")
        try:
            assert context.transport == "stdio"
        finally:
            reset_transport(token)

    def test_transport_returns_sse(self, context):
        """Test that transport returns 'sse' when set."""
        token = set_transport("sse")
        try:
            assert context.transport == "sse"
        finally:
            reset_transport(token)

    def test_transport_returns_streamable_http(self, context):
        """Test that transport returns 'streamable-http' when set."""
        token = set_transport("streamable-http")
        try:
            assert context.transport == "streamable-http"
        finally:
            reset_transport(token)

    def test_transport_reset(self, context):
        """Test that transport resets correctly."""
        assert context.transport is None
        token = set_transport("stdio")
        try:
            assert context.transport == "stdio"
        finally:
            # Matches the try/finally pattern of the other tests in this
            # class: a failing assertion must not leave the transport set.
            reset_transport(token)
        assert context.transport is None
class TestTransportIntegration:
    """Integration tests for transport property with actual server/client."""

    async def test_transport_in_tool_via_client(self):
        """Test that transport is accessible from within a tool via Client."""
        from fastmcp import Client

        mcp = FastMCP("test")
        seen_transport = None

        @mcp.tool
        def get_transport(ctx: Context) -> str:
            nonlocal seen_transport
            seen_transport = ctx.transport
            return seen_transport or "none"

        # The in-memory Client transport never sets a transport type, so the
        # tool observes None here (only the run_* entry points set it).
        async with Client(mcp) as client:
            result = await client.call_tool("get_transport", {})
            assert seen_transport is None
            assert result.data == "none"

    async def test_transport_set_manually_is_visible_in_tool(self):
        """Test that manually set transport is visible from within a tool."""
        from fastmcp import Client

        mcp = FastMCP("test")
        seen_transport = None

        @mcp.tool
        def get_transport(ctx: Context) -> str:
            nonlocal seen_transport
            seen_transport = ctx.transport
            return seen_transport or "none"

        # Set the transport by hand before the client connects.
        token = set_transport("stdio")
        try:
            async with Client(mcp) as client:
                result = await client.call_tool("get_transport", {})
                assert seen_transport == "stdio"
                assert result.data == "stdio"
        finally:
            reset_transport(token)

    async def test_transport_set_via_http_middleware(self):
        """Test that transport is set per-request via HTTP middleware."""
        from fastmcp import Client
        from fastmcp.client.transports import StreamableHttpTransport
        from fastmcp.utilities.tests import run_server_async

        mcp = FastMCP("test")
        seen_transport = None

        @mcp.tool
        def get_transport(ctx: Context) -> str:
            nonlocal seen_transport
            seen_transport = ctx.transport
            return seen_transport or "none"

        # Run a real streamable-http server and hit it over HTTP so the
        # middleware sets the transport for the request.
        async with run_server_async(mcp, transport="streamable-http") as url:
            http_transport = StreamableHttpTransport(url=url)
            async with Client(transport=http_transport) as client:
                result = await client.call_tool("get_transport", {})
                assert seen_transport == "streamable-http"
                assert result.data == "streamable-http"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_context.py",
"license": "Apache License 2.0",
"lines": 393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/dependencies.py | """Dependency injection for FastMCP.
DI features (Depends, CurrentContext, CurrentFastMCP) work without pydocket
using the uncalled-for DI engine. Only task-related dependencies (CurrentDocket,
CurrentWorker) and background task execution require fastmcp[tasks].
"""
from __future__ import annotations
import contextlib
import inspect
import logging
import weakref
from collections.abc import AsyncGenerator, Callable
from contextlib import AsyncExitStack, asynccontextmanager
from contextvars import ContextVar, Token
from dataclasses import dataclass
from datetime import datetime, timezone
from functools import lru_cache
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, cast, get_type_hints, runtime_checkable
from mcp.server.auth.middleware.auth_context import (
get_access_token as _sdk_get_access_token,
)
from mcp.server.auth.middleware.bearer_auth import AuthenticatedUser
from mcp.server.auth.provider import (
AccessToken as _SDKAccessToken,
)
from mcp.server.lowlevel.server import request_ctx
from starlette.requests import Request
from uncalled_for import Dependency, get_dependency_parameters
from uncalled_for.resolution import _Depends
from fastmcp.exceptions import FastMCPError
from fastmcp.server.auth import AccessToken
from fastmcp.server.http import _current_http_request
from fastmcp.utilities.async_utils import (
call_sync_fn_in_threadpool,
is_coroutine_function,
)
from fastmcp.utilities.types import find_kwarg_by_type, is_class_member_of_type
_logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from docket import Docket
from docket.worker import Worker
from mcp.server.session import ServerSession
from fastmcp.server.context import Context
from fastmcp.server.server import FastMCP
__all__ = [
"AccessToken",
"CurrentAccessToken",
"CurrentContext",
"CurrentDocket",
"CurrentFastMCP",
"CurrentHeaders",
"CurrentRequest",
"CurrentWorker",
"Progress",
"TaskContextInfo",
"TokenClaim",
"get_access_token",
"get_context",
"get_http_headers",
"get_http_request",
"get_server",
"get_task_context",
"get_task_session",
"is_docket_available",
"register_task_session",
"require_docket",
"resolve_dependencies",
"transform_context_annotations",
"without_injected_parameters",
]
# --- TaskContextInfo and get_task_context ---
@dataclass(frozen=True, slots=True)
class TaskContextInfo:
"""Information about the current background task context.
Returned by ``get_task_context()`` when running inside a Docket worker.
Contains identifiers needed to communicate with the MCP session.
"""
task_id: str
"""The MCP task ID (server-generated UUID)."""
session_id: str
"""The session ID that submitted this task."""
def get_task_context() -> TaskContextInfo | None:
"""Get the current task context if running inside a background task worker.
This function extracts task information from the Docket execution context.
Returns None if not running in a task context (e.g., foreground execution).
Returns:
TaskContextInfo with task_id and session_id, or None if not in a task.
"""
if not is_docket_available():
return None
from docket.dependencies import current_execution
try:
execution = current_execution.get()
# Parse the task key: {session_id}:{task_id}:{task_type}:{component}
from fastmcp.server.tasks.keys import parse_task_key
key_parts = parse_task_key(execution.key)
return TaskContextInfo(
task_id=key_parts["client_task_id"],
session_id=key_parts["session_id"],
)
except LookupError:
# Not in worker context
return None
except (ValueError, KeyError):
# Invalid task key format
return None
# --- Session registry for background task Context ---
_task_sessions: dict[str, weakref.ref[ServerSession]] = {}
def register_task_session(session_id: str, session: ServerSession) -> None:
"""Register a session for Context access in background tasks.
Called automatically when a task is submitted to Docket. The session is
stored as a weakref so it doesn't prevent garbage collection when the
client disconnects.
Args:
session_id: The session identifier
session: The ServerSession instance
"""
_task_sessions[session_id] = weakref.ref(session)
def get_task_session(session_id: str) -> ServerSession | None:
"""Get a registered session by ID if still alive.
Args:
session_id: The session identifier
Returns:
The ServerSession if found and alive, None otherwise
"""
ref = _task_sessions.get(session_id)
if ref is None:
return None
session = ref()
if session is None:
# Session was garbage collected, clean up entry
_task_sessions.pop(session_id, None)
return session
# --- ContextVars ---
_current_server: ContextVar[weakref.ref[FastMCP] | None] = ContextVar(
"server", default=None
)
_current_docket: ContextVar[Docket | None] = ContextVar("docket", default=None)
_current_worker: ContextVar[Worker | None] = ContextVar("worker", default=None)
_task_access_token: ContextVar[AccessToken | None] = ContextVar(
"task_access_token", default=None
)
# --- Docket availability check ---
_DOCKET_AVAILABLE: bool | None = None
def is_docket_available() -> bool:
"""Check if pydocket is installed."""
global _DOCKET_AVAILABLE
if _DOCKET_AVAILABLE is None:
try:
import docket # noqa: F401
_DOCKET_AVAILABLE = True
except ImportError:
_DOCKET_AVAILABLE = False
return _DOCKET_AVAILABLE
def require_docket(feature: str) -> None:
"""Raise ImportError with install instructions if docket not available.
Args:
feature: Description of what requires docket (e.g., "`task=True`",
"CurrentDocket()"). Will be included in the error message.
"""
if not is_docket_available():
raise ImportError(
f"FastMCP background tasks require the `tasks` extra. "
f"Install with: pip install 'fastmcp[tasks]'. "
f"(Triggered by {feature})"
)
# Import Progress separately β it's docket-specific, not part of uncalled-for
try:
from docket.dependencies import Progress as DocketProgress
except ImportError:
DocketProgress = None # type: ignore[assignment]
# --- Context utilities ---
def transform_context_annotations(fn: Callable[..., Any]) -> Callable[..., Any]:
"""Transform ctx: Context into ctx: Context = CurrentContext().
Transforms ALL params typed as Context to use Docket's DI system,
unless they already have a Dependency-based default (like CurrentContext()).
This unifies the legacy type annotation DI with Docket's Depends() system,
allowing both patterns to work through a single resolution path.
Note: Only POSITIONAL_OR_KEYWORD parameters are reordered (params with defaults
after those without). KEYWORD_ONLY parameters keep their position since Python
allows them to have defaults in any order.
Args:
fn: Function to transform
Returns:
Function with modified signature (same function object, updated __signature__)
"""
from fastmcp.server.context import Context
# Get the function's signature
try:
sig = inspect.signature(fn)
except (ValueError, TypeError):
return fn
# Get type hints for accurate type checking
try:
type_hints = get_type_hints(fn, include_extras=True)
except Exception:
type_hints = getattr(fn, "__annotations__", {})
# First pass: identify which params need transformation
params_to_transform: set[str] = set()
optional_context_params: set[str] = set()
for name, param in sig.parameters.items():
annotation = type_hints.get(name, param.annotation)
if is_class_member_of_type(annotation, Context):
if not isinstance(param.default, Dependency):
params_to_transform.add(name)
if param.default is None:
optional_context_params.add(name)
if not params_to_transform:
return fn
# Second pass: build new param list preserving parameter kind structure
# Python signature structure: [POSITIONAL_ONLY] / [POSITIONAL_OR_KEYWORD] *args [KEYWORD_ONLY] **kwargs
# Within POSITIONAL_ONLY and POSITIONAL_OR_KEYWORD: params without defaults must come first
# KEYWORD_ONLY params can have defaults in any order
P = inspect.Parameter
# Group params by section, preserving order within each
positional_only_no_default: list[P] = []
positional_only_with_default: list[P] = []
positional_or_keyword_no_default: list[P] = []
positional_or_keyword_with_default: list[P] = []
var_positional: list[P] = [] # *args (at most one)
keyword_only: list[P] = [] # After * or *args, order preserved
var_keyword: list[P] = [] # **kwargs (at most one)
for name, param in sig.parameters.items():
# Transform Context params by adding CurrentContext default
if name in params_to_transform:
# We use CurrentContext() instead of Depends(get_context) because
# get_context() returns the Context which is an AsyncContextManager,
# and the DI system would try to enter it again (it's already entered)
if name in optional_context_params:
param = param.replace(default=OptionalCurrentContext())
else:
param = param.replace(default=CurrentContext())
# Sort into buckets based on parameter kind
if param.kind == P.POSITIONAL_ONLY:
if param.default is P.empty:
positional_only_no_default.append(param)
else:
positional_only_with_default.append(param)
elif param.kind == P.POSITIONAL_OR_KEYWORD:
if param.default is P.empty:
positional_or_keyword_no_default.append(param)
else:
positional_or_keyword_with_default.append(param)
elif param.kind == P.VAR_POSITIONAL:
var_positional.append(param)
elif param.kind == P.KEYWORD_ONLY:
keyword_only.append(param)
elif param.kind == P.VAR_KEYWORD:
var_keyword.append(param)
# Reconstruct parameter list maintaining Python's required structure
new_params: list[P] = (
positional_only_no_default
+ positional_only_with_default
+ positional_or_keyword_no_default
+ positional_or_keyword_with_default
+ var_positional
+ keyword_only
+ var_keyword
)
# Update function's signature in place
# Handle methods by setting signature on the underlying function
# For bound methods, we need to preserve the 'self' parameter because
# inspect.signature(bound_method) automatically removes the first param
if inspect.ismethod(fn):
# Get the original __func__ signature which includes 'self'
func_sig = inspect.signature(fn.__func__)
# Insert 'self' at the beginning of our new params
self_param = next(iter(func_sig.parameters.values())) # Should be 'self'
new_sig = func_sig.replace(parameters=[self_param, *new_params])
fn.__func__.__signature__ = new_sig # type: ignore[union-attr]
else:
new_sig = sig.replace(parameters=new_params)
fn.__signature__ = new_sig # type: ignore[attr-defined]
# Clear caches that may have cached the old signature
# This ensures get_dependency_parameters and without_injected_parameters
# see the transformed signature
_clear_signature_caches(fn)
return fn
def _clear_signature_caches(fn: Callable[..., Any]) -> None:
"""Clear signature-related caches for a function.
Called after modifying a function's signature to ensure downstream
code sees the updated signature.
"""
from uncalled_for.introspection import _parameter_cache, _signature_cache
_signature_cache.pop(fn, None)
_parameter_cache.pop(fn, None)
if inspect.ismethod(fn):
_signature_cache.pop(fn.__func__, None)
_parameter_cache.pop(fn.__func__, None)
def get_context() -> Context:
"""Get the current FastMCP Context instance directly."""
from fastmcp.server.context import _current_context
context = _current_context.get()
if context is None:
raise RuntimeError("No active context found.")
return context
def get_server() -> FastMCP:
"""Get the current FastMCP server instance directly.
Returns:
The active FastMCP server
Raises:
RuntimeError: If no server in context
"""
server_ref = _current_server.get()
if server_ref is None:
raise RuntimeError("No FastMCP server instance in context")
server = server_ref()
if server is None:
raise RuntimeError("FastMCP server instance is no longer available")
return server
def get_http_request() -> Request:
"""Get the current HTTP request.
Tries MCP SDK's request_ctx first, then falls back to FastMCP's HTTP context.
"""
# Try MCP SDK's request_ctx first (set during normal MCP request handling)
request = None
with contextlib.suppress(LookupError):
request = request_ctx.get().request
# Fallback to FastMCP's HTTP context variable
# This is needed during `on_initialize` middleware where request_ctx isn't set yet
if request is None:
request = _current_http_request.get()
if request is None:
raise RuntimeError("No active HTTP request found.")
return request
def get_http_headers(
include_all: bool = False,
include: set[str] | None = None,
) -> dict[str, str]:
"""Extract headers from the current HTTP request if available.
Never raises an exception, even if there is no active HTTP request (in which case
an empty dict is returned).
By default, strips problematic headers like `content-length` and `authorization`
that cause issues if forwarded to downstream services. If `include_all` is True,
all headers are returned.
The `include` parameter allows specific headers to be included even if they would
normally be excluded. This is useful for proxy transports that need to forward
authorization headers to upstream MCP servers.
"""
if include_all:
exclude_headers: set[str] = set()
else:
exclude_headers = {
"host",
"content-length",
"content-type",
"connection",
"transfer-encoding",
"upgrade",
"te",
"keep-alive",
"expect",
"accept",
"authorization",
# Proxy-related headers
"proxy-authenticate",
"proxy-authorization",
"proxy-connection",
# MCP-related headers
"mcp-session-id",
}
if include:
exclude_headers -= {h.lower() for h in include}
# (just in case)
if not all(h.lower() == h for h in exclude_headers):
raise ValueError("Excluded headers must be lowercase")
headers: dict[str, str] = {}
try:
request = get_http_request()
for name, value in request.headers.items():
lower_name = name.lower()
if lower_name not in exclude_headers:
headers[lower_name] = str(value)
return headers
except RuntimeError:
return {}
def get_access_token() -> AccessToken | None:
"""Get the FastMCP access token from the current context.
This function first tries to get the token from the current HTTP request's scope,
which is more reliable for long-lived connections where the SDK's auth_context_var
may become stale after token refresh. Falls back to the SDK's context var if no
request is available. In background tasks (Docket workers), falls back to the
token snapshot stored in Redis at task submission time.
Returns:
The access token if an authenticated user is available, None otherwise.
"""
access_token: _SDKAccessToken | None = None
# First, try to get from current HTTP request's scope (issue #1863)
# This is more reliable than auth_context_var for Streamable HTTP sessions
# where tokens may be refreshed between MCP messages
try:
request = get_http_request()
user = request.scope.get("user")
if isinstance(user, AuthenticatedUser):
access_token = user.access_token
except RuntimeError:
# No HTTP request available, fall back to context var
pass
# Fall back to SDK's context var if we didn't get a token from the request
if access_token is None:
access_token = _sdk_get_access_token()
# Fall back to background task snapshot (#3095)
# In Docket workers, neither HTTP request nor SDK context var are available.
# The token was snapshotted in Redis at submit_to_docket() time and restored
# into this ContextVar by _CurrentContext.__aenter__().
if access_token is None:
task_token = _task_access_token.get()
if task_token is not None:
# Check expiration: if expires_at is set and past, treat as expired
if task_token.expires_at is not None:
if task_token.expires_at < int(datetime.now(timezone.utc).timestamp()):
return None
return task_token
if access_token is None or isinstance(access_token, AccessToken):
return access_token
# If the object is not a FastMCP AccessToken, convert it to one if the
# fields are compatible (e.g. `claims` is not present in the SDK's AccessToken).
# This is a workaround for the case where the SDK or auth provider returns a different type
# If it fails, it will raise a TypeError
try:
access_token_as_dict = access_token.model_dump()
return AccessToken(
token=access_token_as_dict["token"],
client_id=access_token_as_dict["client_id"],
scopes=access_token_as_dict["scopes"],
# Optional fields
expires_at=access_token_as_dict.get("expires_at"),
resource=access_token_as_dict.get("resource"),
claims=access_token_as_dict.get("claims") or {},
)
except Exception as e:
raise TypeError(
f"Expected fastmcp.server.auth.auth.AccessToken, got {type(access_token).__name__}. "
"Ensure the SDK is using the correct AccessToken type."
) from e
# --- Schema generation helper ---
@lru_cache(maxsize=5000)
def without_injected_parameters(fn: Callable[..., Any]) -> Callable[..., Any]:
"""Create a wrapper function without injected parameters.
Returns a wrapper that excludes Context and Docket dependency parameters,
making it safe to use with Pydantic TypeAdapter for schema generation and
validation. The wrapper internally handles all dependency resolution and
Context injection when called.
Handles:
- Legacy Context injection (always works)
- Depends() injection (always works - uses docket or vendored DI engine)
Args:
fn: Original function with Context and/or dependencies
Returns:
Async wrapper function without injected parameters
"""
from fastmcp.server.context import Context
# Identify parameters to exclude
context_kwarg = find_kwarg_by_type(fn, Context)
dependency_params = get_dependency_parameters(fn)
exclude = set()
if context_kwarg:
exclude.add(context_kwarg)
if dependency_params:
exclude.update(dependency_params.keys())
if not exclude:
return fn
# Build new signature with only user parameters
sig = inspect.signature(fn)
user_params = [
param for name, param in sig.parameters.items() if name not in exclude
]
new_sig = inspect.Signature(user_params)
# Create async wrapper that handles dependency resolution
fn_is_async = is_coroutine_function(fn)
async def wrapper(**user_kwargs: Any) -> Any:
async with resolve_dependencies(fn, user_kwargs) as resolved_kwargs:
if fn_is_async:
return await fn(**resolved_kwargs)
else:
# Run sync functions in threadpool to avoid blocking the event loop
result = await call_sync_fn_in_threadpool(fn, **resolved_kwargs)
# Handle sync wrappers that return awaitables (e.g., partial(async_fn))
if inspect.isawaitable(result):
result = await result
return result
# Resolve string annotations (from `from __future__ import annotations`) using
# the original function's module context. The wrapper's __globals__ points to
# this module (dependencies.py) and is read-only, so some Pydantic versions
# can't resolve names like Annotated or Literal from string annotations.
try:
resolved_hints = get_type_hints(fn, include_extras=True)
except Exception:
resolved_hints = getattr(fn, "__annotations__", {})
wrapper.__signature__ = new_sig # type: ignore[attr-defined]
wrapper.__annotations__ = {
k: v for k, v in resolved_hints.items() if k not in exclude and k != "return"
}
wrapper.__name__ = getattr(fn, "__name__", "wrapper")
wrapper.__doc__ = getattr(fn, "__doc__", None)
wrapper.__module__ = fn.__module__
wrapper.__qualname__ = getattr(fn, "__qualname__", wrapper.__qualname__)
return wrapper
# --- Dependency resolution ---
@asynccontextmanager
async def _resolve_fastmcp_dependencies(
fn: Callable[..., Any], arguments: dict[str, Any]
) -> AsyncGenerator[dict[str, Any], None]:
"""Resolve Docket dependencies for a FastMCP function.
Sets up the minimal context needed for Docket's Depends() to work:
- A cache for resolved dependencies
- An AsyncExitStack for managing context manager lifetimes
The Docket instance (for CurrentDocket dependency) is managed separately
by the server's lifespan and made available via ContextVar.
Note: This does NOT set up Docket's Execution context. If user code needs
Docket-specific dependencies like TaskArgument(), TaskKey(), etc., those
will fail with clear errors about missing context.
Args:
fn: The function to resolve dependencies for
arguments: The arguments passed to the function
Yields:
Dictionary of resolved dependencies merged with provided arguments
"""
dependency_params = get_dependency_parameters(fn)
if not dependency_params:
yield arguments
return
# Initialize dependency cache and exit stack
cache_token = _Depends.cache.set({})
try:
async with AsyncExitStack() as stack:
stack_token = _Depends.stack.set(stack)
try:
resolved: dict[str, Any] = {}
for parameter, dependency in dependency_params.items():
# If argument was explicitly provided, use that instead
if parameter in arguments:
resolved[parameter] = arguments[parameter]
continue
# Resolve the dependency
try:
resolved[parameter] = await stack.enter_async_context(
dependency
)
except FastMCPError:
# Let FastMCPError subclasses (ToolError, ResourceError, etc.)
# propagate unchanged so they can be handled appropriately
raise
except Exception as error:
fn_name = getattr(fn, "__name__", repr(fn))
raise RuntimeError(
f"Failed to resolve dependency '{parameter}' for {fn_name}"
) from error
# Merge resolved dependencies with provided arguments
final_arguments = {**arguments, **resolved}
yield final_arguments
finally:
_Depends.stack.reset(stack_token)
finally:
_Depends.cache.reset(cache_token)
@asynccontextmanager
async def resolve_dependencies(
fn: Callable[..., Any], arguments: dict[str, Any]
) -> AsyncGenerator[dict[str, Any], None]:
"""Resolve dependencies for a FastMCP function.
This function:
1. Filters out any dependency parameter names from user arguments (security)
2. Resolves Depends() parameters via the DI system
The filtering prevents external callers from overriding injected parameters by
providing values for dependency parameter names. This is a security feature.
Note: Context injection is handled via transform_context_annotations() which
converts `ctx: Context` to `ctx: Context = Depends(get_context)` at registration
time, so all injection goes through the unified DI system.
Args:
fn: The function to resolve dependencies for
arguments: User arguments (may contain keys that match dependency names,
which will be filtered out)
Yields:
Dictionary of filtered user args + resolved dependencies
Example:
```python
async with resolve_dependencies(my_tool, {"name": "Alice"}) as kwargs:
result = my_tool(**kwargs)
if inspect.isawaitable(result):
result = await result
```
"""
# Filter out dependency parameters from user arguments to prevent override
# This is a security measure - external callers should never be able to
# provide values for injected parameters
dependency_params = get_dependency_parameters(fn)
user_args = {k: v for k, v in arguments.items() if k not in dependency_params}
async with _resolve_fastmcp_dependencies(fn, user_args) as resolved_kwargs:
yield resolved_kwargs
# --- Dependency classes ---
# These must inherit from docket.dependencies.Dependency when docket is available
# so that get_dependency_parameters can detect them.
async def _restore_task_access_token(
session_id: str, task_id: str
) -> Token[AccessToken | None] | None:
"""Restore the access token snapshot from Redis into a ContextVar.
Called when setting up context in a Docket worker. The token was stored at
submit_to_docket() time. The token is restored regardless of expiration;
get_access_token() checks expiry when reading from the ContextVar.
Returns:
The ContextVar token for resetting, or None if nothing was restored.
"""
docket = _current_docket.get()
if docket is None:
return None
token_key = docket.key(f"fastmcp:task:{session_id}:{task_id}:access_token")
try:
async with docket.redis() as redis:
token_data = await redis.get(token_key)
if token_data is not None:
restored = AccessToken.model_validate_json(token_data)
return _task_access_token.set(restored)
except Exception:
_logger.warning(
"Failed to restore access token for task %s:%s",
session_id,
task_id,
exc_info=True,
)
return None
async def _restore_task_origin_request_id(session_id: str, task_id: str) -> str | None:
"""Restore the origin request ID snapshot for a background task.
Returns None if no request ID was captured at submission time.
"""
docket = _current_docket.get()
if docket is None:
return None
request_id_key = docket.key(
f"fastmcp:task:{session_id}:{task_id}:origin_request_id"
)
try:
async with docket.redis() as redis:
request_id_data = await redis.get(request_id_key)
if request_id_data is None:
return None
if isinstance(request_id_data, bytes):
return request_id_data.decode()
return str(request_id_data)
except Exception:
_logger.warning(
"Failed to restore origin request ID for task %s:%s",
session_id,
task_id,
exc_info=True,
)
return None
class _CurrentContext(Dependency["Context"]):
    """Async context manager for Context dependency.

    In foreground (request) mode: returns the active context from _current_context.
    In background (Docket worker) mode: creates a task-aware Context with task_id
    and restores the access token snapshot from Redis.
    """
    # Context created by __aenter__ for a background task; stays None in
    # foreground mode, where the existing request context is returned instead.
    _context: Context | None = None
    # ContextVar reset token from restoring the access-token snapshot, kept so
    # __aexit__ can undo the restoration.
    _access_token_cv_token: Token[AccessToken | None] | None = None
    async def __aenter__(self) -> Context:
        from fastmcp.server.context import Context, _current_context
        # Try foreground context first (normal MCP request)
        context = _current_context.get()
        if context is not None:
            return context
        # Check if we're in a Docket worker context
        task_info = get_task_context()
        if task_info is not None:
            # Get session from registry (registered when task was submitted)
            session = get_task_session(task_info.session_id)
            # Get server from ContextVar
            server = get_server()
            # Recover the ID of the request that scheduled this task so the
            # background context can be correlated with its origin.
            origin_request_id = await _restore_task_origin_request_id(
                task_info.session_id, task_info.task_id
            )
            # Create task-aware Context
            self._context = Context(
                fastmcp=server,
                session=session,
                task_id=task_info.task_id,
                origin_request_id=origin_request_id,
            )
            # Enter the context to set up ContextVars
            await self._context.__aenter__()
            # Restore access token snapshot from Redis (#3095)
            self._access_token_cv_token = await _restore_task_access_token(
                task_info.session_id, task_info.task_id
            )
            return self._context
        # Neither foreground nor background context available
        raise RuntimeError(
            "No active context found. This can happen if:\n"
            " - Called outside an MCP request handler\n"
            " - Called in a background task before session was registered\n"
            "Check `context.request_context` for None before accessing."
        )
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # Clean up access token ContextVar (reset before tearing down the
        # Context so teardown mirrors the setup order in __aenter__)
        if self._access_token_cv_token is not None:
            _task_access_token.reset(self._access_token_cv_token)
            self._access_token_cv_token = None
        # Clean up if we created a context for background task
        if self._context is not None:
            await self._context.__aexit__(exc_type, exc_value, traceback)
            self._context = None
class _OptionalCurrentContext(Dependency["Context | None"]):
    """Context dependency that resolves to None when no context is active.

    Wraps `_CurrentContext` via composition rather than subclassing, so that
    `__aenter__` can widen the return type to `Context | None` without an
    incompatible override.
    """
    _inner: _CurrentContext | None = None
    async def __aenter__(self) -> Context | None:
        delegate = _CurrentContext()
        try:
            resolved = await delegate.__aenter__()
        except RuntimeError as exc:
            # Only the "no context" failure degrades to None; anything else
            # is a real error and propagates.
            if "No active context found" not in str(exc):
                raise
            return None
        self._inner = delegate
        return resolved
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        inner, self._inner = self._inner, None
        if inner is not None:
            await inner.__aexit__(exc_type, exc_value, traceback)
def CurrentContext() -> Context:
    """Get the current FastMCP Context instance.

    Provides access to the active FastMCP Context for the current MCP
    operation (tool/resource/prompt call).

    Returns:
        A dependency that resolves to the active Context instance

    Raises:
        RuntimeError: If no active context is found (raised at resolution time)

    Example:
        ```python
        from fastmcp.dependencies import CurrentContext

        @mcp.tool()
        async def log_progress(ctx: Context = CurrentContext()) -> str:
            ctx.report_progress(50, 100, "Halfway done")
            return "Working"
        ```
    """
    dependency = _CurrentContext()
    return cast("Context", dependency)
def OptionalCurrentContext() -> Context | None:
    """Get the current FastMCP Context, resolving to None when none is active."""
    dependency = _OptionalCurrentContext()
    return cast("Context | None", dependency)
class _CurrentDocket(Dependency["Docket"]):
    """Async context manager that resolves the active Docket instance."""
    async def __aenter__(self) -> Docket:
        require_docket("CurrentDocket()")
        docket = _current_docket.get()
        if docket is not None:
            return docket
        raise RuntimeError(
            "No Docket instance found. Docket is only initialized when there are "
            "task-enabled components (task=True). Add task=True to a component "
            "to enable Docket infrastructure."
        )
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # The docket's lifetime is managed by the server; nothing to release.
        pass
def CurrentDocket() -> Docket:
    """Get the current Docket instance managed by FastMCP.

    Provides access to the Docket instance that FastMCP automatically creates
    for background task scheduling.

    Returns:
        A dependency that resolves to the active Docket instance

    Raises:
        RuntimeError: If not within a FastMCP server context
        ImportError: If fastmcp[tasks] is not installed

    Example:
        ```python
        from fastmcp.dependencies import CurrentDocket

        @mcp.tool()
        async def schedule_task(docket: Docket = CurrentDocket()) -> str:
            await docket.add(some_function)(arg1, arg2)
            return "Scheduled"
        ```
    """
    # Fail fast at declaration time if pydocket is not installed.
    require_docket("CurrentDocket()")
    dependency = _CurrentDocket()
    return cast("Docket", dependency)
class _CurrentWorker(Dependency["Worker"]):
    """Async context manager that resolves the active Docket Worker."""
    async def __aenter__(self) -> Worker:
        require_docket("CurrentWorker()")
        worker = _current_worker.get()
        if worker is not None:
            return worker
        raise RuntimeError(
            "No Worker instance found. Worker is only initialized when there are "
            "task-enabled components (task=True). Add task=True to a component "
            "to enable Docket infrastructure."
        )
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # The worker's lifetime is managed by the server; nothing to release.
        pass
def CurrentWorker() -> Worker:
    """Get the current Docket Worker instance managed by FastMCP.

    Provides access to the Worker instance that FastMCP automatically creates
    for background task processing.

    Returns:
        A dependency that resolves to the active Worker instance

    Raises:
        RuntimeError: If not within a FastMCP server context
        ImportError: If fastmcp[tasks] is not installed

    Example:
        ```python
        from fastmcp.dependencies import CurrentWorker

        @mcp.tool()
        async def check_worker_status(worker: Worker = CurrentWorker()) -> str:
            return f"Worker: {worker.name}"
        ```
    """
    # Fail fast at declaration time if pydocket is not installed.
    require_docket("CurrentWorker()")
    dependency = _CurrentWorker()
    return cast("Worker", dependency)
class _CurrentFastMCP(Dependency["FastMCP"]):
    """Async context manager that resolves the active FastMCP server."""
    async def __aenter__(self) -> FastMCP:
        ref = _current_server.get()
        if ref is None:
            raise RuntimeError("No FastMCP server instance in context")
        # The ContextVar holds a callable reference (presumably a weakref);
        # dereferencing can yield None once the server is gone.
        server = ref()
        if server is None:
            raise RuntimeError("FastMCP server instance is no longer available")
        return server
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # The server is not owned by this dependency; nothing to release.
        pass
def CurrentFastMCP() -> FastMCP:
    """Get the current FastMCP server instance.

    Provides access to the active FastMCP server.

    Returns:
        A dependency that resolves to the active FastMCP server

    Raises:
        RuntimeError: If no server is in context (raised at resolution time)

    Example:
        ```python
        from fastmcp.dependencies import CurrentFastMCP

        @mcp.tool()
        async def introspect(server: FastMCP = CurrentFastMCP()) -> str:
            return f"Server: {server.name}"
        ```
    """
    # Imported locally to avoid a circular import at module load time.
    from fastmcp.server.server import FastMCP
    dependency = _CurrentFastMCP()
    return cast(FastMCP, dependency)
class _CurrentRequest(Dependency[Request]):
    """Async context manager that resolves the active HTTP Request."""
    async def __aenter__(self) -> Request:
        # Delegates to the transport helper, which errors out when no HTTP
        # request is in flight (e.g. STDIO transport).
        return get_http_request()
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # The request is owned by the HTTP layer; nothing to clean up.
        pass
def CurrentRequest() -> Request:
    """Get the current HTTP request.

    Provides access to the Starlette Request object for the current HTTP
    request. Only available when running over HTTP transports (SSE or
    Streamable HTTP).

    Returns:
        A dependency that resolves to the active Starlette Request

    Raises:
        RuntimeError: If no HTTP request is in context (e.g. STDIO transport)

    Example:
        ```python
        from fastmcp.server.dependencies import CurrentRequest
        from starlette.requests import Request

        @mcp.tool()
        async def get_client_ip(request: Request = CurrentRequest()) -> str:
            return request.client.host if request.client else "Unknown"
        ```
    """
    dependency = _CurrentRequest()
    return cast(Request, dependency)
class _CurrentHeaders(Dependency[dict[str, str]]):
    """Async context manager that resolves the current HTTP headers."""
    async def __aenter__(self) -> dict[str, str]:
        # include= opts back in the authorization header, which the helper
        # would otherwise omit.
        return get_http_headers(include={"authorization"})
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # Headers are a plain dict snapshot; nothing to clean up.
        pass
def CurrentHeaders() -> dict[str, str]:
    """Get the current HTTP request headers.

    Provides access to the HTTP headers for the current request, including
    the authorization header. Resolves to an empty dictionary when no HTTP
    request is available, so it is safe to use over any transport.

    Returns:
        A dependency that resolves to a dictionary of header name -> value

    Example:
        ```python
        from fastmcp.server.dependencies import CurrentHeaders

        @mcp.tool()
        async def get_auth_type(headers: dict = CurrentHeaders()) -> str:
            auth = headers.get("authorization", "")
            return "Bearer" if auth.startswith("Bearer ") else "None"
        ```
    """
    dependency = _CurrentHeaders()
    return cast(dict[str, str], dependency)
# --- Progress dependency ---
@runtime_checkable
class ProgressLike(Protocol):
    """Protocol for progress tracking interface.

    Defines the common interface between InMemoryProgress (server context)
    and Docket's Progress (worker context). Marked @runtime_checkable so
    isinstance() checks against the protocol are allowed.
    """
    @property
    def current(self) -> int | None:
        """Current progress value (None until the first increment)."""
        ...
    @property
    def total(self) -> int:
        """Total/target progress value."""
        ...
    @property
    def message(self) -> str | None:
        """Current progress message."""
        ...
    async def set_total(self, total: int) -> None:
        """Set the total/target value for progress tracking."""
        ...
    async def increment(self, amount: int = 1) -> None:
        """Atomically increment the current progress value."""
        ...
    async def set_message(self, message: str | None) -> None:
        """Update the progress status message."""
        ...
class InMemoryProgress:
    """In-memory progress tracker for immediate tool execution.

    Mirrors the interface of Docket's Progress but keeps all state in plain
    attributes instead of Redis. Suitable for testing and for immediate
    execution where progress need not be observable across processes.
    """
    def __init__(self) -> None:
        # current stays None until the first increment; total defaults to 1.
        self._current: int | None = None
        self._total: int = 1
        self._message: str | None = None
    async def __aenter__(self) -> InMemoryProgress:
        return self
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # Purely in-memory; there are no resources to release.
        pass
    @property
    def current(self) -> int | None:
        """Current progress value (None before the first increment)."""
        return self._current
    @property
    def total(self) -> int:
        """Total/target progress value."""
        return self._total
    @property
    def message(self) -> str | None:
        """Current progress message."""
        return self._message
    async def set_total(self, total: int) -> None:
        """Set the total/target value for progress tracking."""
        if total < 1:
            raise ValueError("Total must be at least 1")
        self._total = total
    async def increment(self, amount: int = 1) -> None:
        """Atomically increment the current progress value."""
        if amount < 1:
            raise ValueError("Amount must be at least 1")
        self._current = amount if self._current is None else self._current + amount
    async def set_message(self, message: str | None) -> None:
        """Update the progress status message."""
        self._message = message
class Progress(Dependency["Progress"]):
    """FastMCP Progress dependency that works in both server and worker contexts.

    Handles three execution modes:
    - In Docket worker: Uses the execution's progress (observable via Redis)
    - In FastMCP server with Docket: Falls back to in-memory progress
    - In FastMCP server without Docket: Uses in-memory progress

    This allows tools to use Progress() regardless of whether they're called
    immediately or as background tasks, and regardless of whether pydocket
    is installed.
    """
    # Backing implementation chosen in __aenter__; None outside the context.
    _impl: ProgressLike | None = None
    async def __aenter__(self) -> Progress:
        server_ref = _current_server.get()
        if server_ref is None or server_ref() is None:
            raise RuntimeError("Progress dependency requires a FastMCP server context.")
        if is_docket_available():
            from docket.dependencies import Progress as DocketProgress
            try:
                docket_progress = DocketProgress()
                # A LookupError here means we're not inside a Docket execution,
                # so we fall through to the in-memory implementation below.
                self._impl = await docket_progress.__aenter__()
                return self
            except LookupError:
                pass
        self._impl = InMemoryProgress()
        return self
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # NOTE(review): when _impl came from DocketProgress.__aenter__, its
        # __aexit__ is never invoked here — confirm DocketProgress needs no
        # explicit teardown, or track and exit it explicitly.
        self._impl = None
    @property
    def current(self) -> int | None:
        """Current progress value."""
        assert self._impl is not None, "Progress must be used as a dependency"
        return self._impl.current
    @property
    def total(self) -> int:
        """Total/target progress value."""
        assert self._impl is not None, "Progress must be used as a dependency"
        return self._impl.total
    @property
    def message(self) -> str | None:
        """Current progress message."""
        assert self._impl is not None, "Progress must be used as a dependency"
        return self._impl.message
    async def set_total(self, total: int) -> None:
        """Set the total/target value for progress tracking."""
        assert self._impl is not None, "Progress must be used as a dependency"
        await self._impl.set_total(total)
    async def increment(self, amount: int = 1) -> None:
        """Atomically increment the current progress value."""
        assert self._impl is not None, "Progress must be used as a dependency"
        await self._impl.increment(amount)
    async def set_message(self, message: str | None) -> None:
        """Update the progress status message."""
        assert self._impl is not None, "Progress must be used as a dependency"
        await self._impl.set_message(message)
# --- Access Token dependency ---
class _CurrentAccessToken(Dependency[AccessToken]):
    """Async context manager that resolves the current AccessToken."""
    # Reset token for the restored access-token ContextVar, undone in __aexit__.
    _access_token_cv_token: Token[AccessToken | None] | None = None
    async def __aenter__(self) -> AccessToken:
        token = get_access_token()
        if token is None:
            # In a Docket worker the token is normally restored by
            # _CurrentContext; when ctx: Context is absent from the function
            # signature, restore the Redis snapshot here instead.
            task_info = get_task_context()
            if task_info is not None:
                self._access_token_cv_token = await _restore_task_access_token(
                    task_info.session_id, task_info.task_id
                )
                token = get_access_token()
        if token is None:
            raise RuntimeError(
                "No access token found. Ensure authentication is configured "
                "and the request is authenticated."
            )
        return token
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        cv_token, self._access_token_cv_token = self._access_token_cv_token, None
        if cv_token is not None:
            _task_access_token.reset(cv_token)
def CurrentAccessToken() -> AccessToken:
    """Get the current access token for the authenticated user.

    Provides access to the AccessToken for the current authenticated request.
    Resolution raises if no authentication is present; use get_access_token()
    when the token is optional.

    Returns:
        A dependency that resolves to the active AccessToken

    Raises:
        RuntimeError: If there is no authenticated user

    Example:
        ```python
        from fastmcp.server.dependencies import CurrentAccessToken
        from fastmcp.server.auth import AccessToken

        @mcp.tool()
        async def get_user_id(token: AccessToken = CurrentAccessToken()) -> str:
            return token.claims.get("sub", "unknown")
        ```
    """
    dependency = _CurrentAccessToken()
    return cast(AccessToken, dependency)
# --- Token Claim dependency ---
class _TokenClaim(Dependency[str]):
    """Dependency that extracts a specific claim from the access token."""
    def __init__(self, claim_name: str):
        # Name of the claim to pull out of the token's claims mapping.
        self.claim_name = claim_name
    async def __aenter__(self) -> str:
        token = get_access_token()
        if token is None:
            raise RuntimeError(
                f"No access token available. Cannot extract claim '{self.claim_name}'."
            )
        value = token.claims.get(self.claim_name)
        if value is not None:
            return str(value)
        raise RuntimeError(
            f"Claim '{self.claim_name}' not found in access token. "
            f"Available claims: {list(token.claims.keys())}"
        )
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # The claim is a plain string; nothing to clean up.
        pass
def TokenClaim(name: str) -> str:
    """Get a specific claim from the access token.

    This dependency extracts a single claim value from the current access token.
    It's useful for getting user identifiers, roles, or other token claims
    without needing the full token object.

    Args:
        name: The name of the claim to extract (e.g., "oid", "sub", "email")

    Returns:
        A dependency that resolves to the claim value as a string

    Raises:
        RuntimeError: If no access token is available or claim is missing

    Example:
        ```python
        from fastmcp.server.dependencies import TokenClaim

        @mcp.tool()
        async def add_expense(
            amount: float,
            user_id: str = TokenClaim("oid"),  # Azure object ID
        ):
            # user_id is automatically injected from the token
            await db.insert({"user_id": user_id, "amount": amount})
        ```
    """
    return cast(str, _TokenClaim(name))
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/dependencies.py",
"license": "Apache License 2.0",
"lines": 1152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/http.py | from __future__ import annotations
from collections.abc import AsyncGenerator, Callable, Generator
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import TYPE_CHECKING
from mcp.server.auth.routes import build_resource_metadata_url
from mcp.server.lowlevel.server import LifespanResultT
from mcp.server.sse import SseServerTransport
from mcp.server.streamable_http import (
EventStore,
)
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import BaseRoute, Mount, Route
from starlette.types import Lifespan, Receive, Scope, Send
from fastmcp.server.auth import AuthProvider
from fastmcp.server.auth.middleware import RequireAuthMiddleware
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from fastmcp.server.server import FastMCP
logger = get_logger(__name__)
class StreamableHTTPASGIApp:
    """ASGI application wrapper for Streamable HTTP server transport.

    Wraps a StreamableHTTPSessionManager so it can be mounted as an ASGI
    endpoint, and translates the SDK's "task group is not initialized" error
    into actionable guidance about wiring the FastMCP lifespan into the
    parent application.
    """
    def __init__(self, session_manager: StreamableHTTPSessionManager):
        self.session_manager = session_manager
    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        try:
            await self.session_manager.handle_request(scope, receive, send)
        except RuntimeError as e:
            if str(e) != "Task group is not initialized. Make sure to use run().":
                # Re-raise other RuntimeErrors if they don't match the specific message
                raise
            logger.error(
                f"Original RuntimeError from mcp library: {e}", exc_info=True
            )
            # Fix: the message previously used "\\n", which rendered a literal
            # backslash-n in the user-facing error text instead of a newline.
            new_error_message = (
                "FastMCP's StreamableHTTPSessionManager task group was not initialized. "
                "This commonly occurs when the FastMCP application's lifespan is not "
                "passed to the parent ASGI application (e.g., FastAPI or Starlette). "
                "Please ensure you are setting `lifespan=mcp_app.lifespan` in your "
                "parent app's constructor, where `mcp_app` is the application instance "
                "returned by `fastmcp_instance.http_app()`. \n"
                "For more details, see the FastMCP ASGI integration documentation: "
                "https://gofastmcp.com/deployment/asgi"
            )
            # Raise a new RuntimeError that includes the original error's message
            # for full context, but leads with the more helpful guidance.
            raise RuntimeError(f"{new_error_message}\nOriginal error: {e}") from e
# ContextVar holding the Request currently being handled; None outside of an
# HTTP request (e.g. STDIO transport). Set/reset by RequestContextMiddleware.
_current_http_request: ContextVar[Request | None] = ContextVar(
    "http_request",
    default=None,
)
class StarletteWithLifespan(Starlette):
    """Starlette subclass that exposes its router's lifespan context."""
    @property
    def lifespan(self) -> Lifespan[Starlette]:
        # Exposed so the app can be embedded in a parent ASGI app via
        # `lifespan=mcp_app.lifespan`.
        return self.router.lifespan_context
@contextmanager
def set_http_request(request: Request) -> Generator[Request, None, None]:
    """Bind *request* to the current-request ContextVar for the block's duration."""
    reset_token = _current_http_request.set(request)
    try:
        yield request
    finally:
        # Always restore the previous value, even if the handler raised.
        _current_http_request.reset(reset_token)
class RequestContextMiddleware:
    """
    Middleware that stores each request in a ContextVar and sets transport type.
    """
    def __init__(self, app):
        self.app = app
    async def __call__(self, scope, receive, send):
        # Non-HTTP traffic (lifespan, websocket) passes straight through.
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return
        from fastmcp.server.context import reset_transport, set_transport
        # Transport type is stored on app state when the app is created.
        transport_type = getattr(scope["app"].state, "transport_type", None)
        transport_token = set_transport(transport_type) if transport_type else None
        try:
            with set_http_request(Request(scope)):
                await self.app(scope, receive, send)
        finally:
            if transport_token is not None:
                reset_transport(transport_token)
def create_base_app(
    routes: list[BaseRoute],
    middleware: list[Middleware],
    debug: bool = False,
    lifespan: Callable | None = None,
) -> StarletteWithLifespan:
    """Create a base Starlette app with common middleware and routes.

    Args:
        routes: List of routes to include in the app
        middleware: List of middleware to include in the app
        debug: Whether to enable debug mode
        lifespan: Optional lifespan manager for the app

    Returns:
        A Starlette application
    """
    # RequestContextMiddleware must be outermost so every inner layer sees
    # the ContextVar-bound request and transport type.
    # TODO(ty): remove type ignore when ty supports Starlette Middleware typing
    middleware.insert(0, Middleware(RequestContextMiddleware))  # type: ignore[arg-type]
    app = StarletteWithLifespan(
        routes=routes,
        middleware=middleware,
        debug=debug,
        lifespan=lifespan,
    )
    return app
def create_sse_app(
    server: FastMCP[LifespanResultT],
    message_path: str,
    sse_path: str,
    auth: AuthProvider | None = None,
    debug: bool = False,
    routes: list[BaseRoute] | None = None,
    middleware: list[Middleware] | None = None,
) -> StarletteWithLifespan:
    """Return an instance of the SSE server app.

    Args:
        server: The FastMCP server instance
        message_path: Path for SSE messages (client POSTs)
        sse_path: Path for SSE connections (GET stream)
        auth: Optional authentication provider (AuthProvider)
        debug: Whether to enable debug mode
        routes: Optional list of custom routes
        middleware: Optional list of middleware

    Returns:
        A Starlette application with RequestContextMiddleware
    """
    server_routes: list[BaseRoute] = []
    server_middleware: list[Middleware] = []
    # Set up SSE transport
    sse = SseServerTransport(message_path)
    # Create handler for SSE connections
    async def handle_sse(scope: Scope, receive: Receive, send: Send) -> Response:
        async with sse.connect_sse(scope, receive, send) as streams:
            await server._mcp_server.run(
                streams[0],
                streams[1],
                server._mcp_server.create_initialization_options(),
            )
        # Empty response once the stream closes, so Starlette considers the
        # request handled.
        return Response()
    # Set up auth if enabled
    if auth:
        # Get auth middleware from the provider
        auth_middleware = auth.get_middleware()
        # Get auth provider's own routes (OAuth endpoints, metadata, etc)
        auth_routes = auth.get_routes(mcp_path=sse_path)
        server_routes.extend(auth_routes)
        server_middleware.extend(auth_middleware)
        # Build RFC 9728-compliant metadata URL
        resource_url = auth._get_resource_url(sse_path)
        resource_metadata_url = (
            build_resource_metadata_url(resource_url) if resource_url else None
        )
        # Create protected SSE endpoint route
        server_routes.append(
            Route(
                sse_path,
                endpoint=RequireAuthMiddleware(
                    handle_sse,
                    auth.required_scopes,
                    resource_metadata_url,
                ),
                methods=["GET"],
            )
        )
        # Wrap the SSE message endpoint with RequireAuthMiddleware
        server_routes.append(
            Mount(
                message_path,
                app=RequireAuthMiddleware(
                    sse.handle_post_message,
                    auth.required_scopes,
                    resource_metadata_url,
                ),
            )
        )
    else:
        # No auth required
        async def sse_endpoint(request: Request) -> Response:
            return await handle_sse(request.scope, request.receive, request._send)
        server_routes.append(
            Route(
                sse_path,
                endpoint=sse_endpoint,
                methods=["GET"],
            )
        )
        server_routes.append(
            Mount(
                message_path,
                app=sse.handle_post_message,
            )
        )
    # Add custom routes with lowest precedence (registered after the SSE routes)
    if routes:
        server_routes.extend(routes)
    server_routes.extend(server._get_additional_http_routes())
    # Add middleware
    if middleware:
        server_middleware.extend(middleware)
    @asynccontextmanager
    async def lifespan(app: Starlette) -> AsyncGenerator[None, None]:
        async with server._lifespan_manager():
            yield
    # Create and return the app
    app = create_base_app(
        routes=server_routes,
        middleware=server_middleware,
        debug=debug,
        lifespan=lifespan,
    )
    # Store the FastMCP server instance on the Starlette app state, so
    # RequestContextMiddleware and downstream code can find it.
    app.state.fastmcp_server = server
    app.state.path = sse_path
    app.state.transport_type = "sse"
    return app
def create_streamable_http_app(
    server: FastMCP[LifespanResultT],
    streamable_http_path: str,
    event_store: EventStore | None = None,
    retry_interval: int | None = None,
    auth: AuthProvider | None = None,
    json_response: bool = False,
    stateless_http: bool = False,
    debug: bool = False,
    routes: list[BaseRoute] | None = None,
    middleware: list[Middleware] | None = None,
) -> StarletteWithLifespan:
    """Return an instance of the StreamableHTTP server app.

    Args:
        server: The FastMCP server instance
        streamable_http_path: Path for StreamableHTTP connections
        event_store: Optional event store for SSE polling/resumability
        retry_interval: Optional retry interval in milliseconds for SSE polling.
            Controls how quickly clients should reconnect after server-initiated
            disconnections. Requires event_store to be set. Defaults to SDK default.
        auth: Optional authentication provider (AuthProvider)
        json_response: Whether to use JSON response format
        stateless_http: Whether to use stateless mode (new transport per request)
        debug: Whether to enable debug mode
        routes: Optional list of custom routes
        middleware: Optional list of middleware

    Returns:
        A Starlette application with StreamableHTTP support
    """
    server_routes: list[BaseRoute] = []
    server_middleware: list[Middleware] = []
    # Create session manager using the provided event store
    session_manager = StreamableHTTPSessionManager(
        app=server._mcp_server,
        event_store=event_store,
        retry_interval=retry_interval,
        json_response=json_response,
        stateless=stateless_http,
    )
    # Create the ASGI app wrapper
    streamable_http_app = StreamableHTTPASGIApp(session_manager)
    # Add StreamableHTTP routes with or without auth
    if auth:
        # Get auth middleware from the provider
        auth_middleware = auth.get_middleware()
        # Get auth provider's own routes (OAuth endpoints, metadata, etc)
        auth_routes = auth.get_routes(mcp_path=streamable_http_path)
        server_routes.extend(auth_routes)
        server_middleware.extend(auth_middleware)
        # Build RFC 9728-compliant metadata URL
        resource_url = auth._get_resource_url(streamable_http_path)
        resource_metadata_url = (
            build_resource_metadata_url(resource_url) if resource_url else None
        )
        # Create protected HTTP endpoint route
        # Stateless servers have no session tracking, so GET SSE streams
        # (for server-initiated notifications) serve no purpose.
        http_methods = (
            ["POST", "DELETE"] if stateless_http else ["GET", "POST", "DELETE"]
        )
        server_routes.append(
            Route(
                streamable_http_path,
                endpoint=RequireAuthMiddleware(
                    streamable_http_app,
                    auth.required_scopes,
                    resource_metadata_url,
                ),
                methods=http_methods,
            )
        )
    else:
        # No auth required; None lets Starlette use its default method set.
        http_methods = ["POST", "DELETE"] if stateless_http else None
        server_routes.append(
            Route(
                streamable_http_path,
                endpoint=streamable_http_app,
                methods=http_methods,
            )
        )
    # Add custom routes with lowest precedence
    if routes:
        server_routes.extend(routes)
    server_routes.extend(server._get_additional_http_routes())
    # Add middleware
    if middleware:
        server_middleware.extend(middleware)
    # Create a lifespan manager to start and stop the session manager
    @asynccontextmanager
    async def lifespan(app: Starlette) -> AsyncGenerator[None, None]:
        async with server._lifespan_manager(), session_manager.run():
            yield
    # Create and return the app with lifespan
    app = create_base_app(
        routes=server_routes,
        middleware=server_middleware,
        debug=debug,
        lifespan=lifespan,
    )
    # Store the FastMCP server instance on the Starlette app state, so
    # RequestContextMiddleware and downstream code can find it.
    app.state.fastmcp_server = server
    app.state.path = streamable_http_path
    app.state.transport_type = "streamable-http"
    return app
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/http.py",
"license": "Apache License 2.0",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/utilities/tests.py | from __future__ import annotations
import copy
import multiprocessing
import socket
import time
from collections.abc import AsyncGenerator, Callable, Generator
from contextlib import asynccontextmanager, contextmanager, suppress
from typing import TYPE_CHECKING, Any, Literal
from urllib.parse import parse_qs, urlparse
import httpx
import uvicorn
from fastmcp import settings
from fastmcp.client.auth.oauth import OAuth
from fastmcp.utilities.http import find_available_port
if TYPE_CHECKING:
from fastmcp.server.server import FastMCP
@contextmanager
def temporary_settings(**kwargs: Any):
    """
    Temporarily override FastMCP setting values, restoring them on exit.

    Args:
        **kwargs: The settings to override, including nested settings.

    Example:
        Temporarily override a setting:
        ```python
        import fastmcp
        from fastmcp.utilities.tests import temporary_settings

        with temporary_settings(log_level='DEBUG'):
            assert fastmcp.settings.log_level == 'DEBUG'
        assert fastmcp.settings.log_level == 'INFO'
        ```
    """
    # Snapshot the full settings object so originals survive nested overrides.
    snapshot = copy.deepcopy(settings)
    try:
        for name, value in kwargs.items():
            settings.set_setting(name, value)
        yield
    finally:
        # Restore only the settings that were overridden.
        for name in kwargs:
            settings.set_setting(name, snapshot.get_setting(name))
def _run_server(mcp_server: FastMCP, transport: Literal["sse"], port: int) -> None:
    """Build the ASGI app in the child process and serve it with uvicorn."""
    # Some Starlette apps are not pickleable, so the app must be created here,
    # inside the child process, based on the requested transport.
    if transport != "sse":
        raise ValueError(f"Invalid transport: {transport}")
    app = mcp_server.http_app(transport="sse")
    config = uvicorn.Config(
        app=app,
        host="127.0.0.1",
        port=port,
        log_level="error",
        ws="websockets-sansio",
    )
    uvicorn.Server(config=config).run()
@contextmanager
def run_server_in_process(
    server_fn: Callable[..., None],
    *args: Any,
    provide_host_and_port: bool = True,
    host: str = "127.0.0.1",
    port: int | None = None,
    **kwargs: Any,
) -> Generator[str, None, None]:
    """
    Context manager that runs a FastMCP server in a separate process and
    returns the server URL. When the context manager is exited, the server process is killed.

    Args:
        server_fn: The function that runs a FastMCP server. FastMCP servers are
            not pickleable, so we need a function that creates and runs one.
        *args: Arguments to pass to the server function.
        provide_host_and_port: Whether to provide the host and port to the server function as kwargs.
        host: Host to bind the server to (default: "127.0.0.1").
        port: Port to bind the server to (default: find available port).
        **kwargs: Keyword arguments to pass to the server function.

    Returns:
        The server URL.
    """
    # Use provided port or find an available one
    if port is None:
        port = find_available_port()
    if provide_host_and_port:
        kwargs |= {"host": host, "port": port}
    proc = multiprocessing.Process(
        target=server_fn, args=args, kwargs=kwargs, daemon=True
    )
    proc.start()
    # Fix: cleanup must run in a finally block — previously, an exception in
    # the with-body (or during startup polling) leaked the server process.
    try:
        # Wait for the server to accept connections, with increasing backoff.
        max_attempts = 30
        attempt = 0
        while attempt < max_attempts and proc.is_alive():
            try:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                    s.connect((host, port))
                break
            except ConnectionRefusedError:
                if attempt < 5:
                    time.sleep(0.05)
                elif attempt < 15:
                    time.sleep(0.1)
                else:
                    time.sleep(0.2)
            attempt += 1
        else:
            # Loop exhausted (or process died) without a successful connect.
            raise RuntimeError(f"Server failed to start after {max_attempts} attempts")
        yield f"http://{host}:{port}"
    finally:
        proc.terminate()
        proc.join(timeout=5)
        if proc.is_alive():
            # If it's still alive, then force kill it
            proc.kill()
            proc.join(timeout=2)
        if proc.is_alive():
            raise RuntimeError("Server process failed to terminate even after kill")
@asynccontextmanager
async def run_server_async(
    server: FastMCP,
    port: int | None = None,
    transport: Literal["http", "streamable-http", "sse"] = "http",
    path: str = "/mcp",
    host: str = "127.0.0.1",
) -> AsyncGenerator[str, None]:
    """
    Start a FastMCP server as an asyncio task for in-process async testing.

    This is the recommended way to test FastMCP servers: the server runs as
    an async task in the same process, avoiding subprocess coordination,
    sleeps, and cleanup issues.

    Args:
        server: FastMCP server instance
        port: Port to bind to (default: find available port)
        transport: Transport type ("http", "streamable-http", or "sse")
        path: URL path for the server (default: "/mcp")
        host: Host to bind to (default: "127.0.0.1")

    Yields:
        Server URL string

    Example:
        ```python
        import pytest
        from fastmcp import FastMCP, Client
        from fastmcp.client.transports import StreamableHttpTransport
        from fastmcp.utilities.tests import run_server_async

        @pytest.fixture
        async def server():
            mcp = FastMCP("test")

            @mcp.tool()
            def greet(name: str) -> str:
                return f"Hello, {name}!"

            async with run_server_async(mcp) as url:
                yield url

        async def test_greet(server: str):
            async with Client(StreamableHttpTransport(server)) as client:
                result = await client.call_tool("greet", {"name": "World"})
                assert result.content[0].text == "Hello, World!"
        ```
    """
    import asyncio
    if port is None:
        port = find_available_port()
    # Brief pause in case the chosen port was only just released.
    await asyncio.sleep(0.01)
    # Run the server in the background on this event loop.
    server_task = asyncio.create_task(
        server.run_http_async(
            host=host,
            port=port,
            transport=transport,
            path=path,
            show_banner=False,
        )
    )
    # Block until the server's lifespan signals readiness.
    await server._started.wait()
    # uvicorn may bind the port slightly after the lifespan reports ready.
    await asyncio.sleep(0.1)
    try:
        yield f"http://{host}:{port}{path}"
    finally:
        # Cancel with a timeout so teardown cannot hang (notably on Windows).
        server_task.cancel()
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            await asyncio.wait_for(server_task, timeout=2.0)
class HeadlessOAuth(OAuth):
    """
    OAuth provider that completes the authorization flow without a browser.

    Instead of opening a browser and running a local callback server, the
    flow is driven programmatically with plain HTTP requests, which makes it
    suitable for automated test suites.
    """

    def __init__(self, mcp_url: str, **kwargs):
        """Set up response tracking before delegating to the base OAuth init."""
        self._stored_response = None
        super().__init__(mcp_url, **kwargs)

    async def redirect_handler(self, authorization_url: str) -> None:
        """Fetch the authorization URL directly and remember the raw response."""
        async with httpx.AsyncClient() as http:
            self._stored_response = await http.get(
                authorization_url, follow_redirects=False
            )

    async def callback_handler(self) -> tuple[str, str | None]:
        """Extract ``(auth_code, state)`` from the previously stored redirect."""
        stored = self._stored_response
        if not stored:
            raise RuntimeError(
                "No authorization response stored. redirect_handler must be called first."
            )
        if stored.status_code != 302:
            raise RuntimeError(f"Authorization failed: {stored.status_code}")
        # The auth server answers with a redirect whose query string carries
        # either the authorization code or an error description.
        params = parse_qs(urlparse(stored.headers["location"]).query)
        if "error" in params:
            error = params["error"][0]
            error_desc = params.get("error_description", ["Unknown error"])[0]
            raise RuntimeError(f"OAuth authorization failed: {error} - {error_desc}")
        return params["code"][0], params.get("state", [None])[0]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/tests.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/utilities/test_tests.py | from unittest.mock import AsyncMock, patch
import fastmcp
from fastmcp import FastMCP
from fastmcp.utilities.tests import temporary_settings
class TestTemporarySettings:
    """Tests for the ``temporary_settings`` context manager."""

    def test_temporary_settings(self):
        """Overrides apply inside the context and are reverted on exit."""
        # Baseline value before entering the context manager.
        assert fastmcp.settings.log_level == "DEBUG"
        with temporary_settings(log_level="ERROR"):
            # The override is visible while the context is active.
            assert fastmcp.settings.log_level == "ERROR"
        # The pre-existing value must be restored once the context closes.
        assert fastmcp.settings.log_level == "DEBUG"
class TestTransportSetting:
    """Tests for the global ``transport`` setting and how ``run_async`` uses it."""

    def test_transport_default_is_stdio(self):
        """The transport setting defaults to stdio."""
        assert fastmcp.settings.transport == "stdio"

    def test_transport_setting_can_be_changed(self):
        """``temporary_settings`` can override the transport, then restores it."""
        with temporary_settings(transport="http"):
            assert fastmcp.settings.transport == "http"
        assert fastmcp.settings.transport == "stdio"

    async def test_run_async_uses_transport_setting(self):
        """``run_async()`` with no argument falls back to the configured transport."""
        mcp = FastMCP("test")
        with temporary_settings(transport="http"):
            # Mock out the HTTP runner so no server is actually started.
            with patch.object(
                mcp, "run_http_async", new_callable=AsyncMock
            ) as mock_http:
                await mcp.run_async()
            mock_http.assert_called_once()

    async def test_run_async_explicit_transport_overrides_setting(self):
        """An explicit ``transport`` argument wins over the configured setting."""
        mcp = FastMCP("test")
        with temporary_settings(transport="http"):
            with patch.object(
                mcp, "run_stdio_async", new_callable=AsyncMock
            ) as mock_stdio:
                await mcp.run_async(transport="stdio")
            mock_stdio.assert_called_once()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/test_tests.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/utilities/json_schema.py | from __future__ import annotations
from collections import defaultdict
from typing import Any
from jsonref import JsonRefError, replace_refs
def _defs_have_cycles(defs: dict[str, Any]) -> bool:
"""Check whether any definitions in ``$defs`` form a reference cycle.
A cycle means a definition directly or transitively references itself
(e.g. Node β children β Node, or A β B β A). ``jsonref.replace_refs``
silently produces Python-level object cycles for these, which Pydantic's
serializer rejects.
"""
if not defs:
return False
# Build adjacency: def_name -> set of def_names it references.
edges: dict[str, set[str]] = defaultdict(set)
def _collect_refs(obj: Any, source: str) -> None:
if isinstance(obj, dict):
ref = obj.get("$ref")
if isinstance(ref, str) and ref.startswith("#/$defs/"):
edges[source].add(ref.split("/")[-1])
for v in obj.values():
_collect_refs(v, source)
elif isinstance(obj, list):
for item in obj:
_collect_refs(item, source)
for name, definition in defs.items():
_collect_refs(definition, name)
# DFS cycle detection.
UNVISITED, IN_STACK, DONE = 0, 1, 2
state: dict[str, int] = defaultdict(int)
def _has_cycle(node: str) -> bool:
state[node] = IN_STACK
for neighbor in edges.get(node, ()):
if neighbor not in defs:
continue
if state[neighbor] == IN_STACK:
return True
if state[neighbor] == UNVISITED and _has_cycle(neighbor):
return True
state[node] = DONE
return False
return any(state[name] == UNVISITED and _has_cycle(name) for name in defs)
def dereference_refs(schema: dict[str, Any]) -> dict[str, Any]:
    """Resolve local ``$ref`` references by inlining their ``$defs`` targets.

    Sibling keywords that Pydantic emits next to a ``$ref`` (``description``,
    ``default``, ``examples``) are merged back into the inlined definition,
    and ``$defs`` is dropped once nothing references it. Some MCP clients
    (e.g. VS Code Copilot) cannot handle ``$ref`` in tool input schemas,
    which is why full inlining is attempted first.

    Circular/self-referencing definitions cannot be fully inlined, so those
    schemas fall back to resolving only the root-level ``$ref`` while keeping
    ``$defs`` for nested references.

    Args:
        schema: JSON schema dict that may contain $ref references

    Returns:
        A new schema dict with $ref resolved where possible and $defs removed
        when no longer needed

    Example:
        >>> schema = {
        ...     "$defs": {"Category": {"enum": ["a", "b"], "type": "string"}},
        ...     "properties": {"cat": {"$ref": "#/$defs/Category", "default": "a"}}
        ... }
        >>> resolved = dereference_refs(schema)
        >>> # Result: {"properties": {"cat": {"enum": ["a", "b"], "type": "string", "default": "a"}}}
    """
    local_defs = schema.get("$defs", {})
    # Cycles are detected up front: replace_refs would not raise for them,
    # it would silently build object-identity cycles that Pydantic's
    # model_dump rejects ("Circular reference detected").
    if _defs_have_cycles(local_defs):
        return resolve_root_ref(schema)
    try:
        # proxies=False yields plain dicts; lazy_load=False resolves eagerly.
        inlined = replace_refs(schema, proxies=False, lazy_load=False)
        # Restore description/default/examples siblings lost during inlining.
        restored = _merge_ref_siblings(schema, inlined, local_defs)
        assert isinstance(restored, dict)  # top-level schema is always a dict
        # Every reference has been inlined, so $defs is dead weight now.
        if "$defs" in restored:
            restored = {key: value for key, value in restored.items() if key != "$defs"}
        return restored
    except JsonRefError:
        # Unresolvable self-referencing schemas: fall back to root-only
        # resolution so the result still satisfies the MCP spec.
        return resolve_root_ref(schema)
def _merge_ref_siblings(
original: Any,
dereferenced: Any,
defs: dict[str, Any],
visited: set[str] | None = None,
) -> Any:
"""Merge sibling keywords from original $ref nodes into dereferenced schema.
When jsonref resolves $ref, it replaces the entire node with the referenced
definition, losing any sibling keywords like description, default, or examples.
This function walks both trees in parallel and merges those siblings back.
Args:
original: The original schema with $ref and potential siblings
dereferenced: The schema after jsonref processing
defs: The $defs from the original schema, for looking up referenced definitions
visited: Set of definition names already being processed (prevents cycles)
Returns:
The dereferenced schema with sibling keywords restored
"""
if visited is None:
visited = set()
if isinstance(original, dict) and isinstance(dereferenced, dict):
# Check if original had a $ref
if "$ref" in original:
ref = original["$ref"]
siblings = {k: v for k, v in original.items() if k not in ("$ref", "$defs")}
# Look up the referenced definition to process its nested siblings
if isinstance(ref, str) and ref.startswith("#/$defs/"):
def_name = ref.split("/")[-1]
# Prevent infinite recursion on circular references
if def_name in defs and def_name not in visited:
# Recursively process the definition's content for nested siblings
dereferenced = _merge_ref_siblings(
defs[def_name], dereferenced, defs, visited | {def_name}
)
if siblings:
# Merge local siblings, which take precedence
merged = dict(dereferenced)
merged.update(siblings)
return merged
return dereferenced
# Recurse into nested structures
result = {}
for key, value in dereferenced.items():
if key in original:
result[key] = _merge_ref_siblings(original[key], value, defs, visited)
else:
result[key] = value
return result
elif isinstance(original, list) and isinstance(dereferenced, list):
# Process list items in parallel
min_len = min(len(original), len(dereferenced))
return [
_merge_ref_siblings(o, d, defs, visited)
for o, d in zip(original[:min_len], dereferenced[:min_len], strict=False)
] + dereferenced[min_len:]
return dereferenced
def resolve_root_ref(schema: dict[str, Any]) -> dict[str, Any]:
    """Inline a root-level ``$ref`` so the schema root carries ``type``.

    The MCP specification requires ``"type": "object"`` at the root of an
    outputSchema. Pydantic emits ``{"$ref": "#/$defs/X", "$defs": ...}`` for
    self-referential models, so the referenced definition is copied to the
    root while ``$defs`` is kept for any nested references.

    Args:
        schema: JSON schema dict that may have $ref at root level

    Returns:
        A new schema dict with root-level $ref resolved, or the original
        schema unchanged when no resolution is needed.

    Example:
        >>> schema = {
        ...     "$defs": {"Node": {"type": "object", "properties": {...}}},
        ...     "$ref": "#/$defs/Node"
        ... }
        >>> resolved = resolve_root_ref(schema)
        >>> # Result: {"type": "object", "properties": {...}, "$defs": {...}}
    """
    # Resolution applies only when the root is a bare $ref: it must carry a
    # $defs section and must not already declare an explicit type.
    if "$ref" not in schema or "$defs" not in schema or "type" in schema:
        return schema
    ref = schema["$ref"]
    # Only local "#/$defs/<name>" references are handled.
    if not (isinstance(ref, str) and ref.startswith("#/$defs/")):
        return schema
    definitions = schema["$defs"]
    target = ref.split("/")[-1]
    if target not in definitions:
        return schema
    # Copy the definition to the root; keep $defs alive because nested
    # fields may still reference other definitions.
    return {**definitions[target], "$defs": definitions}
def _prune_param(schema: dict[str, Any], param: str) -> dict[str, Any]:
"""Return a new schema with *param* removed from `properties`, `required`,
and (if no longer referenced) `$defs`.
"""
# ββ 1. drop from properties/required ββββββββββββββββββββββββββββββ
props = schema.get("properties", {})
removed = props.pop(param, None)
if removed is None: # nothing to do
return schema
# Keep empty properties object rather than removing it entirely
schema["properties"] = props
if param in schema.get("required", []):
schema["required"].remove(param)
if not schema["required"]:
schema.pop("required")
return schema
def _single_pass_optimize(
    schema: dict[str, Any],
    prune_titles: bool = False,
    prune_additional_properties: bool = False,
    prune_defs: bool = True,
) -> dict[str, Any]:
    """
    Optimize JSON schemas in a single traversal for better performance.

    This function combines three schema cleanup operations that would normally require
    separate tree traversals:

    1. **Remove unused definitions** (prune_defs): Finds and removes `$defs` entries
       that aren't referenced anywhere in the schema, reducing schema size.
    2. **Remove titles** (prune_titles): Strips `title` fields throughout the schema
       to reduce verbosity while preserving functional information.
    3. **Remove restrictive additionalProperties** (prune_additional_properties):
       Removes `"additionalProperties": false` constraints to make schemas more flexible.

    **Performance Benefits:**
    - Single tree traversal instead of multiple passes (2-3x faster)
    - Early termination prevents runaway recursion on deeply nested schemas

    **Algorithm Overview:**
    1. Traverse main schema, collecting $ref references and applying cleanups
    2. Traverse $defs section to map inter-definition dependencies
    3. Remove unused definitions based on reference analysis

    Note:
        Despite returning the schema, this function mutates its input **in
        place** (``title`` / ``additionalProperties`` nodes and unused
        ``$defs`` entries are ``pop``ed directly off the original dicts).
        Callers that need to keep the original must pass a deep copy.

    Args:
        schema: JSON schema dict to optimize (mutated in place)
        prune_titles: Remove title fields for cleaner output
        prune_additional_properties: Remove "additionalProperties": false constraints
        prune_defs: Remove unused $defs entries to reduce size

    Returns:
        The same (now optimized) schema dict that was passed in

    Example:
        >>> schema = {
        ...     "type": "object",
        ...     "title": "MySchema",
        ...     "additionalProperties": False,
        ...     "$defs": {"UnusedDef": {"type": "string"}}
        ... }
        >>> result = _single_pass_optimize(schema, prune_titles=True, prune_defs=True)
        >>> # Result: {"type": "object", "additionalProperties": False}
    """
    if not (prune_defs or prune_titles or prune_additional_properties):
        return schema  # Nothing to do

    # Phase 1: Collect references and apply simple cleanups
    # Track which $defs are referenced from the main schema and from other $defs
    root_refs: set[str] = set()  # $defs referenced directly from main schema
    def_dependencies: defaultdict[str, list[str]] = defaultdict(
        list
    )  # def A references def B
    defs = schema.get("$defs")

    def traverse_and_clean(
        node: object,
        current_def_name: str | None = None,
        skip_defs_section: bool = False,
        depth: int = 0,
    ) -> None:
        """Traverse schema tree, collecting $ref info and applying cleanups."""
        if depth > 50:  # Prevent infinite recursion
            return
        if isinstance(node, dict):
            # Collect $ref references for unused definition removal
            if prune_defs:
                ref = node.get("$ref")  # type: ignore
                if isinstance(ref, str) and ref.startswith("#/$defs/"):
                    referenced_def = ref.split("/")[-1]
                    if current_def_name:
                        # We're inside a $def, so this is a def->def reference
                        def_dependencies[referenced_def].append(current_def_name)
                    else:
                        # We're in the main schema, so this is a root reference
                        root_refs.add(referenced_def)
            # Apply cleanups
            # Only remove "title" if it's a schema metadata field.
            # Schema objects have keywords like "type", "properties", "$ref", etc.
            # If we see these, then "title" is metadata, not a property name
            # (a property *named* "title" lives inside a "properties" map and
            # is visited as its own node, so it is not affected here).
            if prune_titles and "title" in node:
                # Check if this looks like a schema node
                if any(
                    k in node
                    for k in [
                        "type",
                        "properties",
                        "$ref",
                        "items",
                        "allOf",
                        "oneOf",
                        "anyOf",
                        "required",
                    ]
                ):
                    node.pop("title")  # type: ignore
            if (
                prune_additional_properties
                and node.get("additionalProperties") is False  # type: ignore
            ):
                node.pop("additionalProperties")  # type: ignore
            # Recursive traversal
            for key, value in node.items():
                if skip_defs_section and key == "$defs":
                    continue  # Skip $defs during main schema traversal
                # Handle schema composition keywords with special traversal
                if key in ["allOf", "oneOf", "anyOf"] and isinstance(value, list):
                    for item in value:
                        traverse_and_clean(item, current_def_name, depth=depth + 1)
                else:
                    traverse_and_clean(value, current_def_name, depth=depth + 1)
        elif isinstance(node, list):
            for item in node:
                traverse_and_clean(item, current_def_name, depth=depth + 1)

    # Phase 2: Traverse main schema (excluding $defs section)
    traverse_and_clean(schema, skip_defs_section=True)

    # Phase 3: Traverse $defs to find inter-definition references
    if prune_defs and defs:
        for def_name, def_schema in defs.items():
            traverse_and_clean(def_schema, current_def_name=def_name)

        # Phase 4: Remove unused definitions
        def is_def_used(def_name: str, visiting: set[str] | None = None) -> bool:
            """Check if a definition is used, handling circular references."""
            if def_name in root_refs:
                return True  # Used directly from main schema
            # Check if any definition that references this one is itself used
            referencing_defs = def_dependencies.get(def_name, [])
            if referencing_defs:
                if visiting is None:
                    visiting = set()
                # Avoid infinite recursion on circular references
                if def_name in visiting:
                    return False
                visiting = visiting | {def_name}
                # If any referencing def is used, then this def is used
                for referencing_def in referencing_defs:
                    if referencing_def not in visiting and is_def_used(
                        referencing_def, visiting
                    ):
                        return True
            return False

        # Remove unused definitions
        for def_name in list(defs.keys()):
            if not is_def_used(def_name):
                defs.pop(def_name)

        # Clean up empty $defs section
        if not defs:
            schema.pop("$defs", None)

    return schema
def compress_schema(
    schema: dict[str, Any],
    prune_params: list[str] | None = None,
    prune_additional_properties: bool = False,
    prune_titles: bool = False,
    dereference: bool = False,
) -> dict[str, Any]:
    """
    Compress and optimize a JSON schema for MCP compatibility.

    Args:
        schema: The schema to compress
        prune_params: List of parameter names to remove from properties
        prune_additional_properties: Whether to remove additionalProperties: false.
            Defaults to False to maintain MCP client compatibility, as some clients
            (e.g., Claude) require additionalProperties: false for strict validation.
        prune_titles: Whether to remove title fields from the schema
        dereference: Whether to dereference $ref by inlining definitions.
            Defaults to False; dereferencing is typically handled by
            middleware at serve-time instead.
    """
    if dereference:
        schema = dereference_refs(schema)
    # MCP requires "type": "object" at the schema root, so a root-level $ref
    # is always inlined regardless of the dereference flag.
    schema = resolve_root_ref(schema)
    # Drop any explicitly excluded parameters.
    for name in prune_params or []:
        schema = _prune_param(schema, param=name)
    # One combined traversal handles titles, additionalProperties, and
    # (always) the unused $defs entries left behind by parameter removal.
    return _single_pass_optimize(
        schema,
        prune_titles=prune_titles,
        prune_additional_properties=prune_additional_properties,
        prune_defs=True,
    )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/json_schema.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/utilities/test_json_schema.py | from fastmcp.utilities.json_schema import (
_prune_param,
compress_schema,
dereference_refs,
resolve_root_ref,
)
class TestPruneParam:
    """Tests for the _prune_param function."""

    def test_nonexistent(self):
        """Test pruning a parameter that doesn't exist."""
        schema = {"properties": {"foo": {"type": "string"}}}
        result = _prune_param(schema, "bar")
        assert result == schema  # Schema should be unchanged

    def test_exists(self):
        """Test pruning a parameter that exists."""
        schema = {"properties": {"foo": {"type": "string"}, "bar": {"type": "integer"}}}
        result = _prune_param(schema, "bar")
        assert result["properties"] == {"foo": {"type": "string"}}

    def test_last_property(self):
        """Test pruning the only/last parameter, should leave empty properties object."""
        schema = {"properties": {"foo": {"type": "string"}}}
        result = _prune_param(schema, "foo")
        # An empty properties object is kept (not deleted) so the schema
        # still describes an object with no parameters.
        assert "properties" in result
        assert result["properties"] == {}

    def test_from_required(self):
        """Test pruning a parameter that's in the required list."""
        schema = {
            "properties": {"foo": {"type": "string"}, "bar": {"type": "integer"}},
            "required": ["foo", "bar"],
        }
        result = _prune_param(schema, "bar")
        assert result["required"] == ["foo"]

    def test_last_required(self):
        """Test pruning the last required parameter, should remove required field."""
        schema = {
            "properties": {"foo": {"type": "string"}, "bar": {"type": "integer"}},
            "required": ["foo"],
        }
        result = _prune_param(schema, "foo")
        # An empty required list is invalid JSON Schema, so the key is dropped.
        assert "required" not in result
class TestDereferenceRefs:
    """Tests for the dereference_refs function.

    Covers full inlining of local ``#/$defs`` references, the circular-schema
    fallback, and preservation of sibling keywords that Pydantic places next
    to ``$ref`` (description, default, etc.).
    """

    def test_dereferences_simple_ref(self):
        """Test that simple $ref is dereferenced."""
        schema = {
            "properties": {
                "foo": {"$ref": "#/$defs/foo_def"},
            },
            "$defs": {
                "foo_def": {"type": "string"},
            },
        }
        result = dereference_refs(schema)
        # $ref should be inlined
        assert result["properties"]["foo"] == {"type": "string"}
        # $defs should be removed
        assert "$defs" not in result

    def test_dereferences_nested_refs(self):
        """Test that nested $refs are dereferenced."""
        schema = {
            "properties": {
                "foo": {"$ref": "#/$defs/foo_def"},
            },
            "$defs": {
                # foo_def itself contains a second-level reference.
                "foo_def": {
                    "type": "object",
                    "properties": {"nested": {"$ref": "#/$defs/nested_def"}},
                },
                "nested_def": {"type": "string"},
            },
        }
        result = dereference_refs(schema)
        # All refs should be inlined
        assert result["properties"]["foo"]["properties"]["nested"] == {"type": "string"}
        # $defs should be removed
        assert "$defs" not in result

    def test_falls_back_for_circular_refs(self):
        """Test that circular references fall back to resolve_root_ref."""
        # Node references itself through its "children" items.
        schema = {
            "$defs": {
                "Node": {
                    "type": "object",
                    "properties": {
                        "children": {
                            "type": "array",
                            "items": {"$ref": "#/$defs/Node"},
                        }
                    },
                }
            },
            "$ref": "#/$defs/Node",
        }
        result = dereference_refs(schema)
        # Should fall back to resolve_root_ref behavior
        # Root should be resolved but nested refs preserved
        assert result.get("type") == "object"
        assert "$defs" in result  # $defs preserved for circular refs

    def test_preserves_sibling_keywords(self):
        """Test that sibling keywords (default, description) are preserved.

        Pydantic places description, default, examples as siblings to $ref.
        These should not be lost during dereferencing.
        """
        schema = {
            "$defs": {
                "Status": {"type": "string", "enum": ["active", "inactive"]},
            },
            "properties": {
                "status": {
                    "$ref": "#/$defs/Status",
                    "default": "active",
                    "description": "The user status",
                },
            },
            "type": "object",
        }
        result = dereference_refs(schema)
        # $ref should be inlined with siblings preserved
        status = result["properties"]["status"]
        assert status["type"] == "string"
        assert status["enum"] == ["active", "inactive"]
        assert status["default"] == "active"
        assert status["description"] == "The user status"
        # $defs should be removed
        assert "$defs" not in result

    def test_preserves_siblings_in_lists(self):
        """Test that siblings are preserved for $refs inside lists (allOf, anyOf, etc)."""
        schema = {
            "$defs": {
                "StringType": {"type": "string"},
                "IntType": {"type": "integer"},
            },
            "properties": {
                "field": {
                    "anyOf": [
                        {"$ref": "#/$defs/StringType", "description": "As string"},
                        {"$ref": "#/$defs/IntType", "description": "As integer"},
                    ]
                },
            },
        }
        result = dereference_refs(schema)
        # Both items in anyOf should have their siblings preserved
        any_of = result["properties"]["field"]["anyOf"]
        assert any_of[0]["type"] == "string"
        assert any_of[0]["description"] == "As string"
        assert any_of[1]["type"] == "integer"
        assert any_of[1]["description"] == "As integer"
        assert "$defs" not in result

    def test_preserves_nested_siblings(self):
        """Test that siblings on nested $refs are preserved."""
        schema = {
            "$defs": {
                # Address contains a $ref with a "default" sibling of its own.
                "Address": {
                    "type": "object",
                    "properties": {
                        "country": {"$ref": "#/$defs/Country", "default": "US"},
                    },
                },
                "Country": {"type": "string", "enum": ["US", "UK", "CA"]},
            },
            "properties": {
                "home_address": {"$ref": "#/$defs/Address"},
            },
        }
        result = dereference_refs(schema)
        # The nested $ref's sibling (default) should be preserved
        country = result["properties"]["home_address"]["properties"]["country"]
        assert country["type"] == "string"
        assert country["enum"] == ["US", "UK", "CA"]
        assert country["default"] == "US"
        assert "$defs" not in result
class TestCompressSchema:
    """Tests for the compress_schema function."""

    def test_preserves_refs_by_default(self):
        """Test that compress_schema preserves $refs by default."""
        schema = {
            "properties": {
                "foo": {"$ref": "#/$defs/foo_def"},
            },
            "$defs": {
                "foo_def": {"type": "string"},
            },
        }
        result = compress_schema(schema)
        # $ref should be preserved (dereferencing is handled by middleware)
        assert result["properties"]["foo"] == {"$ref": "#/$defs/foo_def"}
        assert "$defs" in result

    def test_prune_params(self):
        """Test pruning parameters with compress_schema."""
        schema = {
            "properties": {
                "foo": {"type": "string"},
                "bar": {"type": "integer"},
                "baz": {"type": "boolean"},
            },
            "required": ["foo", "bar"],
        }
        # baz is pruned too but was never required, so only foo leaves "required".
        result = compress_schema(schema, prune_params=["foo", "baz"])
        assert result["properties"] == {"bar": {"type": "integer"}}
        assert result["required"] == ["bar"]

    def test_pruning_additional_properties(self):
        """Test pruning additionalProperties when explicitly enabled."""
        schema = {
            "type": "object",
            "properties": {"foo": {"type": "string"}},
            "additionalProperties": False,
        }
        # Must explicitly enable pruning now (default changed for MCP compatibility)
        result = compress_schema(schema, prune_additional_properties=True)
        assert "additionalProperties" not in result

    def test_disable_pruning_additional_properties(self):
        """Test disabling pruning of additionalProperties."""
        schema = {
            "type": "object",
            "properties": {"foo": {"type": "string"}},
            "additionalProperties": False,
        }
        result = compress_schema(schema, prune_additional_properties=False)
        assert "additionalProperties" in result
        assert result["additionalProperties"] is False

    def test_combined_operations(self):
        """Test all pruning operations together."""
        schema = {
            "type": "object",
            "properties": {
                "keep": {"type": "string"},
                "remove": {"$ref": "#/$defs/remove_def"},
            },
            "required": ["keep", "remove"],
            "additionalProperties": False,
            "$defs": {
                "remove_def": {"type": "string"},
                "unused_def": {"type": "number"},
            },
        }
        result = compress_schema(
            schema, prune_params=["remove"], prune_additional_properties=True
        )
        # Check that parameter was removed
        assert "remove" not in result["properties"]
        # Check that required list was updated
        assert result["required"] == ["keep"]
        # All $defs entries are now unreferenced after pruning "remove", so they're cleaned up
        assert "$defs" not in result
        # Check that additionalProperties was removed
        assert "additionalProperties" not in result

    def test_prune_titles(self):
        """Test pruning title fields."""
        schema = {
            "title": "Root Schema",
            "type": "object",
            "properties": {
                "foo": {"title": "Foo Property", "type": "string"},
                "bar": {
                    "title": "Bar Property",
                    "type": "object",
                    "properties": {
                        "nested": {"title": "Nested Property", "type": "string"}
                    },
                },
            },
        }
        result = compress_schema(schema, prune_titles=True)
        # Titles must be stripped at every nesting level.
        assert "title" not in result
        assert "title" not in result["properties"]["foo"]
        assert "title" not in result["properties"]["bar"]
        assert "title" not in result["properties"]["bar"]["properties"]["nested"]

    def test_prune_nested_additional_properties(self):
        """Test pruning additionalProperties: false at all levels when explicitly enabled."""
        schema = {
            "type": "object",
            "additionalProperties": False,
            "properties": {
                "foo": {
                    "type": "object",
                    "additionalProperties": False,
                    "properties": {
                        "nested": {
                            "type": "object",
                            "additionalProperties": False,
                        }
                    },
                },
            },
        }
        result = compress_schema(schema, prune_additional_properties=True)
        assert "additionalProperties" not in result
        assert "additionalProperties" not in result["properties"]["foo"]
        assert (
            "additionalProperties"
            not in result["properties"]["foo"]["properties"]["nested"]
        )

    def test_title_pruning_preserves_parameter_named_title(self):
        """Test that a parameter named 'title' is not removed during title pruning.

        This is a critical edge case - we want to remove title metadata but preserve
        actual parameters that happen to be named 'title'.
        """
        from typing import Annotated

        from pydantic import Field, TypeAdapter

        def greet(
            name: Annotated[str, Field(description="The name to greet")],
            title: Annotated[str, Field(description="Optional title", default="")],
        ) -> str:
            """A greeting function."""
            return f"Hello {title} {name}"

        # TypeAdapter generates the schema exactly as FastMCP would for a tool.
        adapter = TypeAdapter(greet)
        schema = adapter.json_schema()
        # Compress with title pruning
        compressed = compress_schema(schema, prune_titles=True)
        # The 'title' parameter should be preserved
        assert "title" in compressed["properties"]
        assert compressed["properties"]["title"]["description"] == "Optional title"
        assert compressed["properties"]["title"]["default"] == ""
        # But title metadata should be removed
        assert "title" not in compressed["properties"]["name"]
        assert "title" not in compressed["properties"]["title"]

    def test_title_pruning_with_nested_properties(self):
        """Test that nested property structures are handled correctly."""
        schema = {
            "type": "object",
            "title": "OuterObject",
            "properties": {
                "title": {  # This is a property named "title", not metadata
                    "type": "object",
                    "title": "TitleObject",  # This is metadata
                    "properties": {
                        "subtitle": {
                            "type": "string",
                            "title": "SubTitle",  # This is metadata
                        }
                    },
                },
                "normal_field": {
                    "type": "string",
                    "title": "NormalField",  # This is metadata
                },
            },
        }
        compressed = compress_schema(schema, prune_titles=True)
        # Root title should be removed
        assert "title" not in compressed
        # The property named "title" should be preserved
        assert "title" in compressed["properties"]
        # But its metadata title should be removed
        assert "title" not in compressed["properties"]["title"]
        # Nested metadata titles should be removed
        assert (
            "title" not in compressed["properties"]["title"]["properties"]["subtitle"]
        )
        assert "title" not in compressed["properties"]["normal_field"]

    def test_mcp_client_compatibility_requires_additional_properties(self):
        """Test that compress_schema preserves additionalProperties: false for MCP clients.

        MCP clients like Claude require strict JSON schemas with additionalProperties: false.
        When tools use Pydantic models with extra="forbid", this constraint must be preserved.
        Without this, MCP clients return:
            "Invalid schema for function 'X': In context=('properties', 'Y'),
            'additionalProperties' is required to be supplied and to be false"

        See: https://github.com/PrefectHQ/fastmcp/issues/3008
        """
        # Schema representing a Pydantic model with extra="forbid"
        schema = {
            "type": "object",
            "properties": {
                "graph_table": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "columns": {"type": "array", "items": {"type": "string"}},
                    },
                    "required": ["name"],
                    "additionalProperties": False,
                }
            },
            "required": ["graph_table"],
            "additionalProperties": False,
        }
        # By default, compress_schema should NOT strip additionalProperties: false
        # This is the new expected behavior for MCP compatibility
        result = compress_schema(schema)
        # Root level should preserve additionalProperties: false
        assert result.get("additionalProperties") is False, (
            "Root additionalProperties: false was removed, breaking MCP compatibility"
        )
        # Nested object should also preserve additionalProperties: false
        graph_table = result["properties"]["graph_table"]
        assert graph_table.get("additionalProperties") is False, (
            "Nested additionalProperties: false was removed, breaking MCP compatibility"
        )
class TestCompressSchemaDereference:
    """Tests for the dereference parameter of compress_schema."""

    # Shared fixture schema: one property backed by a $defs entry.
    SCHEMA_WITH_REFS = {
        "properties": {
            "foo": {"$ref": "#/$defs/foo_def"},
        },
        "$defs": {
            "foo_def": {"type": "string"},
        },
    }

    def test_dereference_true_inlines_refs(self):
        """dereference=True inlines the $ref and drops the now-unused $defs."""
        result = compress_schema(self.SCHEMA_WITH_REFS, dereference=True)
        assert result["properties"]["foo"] == {"type": "string"}
        assert "$defs" not in result

    def test_dereference_false_preserves_refs(self):
        """dereference=False leaves both the $ref and $defs untouched."""
        result = compress_schema(self.SCHEMA_WITH_REFS, dereference=False)
        assert result["properties"]["foo"] == {"$ref": "#/$defs/foo_def"}
        assert "$defs" in result

    def test_other_optimizations_still_apply_without_dereference(self):
        """Param/title pruning still run even when dereferencing is disabled."""
        schema = {
            "properties": {
                "foo": {"$ref": "#/$defs/foo_def"},
                "bar": {"type": "integer", "title": "Bar"},
            },
            "$defs": {
                "foo_def": {"type": "string"},
            },
        }
        result = compress_schema(
            schema, dereference=False, prune_params=["bar"], prune_titles=True
        )
        assert "bar" not in result["properties"]
        assert "$ref" in result["properties"]["foo"]
        assert "$defs" in result
class TestResolveRootRef:
    """Tests for the resolve_root_ref function.

    This function resolves $ref at root level to meet MCP spec requirements.
    MCP specification requires outputSchema to have "type": "object" at root.
    """

    def test_resolves_simple_root_ref(self):
        """Test that simple $ref at root is resolved."""
        schema = {
            "$defs": {
                "Node": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                        "name": {"type": "string"},
                    },
                    "required": ["id"],
                }
            },
            "$ref": "#/$defs/Node",
        }
        result = resolve_root_ref(schema)
        # Should have type: object at root now
        assert result.get("type") == "object"
        assert "properties" in result
        assert "id" in result["properties"]
        assert "name" in result["properties"]
        # Should still have $defs for nested references
        assert "$defs" in result
        # Should NOT have $ref at root
        assert "$ref" not in result

    def test_resolves_self_referential_model(self):
        """Test resolving schema for self-referential models like Issue."""
        # This is the exact schema Pydantic generates for self-referential models
        schema = {
            "$defs": {
                "Issue": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                        "title": {"type": "string"},
                        "dependencies": {
                            "type": "array",
                            "items": {"$ref": "#/$defs/Issue"},
                        },
                        "dependents": {
                            "type": "array",
                            "items": {"$ref": "#/$defs/Issue"},
                        },
                    },
                    "required": ["id", "title"],
                }
            },
            "$ref": "#/$defs/Issue",
        }
        result = resolve_root_ref(schema)
        # Should have type: object at root
        assert result.get("type") == "object"
        assert "properties" in result
        assert "id" in result["properties"]
        assert "dependencies" in result["properties"]
        # Nested $refs should still point to $defs (only the ROOT ref is inlined)
        assert result["properties"]["dependencies"]["items"]["$ref"] == "#/$defs/Issue"
        # Should have $defs preserved for nested references
        assert "$defs" in result
        assert "Issue" in result["$defs"]

    def test_does_not_modify_schema_with_type_at_root(self):
        """Test that schemas already having type at root are not modified."""
        schema = {
            "type": "object",
            "properties": {"id": {"type": "string"}},
            "$defs": {"SomeType": {"type": "string"}},
            "$ref": "#/$defs/SomeType",  # This would be unusual but possible
        }
        result = resolve_root_ref(schema)
        # Schema should be unchanged (returned as-is)
        # NOTE: identity assertion — the same object must come back, not a copy.
        assert result is schema

    def test_does_not_modify_schema_without_ref(self):
        """Test that schemas without $ref are not modified."""
        schema = {
            "type": "object",
            "properties": {"id": {"type": "string"}},
        }
        result = resolve_root_ref(schema)
        assert result is schema

    def test_does_not_modify_schema_without_defs(self):
        """Test that schemas with $ref but without $defs are not modified."""
        schema = {
            "$ref": "#/$defs/Missing",
        }
        result = resolve_root_ref(schema)
        assert result is schema

    def test_does_not_modify_external_ref(self):
        """Test that external $refs (not pointing to $defs) are not resolved."""
        schema = {
            "$defs": {"Node": {"type": "object"}},
            "$ref": "https://example.com/schema.json#/definitions/Node",
        }
        result = resolve_root_ref(schema)
        assert result is schema

    def test_preserves_all_defs_for_nested_references(self):
        """Test that $defs are preserved even if multiple definitions exist."""
        schema = {
            "$defs": {
                "Node": {
                    "type": "object",
                    "properties": {
                        "child": {"$ref": "#/$defs/ChildNode"},
                    },
                },
                "ChildNode": {
                    "type": "object",
                    "properties": {"value": {"type": "string"}},
                },
            },
            "$ref": "#/$defs/Node",
        }
        result = resolve_root_ref(schema)
        # Both defs should be preserved
        assert "$defs" in result
        assert "Node" in result["$defs"]
        assert "ChildNode" in result["$defs"]

    def test_handles_missing_def_gracefully(self):
        """Test that missing definition in $defs doesn't cause error."""
        schema = {
            "$defs": {"OtherType": {"type": "string"}},
            "$ref": "#/$defs/Missing",
        }
        result = resolve_root_ref(schema)
        # Should return original schema unchanged
        assert result is schema
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/test_json_schema.py",
"license": "Apache License 2.0",
"lines": 552,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/utilities/test_typeadapter.py | """
This test file adapts tests from test_func_metadata.py which tested a custom implementation
that has been replaced by pydantic TypeAdapters.
The tests ensure our TypeAdapter-based approach covers all the edge cases the old custom
implementation handled. Since we're now using standard pydantic functionality, these tests
may be redundant with pydantic's own tests and could potentially be removed in the future.
"""
from typing import Annotated
import annotated_types
import pytest
from pydantic import BaseModel, Field
from fastmcp.utilities.json_schema import compress_schema
from fastmcp.utilities.types import get_cached_typeadapter
# Models must be defined at the module level for forward references to work
class SomeInputModelA(BaseModel):
    """Empty model used to exercise plain BaseModel parameters (incl. forward refs)."""

    pass
class SomeInputModelB(BaseModel):
    """Model exercising nested inner models, Field metadata, and a None-typed field."""

    class InnerModel(BaseModel):
        x: int

    how_many_shrimp: Annotated[int, Field(description="How many shrimp in the tank???")]
    ok: InnerModel
    y: None  # requires an explicit null in the input payload
# Define additional models needed in tests
class SomeComplexModel(BaseModel):
    """Model with a non-trivial field type (int-keyed dict) for conversion tests."""

    x: int
    y: dict[int, str]
class ClassWithMethods:
    """Holder of bound-method targets: plain, Field-annotated, and None-returning."""

    def do_something(self, x: int) -> int:
        """Identity method with a plain int annotation."""
        return x

    def do_something_annotated(
        self, x: Annotated[int, Field(description="A description")]
    ) -> int:
        """Identity method whose parameter carries pydantic Field metadata."""
        return x

    def do_something_return_none(self) -> None:
        """Method with no parameters (besides self) and a None return."""
        return None
def complex_arguments_fn(
    an_int: int,
    must_be_none: None,
    must_be_none_dumb_annotation: Annotated[None, "blah"],
    list_of_ints: list[int],
    # list[str] | str is an interesting case because if it comes in as JSON like
    # "[\"a\", \"b\"]" then it will be naively parsed as a string.
    list_str_or_str: list[str] | str,
    an_int_annotated_with_field: Annotated[
        int, Field(description="An int with a field")
    ],
    an_int_annotated_with_field_and_others: Annotated[
        int,
        str,  # Should be ignored, really
        Field(description="An int with a field"),
        annotated_types.Gt(1),
    ],
    an_int_annotated_with_junk: Annotated[
        int,
        "123",
        456,
    ],
    field_with_default_via_field_annotation_before_nondefault_arg: Annotated[
        int, Field(default=1)
    ],
    unannotated,
    my_model_a: SomeInputModelA,
    my_model_a_forward_ref: "SomeInputModelA",
    my_model_b: SomeInputModelB,
    an_int_annotated_with_field_default: Annotated[
        int,
        Field(default=1, description="An int with a field"),
    ],
    unannotated_with_default=5,
    my_model_a_with_default: SomeInputModelA = SomeInputModelA(),  # noqa: B008
    an_int_with_default: int = 1,
    must_be_none_with_default: None = None,
    an_int_with_equals_field: int = Field(1, ge=0),
    int_annotated_with_default: Annotated[int, Field(description="hey")] = 5,
) -> str:
    """Kitchen-sink signature covering annotation edge cases TypeAdapter must handle.

    The signature itself is the fixture: deliberately unannotated params, Field
    defaults, junk Annotated metadata, forward refs, and model-instance defaults.
    The body merely "uses" every parameter (to satisfy linters) and returns a
    constant so callers can assert the call succeeded.
    """
    _ = (
        an_int,
        must_be_none,
        must_be_none_dumb_annotation,
        list_of_ints,
        list_str_or_str,
        an_int_annotated_with_field,
        an_int_annotated_with_field_and_others,
        an_int_annotated_with_junk,
        field_with_default_via_field_annotation_before_nondefault_arg,
        unannotated,
        an_int_annotated_with_field_default,
        unannotated_with_default,
        my_model_a,
        my_model_a_forward_ref,
        my_model_b,
        my_model_a_with_default,
        an_int_with_default,
        must_be_none_with_default,
        an_int_with_equals_field,
        int_annotated_with_default,
    )
    return "ok!"
def get_simple_func_adapter():
    """Get a TypeAdapter for a simple function to avoid forward reference issues"""

    def simple_func(x: int, y: str = "default") -> str:
        return "{}-{}".format(x, y)

    return get_cached_typeadapter(simple_func)
async def test_complex_function_runtime_arg_validation_non_json():
    """Test that basic non-JSON arguments are validated correctly using a simpler function"""
    adapter = get_simple_func_adapter()

    # Only the required argument: the default for `y` is applied.
    # (validate_python returns the call's result directly — no result() needed.)
    assert adapter.validate_python({"x": 1}) == "1-default"

    # Both arguments supplied explicitly.
    assert adapter.validate_python({"x": 1, "y": "hello"}) == "1-hello"

    # A value that cannot be coerced to int must be rejected.
    with pytest.raises(Exception):
        adapter.validate_python({"x": "not an int"})
def test_missing_annotation():
    """Test that missing annotations don't cause errors"""

    def func_no_annotations(x, y):
        return x + y

    adapter = get_cached_typeadapter(func_no_annotations)
    # With no type info the strings pass through untouched and concatenate.
    assert adapter.validate_python({"x": "1", "y": "2"}) == "12"
def test_convert_str_to_complex_type():
    """Test that string arguments are converted to the complex type when valid"""

    def func_with_str_types(string: SomeComplexModel):
        return string

    payload = {"x": 1, "y": {1: "hello"}}

    # Sanity check: the payload validates against the model on its own.
    SomeComplexModel.model_validate(payload)

    # The adapter should perform the same conversion for the parameter.
    adapter = get_cached_typeadapter(func_with_str_types)
    model = adapter.validate_python({"string": payload})

    assert isinstance(model, SomeComplexModel)
    assert (model.x, model.y) == (1, {1: "hello"})
def test_skip_names():
    """Test that skipped parameters are not included in the schema"""

    def func_with_many_params(
        keep_this: int, skip_this: str, also_keep: float, also_skip: bool
    ):
        return keep_this, skip_this, also_keep, also_skip

    # Generate the schema, then prune the unwanted parameters.
    schema = get_cached_typeadapter(func_with_many_params).json_schema()
    pruned = compress_schema(schema, prune_params=["skip_this", "also_skip"])

    props = pruned["properties"]
    assert "keep_this" in props
    assert "also_keep" in props
    assert "skip_this" not in props
    assert "also_skip" not in props

    # Pruned parameters must disappear from `required` as well (when present).
    for name in ("skip_this", "also_skip"):
        assert name not in pruned.get("required", [])
async def test_lambda_function():
    """Test lambda function schema and validation"""
    fn = lambda x, y=5: str(x)  # noqa: E731
    adapter = get_cached_typeadapter(fn)

    # validate_python invokes the callable and hands back its result directly.
    assert adapter.validate_python({"x": "hello"}) == "hello"
    assert adapter.validate_python({"x": "hello", "y": "world"}) == "hello"

    # Omitting the required `x` argument is an error.
    with pytest.raises(Exception):
        adapter.validate_python({"y": "world"})
def test_basic_json_schema():
    """Test JSON schema generation for a simple function"""

    def simple_func(a: int, b: str = "default") -> str:
        return f"{a}-{b}"

    schema = get_cached_typeadapter(simple_func).json_schema()

    # Both parameters appear with their expected primitive types.
    assert "properties" in schema
    props = schema["properties"]
    assert "a" in props and props["a"]["type"] == "integer"
    assert "b" in props and props["b"]["type"] == "string"

    # The default value of `b` is carried into the schema.
    assert "default" in props["b"]
    assert props["b"]["default"] == "default"

    # Only the parameter without a default is required.
    assert "required" in schema
    assert "a" in schema["required"]
    assert "b" not in schema["required"]
def test_str_vs_int():
    """
    Test that string values are kept as strings even when they contain numbers,
    while numbers are parsed correctly.
    """

    def func_with_str_and_int(a: str, b: int):
        return a

    adapter = get_cached_typeadapter(func_with_str_and_int)
    # "123" must survive as a string even though it looks numeric.
    assert adapter.validate_python({"a": "123", "b": 123}) == "123"
def test_class_with_methods():
    """Test that class methods are not included in the schema"""
    instance = ClassWithMethods()

    # `self` must never leak into the schema of any bound method.
    for method in (
        instance.do_something,
        instance.do_something_annotated,
        instance.do_something_return_none,
    ):
        schema = get_cached_typeadapter(method).json_schema()
        assert "self" not in schema["properties"]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/test_typeadapter.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_auth_integration.py | import base64
import hashlib
import secrets
import time
import unittest.mock
from urllib.parse import parse_qs, urlparse
import httpx
import pytest
from mcp.server.auth.provider import (
AccessToken,
AuthorizationCode,
AuthorizationParams,
OAuthAuthorizationServerProvider,
RefreshToken,
construct_redirect_uri,
)
from mcp.server.auth.routes import (
create_auth_routes,
)
from mcp.server.auth.settings import (
ClientRegistrationOptions,
RevocationOptions,
)
from mcp.shared.auth import (
OAuthClientInformationFull,
OAuthToken,
)
from pydantic import AnyHttpUrl
from starlette.applications import Starlette
# Mock OAuth provider for testing
class MockOAuthProvider(OAuthAuthorizationServerProvider):
    """In-memory OAuth provider used to exercise the SDK's auth routes in tests.

    All state lives in plain dicts; nothing is persisted and no real crypto is
    involved. Token strings are random hex with an `access_`/`refresh_` prefix.
    """

    def __init__(self):
        self.clients = {}
        self.auth_codes = {}  # code -> {client_id, code_challenge, redirect_uri}
        self.tokens = {}  # token -> {client_id, scopes, expires_at}
        self.refresh_tokens = {}  # refresh_token -> access_token

    async def get_client(self, client_id: str) -> OAuthClientInformationFull | None:
        return self.clients.get(client_id)

    async def register_client(self, client_info: OAuthClientInformationFull):
        self.clients[client_info.client_id] = client_info

    async def authorize(
        self, client: OAuthClientInformationFull, params: AuthorizationParams
    ) -> str:
        # toy authorize implementation which just immediately generates an authorization
        # code and completes the redirect
        if client.client_id is None:
            raise ValueError("client_id is required")
        code = AuthorizationCode(
            code=f"code_{int(time.time())}",
            client_id=client.client_id,
            code_challenge=params.code_challenge,
            redirect_uri=params.redirect_uri,
            redirect_uri_provided_explicitly=params.redirect_uri_provided_explicitly,
            expires_at=time.time() + 300,  # codes live for 5 minutes
            scopes=params.scopes or ["read", "write"],
        )
        self.auth_codes[code.code] = code
        return construct_redirect_uri(
            str(params.redirect_uri), code=code.code, state=params.state
        )

    async def load_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: str
    ) -> AuthorizationCode | None:
        return self.auth_codes.get(authorization_code)

    async def exchange_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: AuthorizationCode
    ) -> OAuthToken:
        """Issue an access/refresh token pair and consume the authorization code."""
        assert authorization_code.code in self.auth_codes

        # Generate an access token and refresh token
        access_token = f"access_{secrets.token_hex(32)}"
        refresh_token = f"refresh_{secrets.token_hex(32)}"

        # Store the tokens
        if client.client_id is None:
            raise ValueError("client_id is required")
        self.tokens[access_token] = AccessToken(
            token=access_token,
            client_id=client.client_id,
            scopes=authorization_code.scopes,
            expires_at=int(time.time()) + 3600,
        )
        self.refresh_tokens[refresh_token] = access_token

        # Remove the used code (single-use, per the OAuth code-exchange contract)
        del self.auth_codes[authorization_code.code]

        return OAuthToken(
            access_token=access_token,
            token_type="Bearer",
            expires_in=3600,
            scope="read write",
            refresh_token=refresh_token,
        )

    async def load_refresh_token(
        self, client: OAuthClientInformationFull, refresh_token: str
    ) -> RefreshToken | None:
        old_access_token = self.refresh_tokens.get(refresh_token)
        if old_access_token is None:
            return None
        token_info = self.tokens.get(old_access_token)
        if token_info is None:
            return None

        # Create a RefreshToken object that matches what is expected in later code
        refresh_obj = RefreshToken(
            token=refresh_token,
            client_id=token_info.client_id,
            scopes=token_info.scopes,
            expires_at=token_info.expires_at,
        )
        return refresh_obj

    async def exchange_refresh_token(
        self,
        client: OAuthClientInformationFull,
        refresh_token: RefreshToken,
        scopes: list[str],
    ) -> OAuthToken:
        """Rotate tokens: issue a fresh pair and invalidate the old ones."""
        # Check if refresh token exists
        assert refresh_token.token in self.refresh_tokens
        old_access_token = self.refresh_tokens[refresh_token.token]

        # Check if the access token exists
        assert old_access_token in self.tokens

        # Check if the token was issued to this client
        token_info = self.tokens[old_access_token]
        assert token_info.client_id == client.client_id

        # Generate a new access token and refresh token
        new_access_token = f"access_{secrets.token_hex(32)}"
        new_refresh_token = f"refresh_{secrets.token_hex(32)}"

        # Store the new tokens
        if client.client_id is None:
            raise ValueError("client_id is required")
        self.tokens[new_access_token] = AccessToken(
            token=new_access_token,
            client_id=client.client_id,
            scopes=scopes or token_info.scopes,  # empty scope request keeps old scopes
            expires_at=int(time.time()) + 3600,
        )
        self.refresh_tokens[new_refresh_token] = new_access_token

        # Remove the old tokens — must happen after the new pair is stored so a
        # failure above cannot leave the client with no valid tokens
        del self.refresh_tokens[refresh_token.token]
        del self.tokens[old_access_token]

        return OAuthToken(
            access_token=new_access_token,
            token_type="Bearer",
            expires_in=3600,
            scope=" ".join(scopes) if scopes else " ".join(token_info.scopes),
            refresh_token=new_refresh_token,
        )

    async def load_access_token(self, token: str) -> AccessToken | None:
        token_info = self.tokens.get(token)

        # Check if token is expired
        # if token_info.expires_at < int(time.time()):
        #     raise InvalidTokenError("Access token has expired")

        # `token_info and ...` short-circuits to None for unknown tokens.
        return token_info and AccessToken(
            token=token,
            client_id=token_info.client_id,
            scopes=token_info.scopes,
            expires_at=token_info.expires_at,
        )

    async def revoke_token(self, token: AccessToken | RefreshToken) -> None:
        match token:
            case RefreshToken():
                # Remove the refresh token
                del self.refresh_tokens[token.token]

            case AccessToken():
                # Remove the access token
                del self.tokens[token.token]

                # Also remove any refresh tokens that point to this access token
                for refresh_token, access_token in list(self.refresh_tokens.items()):
                    if access_token == token.token:
                        del self.refresh_tokens[refresh_token]
@pytest.fixture
def mock_oauth_provider():
    # Fresh in-memory provider per test so token/client state never leaks between tests.
    return MockOAuthProvider()
@pytest.fixture
def auth_app(mock_oauth_provider):
    """Starlette app exposing the SDK's OAuth routes backed by the mock provider."""
    registration_options = ClientRegistrationOptions(
        enabled=True,
        valid_scopes=["read", "write", "profile"],
        default_scopes=["read", "write"],
    )
    routes = create_auth_routes(
        mock_oauth_provider,
        AnyHttpUrl("https://auth.example.com"),
        AnyHttpUrl("https://docs.example.com"),
        client_registration_options=registration_options,
        revocation_options=RevocationOptions(enabled=True),
    )
    return Starlette(routes=routes)
@pytest.fixture
async def test_client(auth_app):
    """HTTP client wired directly to the ASGI app — no network involved."""
    transport = httpx.ASGITransport(app=auth_app)
    async with httpx.AsyncClient(
        transport=transport, base_url="https://mcptest.com"
    ) as client:
        yield client
@pytest.fixture
async def registered_client(test_client: httpx.AsyncClient, request):
    """Create and register a test client.

    Parameters can be customized via indirect parameterization:
    @pytest.mark.parametrize("registered_client",
                            [{"grant_types": ["authorization_code"]}],
                            indirect=True)
    """
    # Default client metadata
    metadata = {
        "redirect_uris": ["https://client.example.com/callback"],
        "client_name": "Test Client",
        "grant_types": ["authorization_code", "refresh_token"],
    }

    # Merge in any per-test overrides supplied via indirect parametrization.
    overrides = getattr(request, "param", None)
    if overrides:
        metadata.update(overrides)

    response = await test_client.post("/register", json=metadata)
    assert response.status_code == 201, f"Failed to register client: {response.content}"
    return response.json()
@pytest.fixture
def pkce_challenge():
    """Create a PKCE challenge with code_verifier and code_challenge."""
    verifier = "some_random_verifier_string"
    # S256 method: base64url-encode the SHA-256 digest and strip '=' padding.
    digest = hashlib.sha256(verifier.encode()).digest()
    challenge = base64.urlsafe_b64encode(digest).decode().rstrip("=")
    return {"code_verifier": verifier, "code_challenge": challenge}
@pytest.fixture
async def auth_code(test_client, registered_client, pkce_challenge, request):
    """Get an authorization code.

    Parameters can be customized via indirect parameterization:
    @pytest.mark.parametrize("auth_code",
                            [{"redirect_uri": "https://client.example.com/other-callback"}],
                            indirect=True)
    """
    # Default authorize params
    params = {
        "response_type": "code",
        "client_id": registered_client["client_id"],
        "redirect_uri": "https://client.example.com/callback",
        "code_challenge": pkce_challenge["code_challenge"],
        "code_challenge_method": "S256",
        "state": "test_state",
    }

    # Merge in any per-test overrides supplied via indirect parametrization.
    overrides = getattr(request, "param", None)
    if overrides:
        params.update(overrides)

    response = await test_client.get("/authorize", params=params)
    assert response.status_code == 302, f"Failed to get auth code: {response.content}"

    # The code (and echoed state) come back in the redirect's query string.
    query_params = parse_qs(urlparse(response.headers["location"]).query)
    assert "code" in query_params, f"No code in response: {query_params}"

    return {
        "code": query_params["code"][0],
        "redirect_uri": params["redirect_uri"],
        "state": query_params.get("state", [None])[0],
    }
@pytest.fixture
async def tokens(test_client, registered_client, auth_code, pkce_challenge, request):
    """Exchange authorization code for tokens.

    Parameters can be customized via indirect parameterization:
    @pytest.mark.parametrize("tokens",
                            [{"code_verifier": "wrong_verifier"}],
                            indirect=True)
    """
    # Default token request params
    token_params = {
        "grant_type": "authorization_code",
        "client_id": registered_client["client_id"],
        "client_secret": registered_client["client_secret"],
        "code": auth_code["code"],
        "code_verifier": pkce_challenge["code_verifier"],
        "redirect_uri": auth_code["redirect_uri"],
    }

    # Merge in any per-test overrides supplied via indirect parametrization.
    overrides = getattr(request, "param", None)
    if overrides:
        token_params.update(overrides)

    response = await test_client.post("/token", data=token_params)
    # Deliberately no status assertion: some tests intentionally provoke errors.
    return {"response": response, "params": token_params}
class TestAuthEndpoints:
async def test_metadata_endpoint(self, test_client: httpx.AsyncClient):
"""Test the OAuth 2.1 metadata endpoint."""
print("Sending request to metadata endpoint")
response = await test_client.get("/.well-known/oauth-authorization-server")
print(f"Got response: {response.status_code}")
if response.status_code != 200:
print(f"Response content: {response.content}")
assert response.status_code == 200
metadata = response.json()
assert metadata["issuer"] == "https://auth.example.com/"
assert (
metadata["authorization_endpoint"] == "https://auth.example.com/authorize"
)
assert metadata["token_endpoint"] == "https://auth.example.com/token"
assert metadata["registration_endpoint"] == "https://auth.example.com/register"
assert metadata["revocation_endpoint"] == "https://auth.example.com/revoke"
assert metadata["response_types_supported"] == ["code"]
assert metadata["code_challenge_methods_supported"] == ["S256"]
assert set(metadata["token_endpoint_auth_methods_supported"]) == {
"client_secret_post",
"client_secret_basic",
}
assert metadata["grant_types_supported"] == [
"authorization_code",
"refresh_token",
]
assert metadata["service_documentation"] == "https://docs.example.com/"
async def test_token_validation_error(self, test_client: httpx.AsyncClient):
"""Test token endpoint error - missing client_id returns auth error."""
# Missing required fields - SDK validates client_id first
response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
# Missing code, code_verifier, client_id, etc.
},
)
error_response = response.json()
# SDK validates client_id before other fields, returning unauthorized_client
# (FastMCP's OAuthProxy transforms this to invalid_client, but this test
# uses the SDK's create_auth_routes directly)
assert error_response["error"] == "unauthorized_client"
assert "error_description" in error_response
async def test_token_invalid_auth_code(
self, test_client, registered_client, pkce_challenge
):
"""Test token endpoint error - authorization code does not exist."""
# Try to use a non-existent authorization code
response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": "non_existent_auth_code",
"code_verifier": pkce_challenge["code_verifier"],
"redirect_uri": "https://client.example.com/callback",
},
)
print(f"Status code: {response.status_code}")
print(f"Response body: {response.content}")
print(f"Response JSON: {response.json()}")
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_grant"
assert (
"authorization code does not exist" in error_response["error_description"]
)
async def test_token_expired_auth_code(
self,
test_client,
registered_client,
auth_code,
pkce_challenge,
mock_oauth_provider,
):
"""Test token endpoint error - authorization code has expired."""
# Get the current time for our time mocking
current_time = time.time()
# Find the auth code object
code_value = auth_code["code"]
found_code = None
for code_obj in mock_oauth_provider.auth_codes.values():
if code_obj.code == code_value:
found_code = code_obj
break
assert found_code is not None
# Authorization codes are typically short-lived (5 minutes = 300 seconds)
# So we'll mock time to be 10 minutes (600 seconds) in the future
with unittest.mock.patch("time.time", return_value=current_time + 600):
# Try to use the expired authorization code
response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": code_value,
"code_verifier": pkce_challenge["code_verifier"],
"redirect_uri": auth_code["redirect_uri"],
},
)
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_grant"
assert (
"authorization code has expired" in error_response["error_description"]
)
@pytest.mark.parametrize(
"registered_client",
[
{
"redirect_uris": [
"https://client.example.com/callback",
"https://client.example.com/other-callback",
]
}
],
indirect=True,
)
async def test_token_redirect_uri_mismatch(
self, test_client, registered_client, auth_code, pkce_challenge
):
"""Test token endpoint error - redirect URI mismatch."""
# Try to use the code with a different redirect URI
response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": auth_code["code"],
"code_verifier": pkce_challenge["code_verifier"],
# Different from the one used in /authorize
"redirect_uri": "https://client.example.com/other-callback",
},
)
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_request"
assert "redirect_uri did not match" in error_response["error_description"]
async def test_token_code_verifier_mismatch(
self, test_client, registered_client, auth_code
):
"""Test token endpoint error - PKCE code verifier mismatch."""
# Try to use the code with an incorrect code verifier
response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": auth_code["code"],
# Different from the one used to create challenge
"code_verifier": "incorrect_code_verifier",
"redirect_uri": auth_code["redirect_uri"],
},
)
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_grant"
assert "incorrect code_verifier" in error_response["error_description"]
async def test_token_invalid_refresh_token(self, test_client, registered_client):
"""Test token endpoint error - refresh token does not exist."""
# Try to use a non-existent refresh token
response = await test_client.post(
"/token",
data={
"grant_type": "refresh_token",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"refresh_token": "non_existent_refresh_token",
},
)
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_grant"
assert "refresh token does not exist" in error_response["error_description"]
async def test_token_expired_refresh_token(
self,
test_client,
registered_client,
auth_code,
pkce_challenge,
mock_oauth_provider,
):
"""Test token endpoint error - refresh token has expired."""
# Step 1: First, let's create a token and refresh token at the current time
current_time = time.time()
# Exchange authorization code for tokens normally
token_response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": auth_code["code"],
"code_verifier": pkce_challenge["code_verifier"],
"redirect_uri": auth_code["redirect_uri"],
},
)
assert token_response.status_code == 200
tokens = token_response.json()
refresh_token = tokens["refresh_token"]
# Step 2: Time travel forward 4 hours (tokens expire in 1 hour by default)
# Mock the time.time() function to return a value 4 hours in the future
with unittest.mock.patch(
"time.time", return_value=current_time + 14400
): # 4 hours = 14400 seconds
# Try to use the refresh token which should now be considered expired
response = await test_client.post(
"/token",
data={
"grant_type": "refresh_token",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"refresh_token": refresh_token,
},
)
# In the "future", the token should be considered expired
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_grant"
assert "refresh token has expired" in error_response["error_description"]
async def test_token_invalid_scope(
self, test_client, registered_client, auth_code, pkce_challenge
):
"""Test token endpoint error - invalid scope in refresh token request."""
# Exchange authorization code for tokens
token_response = await test_client.post(
"/token",
data={
"grant_type": "authorization_code",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"code": auth_code["code"],
"code_verifier": pkce_challenge["code_verifier"],
"redirect_uri": auth_code["redirect_uri"],
},
)
assert token_response.status_code == 200
tokens = token_response.json()
refresh_token = tokens["refresh_token"]
# Try to use refresh token with an invalid scope
response = await test_client.post(
"/token",
data={
"grant_type": "refresh_token",
"client_id": registered_client["client_id"],
"client_secret": registered_client["client_secret"],
"refresh_token": refresh_token,
"scope": "read write invalid_scope", # Adding an invalid scope
},
)
assert response.status_code == 400
error_response = response.json()
assert error_response["error"] == "invalid_scope"
assert "cannot request scope" in error_response["error_description"]
async def test_client_registration(
self, test_client: httpx.AsyncClient, mock_oauth_provider: MockOAuthProvider
):
"""Test client registration."""
client_metadata = {
"redirect_uris": ["https://client.example.com/callback"],
"client_name": "Test Client",
"client_uri": "https://client.example.com",
}
response = await test_client.post(
"/register",
json=client_metadata,
)
assert response.status_code == 201, response.content
client_info = response.json()
assert "client_id" in client_info
assert "client_secret" in client_info
assert client_info["client_name"] == "Test Client"
assert client_info["redirect_uris"] == ["https://client.example.com/callback"]
# Verify that the client was registered
# assert await mock_oauth_provider.clients_store.get_client(
# client_info["client_id"]
# ) is not None
async def test_client_registration_missing_required_fields(
self, test_client: httpx.AsyncClient
):
"""Test client registration with missing required fields."""
# Missing redirect_uris which is a required field
client_metadata = {
"client_name": "Test Client",
"client_uri": "https://client.example.com",
}
response = await test_client.post(
"/register",
json=client_metadata,
)
assert response.status_code == 400
error_data = response.json()
assert "error" in error_data
assert error_data["error"] == "invalid_client_metadata"
assert error_data["error_description"] == "redirect_uris: Field required"
async def test_client_registration_invalid_uri(
    self, test_client: httpx.AsyncClient
):
    """Test client registration with invalid URIs."""
    # A redirect_uri that cannot be parsed as a URL must be rejected.
    metadata = {
        "redirect_uris": ["not-a-valid-uri"],
        "client_name": "Test Client",
    }
    resp = await test_client.post("/register", json=metadata)
    assert resp.status_code == 400

    body = resp.json()
    assert "error" in body
    assert body["error"] == "invalid_client_metadata"
    assert body["error_description"] == (
        "redirect_uris.0: Input should be a valid URL, relative URL without a base"
    )
async def test_client_registration_empty_redirect_uris(
    self, test_client: httpx.AsyncClient
):
    """Test client registration with empty redirect_uris array."""
    metadata = {
        "redirect_uris": [],  # must contain at least one entry
        "client_name": "Test Client",
    }
    resp = await test_client.post("/register", json=metadata)
    assert resp.status_code == 400

    body = resp.json()
    assert "error" in body
    assert body["error"] == "invalid_client_metadata"
    expected = "redirect_uris: List should have at least 1 item after validation, not 0"
    assert body["error_description"] == expected
async def test_authorize_form_post(
    self,
    test_client: httpx.AsyncClient,
    mock_oauth_provider: MockOAuthProvider,
    pkce_challenge,
):
    """Test the authorization endpoint using POST with form-encoded data."""
    # Register a client that supports the authorization-code flow.
    registration = await test_client.post(
        "/register",
        json={
            "redirect_uris": ["https://client.example.com/callback"],
            "client_name": "Test Client",
            "grant_types": ["authorization_code", "refresh_token"],
        },
    )
    assert registration.status_code == 201
    client_info = registration.json()

    # The /authorize endpoint must also accept POSTed form data.
    auth_response = await test_client.post(
        "/authorize",
        data={
            "response_type": "code",
            "client_id": client_info["client_id"],
            "redirect_uri": "https://client.example.com/callback",
            "code_challenge": pkce_challenge["code_challenge"],
            "code_challenge_method": "S256",
            "state": "test_form_state",
        },
    )
    assert auth_response.status_code == 302

    # The redirect back to the client carries the code and the echoed state.
    location = urlparse(auth_response.headers["location"])
    params = parse_qs(location.query)
    assert "code" in params
    assert params["state"][0] == "test_form_state"
async def test_authorization_get(
    self,
    test_client: httpx.AsyncClient,
    mock_oauth_provider: MockOAuthProvider,
    pkce_challenge,
):
    """Test the full authorization flow.

    Walks the complete OAuth 2.0 authorization-code grant end to end:
    dynamic client registration, the /authorize redirect, the code/token
    exchange with PKCE, token introspection, refresh, and revocation.
    """
    # 1. Register a client
    client_metadata = {
        "redirect_uris": ["https://client.example.com/callback"],
        "client_name": "Test Client",
        "grant_types": ["authorization_code", "refresh_token"],
    }
    response = await test_client.post(
        "/register",
        json=client_metadata,
    )
    assert response.status_code == 201
    client_info = response.json()
    # 2. Request authorization using GET with query params
    response = await test_client.get(
        "/authorize",
        params={
            "response_type": "code",
            "client_id": client_info["client_id"],
            "redirect_uri": "https://client.example.com/callback",
            "code_challenge": pkce_challenge["code_challenge"],
            "code_challenge_method": "S256",
            "state": "test_state",
        },
    )
    assert response.status_code == 302
    # 3. Extract the authorization code from the redirect URL
    redirect_url = response.headers["location"]
    parsed_url = urlparse(redirect_url)
    query_params = parse_qs(parsed_url.query)
    assert "code" in query_params
    # The server must echo the client's state back unchanged.
    assert query_params["state"][0] == "test_state"
    auth_code = query_params["code"][0]
    # 4. Exchange the authorization code for tokens
    response = await test_client.post(
        "/token",
        data={
            "grant_type": "authorization_code",
            "client_id": client_info["client_id"],
            "client_secret": client_info["client_secret"],
            "code": auth_code,
            # PKCE: the verifier must match the challenge sent in step 2.
            "code_verifier": pkce_challenge["code_verifier"],
            "redirect_uri": "https://client.example.com/callback",
        },
    )
    assert response.status_code == 200
    token_response = response.json()
    assert "access_token" in token_response
    assert "token_type" in token_response
    assert "refresh_token" in token_response
    assert "expires_in" in token_response
    assert token_response["token_type"] == "Bearer"
    # 5. Verify the access token
    access_token = token_response["access_token"]
    refresh_token = token_response["refresh_token"]
    # Create a test client with the token
    auth_info = await mock_oauth_provider.load_access_token(access_token)
    assert auth_info
    assert auth_info.client_id == client_info["client_id"]
    assert "read" in auth_info.scopes
    assert "write" in auth_info.scopes
    # 6. Refresh the token
    response = await test_client.post(
        "/token",
        data={
            "grant_type": "refresh_token",
            "client_id": client_info["client_id"],
            "client_secret": client_info["client_secret"],
            "refresh_token": refresh_token,
            "redirect_uri": "https://client.example.com/callback",
        },
    )
    assert response.status_code == 200
    new_token_response = response.json()
    assert "access_token" in new_token_response
    assert "refresh_token" in new_token_response
    # Refresh must rotate both tokens: neither old value is returned again.
    assert new_token_response["access_token"] != access_token
    assert new_token_response["refresh_token"] != refresh_token
    # 7. Revoke the token
    response = await test_client.post(
        "/revoke",
        data={
            "client_id": client_info["client_id"],
            "client_secret": client_info["client_secret"],
            "token": new_token_response["access_token"],
        },
    )
    assert response.status_code == 200
    # Verify that the token was revoked
    assert (
        await mock_oauth_provider.load_access_token(
            new_token_response["access_token"]
        )
        is None
    )
async def test_revoke_invalid_token(self, test_client, registered_client):
    """Test revoking an invalid token."""
    resp = await test_client.post(
        "/revoke",
        data={
            "client_id": registered_client["client_id"],
            "client_secret": registered_client["client_secret"],
            "token": "invalid_token",
        },
    )
    # Per RFC 7009, revoking an unknown token still returns 200.
    assert resp.status_code == 200
async def test_revoke_with_malformed_token(self, test_client, registered_client):
    """Revoking with a non-string token and bogus hint yields invalid_request."""
    resp = await test_client.post(
        "/revoke",
        data={
            "client_id": registered_client["client_id"],
            "client_secret": registered_client["client_secret"],
            "token": 123,
            "token_type_hint": "asdf",
        },
    )
    assert resp.status_code == 400

    body = resp.json()
    assert body["error"] == "invalid_request"
    # The error description should point at the offending parameter.
    assert "token_type_hint" in body["error_description"]
async def test_client_registration_disallowed_scopes(
    self, test_client: httpx.AsyncClient
):
    """Test client registration with scopes that are not allowed."""
    resp = await test_client.post(
        "/register",
        json={
            "redirect_uris": ["https://client.example.com/callback"],
            "client_name": "Test Client",
            "scope": "read write profile admin",  # 'admin' is not in valid_scopes
        },
    )
    assert resp.status_code == 400

    body = resp.json()
    assert "error" in body
    assert body["error"] == "invalid_client_metadata"
    # The error should call out both the offending field and the bad value.
    assert "scope" in body["error_description"]
    assert "admin" in body["error_description"]
async def test_client_registration_default_scopes(
    self, test_client: httpx.AsyncClient, mock_oauth_provider: MockOAuthProvider
):
    """Clients registered without a scope receive the provider's defaults."""
    resp = await test_client.post(
        "/register",
        json={
            "redirect_uris": ["https://client.example.com/callback"],
            "client_name": "Test Client",
            # No scope specified
        },
    )
    assert resp.status_code == 201

    client_info = resp.json()
    # The registration response advertises the default scopes...
    assert client_info["scope"] == "read write"

    # ...and the stored client record agrees.
    stored = await mock_oauth_provider.get_client(client_info["client_id"])
    assert stored is not None
    assert stored.scope == "read write"
async def test_client_registration_invalid_grant_type(
    self, test_client: httpx.AsyncClient
):
    """Registration requires both authorization_code and refresh_token grants."""
    resp = await test_client.post(
        "/register",
        json={
            "redirect_uris": ["https://client.example.com/callback"],
            "client_name": "Test Client",
            "grant_types": ["authorization_code"],
        },
    )
    assert resp.status_code == 400

    body = resp.json()
    assert "error" in body
    assert body["error"] == "invalid_client_metadata"
    expected = "grant_types must be authorization_code and refresh_token"
    assert body["error_description"] == expected
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_auth_integration.py",
"license": "Apache License 2.0",
"lines": 850,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_tool_annotations.py | from typing import Any
import mcp.types as mcp_types
from mcp.types import Tool as MCPTool
from mcp.types import ToolAnnotations, ToolExecution
from fastmcp import Client, FastMCP
from fastmcp.tools.tool import Tool
async def test_tool_annotations_in_tool_manager():
    """Test that tool annotations are correctly stored in the tool manager."""
    server = FastMCP("Test Server")

    @server.tool(
        annotations=ToolAnnotations(
            title="Echo Tool",
            readOnlyHint=True,
            openWorldHint=False,
        )
    )
    def echo(message: str) -> str:
        """Echo back the message provided."""
        return message

    # Inspect the stored tool objects directly rather than via the protocol.
    listed = await server.list_tools()
    assert len(listed) == 1
    ann = listed[0].annotations
    assert ann is not None
    assert ann.title == "Echo Tool"
    assert ann.readOnlyHint is True
    assert ann.openWorldHint is False
async def test_tool_annotations_in_mcp_protocol():
    """Test that tool annotations are correctly propagated to MCP tools list."""
    server = FastMCP("Test Server")

    @server.tool(
        annotations=ToolAnnotations(
            title="Echo Tool",
            readOnlyHint=True,
            openWorldHint=False,
        )
    )
    def echo(message: str) -> str:
        """Echo back the message provided."""
        return message

    # Exercise the low-level MCP handler rather than the public helper.
    result = await server._list_tools_mcp(mcp_types.ListToolsRequest())
    assert len(result.tools) == 1
    ann = result.tools[0].annotations
    assert ann is not None
    assert ann.title == "Echo Tool"
    assert ann.readOnlyHint is True
    assert ann.openWorldHint is False
async def test_tool_annotations_in_client_api():
    """Test that tool annotations are correctly accessible via client API."""
    server = FastMCP("Test Server")

    @server.tool(
        annotations=ToolAnnotations(
            title="Echo Tool",
            readOnlyHint=True,
            openWorldHint=False,
        )
    )
    def echo(message: str) -> str:
        """Echo back the message provided."""
        return message

    async with Client(server) as client:
        listed = await client.list_tools()

    assert len(listed) == 1
    assert listed[0].name == "echo"
    ann = listed[0].annotations
    assert ann is not None
    assert ann.title == "Echo Tool"
    assert ann.readOnlyHint is True
    assert ann.openWorldHint is False
async def test_provide_tool_annotations_as_dict_to_decorator():
    """Test that annotations passed to the decorator as a plain dict are
    coerced to ToolAnnotations and exposed via the client API."""
    mcp = FastMCP("Test Server")

    @mcp.tool(
        annotations={
            "title": "Echo Tool",
            "readOnlyHint": True,
            "openWorldHint": False,
        }
    )
    def echo(message: str) -> str:
        """Echo back the message provided."""
        return message

    # Check via client API
    async with Client(mcp) as client:
        tools_result = await client.list_tools()
        assert len(tools_result) == 1
        assert tools_result[0].name == "echo"
        assert tools_result[0].annotations is not None
        assert tools_result[0].annotations.title == "Echo Tool"
        assert tools_result[0].annotations.readOnlyHint is True
        assert tools_result[0].annotations.openWorldHint is False
async def test_direct_tool_annotations_in_tool_manager():
    """Test direct ToolAnnotations object is correctly stored in tool manager."""
    server = FastMCP("Test Server")
    hints = ToolAnnotations(
        title="Direct Tool",
        readOnlyHint=False,
        destructiveHint=True,
        idempotentHint=False,
        openWorldHint=True,
    )

    @server.tool(annotations=hints)
    def modify(data: dict[str, Any]) -> dict[str, Any]:
        """Modify the data provided."""
        return {"modified": True, **data}

    # Inspect the stored tool objects directly.
    registered = await server.list_tools()
    assert len(registered) == 1
    ann = registered[0].annotations
    assert ann is not None
    assert ann.title == "Direct Tool"
    assert ann.readOnlyHint is False
    assert ann.destructiveHint is True
    assert ann.idempotentHint is False
    assert ann.openWorldHint is True
async def test_direct_tool_annotations_in_client_api():
    """Test direct ToolAnnotations object is correctly accessible via client API.

    Mirrors test_direct_tool_annotations_in_tool_manager, reading through the
    client instead; asserts every hint that was set, including the
    idempotentHint/openWorldHint checks that were previously missing.
    """
    mcp = FastMCP("Test Server")
    annotations = ToolAnnotations(
        title="Direct Tool",
        readOnlyHint=False,
        destructiveHint=True,
        idempotentHint=False,
        openWorldHint=True,
    )

    @mcp.tool(annotations=annotations)
    def modify(data: dict[str, Any]) -> dict[str, Any]:
        """Modify the data provided."""
        return {"modified": True, **data}

    # Check via client API
    async with Client(mcp) as client:
        tools_result = await client.list_tools()
        assert len(tools_result) == 1
        assert tools_result[0].name == "modify"
        assert tools_result[0].annotations is not None
        assert tools_result[0].annotations.title == "Direct Tool"
        assert tools_result[0].annotations.readOnlyHint is False
        assert tools_result[0].annotations.destructiveHint is True
        # Previously unchecked: the remaining hints must survive the round trip.
        assert tools_result[0].annotations.idempotentHint is False
        assert tools_result[0].annotations.openWorldHint is True
async def test_add_tool_method_annotations():
    """Test that tool annotations work with add_tool method."""
    server = FastMCP("Test Server")

    def create_item(name: str, value: int) -> dict[str, Any]:
        """Create a new item."""
        return {"name": name, "value": value}

    server.add_tool(
        Tool.from_function(
            create_item,
            name="create_item",
            annotations=ToolAnnotations(
                title="Create Item",
                readOnlyHint=False,
                destructiveHint=False,
            ),
        )
    )

    # Inspect the stored tool objects directly.
    registered = await server.list_tools()
    assert len(registered) == 1
    ann = registered[0].annotations
    assert ann is not None
    assert ann.title == "Create Item"
    assert ann.readOnlyHint is False
    assert ann.destructiveHint is False
async def test_tool_functionality_with_annotations():
    """Test that tool functionality is preserved when using annotations."""
    server = FastMCP("Test Server")

    def create_item(name: str, value: int) -> dict[str, Any]:
        """Create a new item."""
        return {"name": name, "value": value}

    annotated_tool = Tool.from_function(
        create_item,
        name="create_item",
        annotations=ToolAnnotations(
            title="Create Item",
            readOnlyHint=False,
            destructiveHint=False,
        ),
    )
    server.add_tool(annotated_tool)

    # Calling through the client proves annotations don't break execution.
    async with Client(server) as client:
        outcome = await client.call_tool(
            "create_item", {"name": "test_item", "value": 42}
        )
    assert outcome.data == {"name": "test_item", "value": 42}
async def test_task_execution_auto_populated_for_task_enabled_tool():
    """Test that execution.taskSupport is automatically set when tool has task=True."""
    server = FastMCP("Test Server")

    @server.tool(task=True)
    async def background_tool(data: str) -> str:
        """A tool that runs in background."""
        return f"Processed: {data}"

    async with Client(server) as client:
        listed = await client.list_tools()

    assert len(listed) == 1
    tool = listed[0]
    assert tool.name == "background_tool"
    assert isinstance(tool, MCPTool)
    assert isinstance(tool.execution, ToolExecution)
    assert tool.execution.taskSupport == "optional"
async def test_task_execution_omitted_for_task_disabled_tool():
    """Test that execution is not set when tool has task=False or default."""
    server = FastMCP("Test Server")

    @server.tool(task=False)
    def sync_tool(data: str) -> str:
        """A synchronous tool."""
        return f"Processed: {data}"

    async with Client(server) as client:
        listed = await client.list_tools()

    assert len(listed) == 1
    tool = listed[0]
    assert tool.name == "sync_tool"
    # execution should be None for non-task tools (default is False, omitted)
    assert tool.execution is None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_tool_annotations.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_plan.py | """Tests for PrefectDbtOrchestrator.plan() dry-run method."""
from unittest.mock import patch
import pytest
from conftest import (
_make_mock_executor,
_make_mock_settings,
write_manifest,
write_sql_files,
)
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.core._manifest import DbtNode, ExecutionWave
from prefect_dbt.core._orchestrator import (
BuildPlan,
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
TestStrategy,
)
# =============================================================================
# TestPlanBasic
# =============================================================================
class TestPlanBasic:
    """Core wave-construction behavior of PrefectDbtOrchestrator.plan()."""

    def test_empty_manifest(self, tmp_path):
        """Empty manifest produces an empty plan."""
        manifest = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        assert isinstance(plan, BuildPlan)
        assert plan.waves == ()
        assert plan.node_count == 0
        assert plan.estimated_parallelism == 0
        assert plan.skipped_nodes == {}
        assert plan.cache_predictions is None

    def test_single_node(self, tmp_path):
        """Single-node manifest produces one wave with one node."""
        manifest = write_manifest(
            tmp_path,
            {
                "nodes": {
                    "model.test.m1": {
                        "name": "m1",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                    },
                },
                "sources": {},
            },
        )
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        assert plan.node_count == 1
        assert len(plan.waves) == 1
        assert plan.estimated_parallelism == 1
        assert plan.waves[0].wave_number == 0
        assert plan.waves[0].nodes[0].unique_id == "model.test.m1"

    def test_diamond_graph(self, tmp_path, diamond_manifest_data):
        """Diamond graph: root -> left/right -> leaf produces 3 waves."""
        manifest = write_manifest(tmp_path, diamond_manifest_data)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        assert plan.node_count == 4
        assert len(plan.waves) == 3
        # Wave 0: root (1 node), Wave 1: left+right (2 nodes), Wave 2: leaf (1 node)
        assert plan.estimated_parallelism == 2
        wave_0_ids = {n.unique_id for n in plan.waves[0].nodes}
        wave_1_ids = {n.unique_id for n in plan.waves[1].nodes}
        wave_2_ids = {n.unique_id for n in plan.waves[2].nodes}
        assert wave_0_ids == {"model.test.root"}
        assert wave_1_ids == {"model.test.left", "model.test.right"}
        assert wave_2_ids == {"model.test.leaf"}

    def test_linear_chain(self, tmp_path, linear_manifest_data):
        """Linear chain a -> b -> c produces 3 waves, parallelism 1."""
        manifest = write_manifest(tmp_path, linear_manifest_data)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        assert plan.node_count == 3
        assert len(plan.waves) == 3
        assert plan.estimated_parallelism == 1

    def test_plan_returns_frozen_dataclass(self, tmp_path):
        """BuildPlan is frozen (immutable)."""
        manifest = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        # Frozen-dataclass assignment raises FrozenInstanceError, which is an
        # AttributeError subclass.
        with pytest.raises(AttributeError):
            plan.node_count = 99  # type: ignore[misc]

    def test_no_cache_predictions_without_cache_config(self, tmp_path):
        """cache_predictions is None when no CacheConfig is set."""
        manifest = write_manifest(
            tmp_path,
            {
                "nodes": {
                    "model.test.m1": {
                        "name": "m1",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                    },
                },
                "sources": {},
            },
        )
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        assert plan.cache_predictions is None
# =============================================================================
# TestPlanWithSelectors
# =============================================================================
class TestPlanWithSelectors:
    """plan() honors select/exclude arguments via resolve_selection."""

    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_select_filters_nodes(self, mock_resolve, tmp_path, diamond_manifest_data):
        """plan() with select= filters to only selected nodes."""
        manifest_path = write_manifest(tmp_path, diamond_manifest_data)
        mock_resolve.return_value = {"model.test.root", "model.test.left"}

        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest_path,
            executor=_make_mock_executor(),
        )
        build_plan = orchestrator.plan(select="tag:daily")

        assert build_plan.node_count == 2
        selected_ids = {
            node.unique_id for wave in build_plan.waves for node in wave.nodes
        }
        assert selected_ids == {"model.test.root", "model.test.left"}
        mock_resolve.assert_called_once()

    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_no_selectors_skips_resolve(self, mock_resolve, tmp_path):
        """plan() without select/exclude doesn't call resolve_selection."""
        nodes = {
            "model.test.m1": {
                "name": "m1",
                "resource_type": "model",
                "depends_on": {"nodes": []},
                "config": {"materialized": "table"},
            },
        }
        manifest_path = write_manifest(tmp_path, {"nodes": nodes, "sources": {}})
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest_path,
            executor=_make_mock_executor(),
        )
        orchestrator.plan()
        mock_resolve.assert_not_called()
# =============================================================================
# TestPlanTestStrategies
# =============================================================================
# Two-model chain (root -> leaf) plus a schema test attached to root; used by
# TestPlanTestStrategies to observe where each strategy schedules the test node.
DIAMOND_WITH_TESTS = {
    "nodes": {
        "model.test.root": {
            "name": "root",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "model.test.leaf": {
            "name": "leaf",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
        },
        "test.test.not_null_root_id": {
            "name": "not_null_root_id",
            "resource_type": "test",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {},
        },
    },
    "sources": {},
}
class TestPlanTestStrategies:
    """How each TestStrategy places dbt test nodes in the plan."""

    def _plan_with_strategy(self, tmp_path, strategy):
        # Build a plan over DIAMOND_WITH_TESTS using the given strategy.
        manifest_path = write_manifest(tmp_path, DIAMOND_WITH_TESTS)
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest_path,
            executor=_make_mock_executor(),
            test_strategy=strategy,
        )
        return orchestrator.plan()

    def test_skip_excludes_tests(self, tmp_path):
        """SKIP strategy excludes test nodes from plan."""
        plan = self._plan_with_strategy(tmp_path, TestStrategy.SKIP)
        planned_ids = {node.unique_id for wave in plan.waves for node in wave.nodes}
        assert "test.test.not_null_root_id" not in planned_ids
        assert plan.node_count == 2

    def test_immediate_includes_tests_interleaved(self, tmp_path):
        """IMMEDIATE strategy interleaves tests with models."""
        plan = self._plan_with_strategy(tmp_path, TestStrategy.IMMEDIATE)
        planned_ids = {node.unique_id for wave in plan.waves for node in wave.nodes}
        assert "test.test.not_null_root_id" in planned_ids
        # Test should be in a wave after root but before or with leaf
        assert plan.node_count == 3

    def test_deferred_appends_tests_after_models(self, tmp_path):
        """DEFERRED strategy places tests after all model waves."""
        plan = self._plan_with_strategy(tmp_path, TestStrategy.DEFERRED)
        planned_ids = {node.unique_id for wave in plan.waves for node in wave.nodes}
        assert "test.test.not_null_root_id" in planned_ids
        assert plan.node_count == 3
        # The final wave must contain the deferred test node.
        final_wave_ids = {node.unique_id for node in plan.waves[-1].nodes}
        assert "test.test.not_null_root_id" in final_wave_ids
# =============================================================================
# TestPlanWithFreshness
# =============================================================================
class TestPlanWithFreshness:
    """plan(only_fresh_sources=True) filters nodes downstream of stale sources."""

    @patch("prefect_dbt.core._orchestrator.filter_stale_nodes")
    @patch("prefect_dbt.core._orchestrator.run_source_freshness")
    def test_only_fresh_sources_filters_stale(
        self, mock_freshness, mock_filter, tmp_path, source_manifest_data
    ):
        """only_fresh_sources=True filters stale nodes and populates skipped_nodes."""
        # NOTE: patch decorators apply bottom-up, so mock_freshness is
        # run_source_freshness and mock_filter is filter_stale_nodes.
        manifest = write_manifest(tmp_path, source_manifest_data)
        # Simulate freshness results
        mock_freshness.return_value = {"source.test.raw.customers": {"status": "error"}}
        # Build nodes that would remain after filtering
        from prefect_dbt.core._manifest import ManifestParser

        parser = ManifestParser(manifest)
        all_nodes = parser.filter_nodes()
        # Keep only stg_src_orders and src_customer_summary, skip stg_src_customers
        remaining = {
            k: v for k, v in all_nodes.items() if k != "model.test.stg_src_customers"
        }
        skipped = {
            "model.test.stg_src_customers": {
                "status": "skipped",
                "reason": "stale upstream source",
            }
        }
        mock_filter.return_value = (remaining, skipped)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan(only_fresh_sources=True)
        assert "model.test.stg_src_customers" in plan.skipped_nodes
        mock_freshness.assert_called_once()
        mock_filter.assert_called_once()
# =============================================================================
# TestPlanCachePredictions
# =============================================================================
class TestPlanCachePredictions:
    """Cache hit/miss/excluded predictions surfaced by plan() with a CacheConfig."""

    def _make_cache_orch(self, tmp_path, manifest_data, cache_config=None):
        """Helper to create a PER_NODE orchestrator with cache."""
        manifest = write_manifest(tmp_path, manifest_data)
        settings = _make_mock_settings(project_dir=tmp_path)
        if cache_config is None:
            cache_config = CacheConfig(key_storage=tmp_path / "cache")
        return PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=manifest,
            executor=_make_mock_executor(),
            execution_mode=ExecutionMode.PER_NODE,
            cache=cache_config,
        )

    def test_cache_predictions_all_miss_on_fresh_build(self, tmp_path):
        """First build with no execution state -> all misses."""
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/m1.sql",
                },
            },
            "sources": {},
        }
        # The referenced SQL file must exist on disk for key computation.
        write_sql_files(tmp_path, {"models/m1.sql": "SELECT 1"})
        (tmp_path / "cache").mkdir(exist_ok=True)
        orch = self._make_cache_orch(tmp_path, manifest_data)
        plan = orch.plan()
        assert plan.cache_predictions is not None
        assert plan.cache_predictions["model.test.m1"] == "miss"

    def test_cache_predictions_excluded_by_resource_type(self, tmp_path):
        """Test nodes are excluded from cache predictions by default."""
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/m1.sql",
                },
                "test.test.t1": {
                    "name": "t1",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.m1"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        write_sql_files(tmp_path, {"models/m1.sql": "SELECT 1"})
        (tmp_path / "cache").mkdir(exist_ok=True)
        # IMMEDIATE strategy so the test node is part of the plan at all.
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(project_dir=tmp_path),
            manifest_path=write_manifest(tmp_path, manifest_data),
            executor=_make_mock_executor(),
            execution_mode=ExecutionMode.PER_NODE,
            cache=CacheConfig(key_storage=tmp_path / "cache"),
            test_strategy=TestStrategy.IMMEDIATE,
        )
        plan = orch.plan()
        assert plan.cache_predictions is not None
        assert plan.cache_predictions.get("test.test.t1") == "excluded"

    def test_cache_predictions_excluded_by_materialization(self, tmp_path):
        """Incremental models are excluded from cache by default."""
        manifest_data = {
            "nodes": {
                "model.test.inc": {
                    "name": "inc",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "incremental"},
                    "original_file_path": "models/inc.sql",
                },
            },
            "sources": {},
        }
        write_sql_files(tmp_path, {"models/inc.sql": "SELECT 1"})
        (tmp_path / "cache").mkdir(exist_ok=True)
        orch = self._make_cache_orch(tmp_path, manifest_data)
        plan = orch.plan()
        assert plan.cache_predictions is not None
        assert plan.cache_predictions["model.test.inc"] == "excluded"

    def test_cache_predictions_hit_when_state_matches(self, tmp_path):
        """Cache prediction is 'hit' when execution state matches precomputed key."""
        import json

        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/m1.sql",
                },
            },
            "sources": {},
        }
        write_sql_files(tmp_path, {"models/m1.sql": "SELECT 1"})
        cache_dir = tmp_path / "cache"
        cache_dir.mkdir(exist_ok=True)
        orch = self._make_cache_orch(tmp_path, manifest_data)
        # First, compute the key that _precompute_all_cache_keys would produce
        from prefect_dbt.core._manifest import ManifestParser

        parser = ManifestParser(write_manifest(tmp_path, manifest_data))
        precomputed = orch._precompute_all_cache_keys(
            parser.get_executable_nodes(), False, parser.get_macro_paths()
        )
        # Write execution state that matches
        state_path = cache_dir / ".execution_state.json"
        state_path.write_text(json.dumps(precomputed))
        plan = orch.plan()
        assert plan.cache_predictions is not None
        assert plan.cache_predictions["model.test.m1"] == "hit"

    def test_cache_predictions_miss_when_full_refresh(self, tmp_path):
        """full_refresh=True forces all predictions to 'miss' even with matching state."""
        import json

        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/m1.sql",
                },
            },
            "sources": {},
        }
        write_sql_files(tmp_path, {"models/m1.sql": "SELECT 1"})
        cache_dir = tmp_path / "cache"
        cache_dir.mkdir(exist_ok=True)
        orch = self._make_cache_orch(tmp_path, manifest_data)
        from prefect_dbt.core._manifest import ManifestParser

        parser = ManifestParser(write_manifest(tmp_path, manifest_data))
        precomputed = orch._precompute_all_cache_keys(
            parser.get_executable_nodes(), False, parser.get_macro_paths()
        )
        # Write execution state that matches -> would be "hit" without full_refresh
        state_path = cache_dir / ".execution_state.json"
        state_path.write_text(json.dumps(precomputed))
        plan = orch.plan(full_refresh=True)
        assert plan.cache_predictions is not None
        assert plan.cache_predictions["model.test.m1"] == "miss"
# =============================================================================
# TestPlanExtraCliArgs
# =============================================================================
class TestPlanExtraCliArgs:
    """Validation of extra_cli_args passed through plan()."""

    @staticmethod
    def _single_node_orchestrator(tmp_path):
        # A one-model manifest is enough to exercise CLI-arg validation.
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
            },
            "sources": {},
        }
        return PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=write_manifest(tmp_path, manifest_data),
            executor=_make_mock_executor(),
        )

    def test_blocked_flag_raises(self, tmp_path):
        """Blocked CLI args raise ValueError in plan() too."""
        orchestrator = self._single_node_orchestrator(tmp_path)
        with pytest.raises(ValueError, match="--select"):
            orchestrator.plan(extra_cli_args=["--select", "tag:foo"])

    def test_safe_flags_accepted(self, tmp_path):
        """Safe extra CLI args don't raise in plan()."""
        orchestrator = self._single_node_orchestrator(tmp_path)
        # Should not raise
        plan = orchestrator.plan(extra_cli_args=["--store-failures"])
        assert plan.node_count == 1
# =============================================================================
# TestPlanDoesNotExecute
# =============================================================================
class TestPlanDoesNotExecute:
    """plan() is a pure dry run and never invokes the executor."""

    def test_executor_not_called(self, tmp_path, diamond_manifest_data):
        """plan() must not call executor.execute_wave or execute_node."""
        executor = _make_mock_executor()
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=write_manifest(tmp_path, diamond_manifest_data),
            executor=executor,
        )

        orchestrator.plan()

        executor.execute_wave.assert_not_called()
        # execute_node may not exist on every executor; if present, unused.
        if hasattr(executor, "execute_node"):
            assert not executor.execute_node.called
# =============================================================================
# TestPreparedBuildSharedWithRunBuild
# =============================================================================
class TestPreparedBuildSharedWithRunBuild:
    """run_build() and plan() share the _prepare_build code path."""

    def test_run_build_still_works_after_refactor(self, tmp_path):
        """run_build() continues to work with the _prepare_build refactor."""
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
            },
            "sources": {},
        }
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=write_manifest(tmp_path, manifest_data),
            executor=_make_mock_executor(),
        )
        results = orchestrator.run_build()
        assert "model.test.m1" in results
        assert results["model.test.m1"]["status"] == "success"

    def test_plan_and_run_build_see_same_waves(self, tmp_path, diamond_manifest_data):
        """plan() and run_build() produce consistent wave structures."""
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=write_manifest(tmp_path, diamond_manifest_data),
            executor=_make_mock_executor(),
        )
        build_plan = orchestrator.plan()
        results = orchestrator.run_build()
        # Every node the plan promises must appear in run_build's results.
        planned_ids = {
            node.unique_id for wave in build_plan.waves for node in wave.nodes
        }
        assert planned_ids == set(results.keys())
# =============================================================================
# TestBuildPlanStr
# =============================================================================
def _node(
    uid: str, resource_type: NodeType = NodeType.Model, materialization: str = "table"
) -> DbtNode:
    """Build a minimal DbtNode whose name is the final dotted segment of *uid*."""
    short_name = uid.rsplit(".", 1)[-1]
    return DbtNode(
        unique_id=uid,
        name=short_name,
        resource_type=resource_type,
        materialization=materialization,
    )
class TestBuildPlanStr:
    """str(BuildPlan) renders a human-readable wave / cache / skipped summary."""

    def test_empty_plan(self):
        """An empty plan shows zero counts and omits the optional sections."""
        plan = BuildPlan(
            waves=(),
            node_count=0,
            cache_predictions=None,
            skipped_nodes={},
            estimated_parallelism=0,
        )
        text = str(plan)
        assert "0 node(s) in 0 wave(s)" in text
        assert "max parallelism = 0" in text
        # No cache or skipped sections
        assert "Cache:" not in text
        assert "Skipped" not in text

    def test_single_wave_no_cache(self):
        """A one-node plan shows the wave header, node id, and [type, materialization]."""
        m1 = _node("model.test.m1")
        plan = BuildPlan(
            waves=(ExecutionWave(wave_number=0, nodes=[m1]),),
            node_count=1,
            cache_predictions=None,
            skipped_nodes={},
            estimated_parallelism=1,
        )
        text = str(plan)
        assert "1 node(s) in 1 wave(s)" in text
        assert "Wave 0 (1 node(s))" in text
        assert "model.test.m1" in text
        assert "[model, table]" in text
        assert "Cache:" not in text

    def test_multiple_waves(self):
        """Each wave gets its own header; parallelism reflects the widest wave."""
        m1 = _node("model.test.m1")
        m2 = _node("model.test.m2", materialization="view")
        m3 = _node("model.test.m3")
        plan = BuildPlan(
            waves=(
                ExecutionWave(wave_number=0, nodes=[m1, m2]),
                ExecutionWave(wave_number=1, nodes=[m3]),
            ),
            node_count=3,
            cache_predictions=None,
            skipped_nodes={},
            estimated_parallelism=2,
        )
        text = str(plan)
        assert "3 node(s) in 2 wave(s)" in text
        assert "max parallelism = 2" in text
        assert "Wave 0 (2 node(s))" in text
        assert "Wave 1 (1 node(s))" in text
        assert "[model, view]" in text

    def test_cache_predictions_displayed(self):
        """Per-node cache predictions and the hit/miss/excluded tally are shown."""
        m1 = _node("model.test.m1")
        m2 = _node("model.test.m2")
        t1 = _node("test.test.t1", resource_type=NodeType.Test, materialization="test")
        plan = BuildPlan(
            waves=(ExecutionWave(wave_number=0, nodes=[m1, m2, t1]),),
            node_count=3,
            cache_predictions={
                "model.test.m1": "hit",
                "model.test.m2": "miss",
                "test.test.t1": "excluded",
            },
            skipped_nodes={},
            estimated_parallelism=3,
        )
        text = str(plan)
        assert "(cache: hit)" in text
        assert "(cache: miss)" in text
        assert "(cache: excluded)" in text
        assert "1 hit(s), 1 miss(es), 1 excluded" in text

    def test_skipped_nodes_displayed(self):
        """Skipped nodes are listed with their skip reason."""
        m1 = _node("model.test.m1")
        plan = BuildPlan(
            waves=(ExecutionWave(wave_number=0, nodes=[m1]),),
            node_count=1,
            cache_predictions=None,
            skipped_nodes={
                "model.test.stale": {"status": "skipped", "reason": "stale source"},
            },
            estimated_parallelism=1,
        )
        text = str(plan)
        assert "Skipped (1)" in text
        assert "model.test.stale: stale source" in text

    def test_str_matches_print_output(self, tmp_path):
        """str() on a plan from the orchestrator works end-to-end."""
        manifest = write_manifest(
            tmp_path,
            {
                "nodes": {
                    "model.test.m1": {
                        "name": "m1",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                    },
                },
                "sources": {},
            },
        )
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        plan = orch.plan()
        text = str(plan)
        assert "1 node(s) in 1 wave(s)" in text
        assert "model.test.m1" in text
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_plan.py",
"license": "Apache License 2.0",
"lines": 659,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/utilities/_infrastructure_exit_codes.py | """Centralized registry of infrastructure process exit code explanations and
resolution hints.
Consumed by the worker and runner to emit actionable log messages when flow run
infrastructure exits with a non-zero status code.
"""
import logging
from dataclasses import dataclass
@dataclass(frozen=True)
class InfrastructureExitInfo:
    """Human-readable explanation and resolution for an infrastructure process exit code."""

    # What the exit code means, phrased for an operator reading logs.
    explanation: str
    # Suggested next step for the operator.
    resolution: str
    # Severity to log the hint at. Defaults to ERROR; registry entries for
    # signals/clean exits override this to INFO.
    log_level: int = logging.ERROR
# Registry of known exit codes. Negative keys are raw POSIX signal numbers
# (as reported by subprocess-style APIs: -N means "killed by signal N");
# 137/143 are the container-runtime equivalents (128 + signal number).
INFRASTRUCTURE_EXIT_HINTS: dict[int, InfrastructureExitInfo] = {
    # Clean exit — logged at INFO so successful runs stay quiet.
    0: InfrastructureExitInfo(
        explanation="Process exited cleanly.",
        resolution="No action needed.",
        log_level=logging.INFO,
    ),
    # -9: killed by SIGKILL (e.g. OOM killer or manual cancellation).
    -9: InfrastructureExitInfo(
        explanation=(
            "Process exited due to a SIGKILL signal. Typically caused by the"
            " operating system terminating the process for exceeding memory limits,"
            " or by manual cancellation."
        ),
        resolution=(
            "Check whether the flow run exceeded its memory allocation. If running"
            " in a container, increase the memory limit. If this was intentional"
            " cancellation, no action is needed."
        ),
        log_level=logging.INFO,
    ),
    # -15: killed by SIGTERM (graceful shutdown / cancellation).
    -15: InfrastructureExitInfo(
        explanation=(
            "Process exited due to a SIGTERM signal. Typically caused by graceful"
            " shutdown or manual cancellation."
        ),
        resolution=(
            "If this was caused by cancellation, no action is needed. Otherwise,"
            " check for infrastructure scaling events or deployment rollovers."
        ),
        log_level=logging.INFO,
    ),
    1: InfrastructureExitInfo(
        explanation="Process exited with a general error.",
        resolution=(
            "Check the flow run logs for an unhandled exception or assertion error."
        ),
    ),
    125: InfrastructureExitInfo(
        explanation="Container failed to run (Docker/OCI exit code 125).",
        resolution=(
            "Verify the container image exists, the entrypoint is correct, and"
            " the container runtime is healthy."
        ),
    ),
    126: InfrastructureExitInfo(
        explanation="Command found but not executable (permission denied).",
        resolution=(
            "Check file permissions on the entrypoint script or binary. Ensure the"
            " file has the executable bit set."
        ),
    ),
    127: InfrastructureExitInfo(
        explanation="Command not found.",
        resolution=(
            "Verify that the entrypoint command or script is installed in the"
            " execution environment. Check your PATH and container image contents."
        ),
    ),
    # 137 = 128 + 9 (SIGKILL) as reported by container runtimes.
    137: InfrastructureExitInfo(
        explanation="Process was killed, likely due to an out-of-memory (OOM) condition.",
        resolution=(
            "Increase the memory allocation for the flow run infrastructure."
            " In Kubernetes, raise the memory limit in the job template."
            " In ECS, increase the task memory."
        ),
    ),
    # 143 = 128 + 15 (SIGTERM) as reported by container runtimes.
    143: InfrastructureExitInfo(
        explanation=(
            "Process received SIGTERM via the container runtime. This is the"
            " containerized equivalent of exit code -15."
        ),
        resolution=(
            "If this was caused by cancellation or a scaling event, no action is"
            " needed. Otherwise, check for pod evictions or ECS task stops."
        ),
        log_level=logging.INFO,
    ),
    247: InfrastructureExitInfo(
        explanation="Process was terminated due to high memory usage.",
        resolution=(
            "Increase the memory allocation for the flow run infrastructure."
            " Consider profiling memory usage to find the source of consumption."
        ),
    ),
    # Windows Ctrl+C / Ctrl+Break (STATUS_CONTROL_C_EXIT)
    0xC000013A: InfrastructureExitInfo(
        explanation=(
            "Process was terminated due to a Ctrl+C or Ctrl+Break signal."
            " Typically caused by manual cancellation."
        ),
        resolution="If this was intentional cancellation, no action is needed.",
        log_level=logging.INFO,
    ),
}
def get_infrastructure_exit_info(code: int) -> InfrastructureExitInfo:
    """Return the `InfrastructureExitInfo` for *code*.

    Known codes return a specific explanation and resolution. Unknown non-zero
    codes return a generic entry. Code 0 always returns the "exited cleanly"
    entry.
    """
    known = INFRASTRUCTURE_EXIT_HINTS.get(code)
    if known is not None:
        return known
    # Fall back to a generic hint for codes not in the registry.
    return InfrastructureExitInfo(
        explanation="Process exited with an unexpected status code.",
        resolution=(
            "Check the flow run logs and infrastructure logs for more details."
        ),
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/utilities/_infrastructure_exit_codes.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/utilities/test_exit_codes.py | import logging
import pytest
from prefect.utilities._infrastructure_exit_codes import (
INFRASTRUCTURE_EXIT_HINTS,
InfrastructureExitInfo,
get_infrastructure_exit_info,
)
class TestInfrastructureExitInfo:
    """Basic contract of the InfrastructureExitInfo dataclass."""

    def test_default_log_level_is_error(self):
        hint = InfrastructureExitInfo(explanation="test", resolution="test")
        assert hint.log_level == logging.ERROR

    def test_is_frozen(self):
        frozen_hint = InfrastructureExitInfo(explanation="a", resolution="b")
        with pytest.raises(AttributeError):
            frozen_hint.explanation = "c"  # type: ignore[misc]
class TestInfrastructureExitHints:
    """The registry covers the expected codes at the expected log levels."""

    # Signal-style terminations log at INFO; genuine failures log at ERROR.
    INFO_CODES = [-9, -15, 143, 0xC000013A]
    ERROR_CODES = [1, 125, 126, 127, 137, 247]
    ALL_CODES = [0] + INFO_CODES + ERROR_CODES

    @pytest.mark.parametrize("code", ALL_CODES)
    def test_known_codes_present(self, code: int):
        assert code in INFRASTRUCTURE_EXIT_HINTS

    def test_code_zero_is_info_level(self):
        assert INFRASTRUCTURE_EXIT_HINTS[0].log_level == logging.INFO

    @pytest.mark.parametrize("code", INFO_CODES)
    def test_signal_codes_are_info_level(self, code: int):
        assert INFRASTRUCTURE_EXIT_HINTS[code].log_level == logging.INFO

    @pytest.mark.parametrize("code", ERROR_CODES)
    def test_error_codes_are_error_level(self, code: int):
        assert INFRASTRUCTURE_EXIT_HINTS[code].log_level == logging.ERROR
class TestGetInfrastructureExitInfo:
    """Lookup behavior for known, unknown, and zero exit codes."""

    def test_known_code_returns_registry_entry(self):
        # Must be the exact registry object, not a copy.
        assert get_infrastructure_exit_info(137) is INFRASTRUCTURE_EXIT_HINTS[137]

    def test_unknown_code_returns_generic_entry(self):
        generic = get_infrastructure_exit_info(42)
        assert generic.log_level == logging.ERROR
        assert "unexpected" in generic.explanation

    def test_zero_returns_clean_exit(self):
        clean = get_infrastructure_exit_info(0)
        assert "cleanly" in clean.explanation
        assert clean.log_level == logging.INFO

    def test_negative_unknown_code(self):
        assert "unexpected" in get_infrastructure_exit_info(-99).explanation
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/utilities/test_exit_codes.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/benches/bench_orchestrator.py | #!/usr/bin/env python
"""Benchmark PrefectDbtOrchestrator vs PrefectDbtRunner.
Creates a 5-layer Γ 10-model synthetic dbt project backed by Postgres (50 nodes
total) and times each configuration once. This is intentionally a wall-clock
timer, not a statistical micro-benchmark, so the numbers reflect what users
actually experience.
Each execution group (runner, PER_WAVE, PER_NODE) gets its own Postgres schema
so runs are isolated without requiring separate database connections.
Postgres connection defaults match the GitHub Actions service definition.
Override via environment variables: BENCH_PG_HOST, BENCH_PG_PORT,
BENCH_PG_USER, BENCH_PG_PASSWORD, BENCH_PG_DBNAME.
Usage
-----
uv run python benches/bench_orchestrator.py
uv run python benches/bench_orchestrator.py --output results.json
uv run python benches/bench_orchestrator.py --output pr.json --baseline main.json
uv run python benches/bench_orchestrator.py --markdown-out comment.md
"""
from __future__ import annotations
import argparse
import json
import os
import sys
import tempfile
import time
from dataclasses import asdict, dataclass
from pathlib import Path
import yaml
from dbt.cli.main import dbtRunner
from prefect_dbt.core._orchestrator import (
ExecutionMode,
PrefectDbtOrchestrator,
TestStrategy,
)
from prefect_dbt.core.runner import PrefectDbtRunner
from prefect_dbt.core.settings import PrefectDbtSettings
from prefect import flow
# ---------------------------------------------------------------------------
# Project configuration
# ---------------------------------------------------------------------------
LAYERS = 5 # dependency layers (= number of PER_WAVE dbt invocations for "all")
WIDTH = 10 # models per layer (total = LAYERS * WIDTH = 50 nodes)
# Subset selector: first layer only (1 wave, WIDTH nodes, no dependencies)
SUBSET_SELECT = "path:models/layer_0"
# ---------------------------------------------------------------------------
# Postgres connection defaults
# ---------------------------------------------------------------------------
_PG_HOST = os.environ.get("BENCH_PG_HOST", "localhost")
_PG_PORT = int(os.environ.get("BENCH_PG_PORT", "5432"))
_PG_USER = os.environ.get("BENCH_PG_USER", "postgres")
_PG_PASSWORD = os.environ.get("BENCH_PG_PASSWORD", "postgres")
_PG_DBNAME = os.environ.get("BENCH_PG_DBNAME", "bench")
# ---------------------------------------------------------------------------
# Project setup helpers
# ---------------------------------------------------------------------------
def _write_project_files(project_dir: Path) -> None:
    """Write dbt_project.yml and model SQL files (no profiles.yml)."""
    models_root = project_dir / "models"
    for layer_idx in range(LAYERS):
        layer_dir = models_root / f"layer_{layer_idx}"
        layer_dir.mkdir(parents=True)
        for model_idx in range(WIDTH):
            # Layer 0 is a seed; deeper layers ref the same-index model above.
            body = (
                "SELECT 1 AS id"
                if layer_idx == 0
                else f"SELECT id FROM {{{{ ref('l{layer_idx - 1}_m{model_idx}') }}}}"
            )
            (layer_dir / f"l{layer_idx}_m{model_idx}.sql").write_text(body)

    project_config = {
        "name": "dbt_bench",
        "version": "1.0.0",
        "config-version": 2,
        "profile": "bench",
        "model-paths": ["models"],
        "models": {"dbt_bench": {"+materialized": "table"}},
    }
    (project_dir / "dbt_project.yml").write_text(yaml.dump(project_config))
def _write_profiles(profiles_dir: Path, schema: str) -> None:
    """Write a profiles.yml pointing dbt at Postgres using *schema*."""
    connection = {
        "type": "postgres",
        "host": _PG_HOST,
        "port": _PG_PORT,
        "user": _PG_USER,
        "password": _PG_PASSWORD,
        "dbname": _PG_DBNAME,
        "schema": schema,
        "threads": WIDTH,
    }
    profile = {"bench": {"target": "dev", "outputs": {"dev": connection}}}
    profiles_dir.mkdir(parents=True, exist_ok=True)
    (profiles_dir / "profiles.yml").write_text(yaml.dump(profile))
def setup_project(root: Path) -> tuple[Path, Path, Path, Path]:
    """
    Create the shared project tree and three isolated environments.

    Each group writes to its own Postgres schema so runs don't interfere.

    Returns
    -------
    project_dir, runner_profiles, per_wave_profiles, per_node_profiles

    Raises
    ------
    RuntimeError
        If ``dbt parse`` fails or does not produce ``target/manifest.json``.
    """
    project_dir = root / "project"
    project_dir.mkdir()
    _write_project_files(project_dir)
    for name in ("runner", "per_wave", "per_node"):
        _write_profiles(root / f"profiles_{name}", schema=f"bench_{name}")
    # Run dbt parse once (parse doesn't connect to the database).
    # Reuse the manifest across all three groups.
    _write_profiles(root / "profiles_parse", schema="bench_parse")
    result = dbtRunner().invoke(
        [
            "parse",
            "--project-dir",
            str(project_dir),
            "--profiles-dir",
            str(root / "profiles_parse"),
        ]
    )
    if not result.success:
        raise RuntimeError(f"dbt parse failed: {result.exception}")
    manifest_path = project_dir / "target" / "manifest.json"
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, and a missing manifest must always be a hard error.
    if not manifest_path.exists():
        raise RuntimeError("dbt parse did not produce manifest.json")
    return (
        project_dir,
        root / "profiles_runner",
        root / "profiles_per_wave",
        root / "profiles_per_node",
    )
# ---------------------------------------------------------------------------
# Timing helpers
# ---------------------------------------------------------------------------
@dataclass
class BenchmarkResult:
    """Outcome of a single timed benchmark scenario."""

    # Human-readable scenario label; also the join key for baseline comparison.
    name: str
    # Wall-clock duration in seconds.
    elapsed_s: float
    status: str  # "OK" or "FAIL"
    # Failure details when status == "FAIL"; None otherwise.
    error: str | None = None
def _time(name: str, fn) -> BenchmarkResult:
    """Time fn(); inspect the return value for node-level dbt failures."""
    started = time.perf_counter()
    try:
        outcome = fn()
    except Exception as exc:
        return BenchmarkResult(
            name=name,
            elapsed_s=time.perf_counter() - started,
            status="FAIL",
            error=str(exc),
        )
    elapsed = time.perf_counter() - started
    # run_build() returns a dict; check for failed nodes
    if isinstance(outcome, dict):
        failed = [
            node_id
            for node_id, node_result in outcome.items()
            if isinstance(node_result, dict) and node_result.get("status") == "error"
        ]
        if failed:
            return BenchmarkResult(
                name=name,
                elapsed_s=elapsed,
                status="FAIL",
                error=f"dbt build failed for nodes: {failed}",
            )
    return BenchmarkResult(name=name, elapsed_s=elapsed, status="OK")
# ---------------------------------------------------------------------------
# Benchmark scenarios
# ---------------------------------------------------------------------------
def run_benchmarks(
    project_dir: Path,
    manifest_path: Path,
    runner_profiles: Path,
    per_wave_profiles: Path,
    per_node_profiles: Path,
) -> list[BenchmarkResult]:
    """Time each execution configuration once and return the results.

    Six scenarios are timed: {runner, PER_WAVE, PER_NODE} x {all models,
    first-layer subset}. Each group uses its own profiles dir — and thus its
    own Postgres schema — so the groups do not interfere with each other.
    """
    results: list[BenchmarkResult] = []
    # -- PrefectDbtRunner -------------------------------------------------
    # Calls dbt build in a single invocation; dbt handles DAG ordering.
    def _runner(select: str | None = None) -> None:
        s = PrefectDbtSettings(project_dir=project_dir, profiles_dir=runner_profiles)
        r = PrefectDbtRunner(settings=s, raise_on_failure=True)
        args = ["build"]
        if select:
            args += ["--select", select]
        r.invoke(args)
    results.append(_time("runner / select=None (all)", lambda: _runner()))
    results.append(
        _time(f"runner / select={SUBSET_SELECT}", lambda: _runner(SUBSET_SELECT))
    )
    # -- Orchestrator PER_WAVE --------------------------------------------
    # One dbt invocation per topological wave (LAYERS invocations for "all").
    def _per_wave(select: str | None = None):
        s = PrefectDbtSettings(project_dir=project_dir, profiles_dir=per_wave_profiles)
        orch = PrefectDbtOrchestrator(
            settings=s,
            manifest_path=manifest_path,
            execution_mode=ExecutionMode.PER_WAVE,
            test_strategy=TestStrategy.SKIP,
            create_summary_artifact=False,
            write_run_results=False,
        )
        return orch.run_build(select=select)
    results.append(
        _time("orchestrator / select=None (all) / PER_WAVE", lambda: _per_wave())
    )
    results.append(
        _time(
            f"orchestrator / select={SUBSET_SELECT} / PER_WAVE",
            lambda: _per_wave(SUBSET_SELECT),
        )
    )
    # -- Orchestrator PER_NODE --------------------------------------------
    # One dbt invocation per node (LAYERS*WIDTH invocations for "all").
    # run_build() must be called inside a @flow.
    @flow
    def _per_node_flow(select: str | None = None):  # type: ignore[misc]
        s = PrefectDbtSettings(project_dir=project_dir, profiles_dir=per_node_profiles)
        orch = PrefectDbtOrchestrator(
            settings=s,
            manifest_path=manifest_path,
            execution_mode=ExecutionMode.PER_NODE,
            test_strategy=TestStrategy.SKIP,
            create_summary_artifact=False,
            write_run_results=False,
        )
        return orch.run_build(select=select)
    results.append(
        _time("orchestrator / select=None (all) / PER_NODE", lambda: _per_node_flow())
    )
    results.append(
        _time(
            f"orchestrator / select={SUBSET_SELECT} / PER_NODE",
            lambda: _per_node_flow(SUBSET_SELECT),
        )
    )
    return results
# ---------------------------------------------------------------------------
# Formatting
# ---------------------------------------------------------------------------
# Fixed column widths for the plain-text results table.
_COL_NAME = 54
_COL_TIME = 9
_COL_STATUS = 8
# Horizontal rule sized to the full table width (+2 for the column separators).
_RULE = "-" * (_COL_NAME + _COL_TIME + _COL_STATUS + 2)
def format_text_table(results: list[BenchmarkResult]) -> str:
    """Render *results* as a fixed-width plain-text table."""
    header = f"{'Name':<{_COL_NAME}} {'Time':>{_COL_TIME}} {'Status':>{_COL_STATUS}}"
    lines = ["=" * 72, "BENCHMARK RESULTS", "=" * 72, header, _RULE]
    for entry in results:
        elapsed = f"{entry.elapsed_s:.3f}s"
        lines.append(
            f"{entry.name:<{_COL_NAME}} {elapsed:>{_COL_TIME}} {entry.status:>{_COL_STATUS}}"
        )
        # Failures get their (wrapped) error text indented under the row.
        if entry.error:
            lines.extend(_wrap(f"  Error: {entry.error}", 70))
    lines.append(_RULE)
    return "\n".join(lines)
def _wrap(text: str, width: int) -> list[str]:
if len(text) <= width:
return [text]
out = []
while len(text) > width:
split = text.rfind(" ", 0, width)
if split == -1:
split = width
out.append(text[:split])
text = " " + text[split:].lstrip()
if text:
out.append(text)
return out
def format_markdown(
    results: list[BenchmarkResult],
    baseline: list[dict] | None,
    sha: str | None,
) -> str:
    """Build the Markdown PR-comment body for the benchmark results.

    Args:
        results: Benchmark results for the current run.
        baseline: Parsed ``results`` list from a baseline JSON file, or None.
            When present, a comparison table keyed by benchmark name is
            appended; names missing from the baseline are skipped.
        sha: Git commit SHA to display (truncated to 12 chars), if provided.

    Returns:
        The complete Markdown document as a single string.
    """
    # NOTE(review): several literals below ("Γ", "π¨", "β οΈ") look like
    # mojibake of "×" and emoji — confirm the source file encoding. Left
    # byte-identical here since they are runtime output.
    node_count = LAYERS * WIDTH
    lines = ["## dbt Orchestrator Benchmark Results"]
    if sha:
        lines.append(f"\nCommit: `{sha[:12]}`")
    lines.append(
        f"\nProject: **{LAYERS} layers Γ {WIDTH} models = {node_count} nodes** "
        f"(Postgres, default concurrency for PER_NODE)"
    )
    block = "```\n" + format_text_table(results) + "\n```"
    lines.append(
        f"\n<details open>\n<summary>Results</summary>\n\n{block}\n\n</details>"
    )
    if baseline:
        by_name = {r["name"]: r for r in baseline}
        rows = []
        for r in results:
            b = by_name.get(r.name)
            if not b:
                continue
            # Percent change relative to the baseline elapsed time.
            pct = (r.elapsed_s - b["elapsed_s"]) / b["elapsed_s"] * 100
            sign = "+" if pct >= 0 else ""
            # Flag regressions: >15% gets the strong marker, >5% the mild one.
            flag = " π¨" if pct > 15 else (" β οΈ" if pct > 5 else "")
            rows.append(
                f"| `{r.name}` | {b['elapsed_s']:.3f}s | "
                f"{r.elapsed_s:.3f}s | {sign}{pct:.1f}%{flag} |"
            )
        if rows:
            table = (
                "| Benchmark | main | PR | Change |\n"
                "|---|---|---|---|\n" + "\n".join(rows)
            )
            lines.append(
                "\n<details>\n<summary>Comparison vs main</summary>\n\n"
                + table
                + "\n\n</details>"
            )
    return "\n".join(lines)
# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
def main() -> int:
    """CLI entry point.

    Builds the synthetic project in a temporary directory, runs all
    benchmarks, prints a text table, and optionally writes JSON results
    and/or a Markdown comment body.

    Returns:
        1 if any benchmark failed, 0 otherwise (suitable for sys.exit).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--output", metavar="JSON", help="Save results to JSON")
    parser.add_argument(
        "--baseline", metavar="JSON", help="Load baseline results from JSON"
    )
    parser.add_argument(
        "--markdown-out", metavar="MD", help="Write Markdown comment body"
    )
    parser.add_argument("--sha", help="Git SHA to include in the comment")
    args = parser.parse_args()
    # A missing baseline file is not an error — comparison is simply skipped.
    baseline: list[dict] | None = None
    if args.baseline:
        p = Path(args.baseline)
        if p.exists():
            baseline = json.loads(p.read_text()).get("results")
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        print("Setting up benchmark project β¦", flush=True)
        project_dir, runner_profiles, per_wave_profiles, per_node_profiles = (
            setup_project(root)
        )
        manifest_path = project_dir / "target" / "manifest.json"
        print(
            f"Running benchmarks ({LAYERS} layers Γ {WIDTH} models = {LAYERS * WIDTH} nodes) β¦\n",
            flush=True,
        )
        results = run_benchmarks(
            project_dir,
            manifest_path,
            runner_profiles,
            per_wave_profiles,
            per_node_profiles,
        )
        table = format_text_table(results)
        print(table)
        if args.output:
            Path(args.output).write_text(
                json.dumps({"results": [asdict(r) for r in results]}, indent=2)
            )
            print(f"\nResults saved to {args.output}", flush=True)
        if args.markdown_out:
            md = format_markdown(results, baseline, args.sha)
            Path(args.markdown_out).write_text(md)
            print(f"Markdown written to {args.markdown_out}", flush=True)
    return 1 if any(r.status == "FAIL" for r in results) else 0
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/benches/bench_orchestrator.py",
"license": "Apache License 2.0",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/server/services/db_vacuum.py | """
The database vacuum service. Two perpetual services schedule cleanup tasks
independently, gated by the `enabled` set in
`PREFECT_SERVER_SERVICES_DB_VACUUM_ENABLED` (default `["events"]`):
1. schedule_vacuum_tasks β Cleans up old flow runs and orphaned resources
(logs, artifacts, artifact collections). Enabled when `"flow_runs"`
is in the enabled set.
2. schedule_event_vacuum_tasks β Cleans up old events, including any
event types with per-type retention overrides. Enabled when `"events"`
is in the enabled set **and** `event_persister.enabled` is true
(the default), so that operators who disabled event processing are not
surprised on upgrade. Runs in all server modes, including ephemeral.
Per-event-type retention can be customised via
`PREFECT_SERVER_SERVICES_DB_VACUUM_EVENT_RETENTION_OVERRIDES`. Event types
not listed fall back to `server.events.retention_period`.
Each task runs independently with its own error isolation and
docket-managed retries. Deterministic keys prevent duplicate tasks from
accumulating if a cycle overlaps with in-progress work.
"""
from __future__ import annotations
import asyncio
import logging
from datetime import timedelta
import sqlalchemy as sa
from docket import CurrentDocket, Depends, Docket, Perpetual
from prefect.logging import get_logger
from prefect.server.database import PrefectDBInterface, provide_database_interface
from prefect.server.schemas.states import TERMINAL_STATES
from prefect.server.services.perpetual_services import perpetual_service
from prefect.settings.context import get_current_settings
from prefect.types._datetime import now
logger: logging.Logger = get_logger(__name__)
# ---------------------------------------------------------------------------
# Finder (perpetual service)
# ---------------------------------------------------------------------------
@perpetual_service(
    # Only runs when "flow_runs" is opted into the enabled vacuum types.
    enabled_getter=lambda: (
        "flow_runs"
        in get_current_settings().server.services.db_vacuum.enabled_vacuum_types
    ),
)
async def schedule_vacuum_tasks(
    docket: Docket = CurrentDocket(),
    # NOTE(review): the `every` interval is read from settings once, when
    # this default is evaluated at import time — presumably intentional;
    # confirm if loop_seconds must be changeable at runtime.
    perpetual: Perpetual = Perpetual(
        automatic=False,
        every=timedelta(
            seconds=get_current_settings().server.services.db_vacuum.loop_seconds
        ),
    ),
) -> None:
    """Schedule cleanup tasks for old flow runs and orphaned resources.

    Each task is enqueued with a deterministic key so that overlapping
    cycles (e.g. when cleanup takes longer than loop_seconds) naturally
    deduplicate instead of piling up redundant work.

    Disabled by default because it permanently deletes flow runs. Enable
    by adding "flow_runs" to PREFECT_SERVER_SERVICES_DB_VACUUM_ENABLED.
    """
    await docket.add(vacuum_orphaned_logs, key="db-vacuum:orphaned-logs")()
    await docket.add(vacuum_orphaned_artifacts, key="db-vacuum:orphaned-artifacts")()
    await docket.add(
        vacuum_stale_artifact_collections, key="db-vacuum:stale-collections"
    )()
    await docket.add(vacuum_old_flow_runs, key="db-vacuum:old-flow-runs")()
@perpetual_service(
    # Requires both the "events" vacuum type AND the event persister; if
    # event processing is disabled, trimming is disabled with it.
    enabled_getter=lambda: (
        "events"
        in get_current_settings().server.services.db_vacuum.enabled_vacuum_types
        and get_current_settings().server.services.event_persister.enabled
    ),
    run_in_ephemeral=True,
)
async def schedule_event_vacuum_tasks(
    docket: Docket = CurrentDocket(),
    # NOTE(review): `every` is bound from settings once at import time when
    # this default is evaluated — confirm that is acceptable.
    perpetual: Perpetual = Perpetual(
        automatic=False,
        every=timedelta(
            seconds=get_current_settings().server.services.db_vacuum.loop_seconds
        ),
    ),
) -> None:
    """Schedule cleanup tasks for old events and heartbeat events.

    Enabled by default (`"events"` is in the default enabled set).
    Automatically disabled when the event persister service is disabled
    (PREFECT_SERVER_SERVICES_EVENT_PERSISTER_ENABLED=false) so that
    operators who opted out of event processing are not surprised by
    trimming on upgrade.
    """
    await docket.add(
        vacuum_events_with_retention_overrides, key="db-vacuum:retention-overrides"
    )()
    await docket.add(vacuum_old_events, key="db-vacuum:old-events")()
# ---------------------------------------------------------------------------
# Cleanup tasks (docket task functions)
# ---------------------------------------------------------------------------
async def vacuum_orphaned_logs(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Delete logs whose flow_run_id references a non-existent flow run."""
    batch_size = get_current_settings().server.services.db_vacuum.batch_size
    orphaned = await _find_orphaned_fk_ids(db, db.Log, db.Log.flow_run_id, db.FlowRun)
    if not orphaned:
        return
    removed = await _batch_delete(
        db,
        db.Log,
        db.Log.flow_run_id.in_(orphaned),
        batch_size,
    )
    if removed:
        logger.info("Database vacuum: deleted %d orphaned logs.", removed)
async def vacuum_orphaned_artifacts(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Delete artifacts whose flow_run_id references a non-existent flow run."""
    batch_size = get_current_settings().server.services.db_vacuum.batch_size
    orphaned = await _find_orphaned_fk_ids(
        db, db.Artifact, db.Artifact.flow_run_id, db.FlowRun
    )
    if not orphaned:
        return
    removed = await _batch_delete(
        db,
        db.Artifact,
        db.Artifact.flow_run_id.in_(orphaned),
        batch_size,
    )
    if removed:
        logger.info("Database vacuum: deleted %d orphaned artifacts.", removed)
async def vacuum_stale_artifact_collections(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Reconcile artifact collections whose latest_id points to a deleted artifact.

    Re-points to the next latest version if one exists, otherwise deletes
    the collection row.
    """
    batch_size = get_current_settings().server.services.db_vacuum.batch_size
    repointed, removed = await _reconcile_artifact_collections(db, batch_size)
    if not (repointed or removed):
        return
    logger.info(
        "Database vacuum: reconciled %d stale artifact collections "
        "(%d re-pointed, %d removed).",
        repointed + removed,
        repointed,
        removed,
    )
async def vacuum_old_flow_runs(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Delete old top-level terminal flow runs past the retention period."""
    vacuum_settings = get_current_settings().server.services.db_vacuum
    cutoff = now("UTC") - vacuum_settings.retention_period
    # Eligible: top-level runs (no parent task run) in a terminal state with
    # a recorded end time older than the retention cutoff.
    criteria = sa.and_(
        db.FlowRun.parent_task_run_id.is_(None),
        db.FlowRun.state_type.in_(TERMINAL_STATES),
        db.FlowRun.end_time.is_not(None),
        db.FlowRun.end_time < cutoff,
    )
    removed = await _batch_delete(db, db.FlowRun, criteria, vacuum_settings.batch_size)
    if removed:
        logger.info("Database vacuum: deleted %d old flow runs.", removed)
async def vacuum_events_with_retention_overrides(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Delete events whose types have per-type retention overrides.

    Iterates over all entries in `event_retention_overrides` and deletes
    events (and their resources) that are older than the configured retention
    for that type, capped by the global events retention period.
    """
    settings = get_current_settings()
    global_retention = settings.server.events.retention_period
    overrides = settings.server.services.db_vacuum.event_retention_overrides
    batch_size = settings.server.services.db_vacuum.batch_size
    for event_type, type_retention in overrides.items():
        # min() caps the override at the global retention: an override can
        # shorten retention for a type but never extend it past the global.
        retention = min(type_retention, global_retention)
        retention_cutoff = now("UTC") - retention
        # Delete event resources first (no FK cascade)
        event_ids = (
            sa.select(db.Event.id)
            .where(
                db.Event.event == event_type,
                db.Event.occurred < retention_cutoff,
            )
            .scalar_subquery()
        )
        resources_deleted = await _batch_delete(
            db,
            db.EventResource,
            db.EventResource.event_id.in_(event_ids),
            batch_size,
        )
        # Then delete the events themselves
        events_deleted = await _batch_delete(
            db,
            db.Event,
            sa.and_(
                db.Event.event == event_type,
                db.Event.occurred < retention_cutoff,
            ),
            batch_size,
        )
        if events_deleted or resources_deleted:
            logger.info(
                "Database vacuum: deleted %d %r events and %d event resources.",
                events_deleted,
                event_type,
                resources_deleted,
            )
async def vacuum_old_events(
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """Delete all events and event resources past the general events retention period."""
    current_settings = get_current_settings()
    cutoff = now("UTC") - current_settings.server.events.retention_period
    batch = current_settings.server.services.db_vacuum.batch_size

    # Event resources are removed first because there is no FK cascade on
    # event_id. Age is measured against EventResource.occurred (the event's
    # timestamp) rather than the row insertion time, so resource retention
    # lines up exactly with the Event.occurred comparison below.
    resources_deleted = await _batch_delete(
        db,
        db.EventResource,
        db.EventResource.occurred < cutoff,
        batch,
    )

    events_deleted = await _batch_delete(
        db,
        db.Event,
        db.Event.occurred < cutoff,
        batch,
    )

    if resources_deleted or events_deleted:
        logger.info(
            "Database vacuum: deleted %d old events and %d event resources.",
            events_deleted,
            resources_deleted,
        )
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
async def _find_orphaned_fk_ids(
    db: PrefectDBInterface,
    child_model: type,
    fk_column: sa.Column,
    parent_model: type,
) -> list:
    """Find foreign key values in child_model that have no matching parent row.

    Only the distinct set of non-NULL FK values is consulted, which lets the
    database answer with an index scan on the FK column instead of walking
    every child row. (`child_model` is part of the signature for symmetry
    with callers; the query itself only needs `fk_column`.)
    """
    fk_values = (
        sa.select(fk_column.label("fk_id"))
        .where(fk_column.is_not(None))
        .distinct()
        .subquery()
    )
    parent_exists = sa.exists(
        sa.select(sa.literal(1)).where(parent_model.id == fk_values.c.fk_id)
    )
    orphan_query = sa.select(fk_values.c.fk_id).where(~parent_exists)
    async with db.session_context() as session:
        return (await session.execute(orphan_query)).scalars().all()
async def _reconcile_artifact_collections(
    db: PrefectDBInterface,
    batch_size: int,
) -> tuple[int, int]:
    """Reconcile artifact collections whose latest_id points to a deleted artifact.

    For each stale collection, if another artifact with the same key still
    exists, re-point latest_id to the newest remaining version (mirroring the
    logic in models.artifacts.delete_artifact). Otherwise delete the row.

    Args:
        db: The database interface providing models and sessions.
        batch_size: Maximum number of stale collections processed per
            transaction.

    Returns (updated_count, deleted_count).
    """
    total_updated = 0
    total_deleted = 0
    # Correlated NOT EXISTS: true when no Artifact row matches the
    # collection's latest_id (i.e. the "latest" artifact was deleted).
    stale_condition = ~sa.exists(
        sa.select(sa.literal(1)).where(
            db.Artifact.id == db.ArtifactCollection.latest_id
        )
    )
    # Each batch runs in its own transaction; the loop exits once a batch
    # finds no stale collections left.
    while True:
        async with db.session_context(begin_transaction=True) as session:
            rows = (
                await session.execute(
                    sa.select(db.ArtifactCollection.id, db.ArtifactCollection.key)
                    .where(stale_condition)
                    .limit(batch_size)
                )
            ).all()
            if not rows:
                break
            for collection_id, key in rows:
                # Newest surviving artifact with the same key, if any.
                next_latest = (
                    await session.execute(
                        sa.select(db.Artifact)
                        .where(db.Artifact.key == key)
                        .order_by(db.Artifact.created.desc())
                        .limit(1)
                    )
                ).scalar_one_or_none()
                if next_latest is not None:
                    # Re-point the collection at the surviving version and
                    # copy its denormalized fields, as delete_artifact does.
                    await session.execute(
                        sa.update(db.ArtifactCollection)
                        .where(db.ArtifactCollection.id == collection_id)
                        .values(
                            latest_id=next_latest.id,
                            data=next_latest.data,
                            description=next_latest.description,
                            type=next_latest.type,
                            created=next_latest.created,
                            updated=next_latest.updated,
                            flow_run_id=next_latest.flow_run_id,
                            task_run_id=next_latest.task_run_id,
                            metadata_=next_latest.metadata_,
                        )
                    )
                    total_updated += 1
                else:
                    # No versions remain for this key: drop the collection.
                    await session.execute(
                        sa.delete(db.ArtifactCollection).where(
                            db.ArtifactCollection.id == collection_id
                        )
                    )
                    total_deleted += 1
        # Yield to the event loop between batches.
        await asyncio.sleep(0)
    return total_updated, total_deleted
async def _batch_delete(
    db: PrefectDBInterface,
    model: type,
    condition: sa.ColumnElement[bool],
    batch_size: int,
) -> int:
    """Delete matching rows in batches. Each batch gets its own DB transaction."""
    removed = 0
    while True:
        async with db.session_context(begin_transaction=True) as session:
            batch_ids = (
                sa.select(model.id).where(condition).limit(batch_size).scalar_subquery()
            )
            outcome = await session.execute(
                sa.delete(model).where(model.id.in_(batch_ids))
            )
        if not outcome.rowcount:
            return removed
        removed += outcome.rowcount
        # Yield to the event loop between batches.
        await asyncio.sleep(0)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/services/db_vacuum.py",
"license": "Apache License 2.0",
"lines": 363,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/server/services/test_db_vacuum.py | """Tests for the database vacuum docket task functions."""
from __future__ import annotations
import datetime
import uuid
from datetime import timedelta
import pytest
import sqlalchemy as sa
from prefect.server import models, schemas
from prefect.server.database import PrefectDBInterface, provide_database_interface
from prefect.server.events.schemas.events import ReceivedEvent, Resource
from prefect.server.events.storage.database import write_events
from prefect.server.schemas.actions import LogCreate
from prefect.server.services.db_vacuum import (
vacuum_events_with_retention_overrides,
vacuum_old_events,
vacuum_old_flow_runs,
vacuum_orphaned_artifacts,
vacuum_orphaned_logs,
vacuum_stale_artifact_collections,
)
from prefect.settings.context import get_current_settings
from prefect.types._datetime import now
@pytest.fixture(autouse=True)
def enable_db_vacuum(monkeypatch: pytest.MonkeyPatch) -> None:
    """Enable the vacuum service and set short retention for testing."""
    settings = get_current_settings()
    # Enable only the "events" and "flow_runs" vacuum categories for tests.
    monkeypatch.setattr(
        settings.server.services.db_vacuum, "enabled", {"events", "flow_runs"}
    )
    # A 1-day retention window: rows timestamped OLD (30 days ago) are
    # eligible for deletion, rows timestamped RECENT (1 hour ago) are not.
    monkeypatch.setattr(
        settings.server.services.db_vacuum,
        "retention_period",
        timedelta(days=1),
    )
    monkeypatch.setattr(settings.server.services.db_vacuum, "batch_size", 100)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
# Reference timestamps relative to the 1-day retention configured by the
# enable_db_vacuum fixture: OLD is far past retention, RECENT is within it.
OLD = now("UTC") - timedelta(days=30)
RECENT = now("UTC") - timedelta(hours=1)
async def _create_flow_run(
    session,
    flow,
    *,
    state=None,
    end_time=None,
    parent_task_run_id=None,
):
    """Create and commit a flow run for *flow*; defaults to a Completed state."""
    if state is None:
        state = schemas.states.Completed()
    flow_run = await models.flow_runs.create_flow_run(
        session=session,
        flow_run=schemas.core.FlowRun(
            flow_id=flow.id,
            state=state,
            end_time=end_time,
            parent_task_run_id=parent_task_run_id,
        ),
    )
    await session.commit()
    return flow_run
async def _create_task_run(session, flow_run):
    """Create and commit a task run attached to *flow_run* with a unique task key."""
    task_run = await models.task_runs.create_task_run(
        session=session,
        task_run=schemas.actions.TaskRunCreate(
            flow_run_id=flow_run.id,
            task_key=f"task-{uuid.uuid4()}",
            dynamic_key="0",
        ),
    )
    await session.commit()
    return task_run
async def _create_log(session, flow_run_id=None, task_run_id=None):
    """Create and commit one log record, optionally linked to a flow/task run."""
    await models.logs.create_logs(
        session=session,
        logs=[
            LogCreate(
                name="prefect.test",
                level=20,  # logging.INFO
                message="test log",
                timestamp=now("UTC"),
                flow_run_id=flow_run_id,
                task_run_id=task_run_id,
            ),
        ],
    )
    await session.commit()
async def _create_artifact(session, flow_run_id=None, key=None):
    """Create and commit an artifact; a non-None key also populates its collection."""
    artifact = await models.artifacts.create_artifact(
        session=session,
        artifact=schemas.core.Artifact(
            key=key,
            data=1,
            flow_run_id=flow_run_id,
        ),
    )
    await session.commit()
    return artifact
async def _count(session, db: PrefectDBInterface, model) -> int:
    """Count all rows of *model* via *session* (db is unused; kept for call symmetry)."""
    return (
        await session.execute(sa.select(sa.func.count(model.id)))
    ).scalar_one()
async def _create_event(
    db: PrefectDBInterface,
    event_type: str,
    occurred: datetime.datetime,
) -> ReceivedEvent:
    """Create an event + its resource row in the database."""
    event = ReceivedEvent(
        occurred=occurred,
        event=event_type,
        # A random flow-run resource id is sufficient for vacuum tests.
        resource=Resource.model_validate(
            {"prefect.resource.id": f"prefect.flow-run.{uuid.uuid4()}"}
        ),
        payload={},
        id=uuid.uuid4(),
    )
    # write_events persists the event and its related resource rows.
    async with db.session_context(begin_transaction=True) as session:
        await write_events(session, [event])
    return event
async def _count_events(db: PrefectDBInterface) -> int:
    """Return the total number of Event rows."""
    async with db.session_context() as session:
        return (
            await session.execute(sa.select(sa.func.count(db.Event.id)))
        ).scalar_one()
async def _count_event_resources(db: PrefectDBInterface) -> int:
    """Return the total number of EventResource rows."""
    async with db.session_context() as session:
        return (
            await session.execute(sa.select(sa.func.count(db.EventResource.id)))
        ).scalar_one()
# ---------------------------------------------------------------------------
# Test classes
# ---------------------------------------------------------------------------
class TestVacuumOldFlowRuns:
    """vacuum_old_flow_runs deletes only old, terminal, top-level flow runs."""

    async def test_deletes_old_completed_flow_runs(self, session, flow):
        """Old terminal flow runs should be deleted."""
        db = provide_database_interface()
        await _create_flow_run(session, flow, end_time=OLD)
        assert await _count(session, db, db.FlowRun) == 1
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0

    async def test_preserves_recent_flow_runs(self, session, flow):
        """Flow runs within the retention period should not be deleted."""
        db = provide_database_interface()
        await _create_flow_run(session, flow, end_time=RECENT)
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 1

    async def test_preserves_running_flow_runs(self, session, flow):
        """Non-terminal flow runs should never be deleted."""
        db = provide_database_interface()
        await _create_flow_run(
            session,
            flow,
            state=schemas.states.Running(),
            end_time=None,
        )
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 1

    async def test_cascade_deletes_task_runs(self, session, flow):
        """Task runs belonging to a deleted flow run should be cascade-deleted."""
        db = provide_database_interface()
        flow_run = await _create_flow_run(session, flow, end_time=OLD)
        await _create_task_run(session, flow_run)
        assert await _count(session, db, db.TaskRun) == 1
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0
            assert await _count(new_session, db, db.TaskRun) == 0

    async def test_subflow_cleaned_up_with_parent(self, session, flow):
        """Old subflows are cleaned up when their parent is deleted (same vacuum run)."""
        db = provide_database_interface()
        parent = await _create_flow_run(session, flow, end_time=OLD)
        parent_task = await _create_task_run(session, parent)
        # Subflow: old, terminal, has parent_task_run_id
        await _create_flow_run(
            session,
            flow,
            end_time=OLD,
            parent_task_run_id=parent_task.id,
        )
        assert await _count(session, db, db.FlowRun) == 2
        # Parent deletion cascades SET NULL on subflow's parent_task_run_id,
        # making it top-level. The batch loop's next iteration picks it up.
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0

    async def test_recent_subflow_survives_parent_deletion(self, session, flow):
        """A recent subflow survives even after its parent is deleted."""
        db = provide_database_interface()
        parent = await _create_flow_run(session, flow, end_time=OLD)
        parent_task = await _create_task_run(session, parent)
        # Subflow: recent end_time, so not eligible for deletion
        subflow = await _create_flow_run(
            session,
            flow,
            end_time=RECENT,
            parent_task_run_id=parent_task.id,
        )
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            # Parent deleted, but subflow survives (too recent)
            result = await new_session.execute(
                sa.select(db.FlowRun).where(db.FlowRun.id == subflow.id)
            )
            remaining = result.scalar_one_or_none()
            assert remaining is not None
            assert remaining.parent_task_run_id is None  # SET NULL by cascade

    async def test_deletes_all_terminal_states(self, session, flow):
        """All terminal state types should be eligible for deletion."""
        db = provide_database_interface()
        for state_cls in (
            schemas.states.Completed,
            schemas.states.Failed,
            schemas.states.Cancelled,
            schemas.states.Crashed,
        ):
            await _create_flow_run(session, flow, state=state_cls(), end_time=OLD)
        assert await _count(session, db, db.FlowRun) == 4
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0

    async def test_preserves_terminal_run_without_end_time(self, session, flow):
        """Terminal flow runs with end_time=None should not be deleted."""
        db = provide_database_interface()
        await _create_flow_run(
            session, flow, state=schemas.states.Completed(), end_time=None
        )
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 1

    async def test_preserves_scheduled_flow_runs(self, session, flow):
        """Scheduled (non-terminal) flow runs should not be deleted."""
        db = provide_database_interface()
        await _create_flow_run(
            session, flow, state=schemas.states.Scheduled(), end_time=None
        )
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 1

    async def test_preserves_cancelling_flow_runs(self, session, flow):
        """CANCELLING is non-terminal and should not be deleted."""
        db = provide_database_interface()
        await _create_flow_run(
            session, flow, state=schemas.states.Cancelling(), end_time=OLD
        )
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 1
class TestVacuumOrphanedLogs:
    """vacuum_orphaned_logs removes logs whose referenced flow run no longer exists."""

    async def test_deletes_orphaned_logs(self, session, flow):
        """Logs referencing a non-existent flow run should be deleted."""
        db = provide_database_interface()
        # Create a log pointing to a flow_run_id that doesn't exist
        fake_flow_run_id = uuid.uuid4()
        await _create_log(session, flow_run_id=fake_flow_run_id)
        assert await _count(session, db, db.Log) == 1
        await vacuum_orphaned_logs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Log) == 0

    async def test_preserves_logs_with_existing_flow_run(self, session, flow):
        """Logs tied to an existing flow run should not be deleted."""
        db = provide_database_interface()
        flow_run = await _create_flow_run(session, flow, end_time=RECENT)
        await _create_log(session, flow_run_id=flow_run.id)
        await vacuum_orphaned_logs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Log) == 1

    async def test_preserves_logs_with_null_flow_run_id(self, session, flow):
        """Logs with flow_run_id=NULL (e.g. task-run-only) should not be deleted."""
        db = provide_database_interface()
        await _create_log(session, flow_run_id=None)
        await vacuum_orphaned_logs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Log) == 1
class TestVacuumOrphanedArtifacts:
    """vacuum_orphaned_artifacts removes artifacts whose flow run no longer exists."""

    async def test_deletes_orphaned_artifacts(self, session, flow):
        """Artifacts referencing a non-existent flow run should be deleted."""
        db = provide_database_interface()
        fake_flow_run_id = uuid.uuid4()
        await _create_artifact(session, flow_run_id=fake_flow_run_id)
        assert await _count(session, db, db.Artifact) == 1
        await vacuum_orphaned_artifacts(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Artifact) == 0

    async def test_preserves_artifacts_with_existing_flow_run(self, session, flow):
        """Artifacts tied to an existing flow run should not be deleted."""
        db = provide_database_interface()
        flow_run = await _create_flow_run(session, flow, end_time=RECENT)
        await _create_artifact(session, flow_run_id=flow_run.id)
        await vacuum_orphaned_artifacts(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Artifact) == 1

    async def test_preserves_artifacts_with_null_flow_run_id(self, session, flow):
        """Artifacts with flow_run_id=NULL should not be deleted."""
        db = provide_database_interface()
        await _create_artifact(session, flow_run_id=None)
        await vacuum_orphaned_artifacts(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Artifact) == 1
class TestVacuumArtifactCollections:
    """vacuum_stale_artifact_collections re-points or deletes collections with a dangling latest_id."""

    async def test_deletes_stale_artifact_collections(self, session, flow):
        """Artifact collections pointing to deleted artifacts should be removed."""
        db = provide_database_interface()
        # Create an artifact with a key -> this also creates an artifact_collection
        fake_flow_run_id = uuid.uuid4()
        await _create_artifact(session, flow_run_id=fake_flow_run_id, key="my-report")
        assert await _count(session, db, db.ArtifactCollection) == 1
        # First vacuum orphaned artifacts, then stale collections
        await vacuum_orphaned_artifacts(db=db)
        await vacuum_stale_artifact_collections(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.Artifact) == 0
            assert await _count(new_session, db, db.ArtifactCollection) == 0

    async def test_repoints_collection_to_next_latest_version(self, session, flow):
        """When the latest artifact is orphaned but an older version exists,
        the collection should be re-pointed to the older version."""
        db = provide_database_interface()
        # Older artifact version — tied to an existing flow run
        live_run = await _create_flow_run(session, flow, end_time=RECENT)
        older_artifact = await _create_artifact(
            session, flow_run_id=live_run.id, key="my-report"
        )
        # Newer artifact version (same key) — orphaned flow run
        fake_flow_run_id = uuid.uuid4()
        await _create_artifact(session, flow_run_id=fake_flow_run_id, key="my-report")
        # Collection should point to the newer (orphaned) artifact
        assert await _count(session, db, db.ArtifactCollection) == 1
        await vacuum_orphaned_artifacts(db=db)
        await vacuum_stale_artifact_collections(db=db)
        async with db.session_context() as new_session:
            # Orphaned artifact deleted, but collection survives re-pointed
            assert await _count(new_session, db, db.Artifact) == 1
            assert await _count(new_session, db, db.ArtifactCollection) == 1
            # Verify collection now points to the older (surviving) artifact
            result = await new_session.execute(
                sa.select(db.ArtifactCollection).where(
                    db.ArtifactCollection.key == "my-report"
                )
            )
            collection = result.scalar_one()
            assert collection.latest_id == older_artifact.id

    async def test_deletes_standalone_stale_collection(self, session):
        """A stale collection row (e.g. left over from a previous crash)
        should be deleted even without artifact cleanup in the same cycle."""
        db = provide_database_interface()
        # Directly insert a collection row with a dangling latest_id
        async with db.session_context(begin_transaction=True) as s:
            await s.execute(
                sa.insert(db.ArtifactCollection).values(
                    id=uuid.uuid4(),
                    key="stale-key",
                    latest_id=uuid.uuid4(),  # points to nothing
                )
            )
        async with db.session_context() as s:
            assert await _count(s, db, db.ArtifactCollection) == 1
        await vacuum_stale_artifact_collections(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.ArtifactCollection) == 0

    async def test_preserves_valid_artifact_collections(self, session, flow):
        """Artifact collections pointing to existing artifacts should be preserved."""
        db = provide_database_interface()
        flow_run = await _create_flow_run(session, flow, end_time=RECENT)
        await _create_artifact(session, flow_run_id=flow_run.id, key="my-report")
        await vacuum_stale_artifact_collections(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.ArtifactCollection) == 1
class TestVacuumHeartbeatEvents:
    """vacuum_events_with_retention_overrides applies per-event-type retention."""

    async def test_deletes_old_heartbeat_events(self):
        """Old heartbeat events and their resources should be deleted."""
        db = provide_database_interface()
        await _create_event(db, "prefect.flow-run.heartbeat", OLD)
        assert await _count_events(db) == 1
        assert await _count_event_resources(db) >= 1
        await vacuum_events_with_retention_overrides(db=db)
        assert await _count_events(db) == 0
        assert await _count_event_resources(db) == 0

    async def test_preserves_recent_heartbeat_events(self):
        """Recent heartbeat events should not be deleted."""
        db = provide_database_interface()
        await _create_event(db, "prefect.flow-run.heartbeat", RECENT)
        await vacuum_events_with_retention_overrides(db=db)
        assert await _count_events(db) == 1
        assert await _count_event_resources(db) >= 1

    async def test_preserves_non_heartbeat_events(self):
        """Old non-heartbeat events should not be deleted by this task."""
        db = provide_database_interface()
        await _create_event(db, "prefect.flow-run.completed", OLD)
        await vacuum_events_with_retention_overrides(db=db)
        assert await _count_events(db) == 1
        assert await _count_event_resources(db) >= 1

    async def test_respects_events_retention_period(self, monkeypatch):
        """Heartbeat retention should not exceed the general events retention period."""
        db = provide_database_interface()
        settings = get_current_settings()
        # Set general events retention to 12 hours (shorter than heartbeat default of 7 days)
        monkeypatch.setattr(
            settings.server.events,
            "retention_period",
            timedelta(hours=12),
        )
        # Create a heartbeat event 18 hours ago (past 12h, within 7 days)
        eighteen_hours_ago = now("UTC") - timedelta(hours=18)
        await _create_event(db, "prefect.flow-run.heartbeat", eighteen_hours_ago)
        await vacuum_events_with_retention_overrides(db=db)
        # Should be deleted because events retention (12h) is shorter
        assert await _count_events(db) == 0

    async def test_uses_event_retention_override(self, monkeypatch):
        """Should use per-type retention from event_retention_overrides."""
        db = provide_database_interface()
        settings = get_current_settings()
        # Set a custom short retention for heartbeat events (2 hours)
        monkeypatch.setattr(
            settings.server.services.db_vacuum,
            "event_retention_overrides",
            {"prefect.flow-run.heartbeat": timedelta(hours=2)},
        )
        # Create a heartbeat event 4 hours ago (past 2h override)
        four_hours_ago = now("UTC") - timedelta(hours=4)
        await _create_event(db, "prefect.flow-run.heartbeat", four_hours_ago)
        await vacuum_events_with_retention_overrides(db=db)
        # Should be deleted because heartbeat override (2h) is shorter than age (4h)
        assert await _count_events(db) == 0

    async def test_skips_types_without_override(self, monkeypatch):
        """Without an override, events are left for vacuum_old_events to handle."""
        db = provide_database_interface()
        settings = get_current_settings()
        # Remove the heartbeat override
        monkeypatch.setattr(
            settings.server.services.db_vacuum,
            "event_retention_overrides",
            {},
        )
        await _create_event(db, "prefect.flow-run.heartbeat", OLD)
        await vacuum_events_with_retention_overrides(db=db)
        # Event is NOT deleted — no override means no action from this task
        assert await _count_events(db) == 1

    async def test_deletes_associated_event_resources(self):
        """Resources for deleted heartbeat events should be removed."""
        db = provide_database_interface()
        event = await _create_event(db, "prefect.flow-run.heartbeat", OLD)
        # Verify the resource was created
        async with db.session_context() as session:
            result = await session.execute(
                sa.select(sa.func.count(db.EventResource.id)).where(
                    db.EventResource.event_id == event.id
                )
            )
            assert result.scalar_one() >= 1
        await vacuum_events_with_retention_overrides(db=db)
        async with db.session_context() as session:
            result = await session.execute(
                sa.select(sa.func.count(db.EventResource.id)).where(
                    db.EventResource.event_id == event.id
                )
            )
            assert result.scalar_one() == 0
class TestVacuumOldEvents:
    """vacuum_old_events enforces the general events retention period."""

    async def test_deletes_old_events(self, monkeypatch):
        """Events and resources past the events retention period should be deleted."""
        db = provide_database_interface()
        # events.retention_period defaults to 7 days; our OLD is 30 days ago
        await _create_event(db, "prefect.flow-run.completed", OLD)
        assert await _count_events(db) == 1
        assert await _count_event_resources(db) >= 1
        await vacuum_old_events(db=db)
        assert await _count_events(db) == 0
        assert await _count_event_resources(db) == 0

    async def test_preserves_recent_events(self):
        """Recent events should not be deleted."""
        db = provide_database_interface()
        await _create_event(db, "prefect.flow-run.completed", RECENT)
        await vacuum_old_events(db=db)
        assert await _count_events(db) == 1
        assert await _count_event_resources(db) >= 1

    async def test_uses_events_retention_period(self, monkeypatch):
        """Should use settings.server.events.retention_period, not db_vacuum.retention_period."""
        db = provide_database_interface()
        settings = get_current_settings()
        # Set events retention to 60 days (longer than our 30-day-old event)
        monkeypatch.setattr(
            settings.server.events,
            "retention_period",
            timedelta(days=60),
        )
        await _create_event(db, "prefect.flow-run.completed", OLD)
        await vacuum_old_events(db=db)
        # The 30-day-old event should survive because retention is 60 days
        assert await _count_events(db) == 1
class TestVacuumBatching:
    """Batched deletion drains all eligible rows across multiple passes."""

    async def test_batching_deletes_all_records(self, session, flow, monkeypatch):
        """With batch_size=5, all 12 old flow runs should eventually be deleted."""
        settings = get_current_settings()
        monkeypatch.setattr(settings.server.services.db_vacuum, "batch_size", 5)
        db = provide_database_interface()
        for _ in range(12):
            await _create_flow_run(session, flow, end_time=OLD)
        assert await _count(session, db, db.FlowRun) == 12
        await vacuum_old_flow_runs(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0
class TestVacuumIdempotency:
    """Running the vacuum tasks a second time must not change anything."""

    async def test_second_run_is_noop(self, session, flow):
        """Running vacuum tasks twice should produce zero changes on the second run."""
        db = provide_database_interface()
        await _create_flow_run(session, flow, end_time=OLD)
        fake_flow_run_id = uuid.uuid4()
        await _create_log(session, flow_run_id=fake_flow_run_id)
        await _create_artifact(session, flow_run_id=fake_flow_run_id, key="report")
        await _create_event(db, "prefect.flow-run.heartbeat", OLD)
        await _create_event(db, "prefect.flow-run.completed", OLD)
        await vacuum_orphaned_logs(db=db)
        await vacuum_orphaned_artifacts(db=db)
        await vacuum_stale_artifact_collections(db=db)
        await vacuum_old_flow_runs(db=db)
        await vacuum_events_with_retention_overrides(db=db)
        await vacuum_old_events(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0
            assert await _count(new_session, db, db.Log) == 0
            assert await _count(new_session, db, db.Artifact) == 0
            assert await _count(new_session, db, db.ArtifactCollection) == 0
            assert await _count_events(db) == 0
            assert await _count_event_resources(db) == 0
        # Second run should be a no-op
        await vacuum_orphaned_logs(db=db)
        await vacuum_orphaned_artifacts(db=db)
        await vacuum_stale_artifact_collections(db=db)
        await vacuum_old_flow_runs(db=db)
        await vacuum_events_with_retention_overrides(db=db)
        await vacuum_old_events(db=db)
        async with db.session_context() as new_session:
            assert await _count(new_session, db, db.FlowRun) == 0
            assert await _count(new_session, db, db.Log) == 0
            assert await _count(new_session, db, db.Artifact) == 0
            assert await _count(new_session, db, db.ArtifactCollection) == 0
            assert await _count_events(db) == 0
            assert await _count_event_resources(db) == 0
class TestNoOp:
    """All vacuum tasks tolerate an empty database."""

    async def test_empty_database_does_not_error(self):
        """Running vacuum tasks on an empty database should complete without error."""
        db = provide_database_interface()
        await vacuum_orphaned_logs(db=db)
        await vacuum_orphaned_artifacts(db=db)
        await vacuum_stale_artifact_collections(db=db)
        await vacuum_old_flow_runs(db=db)
        await vacuum_events_with_retention_overrides(db=db)
        await vacuum_old_events(db=db)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/services/test_db_vacuum.py",
"license": "Apache License 2.0",
"lines": 548,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/cloud/_executor.py | """dbt Cloud executor for per-node orchestration.
This module provides:
- DbtCloudExecutor: Execute dbt nodes via dbt Cloud ephemeral jobs
"""
import json
import shlex
import tempfile
import time
from pathlib import Path
from typing import Any
import httpx
import prefect
from prefect.logging import get_logger
from prefect_dbt.cloud.credentials import DbtCloudCredentials
from prefect_dbt.cloud.runs import DbtCloudJobRunStatus
from prefect_dbt.core._executor import ExecutionResult
from prefect_dbt.core._manifest import DbtNode
logger = get_logger(__name__)
class DbtCloudExecutor:
"""Execute dbt nodes via dbt Cloud ephemeral jobs.
Creates a temporary dbt Cloud job for each node (or wave) execution,
triggers a run, polls for completion, extracts results from the run
artifacts, and deletes the ephemeral job afterwards.
Manifest Resolution:
The orchestrator requires a `manifest.json` to parse the DAG.
Call `resolve_manifest_path()` to download or generate the manifest
and write it to a local temp file:
1. If `defer_to_job_id` is set: downloads `manifest.json` from
the job's most recent successful run via the dbt Cloud API.
2. Otherwise: runs an ephemeral `dbt compile` job to generate the
manifest, then deletes the ephemeral job.
Args:
credentials: DbtCloudCredentials block with API key and account ID.
project_id: Numeric dbt Cloud project ID.
environment_id: Numeric dbt Cloud environment ID.
job_name_prefix: Prefix for ephemeral job names.
timeout_seconds: Max seconds to wait for a run to complete.
poll_frequency_seconds: Seconds between run status checks.
threads: Override dbt `--threads` for all jobs (omitted if None).
defer_to_job_id: Job ID to fetch `manifest.json` from. When set,
`resolve_manifest_path()` downloads the manifest from this job's
most recent successful run rather than generating it fresh.
Example:
```python
from prefect import flow
from prefect_dbt import PrefectDbtOrchestrator
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud import DbtCloudExecutor
@flow
def run_dbt_cloud():
executor = DbtCloudExecutor(
credentials=DbtCloudCredentials.load("my-dbt-cloud"),
project_id=12345,
environment_id=67890,
defer_to_job_id=111, # fetch manifest from prod job
)
orchestrator = PrefectDbtOrchestrator(executor=executor)
return orchestrator.run_build()
```
"""
# Commands that accept the --full-refresh flag.
_FULL_REFRESH_COMMANDS = frozenset({"run", "build", "seed"})
    def __init__(
        self,
        credentials: DbtCloudCredentials,
        project_id: int,
        environment_id: int,
        job_name_prefix: str = "prefect-orchestrator",
        timeout_seconds: int = 900,
        poll_frequency_seconds: int = 10,
        threads: int | None = None,
        defer_to_job_id: int | None = None,
    ) -> None:
        """Store configuration and build the authenticated dbt Cloud API client.

        See the class docstring for the meaning of each argument.
        """
        self._credentials = credentials
        self._project_id = project_id
        self._environment_id = environment_id
        self._job_name_prefix = job_name_prefix
        self._timeout_seconds = timeout_seconds
        self._poll_frequency_seconds = poll_frequency_seconds
        self._threads = threads
        self._defer_to_job_id = defer_to_job_id
        # Temp directory for a locally-materialized manifest.json; created on
        # demand (presumably by resolve_manifest_path — see class docstring).
        self._manifest_temp_dir: tempfile.TemporaryDirectory[str] | None = None
        # All requests are account-scoped; bearer auth plus identification
        # headers for dbt Cloud partner telemetry.
        self._client = httpx.Client(
            headers={
                "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
                "user-agent": f"prefect-{prefect.__version__}",
                "x-dbt-partner-source": "prefect",
            },
            base_url=f"https://{credentials.domain}/api/v2/accounts/{credentials.account_id}",
        )
# ------------------------------------------------------------------
# Internal helpers
# ------------------------------------------------------------------
def _build_dbt_command(
self,
command: str,
selectors: list[str],
full_refresh: bool = False,
indirect_selection: str | None = None,
target: str | None = None,
extra_cli_args: list[str] | None = None,
) -> str:
"""Build a dbt command string for a Cloud job step.
Args:
command: dbt sub-command (`"run"`, `"seed"`, `"build"`,
`"test"`, `"snapshot"`, etc.)
selectors: List of `--select` values.
full_refresh: Whether to pass `--full-refresh`.
indirect_selection: Optional `--indirect-selection` value (e.g.
`"empty"` to suppress automatic test inclusion).
target: Optional dbt target name (`--target`).
extra_cli_args: Additional CLI arguments appended at the end.
Returns:
Complete dbt command string, e.g.
`"dbt run --select path:models/staging/stg_users.sql"`.
"""
parts = ["dbt", command]
if self._threads is not None:
parts.extend(["--threads", str(self._threads)])
if full_refresh and command in self._FULL_REFRESH_COMMANDS:
parts.append("--full-refresh")
if indirect_selection is not None:
parts.extend(["--indirect-selection", indirect_selection])
if target is not None:
parts.extend(["--target", target])
if selectors:
parts.extend(["--select"] + selectors)
if extra_cli_args:
parts.extend(extra_cli_args)
return " ".join(shlex.quote(p) for p in parts)
def _parse_run_results(
self, run_results: dict[str, Any] | None
) -> dict[str, Any] | None:
"""Parse dbt `run_results.json` into `ExecutionResult` artifacts.
Args:
run_results: Parsed `run_results.json` dict from the run artifact.
Returns:
Dict mapping `unique_id` to `{status, message, execution_time}`,
or `None` if *run_results* is empty or missing.
"""
if not run_results or "results" not in run_results:
return None
artifacts: dict[str, Any] = {}
for result in run_results["results"]:
uid = result.get("unique_id")
if not uid:
continue
artifacts[uid] = {
"status": str(result.get("status", "")),
"message": result.get("message", ""),
"execution_time": result.get("execution_time", 0.0),
}
return artifacts or None
    def _poll_run(self, run_id: int) -> DbtCloudJobRunStatus:
        """Poll a run until it reaches a terminal status.

        Args:
            run_id: dbt Cloud run ID to poll.

        Returns:
            Final `DbtCloudJobRunStatus`.

        Raises:
            TimeoutError: If the run does not complete within `timeout_seconds`.
        """
        # Monotonic clock: elapsed time stays correct across wall-clock jumps.
        start = time.monotonic()
        while True:
            resp = self._client.get(f"/runs/{run_id}/")
            resp.raise_for_status()
            status_code = resp.json()["data"].get("status")
            # Check for a terminal status before the timeout check, so a run
            # that finishes exactly at the deadline is still reported normally
            # (and timeout_seconds=0 still performs one status request).
            if DbtCloudJobRunStatus.is_terminal_status_code(status_code):
                return DbtCloudJobRunStatus(status_code)
            elapsed = time.monotonic() - start
            if elapsed >= self._timeout_seconds:
                break
            logger.debug(
                "Run %d status: %s. Polling again in %ds.",
                run_id,
                # `status` may be absent from the payload; don't let the debug
                # log crash on a missing code.
                DbtCloudJobRunStatus(status_code).name
                if status_code is not None
                else "unknown",
                self._poll_frequency_seconds,
            )
            time.sleep(self._poll_frequency_seconds)
        raise TimeoutError(
            f"dbt Cloud run {run_id} did not complete within {self._timeout_seconds}s"
        )
    def _run_ephemeral_job(
        self,
        step: str,
        job_name: str,
    ) -> tuple[bool, dict[str, Any] | None, Exception | None]:
        """Create, trigger, poll, and clean up an ephemeral dbt Cloud job.

        The job is always deleted after completion or failure, even if an
        exception occurs during execution (cleanup failures are logged as
        warnings rather than raised, to avoid masking the original error).

        Args:
            step: The dbt command to run (e.g.
                `"dbt run --select path:models/staging/stg_users.sql"`).
            job_name: Name for the ephemeral job (visible in the Cloud UI).

        Returns:
            Tuple of `(success, run_results_dict, error)`.

            - *success*: `True` if the run reached `SUCCESS` status.
            - *run_results_dict*: Parsed `run_results.json` or `None`.
            - *error*: Exception if a non-SUCCESS status or unexpected error
              occurred.
        """
        # job_id stays None until job creation succeeds; the finally block
        # uses that to decide whether there is anything to delete.
        job_id: int | None = None
        try:
            # 1) Create a single-step job in the configured project/environment.
            create_resp = self._client.post(
                "/jobs/",
                json={
                    "project_id": self._project_id,
                    "environment_id": self._environment_id,
                    "name": job_name,
                    "execute_steps": [step],
                },
            )
            create_resp.raise_for_status()
            job_id = create_resp.json()["data"]["id"]
            logger.debug("Created ephemeral dbt Cloud job %d: %s", job_id, job_name)
            # 2) Trigger a run of the job and wait for a terminal status.
            trigger_resp = self._client.post(f"/jobs/{job_id}/run/", json={})
            trigger_resp.raise_for_status()
            run_id: int = trigger_resp.json()["data"]["id"]
            logger.debug("Triggered run %d for job %d (%s)", run_id, job_id, step)
            final_status = self._poll_run(run_id)
            logger.debug("Run %d completed with status %s", run_id, final_status.name)
            # 3) Best-effort fetch of run_results.json; runs that fail early
            # may never produce the artifact, so a miss is non-fatal.
            run_results: dict[str, Any] | None = None
            try:
                artifact_resp = self._client.get(
                    f"/runs/{run_id}/artifacts/run_results.json"
                )
                artifact_resp.raise_for_status()
                run_results = artifact_resp.json()
            except Exception as artifact_err:
                logger.debug(
                    "Could not fetch run_results.json for run %d: %s",
                    run_id,
                    artifact_err,
                )
            success = final_status == DbtCloudJobRunStatus.SUCCESS
            error: Exception | None = None
            if not success:
                error = RuntimeError(
                    f"dbt Cloud run {run_id} finished with status {final_status.name}"
                )
            return success, run_results, error
        except Exception as exc:
            # Surface HTTP/polling failures through the result tuple instead of
            # raising, so callers get a uniform (success, results, error) shape.
            return False, None, exc
        finally:
            # 4) Always delete the job once it was created; log (but never
            # raise) cleanup failures so the original outcome survives.
            if job_id is not None:
                try:
                    self._client.delete(f"/jobs/{job_id}/").raise_for_status()
                    logger.debug("Deleted ephemeral dbt Cloud job %d", job_id)
                except Exception as del_err:
                    logger.warning(
                        "Failed to delete ephemeral job %d: %s", job_id, del_err
                    )
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def fetch_manifest_from_job(self, job_id: int) -> dict[str, Any]:
"""Fetch `manifest.json` from a job's most recent successful run.
Uses the dbt Cloud endpoint::
GET /accounts/{account_id}/jobs/{job_id}/artifacts/manifest.json
Args:
job_id: dbt Cloud job ID whose latest artifact to fetch.
Returns:
Parsed `manifest.json` as a dict.
"""
resp = self._client.get(f"/jobs/{job_id}/artifacts/manifest.json")
resp.raise_for_status()
return resp.json()
def generate_manifest(self) -> dict[str, Any]:
"""Generate a manifest by running an ephemeral dbt compile job.
Creates a temporary job with `dbt compile`, triggers it, downloads
`manifest.json` from the run artifacts, and then deletes the job.
Returns:
Parsed `manifest.json` as a dict.
Raises:
RuntimeError: If the compile job fails or the manifest artifact
cannot be fetched.
"""
job_name = f"{self._job_name_prefix}-compile-{int(time.time())}"
job_id: int | None = None
try:
create_resp = self._client.post(
"/jobs/",
json={
"project_id": self._project_id,
"environment_id": self._environment_id,
"name": job_name,
"execute_steps": ["dbt compile"],
},
)
create_resp.raise_for_status()
job_id = create_resp.json()["data"]["id"]
logger.debug("Created ephemeral compile job %d: %s", job_id, job_name)
trigger_resp = self._client.post(f"/jobs/{job_id}/run/", json={})
trigger_resp.raise_for_status()
run_id: int = trigger_resp.json()["data"]["id"]
logger.debug("Triggered compile run %d", run_id)
final_status = self._poll_run(run_id)
if final_status != DbtCloudJobRunStatus.SUCCESS:
raise RuntimeError(
f"dbt compile job failed with status {final_status.name}. "
"Cannot generate manifest."
)
artifact_resp = self._client.get(f"/runs/{run_id}/artifacts/manifest.json")
artifact_resp.raise_for_status()
return artifact_resp.json()
finally:
if job_id is not None:
try:
self._client.delete(f"/jobs/{job_id}/").raise_for_status()
logger.debug("Deleted ephemeral compile job %d", job_id)
except Exception as del_err:
logger.warning(
"Failed to delete compile job %d: %s", job_id, del_err
)
def resolve_manifest_path(self) -> Path:
"""Fetch or generate a manifest and write it to a temporary file.
Called by `PrefectDbtOrchestrator` when no local `manifest_path`
is provided.
Strategy:
- If `defer_to_job_id` is set, downloads `manifest.json` from
that job's most recent successful run.
- Otherwise, runs an ephemeral `dbt compile` job to generate it.
Returns:
Absolute `Path` to a local temp file containing `manifest.json`.
The directory is owned by this executor instance and is cleaned up
automatically when the executor is garbage collected.
"""
if self._defer_to_job_id is not None:
logger.info(
"Fetching manifest.json from dbt Cloud job %d",
self._defer_to_job_id,
)
manifest_data = self.fetch_manifest_from_job(self._defer_to_job_id)
else:
logger.info(
"Generating manifest via ephemeral dbt compile job in environment %d",
self._environment_id,
)
manifest_data = self.generate_manifest()
# Use an isolated TemporaryDirectory tied to this executor so that:
# 1. _resolve_target_path() gets a unique dbt target path per run.
# 2. The directory is cleaned up automatically when the executor is
# garbage collected (no leaked /tmp/prefect_dbt_* directories).
self._manifest_temp_dir = tempfile.TemporaryDirectory(prefix="prefect_dbt_")
manifest_path = Path(self._manifest_temp_dir.name) / "manifest.json"
manifest_path.write_text(json.dumps(manifest_data))
logger.debug("Wrote manifest to %s", manifest_path)
return manifest_path
def execute_node(
self,
node: DbtNode,
command: str,
full_refresh: bool = False,
target: str | None = None,
extra_cli_args: list[str] | None = None,
) -> ExecutionResult:
"""Execute a single dbt node via an ephemeral dbt Cloud job.
Creates a job with a single step (e.g.
`"dbt run --select path:models/staging/stg_users.sql"`), triggers it,
waits for completion, extracts results from `run_results.json`, and
deletes the job.
Args:
node: The `DbtNode` to execute.
command: dbt command (`"run"`, `"seed"`, `"snapshot"`, `"test"`).
full_refresh: Whether to pass `--full-refresh` (ignored for
commands that don't support it).
target: Optional dbt target name (`--target`).
extra_cli_args: Additional CLI arguments appended to the command.
Returns:
`ExecutionResult` with success/failure status and per-node artifacts.
"""
step = self._build_dbt_command(
command,
selectors=[node.dbt_selector],
full_refresh=full_refresh,
target=target,
extra_cli_args=extra_cli_args,
)
# Truncate node name to keep job names reasonable in the Cloud UI.
safe_name = node.name[:40] if len(node.name) > 40 else node.name
job_name = f"{self._job_name_prefix}-{command}-{safe_name}"
success, run_results, error = self._run_ephemeral_job(step, job_name)
artifacts = self._parse_run_results(run_results)
# Union of requested node IDs and any IDs from run_results.json.
result_ids = [node.unique_id]
if artifacts:
result_ids = list(dict.fromkeys(result_ids + list(artifacts)))
return ExecutionResult(
success=success,
node_ids=result_ids,
error=error,
artifacts=artifacts,
)
def execute_wave(
self,
nodes: list[DbtNode],
full_refresh: bool = False,
indirect_selection: str | None = None,
target: str | None = None,
extra_cli_args: list[str] | None = None,
) -> ExecutionResult:
"""Execute a wave of dbt nodes via an ephemeral dbt Cloud job.
Uses `dbt build --select sel1 sel2 ...` to execute all nodes in the
wave in a single job step.
Args:
nodes: List of `DbtNode` objects to execute.
full_refresh: Whether to pass `--full-refresh`.
indirect_selection: Optional `--indirect-selection` value (e.g.
`"empty"` to suppress automatic test inclusion).
target: Optional dbt target name (`--target`).
extra_cli_args: Additional CLI arguments appended to the command.
Returns:
`ExecutionResult` with success/failure status and per-node artifacts.
Raises:
ValueError: If *nodes* is empty.
"""
if not nodes:
raise ValueError("Cannot execute an empty wave")
selectors = [node.dbt_selector for node in nodes]
step = self._build_dbt_command(
"build",
selectors=selectors,
full_refresh=full_refresh,
indirect_selection=indirect_selection,
target=target,
extra_cli_args=extra_cli_args,
)
job_name = f"{self._job_name_prefix}-build-wave"
success, run_results, error = self._run_ephemeral_job(step, job_name)
artifacts = self._parse_run_results(run_results)
node_ids = [node.unique_id for node in nodes]
if artifacts:
result_ids = list(dict.fromkeys(node_ids + list(artifacts)))
else:
result_ids = list(node_ids)
return ExecutionResult(
success=success,
node_ids=result_ids,
error=error,
artifacts=artifacts,
)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/cloud/_executor.py",
"license": "Apache License 2.0",
"lines": 444,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_cloud_executor.py | """Tests for DbtCloudExecutor."""
import json
import shlex
from contextlib import contextmanager
from pathlib import Path
from unittest.mock import MagicMock
import httpx
import pytest
import respx
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.cloud import DbtCloudExecutor
from prefect_dbt.cloud.runs import DbtCloudJobRunStatus
from prefect_dbt.core._executor import ExecutionResult
from prefect_dbt.core._manifest import DbtNode
# =============================================================================
# Constants
# =============================================================================
# Fixed account coordinates shared by every mocked dbt Cloud request below.
_ACCOUNT_ID = 123
_DOMAIN = "cloud.getdbt.com"
# All API routes in these tests are rooted at this accounts/{id} base URL.
_BASE = f"https://{_DOMAIN}/api/v2/accounts/{_ACCOUNT_ID}"
# =============================================================================
# Helpers
# =============================================================================
def _make_node(
    unique_id: str = "model.test.my_model",
    name: str = "my_model",
    resource_type: NodeType = NodeType.Model,
    original_file_path: str | None = "models/my_model.sql",
) -> DbtNode:
    """Build a DbtNode with sensible model defaults for tests."""
    attributes = {
        "unique_id": unique_id,
        "name": name,
        "resource_type": resource_type,
        "original_file_path": original_file_path,
    }
    return DbtNode(**attributes)
def _make_mock_credentials() -> MagicMock:
    """Return a credentials mock with just enough shape for httpx.Client setup."""
    creds = MagicMock()
    creds.account_id = _ACCOUNT_ID
    creds.domain = _DOMAIN
    creds.api_key.get_secret_value.return_value = "test-api-key"
    return creds
def _make_executor(
    job_name_prefix: str = "test-orchestrator",
    timeout_seconds: int = 30,
    poll_frequency_seconds: int = 0,
    threads: int | None = None,
    defer_to_job_id: int | None = None,
) -> DbtCloudExecutor:
    """Construct a DbtCloudExecutor wired to mock credentials."""
    settings = dict(
        job_name_prefix=job_name_prefix,
        timeout_seconds=timeout_seconds,
        poll_frequency_seconds=poll_frequency_seconds,
        threads=threads,
        defer_to_job_id=defer_to_job_id,
    )
    return DbtCloudExecutor(
        credentials=_make_mock_credentials(),
        project_id=1,
        environment_id=2,
        **settings,
    )
@contextmanager
def _mock_ephemeral_job(
    job_id: int = 42,
    run_id: int = 100,
    final_status: int = DbtCloudJobRunStatus.SUCCESS.value,
    run_results: dict | None = None,
    delete_raises: bool = False,
):
    """Mock every route touched by one ephemeral-job execution.

    Yields the respx `MockRouter`; make any `mock.calls` assertions inside
    the `with` body, since respx resets its recorded calls on exit.
    """
    delete_status = 500 if delete_raises else 200
    with respx.mock as router:
        router.post(f"{_BASE}/jobs/").mock(
            return_value=httpx.Response(200, json={"data": {"id": job_id}})
        )
        router.post(f"{_BASE}/jobs/{job_id}/run/").mock(
            return_value=httpx.Response(200, json={"data": {"id": run_id}})
        )
        router.get(f"{_BASE}/runs/{run_id}/").mock(
            return_value=httpx.Response(200, json={"data": {"status": final_status}})
        )
        router.get(f"{_BASE}/runs/{run_id}/artifacts/run_results.json").mock(
            return_value=httpx.Response(200, json=run_results or {"results": []})
        )
        router.delete(f"{_BASE}/jobs/{job_id}/").mock(
            return_value=httpx.Response(delete_status, json={})
        )
        yield router
def _create_job_body(mock) -> dict:
"""Return the JSON body sent with the first (create-job) request."""
return json.loads(mock.calls[0].request.content)
# =============================================================================
# _build_dbt_command
# =============================================================================
class TestBuildDbtCommand:
    """_build_dbt_command assembles shell-safe dbt CLI strings."""

    def test_basic_run(self):
        command = _make_executor()._build_dbt_command(
            "run", ["path:models/my_model.sql"]
        )
        assert command == "dbt run --select path:models/my_model.sql"

    def test_full_refresh_run(self):
        command = _make_executor()._build_dbt_command(
            "run", ["path:models/my.sql"], full_refresh=True
        )
        assert "--full-refresh" in command

    def test_full_refresh_ignored_for_test(self):
        command = _make_executor()._build_dbt_command(
            "test", ["some_test"], full_refresh=True
        )
        assert "--full-refresh" not in command

    def test_full_refresh_ignored_for_snapshot(self):
        command = _make_executor()._build_dbt_command(
            "snapshot", ["path:snap.sql"], full_refresh=True
        )
        assert "--full-refresh" not in command

    def test_threads_flag(self):
        command = _make_executor(threads=4)._build_dbt_command("run", ["path:my.sql"])
        assert "--threads 4" in command

    def test_no_threads_by_default(self):
        command = _make_executor()._build_dbt_command("run", ["path:my.sql"])
        assert "--threads" not in command

    def test_indirect_selection(self):
        command = _make_executor()._build_dbt_command(
            "build", ["path:my.sql"], indirect_selection="empty"
        )
        assert "--indirect-selection empty" in command

    def test_multiple_selectors(self):
        command = _make_executor()._build_dbt_command("build", ["sel1", "sel2", "sel3"])
        assert "--select sel1 sel2 sel3" in command

    def test_seed_with_full_refresh(self):
        command = _make_executor()._build_dbt_command(
            "seed", ["path:seeds/my.csv"], full_refresh=True
        )
        assert "--full-refresh" in command

    def test_target_flag(self):
        command = _make_executor()._build_dbt_command(
            "run", ["path:my.sql"], target="prod"
        )
        assert "--target prod" in command

    def test_target_absent_by_default(self):
        command = _make_executor()._build_dbt_command("run", ["path:my.sql"])
        assert "--target" not in command

    def test_extra_cli_args_appended(self):
        command = _make_executor()._build_dbt_command(
            "run", ["path:my.sql"], extra_cli_args=["--store-failures", "--warn-error"]
        )
        assert "--store-failures" in command
        assert "--warn-error" in command

    def test_extra_cli_args_none_no_effect(self):
        command = _make_executor()._build_dbt_command(
            "run", ["path:my.sql"], extra_cli_args=None
        )
        assert command == "dbt run --select path:my.sql"

    def test_space_in_extra_cli_args_is_shell_quoted(self):
        """Values with spaces must be shell-quoted so Cloud parses them as one token."""
        command = _make_executor()._build_dbt_command(
            "run",
            ["path:my.sql"],
            extra_cli_args=["--vars", "{'my_var': 'hello world'}"],
        )
        tokens = shlex.split(command)
        assert "--vars" in tokens
        assert "{'my_var': 'hello world'}" in tokens

    def test_build_with_all_flags(self):
        command = _make_executor(threads=8)._build_dbt_command(
            "build",
            ["path:models/a.sql", "path:models/b.sql"],
            full_refresh=True,
            indirect_selection="empty",
            target="staging",
            extra_cli_args=["--store-failures"],
        )
        assert command.startswith("dbt build")
        assert "--threads 8" in command
        assert "--full-refresh" in command
        assert "--indirect-selection empty" in command
        assert "--target staging" in command
        assert "path:models/a.sql" in command
        assert "path:models/b.sql" in command
        assert "--store-failures" in command
# =============================================================================
# _parse_run_results
# =============================================================================
class TestParseRunResults:
    """_parse_run_results turns run_results.json into per-node artifacts."""

    def test_none_input(self):
        assert _make_executor()._parse_run_results(None) is None

    def test_empty_dict(self):
        assert _make_executor()._parse_run_results({}) is None

    def test_missing_results_key(self):
        assert _make_executor()._parse_run_results({"metadata": {}}) is None

    def test_empty_results_list(self):
        assert _make_executor()._parse_run_results({"results": []}) is None

    def test_single_result(self):
        payload = {
            "results": [
                {
                    "unique_id": "model.test.stg_users",
                    "status": "success",
                    "message": "SELECT 100",
                    "execution_time": 1.5,
                }
            ]
        }
        expected = {
            "model.test.stg_users": {
                "status": "success",
                "message": "SELECT 100",
                "execution_time": 1.5,
            }
        }
        assert _make_executor()._parse_run_results(payload) == expected

    def test_multiple_results(self):
        payload = {
            "results": [
                {
                    "unique_id": "model.test.a",
                    "status": "success",
                    "execution_time": 1.0,
                },
                {"unique_id": "model.test.b", "status": "error", "execution_time": 0.5},
            ]
        }
        artifacts = _make_executor()._parse_run_results(payload)
        assert len(artifacts) == 2
        assert artifacts["model.test.a"]["status"] == "success"
        assert artifacts["model.test.b"]["status"] == "error"

    def test_result_without_unique_id_skipped(self):
        payload = {
            "results": [
                {"status": "success"},  # entry lacking unique_id is dropped
                {"unique_id": "model.test.a", "status": "success"},
            ]
        }
        artifacts = _make_executor()._parse_run_results(payload)
        assert list(artifacts.keys()) == ["model.test.a"]
# =============================================================================
# execute_node
# =============================================================================
class TestExecuteNode:
    """execute_node: one ephemeral job per node, always cleaned up afterwards."""

    def test_success(self):
        run_results = {
            "results": [
                {
                    "unique_id": "model.test.my_model",
                    "status": "success",
                    "message": "SELECT 50",
                    "execution_time": 2.0,
                }
            ]
        }
        ex = _make_executor()
        with _mock_ephemeral_job(run_results=run_results):
            result = ex.execute_node(_make_node(), "run")
        assert result.success is True
        assert "model.test.my_model" in result.node_ids
        assert result.error is None
        assert result.artifacts["model.test.my_model"]["status"] == "success"

    def test_success_creates_and_deletes_job(self):
        ex = _make_executor()
        with _mock_ephemeral_job(job_id=99) as mock:
            ex.execute_node(_make_node(), "run")
            # First request: POST /jobs/ -> Last request: DELETE /jobs/99/
            assert mock.calls[0].request.method == "POST"
            assert mock.calls[0].request.url.path.endswith("/jobs/")
            assert mock.calls[-1].request.method == "DELETE"
            assert mock.calls[-1].request.url.path.endswith("/jobs/99/")

    def test_failure_still_deletes_job(self):
        ex = _make_executor()
        with _mock_ephemeral_job(
            job_id=55, final_status=DbtCloudJobRunStatus.FAILED.value
        ) as mock:
            result = ex.execute_node(_make_node(), "run")
            assert mock.calls[-1].request.method == "DELETE"
            assert mock.calls[-1].request.url.path.endswith("/jobs/55/")
        assert result.success is False
        assert result.error is not None

    def test_failure_result(self):
        ex = _make_executor()
        with _mock_ephemeral_job(
            final_status=DbtCloudJobRunStatus.FAILED.value,
            run_results={"results": []},
        ):
            result = ex.execute_node(_make_node(), "run")
        assert result.success is False
        assert isinstance(result.error, RuntimeError)
        assert "FAILED" in str(result.error)

    def test_cancelled_run_is_failure(self):
        # Any non-SUCCESS terminal status is reported as a failure.
        ex = _make_executor()
        with _mock_ephemeral_job(final_status=DbtCloudJobRunStatus.CANCELLED.value):
            result = ex.execute_node(_make_node(), "run")
        assert result.success is False
        assert "CANCELLED" in str(result.error)

    def test_correct_command_built(self):
        ex = _make_executor()
        node = _make_node(original_file_path="models/staging/stg_users.sql")
        with _mock_ephemeral_job() as mock:
            ex.execute_node(node, "run")
            steps = _create_job_body(mock)["execute_steps"]
            assert len(steps) == 1
            assert steps[0] == "dbt run --select path:models/staging/stg_users.sql"

    def test_full_refresh_flag_in_command(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_node(_make_node(), "run", full_refresh=True)
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--full-refresh" in step

    def test_seed_command(self):
        ex = _make_executor()
        node = _make_node(
            unique_id="seed.test.customers",
            name="customers",
            resource_type=NodeType.Seed,
            original_file_path="seeds/customers.csv",
        )
        with _mock_ephemeral_job() as mock:
            ex.execute_node(node, "seed")
            step = _create_job_body(mock)["execute_steps"][0]
            assert step.startswith("dbt seed")

    def test_job_name_contains_command_and_node_name(self):
        ex = _make_executor(job_name_prefix="my-prefix")
        with _mock_ephemeral_job() as mock:
            ex.execute_node(_make_node(name="stg_users"), "run")
            name = _create_job_body(mock)["name"]
        assert "my-prefix" in name
        assert "run" in name
        assert "stg_users" in name

    def test_timeout_raises_error_when_run_stays_non_terminal(self):
        # timeout_seconds=0: one poll of a RUNNING status then TimeoutError,
        # surfaced through the result tuple rather than raised.
        ex = _make_executor(timeout_seconds=0, poll_frequency_seconds=0)
        with respx.mock:
            respx.post(f"{_BASE}/jobs/").mock(
                return_value=httpx.Response(200, json={"data": {"id": 42}})
            )
            respx.post(f"{_BASE}/jobs/42/run/").mock(
                return_value=httpx.Response(200, json={"data": {"id": 100}})
            )
            respx.get(f"{_BASE}/runs/100/").mock(
                return_value=httpx.Response(
                    200, json={"data": {"status": DbtCloudJobRunStatus.RUNNING.value}}
                )
            )
            respx.delete(f"{_BASE}/jobs/42/").mock(
                return_value=httpx.Response(200, json={})
            )
            result = ex.execute_node(_make_node(), "run")
        assert result.success is False
        assert "did not complete within" in str(result.error)

    def test_cleanup_failure_does_not_mask_run_error(self):
        ex = _make_executor()
        with _mock_ephemeral_job(
            final_status=DbtCloudJobRunStatus.FAILED.value, delete_raises=True
        ):
            result = ex.execute_node(_make_node(), "run")
        assert result.success is False
        assert "FAILED" in str(result.error)

    def test_exception_during_trigger_cleans_up(self):
        ex = _make_executor()
        with respx.mock as mock:
            respx.post(f"{_BASE}/jobs/").mock(
                return_value=httpx.Response(200, json={"data": {"id": 42}})
            )
            respx.post(f"{_BASE}/jobs/42/run/").mock(
                return_value=httpx.Response(500, text="network error")
            )
            respx.delete(f"{_BASE}/jobs/42/").mock(
                return_value=httpx.Response(200, json={})
            )
            result = ex.execute_node(_make_node(), "run")
            # Job was created so cleanup DELETE must have been called.
            assert mock.calls[-1].request.method == "DELETE"
        assert result.success is False

    def test_exception_during_create_job_no_cleanup(self):
        """If create_job fails, job_id is never set, so DELETE is never called."""
        ex = _make_executor()
        with respx.mock as mock:
            # Only register POST /jobs/ with 500.
            # Strict mode means any extra (DELETE) call would also raise,
            # giving automatic verification that cleanup was skipped.
            respx.post(f"{_BASE}/jobs/").mock(
                return_value=httpx.Response(500, text="error")
            )
            result = ex.execute_node(_make_node(), "run")
            assert len(mock.calls) == 1
        assert result.success is False

    def test_node_name_truncated_in_job_name(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_node(_make_node(name="a" * 100), "run")
            name = _create_job_body(mock)["name"]
        # Executor caps the node-name suffix at 40 chars.
        assert len(name) <= 100

    def test_target_forwarded_to_command(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_node(_make_node(), "run", target="prod")
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--target prod" in step

    def test_extra_cli_args_forwarded_to_command(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_node(_make_node(), "run", extra_cli_args=["--store-failures"])
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--store-failures" in step
# =============================================================================
# execute_wave
# =============================================================================
class TestExecuteWave:
    """execute_wave: a single `dbt build` job covering every node in the wave."""

    def test_empty_wave_raises(self):
        with pytest.raises(ValueError, match="empty wave"):
            _make_executor().execute_wave([])

    def test_success(self):
        run_results = {
            "results": [
                {
                    "unique_id": "model.test.a",
                    "status": "success",
                    "execution_time": 1.0,
                },
                {
                    "unique_id": "model.test.b",
                    "status": "success",
                    "execution_time": 2.0,
                },
            ]
        }
        ex = _make_executor()
        nodes = [
            _make_node("model.test.a", "a", original_file_path="models/a.sql"),
            _make_node("model.test.b", "b", original_file_path="models/b.sql"),
        ]
        with _mock_ephemeral_job(run_results=run_results):
            result = ex.execute_wave(nodes)
        assert result.success is True
        assert "model.test.a" in result.node_ids
        assert "model.test.b" in result.node_ids

    def test_uses_dbt_build(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(
                [_make_node("model.test.a", "a", original_file_path="models/a.sql")]
            )
            step = _create_job_body(mock)["execute_steps"][0]
            assert step.startswith("dbt build")

    def test_all_selectors_included(self):
        # Every node's selector must appear in the single build step.
        ex = _make_executor()
        nodes = [
            _make_node("model.test.a", "a", original_file_path="models/a.sql"),
            _make_node("model.test.b", "b", original_file_path="models/b.sql"),
            _make_node("model.test.c", "c", original_file_path="models/c.sql"),
        ]
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(nodes)
            cmd = _create_job_body(mock)["execute_steps"][0]
            assert "path:models/a.sql" in cmd
            assert "path:models/b.sql" in cmd
            assert "path:models/c.sql" in cmd

    def test_indirect_selection_passed(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(
                [_make_node("model.test.a", "a", original_file_path="models/a.sql")],
                indirect_selection="empty",
            )
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--indirect-selection empty" in step

    def test_failure_deletes_job(self):
        ex = _make_executor()
        with _mock_ephemeral_job(
            job_id=77, final_status=DbtCloudJobRunStatus.FAILED.value
        ) as mock:
            result = ex.execute_wave([_make_node()])
            assert mock.calls[-1].request.method == "DELETE"
            assert mock.calls[-1].request.url.path.endswith("/jobs/77/")
        assert result.success is False

    def test_full_refresh_flag(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(
                [_make_node("model.test.a", "a", original_file_path="models/a.sql")],
                full_refresh=True,
            )
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--full-refresh" in step

    def test_target_forwarded_to_command(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(
                [_make_node("model.test.a", "a", original_file_path="models/a.sql")],
                target="prod",
            )
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--target prod" in step

    def test_extra_cli_args_forwarded_to_command(self):
        ex = _make_executor()
        with _mock_ephemeral_job() as mock:
            ex.execute_wave(
                [_make_node("model.test.a", "a", original_file_path="models/a.sql")],
                extra_cli_args=["--store-failures"],
            )
            step = _create_job_body(mock)["execute_steps"][0]
            assert "--store-failures" in step
# =============================================================================
# fetch_manifest_from_job
# =============================================================================
class TestFetchManifestFromJob:
    """fetch_manifest_from_job hits the jobs/{id}/artifacts endpoint."""

    def test_fetches_manifest(self):
        manifest_data = {"metadata": {"dbt_version": "1.7.0"}, "nodes": {}}
        executor = _make_executor()
        with respx.mock:
            respx.get(f"{_BASE}/jobs/111/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=manifest_data)
            )
            fetched = executor.fetch_manifest_from_job(job_id=111)
        assert fetched == manifest_data

    def test_correct_endpoint_called(self):
        executor = _make_executor()
        with respx.mock as router:
            respx.get(f"{_BASE}/jobs/999/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json={"nodes": {}})
            )
            executor.fetch_manifest_from_job(job_id=999)
            request = router.calls[0].request
            assert request.method == "GET"
            assert "/jobs/999/artifacts/manifest.json" in str(request.url)
# =============================================================================
# generate_manifest
# =============================================================================
@contextmanager
def _mock_generate_manifest(
    job_id: int = 50,
    run_id: int = 500,
    final_status: int = DbtCloudJobRunStatus.SUCCESS.value,
    manifest_data: dict | None = None,
):
    """Mock the routes used by generate_manifest (ephemeral dbt compile).

    Yields the respx `MockRouter`; assertions on `mock.calls` must be made
    inside the `with` body.
    """
    run_succeeded = final_status == DbtCloudJobRunStatus.SUCCESS.value
    with respx.mock as router:
        router.post(f"{_BASE}/jobs/").mock(
            return_value=httpx.Response(200, json={"data": {"id": job_id}})
        )
        router.post(f"{_BASE}/jobs/{job_id}/run/").mock(
            return_value=httpx.Response(200, json={"data": {"id": run_id}})
        )
        router.get(f"{_BASE}/runs/{run_id}/").mock(
            return_value=httpx.Response(200, json={"data": {"status": final_status}})
        )
        if run_succeeded:
            # Only successful runs expose a manifest artifact.
            payload = manifest_data or {"nodes": {}, "sources": {}}
            router.get(f"{_BASE}/runs/{run_id}/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=payload)
            )
        router.delete(f"{_BASE}/jobs/{job_id}/").mock(
            return_value=httpx.Response(200, json={})
        )
        yield router
class TestGenerateManifest:
    """generate_manifest: ephemeral `dbt compile` job, deleted afterwards."""

    def test_success_returns_manifest(self):
        manifest_data = {"nodes": {}, "sources": {}}
        ex = _make_executor()
        with _mock_generate_manifest(manifest_data=manifest_data):
            result = ex.generate_manifest()
        assert result == manifest_data

    def test_uses_dbt_compile_step(self):
        ex = _make_executor()
        with _mock_generate_manifest() as mock:
            ex.generate_manifest()
            assert _create_job_body(mock)["execute_steps"] == ["dbt compile"]

    def test_compile_job_deleted_on_success(self):
        ex = _make_executor()
        with _mock_generate_manifest(job_id=60) as mock:
            ex.generate_manifest()
            assert mock.calls[-1].request.method == "DELETE"
            assert mock.calls[-1].request.url.path.endswith("/jobs/60/")

    def test_compile_job_deleted_on_failure(self):
        # Cleanup runs in a finally block, so DELETE fires even when the
        # compile run fails and generate_manifest raises.
        ex = _make_executor()
        with _mock_generate_manifest(
            job_id=70, final_status=DbtCloudJobRunStatus.FAILED.value
        ) as mock:
            with pytest.raises(RuntimeError, match="FAILED"):
                ex.generate_manifest()
            assert mock.calls[-1].request.method == "DELETE"
            assert mock.calls[-1].request.url.path.endswith("/jobs/70/")

    def test_job_name_contains_compile_and_prefix(self):
        ex = _make_executor(job_name_prefix="my-prefix")
        with _mock_generate_manifest() as mock:
            ex.generate_manifest()
            name = _create_job_body(mock)["name"]
        assert "my-prefix" in name
        assert "compile" in name
# =============================================================================
# resolve_manifest_path
# =============================================================================
class TestResolveManifestPath:
    """resolve_manifest_path: fetch-or-generate, then write to a temp dir."""

    def test_uses_defer_to_job_id(self):
        # With defer_to_job_id set, the manifest comes straight from that job's
        # artifact endpoint (no compile job is created).
        manifest_data = {"nodes": {}, "sources": {}}
        ex = _make_executor(defer_to_job_id=111)
        with respx.mock as mock:
            respx.get(f"{_BASE}/jobs/111/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=manifest_data)
            )
            path = ex.resolve_manifest_path()
            assert "/jobs/111/" in str(mock.calls[0].request.url)
        assert path.exists()
        assert path.name == "manifest.json"
        with open(path) as f:
            assert json.load(f) == manifest_data

    def test_generates_manifest_when_no_defer(self):
        manifest_data = {"nodes": {}, "sources": {}}
        ex = _make_executor()
        with _mock_generate_manifest(manifest_data=manifest_data):
            path = ex.resolve_manifest_path()
        assert path.exists()
        assert path.name == "manifest.json"
        with open(path) as f:
            assert json.load(f) == manifest_data

    def test_isolated_target_dir_per_executor(self):
        """Each executor instance gets its own temp directory."""
        manifest_data = {"nodes": {}}
        ex1 = _make_executor(defer_to_job_id=1)
        ex2 = _make_executor(defer_to_job_id=1)
        with respx.mock:
            respx.get(f"{_BASE}/jobs/1/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=manifest_data)
            )
            path_a = ex1.resolve_manifest_path()
            path_b = ex2.resolve_manifest_path()
        assert path_a.parent != path_b.parent

    def test_temp_dir_cleaned_up_on_executor_gc(self):
        manifest_data = {"nodes": {}}
        ex = _make_executor(defer_to_job_id=1)
        with respx.mock:
            respx.get(f"{_BASE}/jobs/1/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=manifest_data)
            )
            path = ex.resolve_manifest_path()
        assert path.exists()
        # Explicit cleanup stands in for what TemporaryDirectory does on GC.
        ex._manifest_temp_dir.cleanup()
        assert not path.exists()

    def test_returns_absolute_path_object(self):
        manifest_data = {"nodes": {}}
        ex = _make_executor(defer_to_job_id=1)
        with respx.mock:
            respx.get(f"{_BASE}/jobs/1/artifacts/manifest.json").mock(
                return_value=httpx.Response(200, json=manifest_data)
            )
            path = ex.resolve_manifest_path()
        assert isinstance(path, Path)
        assert path.is_absolute()
        assert path.name == "manifest.json"
# =============================================================================
# Orchestrator integration: manifest from DbtCloudExecutor
# =============================================================================
class TestOrchestratorManifestResolution:
    """Orchestrator integration: manifest resolution via the executor."""
    def test_orchestrator_uses_executor_resolve_manifest_path(self, tmp_path):
        """The orchestrator delegates manifest resolution to its executor."""
        from prefect_dbt.core._orchestrator import PrefectDbtOrchestrator
        manifest_path = tmp_path / "manifest.json"
        manifest_data = {
            "nodes": {
                "model.test.my_model": {
                    "name": "my_model",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                }
            },
            "sources": {},
            "metadata": {"adapter_type": "postgres"},
        }
        manifest_path.write_text(json.dumps(manifest_data))
        mock_executor = MagicMock()
        mock_executor.resolve_manifest_path.return_value = manifest_path
        mock_executor.execute_wave.return_value = ExecutionResult(
            success=True, node_ids=["model.test.my_model"]
        )
        orch = PrefectDbtOrchestrator(executor=mock_executor)
        resolved = orch._resolve_manifest_path()
        assert resolved == manifest_path
        mock_executor.resolve_manifest_path.assert_called_once()
    def test_settings_target_path_synced_after_executor_manifest(self, tmp_path):
        """Resolving a manifest re-points settings.target_path at its parent."""
        from prefect_dbt.core._orchestrator import PrefectDbtOrchestrator
        manifest_path = tmp_path / "manifest.json"
        manifest_path.write_text(json.dumps({"nodes": {}, "sources": {}}))
        mock_executor = MagicMock()
        mock_executor.resolve_manifest_path.return_value = manifest_path
        orch = PrefectDbtOrchestrator(executor=mock_executor)
        original_target_path = orch._settings.target_path
        orch._resolve_manifest_path()
        assert orch._settings.target_path == manifest_path.parent
        assert orch._settings.target_path != original_target_path
    def test_executor_manifest_path_persisted_for_target_resolution(self, tmp_path):
        """The resolved path is cached so the executor is only queried once."""
        from prefect_dbt.core._orchestrator import PrefectDbtOrchestrator
        manifest_path = tmp_path / "manifest.json"
        manifest_path.write_text(json.dumps({"nodes": {}, "sources": {}}))
        mock_executor = MagicMock()
        mock_executor.resolve_manifest_path.return_value = manifest_path
        orch = PrefectDbtOrchestrator(executor=mock_executor)
        assert orch._manifest_path is None
        orch._resolve_manifest_path()
        assert orch._manifest_path == manifest_path
        assert orch._resolve_target_path() == manifest_path.parent
        # Second resolution hits the cache, not the executor.
        assert orch._resolve_manifest_path() == manifest_path
        assert mock_executor.resolve_manifest_path.call_count == 1
    def test_orchestrator_skips_executor_when_manifest_path_provided(self, tmp_path):
        """An explicit manifest_path bypasses the executor entirely."""
        from prefect_dbt.core._orchestrator import PrefectDbtOrchestrator
        manifest_path = tmp_path / "manifest.json"
        manifest_path.write_text(json.dumps({"nodes": {}, "sources": {}}))
        mock_executor = MagicMock()
        mock_executor.resolve_manifest_path.return_value = Path("/should/not/be/called")
        orch = PrefectDbtOrchestrator(
            executor=mock_executor, manifest_path=manifest_path
        )
        resolved = orch._resolve_manifest_path()
        assert resolved == manifest_path
        mock_executor.resolve_manifest_path.assert_not_called()
# =============================================================================
# DbtExecutor protocol compliance
# =============================================================================
class TestProtocolCompliance:
    """DbtCloudExecutor satisfies the DbtExecutor protocol surface."""
    def test_implements_dbt_executor_protocol(self):
        """isinstance check against the runtime-checkable protocol."""
        from prefect_dbt.core._executor import DbtExecutor
        executor = DbtCloudExecutor(
            credentials=_make_mock_credentials(), project_id=1, environment_id=2
        )
        assert isinstance(executor, DbtExecutor)
    def test_has_execute_node(self):
        executor = _make_executor()
        assert callable(getattr(executor, "execute_node", None))
    def test_has_execute_wave(self):
        executor = _make_executor()
        assert callable(getattr(executor, "execute_wave", None))
    def test_has_resolve_manifest_path(self):
        executor = _make_executor()
        assert callable(getattr(executor, "resolve_manifest_path", None))
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_cloud_executor.py",
"license": "Apache License 2.0",
"lines": 731,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/cli/_cloud_utils.py | """Shared cloud utilities used by both typer and cyclopts CLI implementations."""
from __future__ import annotations
import traceback
import urllib.parse
import uuid
import warnings
import webbrowser
from contextlib import asynccontextmanager
from typing import (
TYPE_CHECKING,
Iterable,
Literal,
NoReturn,
TypeVar,
overload,
)
import anyio
import anyio.abc
import readchar
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from rich.console import Console
from rich.live import Live
from rich.table import Table
if TYPE_CHECKING:
from prefect.client.schemas import Workspace
T = TypeVar("T")
def _exit_with_error(message: str, console: Console | None = None) -> NoReturn:
    """Print *message* in red on *console* (or a fresh one) and exit(1)."""
    out = console if console is not None else Console(highlight=False, soft_wrap=True)
    if message:
        out.print(message, style="red")
    raise SystemExit(1)
# ---------------------------------------------------------------------------
# Pydantic models for browser-based login
# ---------------------------------------------------------------------------
class LoginSuccess(BaseModel):
    """Payload posted by the Cloud UI after a successful browser login."""
    # API key minted for the CLI session.
    api_key: str
class LoginFailed(BaseModel):
    """Payload posted by the Cloud UI when the browser login fails."""
    # Human-readable reason the login did not complete.
    reason: str
class LoginResult(BaseModel):
    """Tagged union of the two possible login outcomes."""
    # Discriminator matching the endpoint that received the payload.
    type: Literal["success", "failure"]
    content: LoginSuccess | LoginFailed
class ServerExit(Exception):
    """Raised to request shutdown of the temporary login API server."""
    pass
# ---------------------------------------------------------------------------
# Login API (FastAPI app for browser-based OAuth callback)
# ---------------------------------------------------------------------------
def set_login_api_ready_event() -> None:
    """Signal the waiting CLI that the login API has started its lifespan."""
    # The event object is installed by login_with_browser before serving.
    login_api.extra["ready-event"].set()
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: mark the login API ready on startup."""
    try:
        set_login_api_ready_event()
        yield
    finally:
        # No teardown required; the server is cancelled externally.
        pass
login_api: FastAPI = FastAPI(lifespan=lifespan)
"""
This small API server is used for data transmission for browser-based log in.
"""
# NOTE(review): wide-open CORS is deliberate — the server binds an ephemeral
# localhost port and only ever receives the one login callback payload.
login_api.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
@login_api.post("/success")
def receive_login(payload: LoginSuccess) -> None:
    """Record a successful browser login and wake the waiting CLI."""
    login_api.extra["result"] = LoginResult(type="success", content=payload)
    login_api.extra["result-event"].set()
@login_api.post("/failure")
def receive_failure(payload: LoginFailed) -> None:
    """Record a failed browser login and wake the waiting CLI."""
    login_api.extra["result"] = LoginResult(type="failure", content=payload)
    login_api.extra["result-event"].set()
# ---------------------------------------------------------------------------
# Shared helper functions
# ---------------------------------------------------------------------------
def get_current_workspace(workspaces: Iterable[Workspace]) -> Workspace | None:
    """Return the workspace whose API URL matches PREFECT_API_URL, if any."""
    from prefect.settings import PREFECT_API_URL

    current_api_url = PREFECT_API_URL.value()
    if not current_api_url:
        return None
    return next(
        (ws for ws in workspaces if ws.api_url() == current_api_url),
        None,
    )
def confirm_logged_in(console: Console | None = None) -> None:
    """Exit with an error message if no Prefect Cloud API key is configured."""
    import prefect.context
    from prefect.settings import PREFECT_API_KEY

    # Guard clause: an API key present means we're already authenticated.
    if PREFECT_API_KEY:
        return
    profile = prefect.context.get_settings_context().profile
    _exit_with_error(
        f"Currently not authenticated in profile {profile.name!r}. "
        "Please log in with `prefect cloud login`.",
        console=console,
    )
async def check_key_is_valid_for_login(key: str) -> bool:
    """Return True if *key* can successfully list Cloud workspaces."""
    from prefect.client.cloud import CloudUnauthorizedError, get_cloud_client

    async with get_cloud_client(api_key=key) as client:
        try:
            await client.read_workspaces()
        except CloudUnauthorizedError:
            return False
        return True
# ---------------------------------------------------------------------------
# Interactive prompt utilities
# ---------------------------------------------------------------------------
@overload
def prompt_select_from_list(
    console: Console, prompt: str, options: list[str]
) -> str: ...
@overload
def prompt_select_from_list(
    console: Console, prompt: str, options: list[tuple[T, str]]
) -> T: ...
def prompt_select_from_list(
    console: Console, prompt: str, options: list[str] | list[tuple[T, str]]
) -> str | T:
    """
    Given a list of options, display the values to user in a table and prompt them
    to select one.

    Args:
        console: Rich console used to render the live selection table.
        prompt: Question displayed above the options.
        options: A list of options to present to the user.
            A list of tuples can be passed as key value pairs. If a value is chosen,
            the key will be returned.

    Returns:
        The selected option — the string itself, or the tuple's key when
        key/value pairs were passed.
    """
    current_idx = 0
    selected_option = None
    def build_table() -> Table:
        """
        Generate a table of options. The `current_idx` will be highlighted.
        """
        table = Table(box=None, header_style=None, padding=(0, 0))
        table.add_column(
            f"? [bold]{prompt}[/] [bright_blue][Use arrows to move; enter to select]",
            justify="left",
            no_wrap=True,
        )
        for i, option in enumerate(options):
            if isinstance(option, tuple):
                # Display the human-readable value, not the key.
                option = option[1]
            if i == current_idx:
                # Use blue for selected options
                table.add_row("[bold][blue]> " + option)
            else:
                table.add_row("  " + option)
        return table
    with Live(build_table(), auto_refresh=False, console=console) as live:
        while selected_option is None:
            try:
                key = readchar.readkey()
            except KeyboardInterrupt:
                raise SystemExit(130)
            if key == readchar.key.UP:
                current_idx = current_idx - 1
                # wrap to bottom if at the top
                if current_idx < 0:
                    current_idx = len(options) - 1
            elif key == readchar.key.DOWN:
                current_idx = current_idx + 1
                # wrap to top if at the bottom
                if current_idx >= len(options):
                    current_idx = 0
            elif key == readchar.key.CTRL_C:
                # gracefully exit with no message (130 = 128 + SIGINT)
                raise SystemExit(130)
            elif key == readchar.key.ENTER or key == readchar.key.CR:
                selected_option = options[current_idx]
                # Break out of the loop immediately after setting selected_option
                break
            live.update(build_table(), refresh=True)
    # Convert tuple to its first element if needed
    if isinstance(selected_option, tuple):
        selected_option = selected_option[0]
    return selected_option
# ---------------------------------------------------------------------------
# Workspace / account selection
# ---------------------------------------------------------------------------
async def prompt_for_account_and_workspace(
    workspaces: list[Workspace],
    console: Console,
) -> tuple[Workspace | None, bool]:
    """Interactively choose a workspace, narrowing by account for long lists.

    When more than 10 workspaces are available and they span multiple
    accounts, the user first picks an account, then a workspace within it.

    Returns:
        ``(workspace, go_back)`` — the chosen workspace (``None`` if the
        user picked "go back"), and whether account selection should be
        re-entered.
    """
    if len(workspaces) > 10:
        # Group workspaces by account_id
        workspace_by_account: dict[uuid.UUID, list[Workspace]] = {}
        for workspace in workspaces:
            workspace_by_account.setdefault(workspace.account_id, []).append(workspace)
        if len(workspace_by_account) == 1:
            # Single account: no account prompt needed.
            account_id = next(iter(workspace_by_account.keys()))
            workspaces = workspace_by_account[account_id]
        else:
            accounts = [
                {
                    "account_id": account_id,
                    "account_handle": workspace_by_account[account_id][
                        0
                    ].account_handle,
                }
                for account_id in workspace_by_account.keys()
            ]
            account_options = [
                (account, str(account["account_handle"])) for account in accounts
            ]
            account = prompt_select_from_list(
                console,
                "Which account would you like to use?",
                options=account_options,
            )
            account_id = account["account_id"]
            if TYPE_CHECKING:
                assert isinstance(account_id, uuid.UUID)
            # Narrow the workspace prompt to the chosen account.
            workspaces = workspace_by_account[account_id]
    workspace_options: list[tuple[Workspace | None, str]] = [
        (workspace, workspace.handle) for workspace in workspaces
    ]
    go_back_option = (
        None,
        "[bold]Go back to account selection[/bold]",
    )
    result = prompt_select_from_list(
        console,
        "Which workspace would you like to use?",
        options=workspace_options + [go_back_option],
    )
    if not result:
        # "Go back" maps to the None key → signal re-selection.
        return None, True
    else:
        return result, False
# ---------------------------------------------------------------------------
# Browser-based login flow
# ---------------------------------------------------------------------------
async def serve_login_api(
    cancel_scope: anyio.CancelScope,
    console: Console,
    *,
    task_status: anyio.abc.TaskStatus[uvicorn.Server],
) -> None:
    """Run the temporary uvicorn login API until cancelled.

    Reports the ``uvicorn.Server`` via *task_status* so the caller can read
    the bound ephemeral port; cancels *cancel_scope* when serving ends so
    the parent task group unwinds.
    """
    # port=0 lets the OS pick a free ephemeral port.
    config = uvicorn.Config(login_api, port=0, log_level="critical")
    server = uvicorn.Server(config)
    try:
        # Yield the server object
        task_status.started(server)
        with warnings.catch_warnings():
            # Uvicorn uses the deprecated pieces of websockets, filter out
            # the warnings until uvicorn has its dependencies updated
            warnings.filterwarnings(
                "ignore", category=DeprecationWarning, module="websockets"
            )
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                module="uvicorn.protocols.websockets",
            )
            await server.serve()
    except anyio.get_cancelled_exc_class():
        pass  # Already cancelled, do not cancel again
    except SystemExit as exc:
        # If uvicorn is misconfigured, it will throw a system exit and hide the exc
        console.print("[red][bold]X Error starting login service!")
        cause = exc.__context__  # Hide the system exit
        if TYPE_CHECKING:
            assert isinstance(cause, BaseException)
        traceback.print_exception(type(cause), value=cause, tb=cause.__traceback__)
        cancel_scope.cancel()
    else:
        # Exit if we are done serving the API
        # Uvicorn overrides signal handlers so without this Ctrl-C is broken
        cancel_scope.cancel()
async def login_with_browser(console: Console) -> str:
    """
    Perform login using the browser.

    On failure, this function will exit the process.
    On success, it will return an API key.
    """
    from prefect.settings import PREFECT_CLOUD_UI_URL
    from prefect.utilities.asyncutils import run_sync_in_worker_thread

    # Set up an event that the login API will toggle on startup
    ready_event = login_api.extra["ready-event"] = anyio.Event()
    # Set up an event that the login API will set when a response comes from the UI
    result_event = login_api.extra["result-event"] = anyio.Event()
    timeout_scope = None
    async with anyio.create_task_group() as tg:
        # Run a server in the background to get payload from the browser
        server = await tg.start(serve_login_api, tg.cancel_scope, console)
        # Wait for the login server to be ready
        with anyio.fail_after(10):
            await ready_event.wait()
            # The server may not actually be serving as the lifespan is started first
            while not server.started:
                await anyio.sleep(0)
        # Get the port the server is using
        server_port = server.servers[0].sockets[0].getsockname()[1]
        callback = urllib.parse.quote(f"http://localhost:{server_port}")
        ui_login_url = (
            PREFECT_CLOUD_UI_URL.value() + f"/auth/client?callback={callback}"
        )
        # Then open the authorization page in a new browser tab
        console.print("Opening browser...")
        # webbrowser.open_new_tab can block, so run it off the event loop.
        await run_sync_in_worker_thread(webbrowser.open_new_tab, ui_login_url)
        # Wait for the response from the browser,
        with anyio.move_on_after(120) as timeout_scope:
            console.print("Waiting for response...")
            await result_event.wait()
        # Shut down the background uvicorn server
        tg.cancel_scope.cancel()
    result = login_api.extra.get("result")
    if not result:
        if timeout_scope and timeout_scope.cancel_called:
            _exit_with_error("Timed out while waiting for authorization.", console)
        else:
            _exit_with_error("Aborted.", console)
    if result.type == "success":
        return result.content.api_key
    else:
        _exit_with_error(f"Failed to log in. {result.content.reason}", console)
# ---------------------------------------------------------------------------
# Webhook table rendering
# ---------------------------------------------------------------------------
def render_webhooks_into_table(webhooks: list[dict[str, str]]) -> Table:
    """Render webhook records into a rich table for terminal display."""
    column_names = ["webhook id", "url slug", "name", "enabled?", "template"]
    display_table = Table(show_lines=True)
    for column_name in column_names:
        display_table.add_column(column_name, overflow="fold")
    for hook in webhooks:
        row = (
            hook["id"],
            hook["slug"],
            hook["name"],
            str(hook["enabled"]),
            hook["template"],
        )
        display_table.add_row(*row)
    return display_table
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/_cloud_utils.py",
"license": "Apache License 2.0",
"lines": 336,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/_transfer_utils.py | """Shared transfer utilities used by both typer and cyclopts CLI implementations."""
from __future__ import annotations
import asyncio
import uuid
from typing import TYPE_CHECKING, Any, Callable, Sequence
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskProgressColumn,
TextColumn,
)
if TYPE_CHECKING:
from prefect.cli.transfer._migratable_resources import MigratableProtocol
async def collect_resources(client: Any) -> Sequence["MigratableProtocol"]:
    """Collect all resources from the source profile.

    Reads each transferable collection from the API concurrently, then
    wraps every item in its migratable adapter (also concurrently).
    """
    from prefect.cli.transfer._migratable_resources import construct_migratable_resource

    collections = await asyncio.gather(
        client.read_work_pools(),
        client.read_work_queues(),
        client.read_deployments(),
        client.read_block_documents(),
        client.read_variables(),
        client.read_global_concurrency_limits(),
        client.read_automations(),
    )
    # Flatten all collections and adapt each item in one gather.
    resources = await asyncio.gather(
        *[
            construct_migratable_resource(item)
            for collection in collections
            for item in collection
        ]
    )
    return resources
async def find_root_resources(
    resources: Sequence["MigratableProtocol"],
) -> Sequence["MigratableProtocol"]:
    """Find resources that aren't dependencies of any other resource.

    Falls back to returning *resources* unchanged when every resource is
    some other resource's dependency (e.g. a cycle).
    """
    dependency_ids: set[uuid.UUID] = set()
    for resource in resources:
        for dependency in await resource.get_dependencies():
            dependency_ids.add(dependency.source_id)
    root_ids = {resource.source_id for resource in resources} - dependency_ids
    if not root_ids:
        return resources
    return [resource for resource in resources if resource.source_id in root_ids]
async def execute_transfer(dag: Any, console: Console) -> dict[uuid.UUID, Any]:
    """Execute the transfer with progress reporting.

    Args:
        dag: Dependency DAG of migratable resources; must expose ``nodes``
            and ``execute_concurrent``.
        console: Rich console used to render the progress bar.

    Returns:
        Mapping of resource IDs to per-resource results, as returned by
        ``dag.execute_concurrent``.
    """
    total = len(dag.nodes)
    with Progress(
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TaskProgressColumn(),
        console=console,
    ) as progress:
        task = progress.add_task("Transferring resources...", total=total)
        async def migrate_with_progress(resource: "MigratableProtocol"):
            # Advance the bar on both success and failure so the total
            # always reflects the number of processed resources.
            try:
                await resource.migrate()
                progress.update(task, advance=1)
                return None
            except Exception as e:
                progress.update(task, advance=1)
                raise e
        results = await dag.execute_concurrent(
            migrate_with_progress,
            max_workers=5,
            skip_on_failure=True,
        )
    return results
def get_resource_display_name(resource: "MigratableProtocol") -> str:
    """Get a display name for a resource.

    The first matching ``source_*`` attribute determines the name; falls
    back to ``str(resource)`` when none match.
    """
    display_rules: list[tuple[str, Callable[["MigratableProtocol"], str]]] = [
        ("source_work_pool", lambda r: f"work-pool/{r.source_work_pool.name}"),
        ("source_work_queue", lambda r: f"work-queue/{r.source_work_queue.name}"),
        ("source_deployment", lambda r: f"deployment/{r.source_deployment.name}"),
        ("source_flow", lambda r: f"flow/{r.source_flow.name}"),
        (
            "source_block_document",
            lambda r: f"block-document/{r.source_block_document.name}",
        ),
        ("source_block_type", lambda r: f"block-type/{r.source_block_type.slug}"),
        (
            "source_block_schema",
            lambda r: f"block-schema/{str(r.source_block_schema.id)[:8]}",
        ),
        ("source_variable", lambda r: f"variable/{r.source_variable.name}"),
        ("source_automation", lambda r: f"automation/{r.source_automation.name}"),
        (
            "source_global_concurrency_limit",
            lambda r: f"concurrency-limit/{r.source_global_concurrency_limit.name}",
        ),
    ]
    render = next(
        (fmt for attribute, fmt in display_rules if hasattr(resource, attribute)),
        None,
    )
    return render(resource) if render is not None else str(resource)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/_transfer_utils.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_artifacts.py | """
Artifact helpers for the per-node dbt orchestrator.
This module provides:
- create_summary_markdown: Build a markdown summary of orchestrator results
- create_run_results_dict: Build a dbt-compatible run_results dict
- create_asset_for_node: Create a Prefect Asset from a DbtNode
- get_upstream_assets_for_node: Get upstream Asset objects for lineage
- get_compiled_code_for_node: Read compiled SQL for a node
"""
import json
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
import dbt.version
from dbt.artifacts.resources.types import NodeType
from prefect.assets import Asset, AssetProperties
from prefect.assets.core import MAX_ASSET_DESCRIPTION_LENGTH
from prefect_dbt.core._manifest import DbtNode
from prefect_dbt.utilities import format_resource_id
# Node types that produce database objects and should get assets.
ASSET_NODE_TYPES = frozenset({NodeType.Model, NodeType.Seed, NodeType.Snapshot})
# Node types that can be upstream asset dependencies.
# NOTE: sources appear here but not in ASSET_NODE_TYPES — they feed lineage
# but are never materialized by the orchestrator itself.
_UPSTREAM_ASSET_TYPES = frozenset(
    {NodeType.Model, NodeType.Seed, NodeType.Snapshot, NodeType.Source}
)
# ------------------------------------------------------------------
# Summary artifact
# ------------------------------------------------------------------
def create_summary_markdown(results: dict[str, Any]) -> str:
    """Build a markdown summary of orchestrator ``run_build()`` results.

    Args:
        results: Dict mapping node unique_id to result dict with a
            ``status`` key (``"success"``, ``"error"``, ``"skipped"``,
            or ``"cached"``).

    Returns:
        Markdown string suitable for ``create_markdown_artifact()``.
    """
    status_counts: dict[str, int] = {}
    failed: list[tuple[str, dict[str, Any]]] = []
    skipped_nodes: list[tuple[str, dict[str, Any]]] = []
    succeeded: list[str] = []
    for node_id, result in results.items():
        status = result.get("status", "unknown")
        status_counts[status] = status_counts.get(status, 0) + 1
        if status == "error":
            failed.append((node_id, result))
        elif status == "skipped":
            skipped_nodes.append((node_id, result))
        elif status in ("success", "cached"):
            succeeded.append(node_id)

    def short_name(node_id: str) -> str:
        # Last dotted component, e.g. "model.proj.x" -> "x".
        return node_id.rsplit(".", 1)[-1] if "." in node_id else node_id

    parts: list[str] = [
        "## dbt build Task Summary\n\n",
        "| Successes | Errors | Skipped | Total |\n",
        "| :-------: | :----: | :-----: | :---: |\n",
    ]
    # Cached nodes count as successes in the summary row.
    ok_count = status_counts.get("success", 0) + status_counts.get("cached", 0)
    parts.append(
        f"| {ok_count} "
        f"| {status_counts.get('error', 0)} "
        f"| {status_counts.get('skipped', 0)} "
        f"| {len(results)} |\n"
    )
    if failed:
        parts.append("\n### Unsuccessful Nodes\n\n")
        for node_id, result in failed:
            resource_type = node_id.split(".", 1)[0] if "." in node_id else "unknown"
            error_info = result.get("error", {})
            if isinstance(error_info, dict):
                message = error_info.get("message", "Unknown error")
            else:
                message = str(error_info)
            parts.append(f"**{short_name(node_id)}**\n\n")
            parts.append(f"Type: {resource_type}\n\n")
            parts.append(f"Message:\n\n> {message}\n\n")
    if skipped_nodes:
        parts.append("\n### Skipped Nodes\n\n")
        for node_id, result in skipped_nodes:
            reason = result.get("reason", "unknown reason")
            parts.append(f"* {short_name(node_id)} ({reason})\n")
        parts.append("\n")
    if succeeded:
        parts.append("\n### Successful Nodes\n\n")
        for node_id in succeeded:
            parts.append(f"* {short_name(node_id)}\n")
        parts.append("\n")
    return "".join(parts)
# ------------------------------------------------------------------
# run_results.json
# ------------------------------------------------------------------
def _map_status_to_dbt(status: str) -> str:
"""Map orchestrator status to dbt NodeStatus string."""
return {
"success": "success",
"error": "error",
"skipped": "skipped",
"cached": "success",
}.get(status, status)
def create_run_results_dict(
    results: dict[str, Any],
    elapsed_time: float,
) -> dict[str, Any]:
    """Build a dbt-compatible ``run_results.json`` dict.

    The output schema is compatible with dbt's ``run_results.json`` v6,
    allowing downstream tools (e.g. ``dbt-artifacts``) to consume
    results produced by the orchestrator.

    Args:
        results: Dict mapping node unique_id to result dict.
        elapsed_time: Total elapsed time in seconds.

    Returns:
        Dict in dbt run_results.json schema.
    """
    run_results: list[dict[str, Any]] = []
    for node_id, result in results.items():
        status = result.get("status", "unknown")
        timing_info = result.get("timing", {})
        error_info = result.get("error", {})
        # Match create_summary_markdown: tolerate a bare-string error
        # payload instead of assuming a {"message": ...} dict (previously
        # this raised AttributeError on non-dict errors).
        if isinstance(error_info, dict):
            error_message = error_info.get("message", "")
        else:
            error_message = str(error_info)
        timing_entries: list[dict[str, Any]] = []
        if timing_info.get("started_at"):
            timing_entries.append(
                {
                    "name": "execute",
                    "started_at": timing_info["started_at"],
                    "completed_at": timing_info.get("completed_at", ""),
                }
            )
        rr: dict[str, Any] = {
            "unique_id": node_id,
            "status": _map_status_to_dbt(status),
            "timing": timing_entries,
            "thread_id": "orchestrator",
            # Prefer explicit execution_time; fall back to duration_seconds.
            "execution_time": timing_info.get(
                "execution_time", timing_info.get("duration_seconds", 0.0)
            ),
            "adapter_response": {},
            "message": error_message if status == "error" else "OK",
            "failures": None,
            "compiled": None,
            "compiled_code": None,
            "relation_name": None,
            "batch_results": None,
        }
        run_results.append(rr)
    return {
        "metadata": {
            "dbt_schema_version": "https://schemas.getdbt.com/dbt/run-results/v6.json",
            "dbt_version": dbt.version.__version__,
            "generated_at": datetime.now(timezone.utc).isoformat(),
        },
        "results": run_results,
        "elapsed_time": elapsed_time,
        "args": {},
    }
def write_run_results_json(
    results: dict[str, Any],
    elapsed_time: float,
    target_dir: Path,
) -> Path:
    """Write a dbt-compatible ``run_results.json`` to *target_dir*.

    Args:
        results: Orchestrator results dict.
        elapsed_time: Total elapsed time in seconds.
        target_dir: Directory to write the file into.

    Returns:
        Path to the written file.
    """
    payload = create_run_results_dict(results, elapsed_time)
    target_dir.mkdir(parents=True, exist_ok=True)
    destination = target_dir / "run_results.json"
    destination.write_text(json.dumps(payload, indent=2))
    return destination
# ------------------------------------------------------------------
# Asset helpers
# ------------------------------------------------------------------
def create_asset_for_node(
    node: DbtNode,
    adapter_type: str,
    description_suffix: str = "",
) -> Asset:
    """Create a Prefect ``Asset`` from a ``DbtNode``.

    Args:
        node: The DbtNode to create an asset for. Must have a
            ``relation_name``.
        adapter_type: Database adapter type (e.g. ``"postgres"``).
        description_suffix: Optional suffix appended to the
            description (e.g. compiled SQL block).

    Returns:
        Asset with key derived from *adapter_type* and *relation_name*.

    Raises:
        ValueError: If the node has no ``relation_name``.
    """
    if not node.relation_name:
        raise ValueError(f"Node {node.unique_id} has no relation_name")
    asset_key = format_resource_id(adapter_type, node.relation_name)
    base_description = node.description or ""
    description = base_description + description_suffix
    if len(description) > MAX_ASSET_DESCRIPTION_LENGTH:
        # Prefer the base description (drop the suffix / compiled code);
        # truncate if even the base description alone exceeds the limit.
        description = base_description[:MAX_ASSET_DESCRIPTION_LENGTH]
    properties_kwargs: dict[str, Any] = {"name": node.name}
    if description:
        properties_kwargs["description"] = description
    meta_owner = node.config.get("meta", {}).get("owner")
    if isinstance(meta_owner, str) and meta_owner:
        properties_kwargs["owners"] = [meta_owner]
    return Asset(
        key=asset_key,
        properties=AssetProperties(**properties_kwargs),
    )
def get_upstream_assets_for_node(
    node: DbtNode,
    all_nodes: dict[str, DbtNode],
    adapter_type: str,
) -> list[Asset]:
    """Get upstream ``Asset`` objects for lineage tracking.

    Returns assets for upstream nodes (models, seeds, snapshots,
    sources) that have a ``relation_name``. Ephemeral models are
    traversed recursively so that sources or models behind them are
    still included.

    Args:
        node: The node to find upstream assets for.
        all_nodes: All parsed nodes including sources.
        adapter_type: Database adapter type.

    Returns:
        List of upstream ``Asset`` objects.
    """
    # Collect asset-eligible dep IDs by walking through ephemerals.
    # The resolved node's depends_on may have had sources stripped
    # during dependency resolution, so start from the original
    # (unresolved) node when available.
    original_node = all_nodes.get(node.unique_id)
    start_deps: set[str] = set(node.depends_on)
    if original_node is not None:
        start_deps |= set(original_node.depends_on)
    collected: list[str] = []
    visited: set[str] = set()
    def _walk(dep_id: str) -> None:
        # Depth-first walk; `visited` guards against dependency cycles.
        if dep_id in visited:
            return
        visited.add(dep_id)
        dep_node = all_nodes.get(dep_id)
        if dep_node is None:
            return
        # Trace through ephemeral models to their dependencies.
        if dep_node.materialization == "ephemeral":
            for nested_dep in dep_node.depends_on:
                _walk(nested_dep)
            return
        if dep_node.resource_type in _UPSTREAM_ASSET_TYPES and dep_node.relation_name:
            collected.append(dep_id)
    for dep_id in start_deps:
        _walk(dep_id)
    assets: list[Asset] = []
    for dep_id in collected:
        dep_node = all_nodes[dep_id]
        asset_key = format_resource_id(adapter_type, dep_node.relation_name)
        assets.append(
            Asset(
                key=asset_key,
                properties=AssetProperties(name=dep_node.name),
            )
        )
    return assets
def get_compiled_code_for_node(
    node: DbtNode,
    project_dir: Path,
    target_path: Path,
    project_name: str,
) -> str:
    """Get compiled SQL formatted for inclusion in an asset description.

    Checks the node's ``compiled_code`` field first (populated by
    ``dbt compile``), then falls back to reading from disk.

    Args:
        node: The DbtNode.
        project_dir: dbt project directory.
        target_path: dbt target path (e.g. ``Path("target")``).
        project_name: dbt project name from manifest metadata.

    Returns:
        Formatted markdown code block, or empty string if unavailable.
    """
    compiled_sql = node.compiled_code
    if not compiled_sql and node.original_file_path:
        # Fall back to the on-disk compiled artifact.
        on_disk = (
            project_dir
            / target_path
            / "compiled"
            / project_name
            / node.original_file_path
        )
        if on_disk.exists():
            compiled_sql = on_disk.read_text()
    if not compiled_sql:
        return ""
    block = f"\n### Compiled code\n```sql\n{compiled_sql.strip()}\n```"
    if len(block) <= MAX_ASSET_DESCRIPTION_LENGTH:
        return block
    return (
        "\n### Compiled code\n"
        "Compiled code was omitted because it exceeded the maximum "
        f"asset description length of {MAX_ASSET_DESCRIPTION_LENGTH} characters."
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_artifacts.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_artifacts.py | """Tests for Phase 9: Artifacts and Asset Tracking."""
import json
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from conftest import (
_make_mock_executor_per_node,
_make_mock_settings,
_make_node,
write_manifest,
)
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.core._artifacts import (
ASSET_NODE_TYPES,
create_asset_for_node,
create_run_results_dict,
create_summary_markdown,
get_compiled_code_for_node,
get_upstream_assets_for_node,
write_run_results_json,
)
from prefect_dbt.core._manifest import DbtNode
from prefect_dbt.core._orchestrator import (
ExecutionMode,
PrefectDbtOrchestrator,
)
from prefect import flow
# ============================================================
# Fixtures
# ============================================================

# Canned per-node orchestrator results: one success, one error, and one
# node skipped because of a failed upstream. Shared by the summary-markdown
# and run-results tests below.
SAMPLE_RESULTS: dict[str, Any] = {
    "model.test.stg_users": {
        "status": "success",
        "timing": {
            "started_at": "2024-01-15T10:30:00+00:00",
            "completed_at": "2024-01-15T10:30:05+00:00",
            "duration_seconds": 5.0,
        },
        "invocation": {"command": "run", "args": ["model.test.stg_users"]},
    },
    "model.test.stg_orders": {
        "status": "error",
        "timing": {
            "started_at": "2024-01-15T10:30:00+00:00",
            "completed_at": "2024-01-15T10:30:03+00:00",
            "duration_seconds": 3.0,
        },
        "invocation": {"command": "run", "args": ["model.test.stg_orders"]},
        "error": {
            "message": 'Database error: relation "raw.orders" does not exist',
            "type": "DatabaseError",
        },
    },
    "model.test.order_summary": {
        "status": "skipped",
        "reason": "upstream failure",
        "failed_upstream": ["model.test.stg_orders"],
    },
}

# Minimal manifest with adapter/project metadata, a two-model chain
# (root -> child), and a single source; used by the asset-related tests.
MANIFEST_WITH_ASSETS: dict[str, Any] = {
    "metadata": {
        "adapter_type": "postgres",
        "project_name": "test_project",
    },
    "nodes": {
        "model.test.root": {
            "name": "root",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "relation_name": '"main"."public"."root"',
            "description": "Root model",
            "original_file_path": "models/root.sql",
        },
        "model.test.child": {
            "name": "child",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
            "relation_name": '"main"."public"."child"',
            "description": "Child model",
            "original_file_path": "models/child.sql",
        },
    },
    "sources": {
        "source.test.raw.users": {
            "name": "users",
            "resource_type": "source",
            "fqn": ["test", "raw", "users"],
            "relation_name": '"main"."raw"."users"',
            "config": {},
            "description": "Raw users source",
        },
    },
}
# ============================================================
# create_summary_markdown
# ============================================================
class TestCreateSummaryMarkdown:
    """Markdown summary rendering from orchestrator result dicts."""

    def test_basic_summary(self):
        summary = create_summary_markdown(SAMPLE_RESULTS)
        assert "## dbt build Task Summary" in summary
        # One success, one error, one skip out of three nodes total.
        assert "| 1 | 1 | 1 | 3 |" in summary

    def test_error_section(self):
        summary = create_summary_markdown(SAMPLE_RESULTS)
        assert "### Unsuccessful Nodes" in summary
        assert "**stg_orders**" in summary
        assert "Database error" in summary

    def test_skipped_section(self):
        summary = create_summary_markdown(SAMPLE_RESULTS)
        assert "### Skipped Nodes" in summary
        assert "order_summary (upstream failure)" in summary

    def test_success_section(self):
        summary = create_summary_markdown(SAMPLE_RESULTS)
        assert "### Successful Nodes" in summary
        assert "stg_users" in summary

    def test_all_success(self):
        all_green = {
            "model.test.a": {"status": "success"},
            "model.test.b": {"status": "success"},
        }
        summary = create_summary_markdown(all_green)
        assert "| 2 | 0 | 0 | 2 |" in summary
        # Failure and skip sections are omitted when everything succeeded.
        assert "### Unsuccessful Nodes" not in summary
        assert "### Skipped Nodes" not in summary

    def test_empty_results(self):
        assert "| 0 | 0 | 0 | 0 |" in create_summary_markdown({})

    def test_cached_status_counted(self):
        mixed = {
            "model.test.a": {"status": "cached"},
            "model.test.b": {"status": "success"},
        }
        summary = create_summary_markdown(mixed)
        # Cached nodes show in successful list
        assert "* a" in summary
        assert "* b" in summary
        # Cached nodes count toward the Successes column (1 cached + 1 success = 2)
        assert "| 2 | 0 | 0 | 2 |" in summary
# ============================================================
# create_run_results_dict / write_run_results_json
# ============================================================
class TestRunResults:
    """create_run_results_dict / write_run_results_json behavior."""

    def test_basic_structure(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=8.0)
        assert "metadata" in payload
        assert "results" in payload
        assert payload["elapsed_time"] == 8.0
        assert len(payload["results"]) == 3

    def test_metadata_has_dbt_schema_version(self):
        meta = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)["metadata"]
        assert meta["dbt_schema_version"] == (
            "https://schemas.getdbt.com/dbt/run-results/v6.json"
        )
        assert "dbt_version" in meta
        assert "generated_at" in meta

    def test_result_has_mandatory_fields(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)
        # Every entry must expose the full dbt run-results schema surface.
        for entry in payload["results"]:
            for key in (
                "unique_id",
                "status",
                "thread_id",
                "execution_time",
                "message",
                "failures",
                "compiled",
                "compiled_code",
                "relation_name",
                "batch_results",
            ):
                assert key in entry
            assert "timing" in entry
            assert isinstance(entry["timing"], list)
            assert "adapter_response" in entry
            assert isinstance(entry["adapter_response"], dict)

    def test_success_mapping(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)
        indexed = {entry["unique_id"]: entry for entry in payload["results"]}
        assert indexed["model.test.stg_users"]["status"] == "success"
        assert indexed["model.test.stg_users"]["execution_time"] == 5.0

    def test_error_mapping(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)
        indexed = {entry["unique_id"]: entry for entry in payload["results"]}
        assert indexed["model.test.stg_orders"]["status"] == "error"
        assert "Database error" in indexed["model.test.stg_orders"]["message"]

    def test_skipped_mapping(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)
        indexed = {entry["unique_id"]: entry for entry in payload["results"]}
        assert indexed["model.test.order_summary"]["status"] == "skipped"

    def test_cached_maps_to_success(self):
        payload = create_run_results_dict(
            {"model.test.a": {"status": "cached"}}, elapsed_time=0.0
        )
        assert payload["results"][0]["status"] == "success"

    def test_timing_included(self):
        payload = create_run_results_dict(SAMPLE_RESULTS, elapsed_time=1.0)
        indexed = {entry["unique_id"]: entry for entry in payload["results"]}
        timing = indexed["model.test.stg_users"]["timing"]
        assert len(timing) == 1
        assert timing[0]["name"] == "execute"
        assert "2024-01-15T10:30:00" in timing[0]["started_at"]

    def test_timing_empty_list_when_no_data(self):
        payload = create_run_results_dict(
            {"model.test.a": {"status": "skipped"}}, elapsed_time=0.0
        )
        assert payload["results"][0]["timing"] == []

    def test_write_to_disk(self, tmp_path):
        written = write_run_results_json(SAMPLE_RESULTS, 8.0, tmp_path)
        assert written.exists()
        assert written.name == "run_results.json"
        assert len(json.loads(written.read_text())["results"]) == 3

    def test_write_creates_directory(self, tmp_path):
        # Destination directory does not exist yet; the writer must create it.
        destination = tmp_path / "nested" / "target"
        assert write_run_results_json(SAMPLE_RESULTS, 1.0, destination).exists()
# ============================================================
# Asset helpers
# ============================================================
class TestCreateAssetForNode:
    """Asset construction from a single dbt node."""

    @staticmethod
    def _model(**overrides):
        # Build a DbtNode with model defaults; tests override what they need.
        kwargs = {
            "unique_id": "model.test.m1",
            "name": "m1",
            "resource_type": NodeType.Model,
            "materialization": "table",
            "relation_name": '"main"."public"."m1"',
        }
        kwargs.update(overrides)
        return DbtNode(**kwargs)

    def test_basic_asset(self):
        node = _make_node(
            name="my_model",
            relation_name='"main"."public"."my_model"',
        )
        result = create_asset_for_node(node, "postgres")
        assert "postgres://" in result.key
        assert "my_model" in result.key
        assert result.properties.name == "my_model"

    def test_with_description(self):
        result = create_asset_for_node(
            self._model(description="A useful model"), "postgres"
        )
        assert result.properties.description == "A useful model"

    def test_with_none_description_omits_field(self):
        result = create_asset_for_node(self._model(description=None), "postgres")
        assert result.properties.description is None
        # The field must not be serialized when it was never populated.
        assert "description" not in result.properties.model_dump(exclude_unset=True)

    def test_with_description_suffix(self):
        result = create_asset_for_node(
            self._model(description="Base desc"),
            "postgres",
            description_suffix="\n### Compiled\n```sql\nSELECT 1\n```",
        )
        assert "Base desc" in result.properties.description
        assert "SELECT 1" in result.properties.description

    def test_with_owner(self):
        node = self._model(config={"meta": {"owner": "data-team"}})
        result = create_asset_for_node(node, "postgres")
        assert result.properties.owners == ["data-team"]

    def test_description_truncated_when_combined_exceeds_limit(self):
        from prefect.assets.core import MAX_ASSET_DESCRIPTION_LENGTH

        base = "A" * 200
        suffix = "B" * MAX_ASSET_DESCRIPTION_LENGTH  # Suffix that exceeds limit
        result = create_asset_for_node(
            self._model(description=base), "postgres", description_suffix=suffix
        )
        assert result.properties.description is not None
        assert len(result.properties.description) <= MAX_ASSET_DESCRIPTION_LENGTH
        # Falls back to just the base description (truncated if needed)
        assert result.properties.description == base

    def test_long_base_description_truncated(self):
        from prefect.assets.core import MAX_ASSET_DESCRIPTION_LENGTH

        oversized = "X" * (MAX_ASSET_DESCRIPTION_LENGTH + 500)
        result = create_asset_for_node(self._model(description=oversized), "postgres")
        assert len(result.properties.description) <= MAX_ASSET_DESCRIPTION_LENGTH

    def test_no_relation_name_raises(self):
        # Ephemeral models have no relation_name and cannot become assets.
        node = _make_node(name="ephemeral", materialization="ephemeral")
        with pytest.raises(ValueError, match="no relation_name"):
            create_asset_for_node(node, "postgres")
class TestGetUpstreamAssets:
    """Upstream asset discovery across a node's dependency graph."""

    def test_finds_upstream_models(self):
        parent = DbtNode(
            unique_id="model.test.root",
            name="root",
            resource_type=NodeType.Model,
            materialization="table",
            relation_name='"main"."public"."root"',
        )
        dependent = DbtNode(
            unique_id="model.test.child",
            name="child",
            resource_type=NodeType.Model,
            depends_on=("model.test.root",),
            materialization="table",
            relation_name='"main"."public"."child"',
        )
        graph = {n.unique_id: n for n in (parent, dependent)}
        upstream = get_upstream_assets_for_node(dependent, graph, "postgres")
        assert len(upstream) == 1
        assert "root" in upstream[0].key

    def test_finds_upstream_sources(self):
        raw_source = DbtNode(
            unique_id="source.test.raw.users",
            name="users",
            resource_type=NodeType.Source,
            relation_name='"main"."raw"."users"',
        )
        staging = DbtNode(
            unique_id="model.test.stg_users",
            name="stg_users",
            resource_type=NodeType.Model,
            depends_on=("source.test.raw.users",),
            materialization="view",
            relation_name='"main"."public"."stg_users"',
        )
        graph = {n.unique_id: n for n in (raw_source, staging)}
        upstream = get_upstream_assets_for_node(staging, graph, "postgres")
        assert len(upstream) == 1
        assert "users" in upstream[0].key

    def test_skips_nodes_without_relation_name(self):
        # Ephemeral parent has no relation_name, so no asset edge is emitted.
        ephemeral_parent = DbtNode(
            unique_id="model.test.ephemeral",
            name="ephemeral",
            resource_type=NodeType.Model,
            materialization="ephemeral",
        )
        dependent = DbtNode(
            unique_id="model.test.child",
            name="child",
            resource_type=NodeType.Model,
            depends_on=("model.test.ephemeral",),
            materialization="table",
            relation_name='"main"."public"."child"',
        )
        graph = {n.unique_id: n for n in (ephemeral_parent, dependent)}
        assert len(get_upstream_assets_for_node(dependent, graph, "postgres")) == 0

    def test_finds_sources_stripped_by_dependency_resolution(self):
        """After dependency resolution, source deps are stripped from
        depends_on. get_upstream_assets_for_node should recover them
        by looking up the original node in all_nodes."""
        raw_source = DbtNode(
            unique_id="source.test.raw.users",
            name="users",
            resource_type=NodeType.Source,
            relation_name='"main"."raw"."users"',
        )
        # Pre-resolution node still carries the source dependency.
        unresolved = DbtNode(
            unique_id="model.test.stg_users",
            name="stg_users",
            resource_type=NodeType.Model,
            depends_on=("source.test.raw.users",),
            materialization="view",
            relation_name='"main"."public"."stg_users"',
        )
        # Post-resolution node (from get_executable_nodes) lost the source.
        resolved = DbtNode(
            unique_id="model.test.stg_users",
            name="stg_users",
            resource_type=NodeType.Model,
            depends_on=(),
            materialization="view",
            relation_name='"main"."public"."stg_users"',
        )
        graph = {
            raw_source.unique_id: raw_source,
            unresolved.unique_id: unresolved,
        }
        upstream = get_upstream_assets_for_node(resolved, graph, "postgres")
        assert len(upstream) == 1
        assert "users" in upstream[0].key

    def test_traverses_ephemeral_to_find_source(self):
        """Model β ephemeral β source: the ephemeral should be walked
        through so the source asset edge is emitted."""
        raw_source = DbtNode(
            unique_id="source.test.raw.events",
            name="events",
            resource_type=NodeType.Source,
            relation_name='"main"."raw"."events"',
        )
        ephemeral = DbtNode(
            unique_id="model.test.stg_events_eph",
            name="stg_events_eph",
            resource_type=NodeType.Model,
            depends_on=("source.test.raw.events",),
            materialization="ephemeral",
        )
        # Pre-resolution node depends on the ephemeral model.
        unresolved = DbtNode(
            unique_id="model.test.fct_events",
            name="fct_events",
            resource_type=NodeType.Model,
            depends_on=("model.test.stg_events_eph",),
            materialization="table",
            relation_name='"main"."public"."fct_events"',
        )
        # Post-resolution node: ephemeral inlined, source stripped too.
        resolved = DbtNode(
            unique_id="model.test.fct_events",
            name="fct_events",
            resource_type=NodeType.Model,
            depends_on=(),
            materialization="table",
            relation_name='"main"."public"."fct_events"',
        )
        graph = {
            raw_source.unique_id: raw_source,
            ephemeral.unique_id: ephemeral,
            unresolved.unique_id: unresolved,
        }
        upstream = get_upstream_assets_for_node(resolved, graph, "postgres")
        assert len(upstream) == 1
        assert "events" in upstream[0].key

    def test_empty_when_no_deps(self):
        lone = DbtNode(
            unique_id="model.test.root",
            name="root",
            resource_type=NodeType.Model,
            materialization="table",
            relation_name='"main"."public"."root"',
        )
        result = get_upstream_assets_for_node(lone, {lone.unique_id: lone}, "postgres")
        assert result == []
class TestGetCompiledCode:
    """Lookup order and formatting in get_compiled_code_for_node."""

    def test_from_node_field(self, tmp_path):
        node = DbtNode(
            unique_id="model.test.m1",
            name="m1",
            resource_type=NodeType.Model,
            materialization="table",
            compiled_code="SELECT 1 AS id",
        )
        rendered = get_compiled_code_for_node(node, tmp_path, Path("target"), "proj")
        assert "SELECT 1 AS id" in rendered
        assert "```sql" in rendered

    def test_from_disk(self, tmp_path):
        # No compiled_code on the node: fall back to the file dbt wrote on disk.
        compiled_dir = tmp_path / "target" / "compiled" / "proj" / "models"
        compiled_dir.mkdir(parents=True)
        (compiled_dir / "m1.sql").write_text("SELECT 2 AS id")
        node = DbtNode(
            unique_id="model.test.m1",
            name="m1",
            resource_type=NodeType.Model,
            materialization="table",
            original_file_path="models/m1.sql",
        )
        rendered = get_compiled_code_for_node(node, tmp_path, Path("target"), "proj")
        assert "SELECT 2 AS id" in rendered

    def test_returns_empty_when_unavailable(self, tmp_path):
        node = DbtNode(
            unique_id="model.test.m1",
            name="m1",
            resource_type=NodeType.Model,
            materialization="table",
        )
        assert get_compiled_code_for_node(node, tmp_path, Path("target"), "proj") == ""

    def test_prefers_node_field_over_disk(self, tmp_path):
        compiled_dir = tmp_path / "target" / "compiled" / "proj" / "models"
        compiled_dir.mkdir(parents=True)
        (compiled_dir / "m1.sql").write_text("SELECT disk")
        node = DbtNode(
            unique_id="model.test.m1",
            name="m1",
            resource_type=NodeType.Model,
            materialization="table",
            compiled_code="SELECT manifest",
            original_file_path="models/m1.sql",
        )
        rendered = get_compiled_code_for_node(node, tmp_path, Path("target"), "proj")
        assert "SELECT manifest" in rendered
        assert "disk" not in rendered
# ============================================================
# ManifestParser metadata properties
# ============================================================
class TestManifestParserMetadata:
    """Adapter/project metadata properties exposed by ManifestParser."""

    def test_adapter_type(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        manifest_file = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        assert ManifestParser(manifest_file).adapter_type == "postgres"

    def test_project_name(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        manifest_file = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        assert ManifestParser(manifest_file).project_name == "test_project"

    def test_missing_metadata(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        # No "metadata" key at all: both properties degrade to None.
        manifest_file = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        parser = ManifestParser(manifest_file)
        assert parser.adapter_type is None
        assert parser.project_name is None
class TestDbtNodeNewFields:
    """Parsing of the description / compiled_code fields on DbtNode."""

    def test_description_parsed(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        parser = ManifestParser(write_manifest(tmp_path, MANIFEST_WITH_ASSETS))
        executable = parser.get_executable_nodes()
        assert executable["model.test.root"].description == "Root model"
        assert executable["model.test.child"].description == "Child model"

    def test_source_description_parsed(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        parser = ManifestParser(write_manifest(tmp_path, MANIFEST_WITH_ASSETS))
        assert (
            parser.all_nodes["source.test.raw.users"].description
            == "Raw users source"
        )

    def test_compiled_code_parsed(self, tmp_path):
        from prefect_dbt.core._manifest import ManifestParser

        manifest = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "compiled_code": "SELECT 1",
                },
            },
            "sources": {},
        }
        parser = ManifestParser(write_manifest(tmp_path, manifest))
        executable = parser.get_executable_nodes()
        assert executable["model.test.m1"].compiled_code == "SELECT 1"

    def test_defaults_to_none(self):
        # Neither field is required; both default to None.
        node = _make_node()
        assert node.description is None
        assert node.compiled_code is None
# ============================================================
# Orchestrator artifact integration
# ============================================================
class TestOrchestratorSummaryArtifact:
    """Test that run_build() creates a summary artifact when enabled."""

    def test_summary_artifact_created(self, tmp_path, caplog):
        manifest_path = write_manifest(
            tmp_path,
            {
                "nodes": {
                    "model.test.m1": {
                        "name": "m1",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                    },
                },
                "sources": {},
            },
        )
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        # Stub out the parser and artifact API, and fake an active flow run
        # context so the orchestrator takes the artifact-creation path.
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_markdown_artifact"
            ) as mock_create,
            patch("prefect.context.FlowRunContext.get", return_value=MagicMock()),
        ):
            # Wire the mocked parser to yield a single executable node.
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.m1": _make_node("model.test.m1", "m1"),
            }
            parser_instance.all_nodes = parser_instance.filter_nodes.return_value
            parser_instance.adapter_type = "duckdb"
            parser_instance.project_name = "test"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[_make_node("model.test.m1", "m1")]),
            ]
            parser_instance.get_macro_paths.return_value = {}
            orchestrator = PrefectDbtOrchestrator(
                settings=settings,
                executor=executor,
                manifest_path=manifest_path,
                create_summary_artifact=True,
            )
            with caplog.at_level("INFO"):
                orchestrator.run_build()
            mock_create.assert_called_once()
            # The markdown may be passed positionally or by keyword; accept both.
            call_kwargs = mock_create.call_args
            assert "dbt build Task Summary" in call_kwargs.kwargs.get(
                "markdown", call_kwargs.args[0] if call_kwargs.args else ""
            )
            # The artifact key should also be logged.
            assert any("dbt-orchestrator-summary" in msg for msg in caplog.messages)

    def test_summary_artifact_skipped_without_flow_context(self, tmp_path):
        """When there is no active flow run context, the summary artifact
        should NOT be created even when create_summary_artifact=True."""
        manifest_path = write_manifest(
            tmp_path,
            {"nodes": {}, "sources": {}},
        )
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        # FlowRunContext.get returning None simulates running outside a flow.
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_markdown_artifact"
            ) as mock_create,
            patch("prefect.context.FlowRunContext.get", return_value=None),
        ):
            # Empty parser output: run_build() completes without executing nodes.
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {}
            parser_instance.all_nodes = {}
            parser_instance.adapter_type = None
            parser_instance.project_name = None
            parser_instance.compute_execution_waves.return_value = []
            orchestrator = PrefectDbtOrchestrator(
                settings=settings,
                executor=executor,
                manifest_path=manifest_path,
                create_summary_artifact=True,
            )
            orchestrator.run_build()
            mock_create.assert_not_called()

    def test_summary_artifact_disabled(self, tmp_path):
        manifest_path = write_manifest(
            tmp_path,
            {"nodes": {}, "sources": {}},
        )
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_markdown_artifact"
            ) as mock_create,
        ):
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {}
            parser_instance.all_nodes = {}
            parser_instance.adapter_type = None
            parser_instance.project_name = None
            parser_instance.compute_execution_waves.return_value = []
            # create_summary_artifact=False must suppress the artifact entirely.
            orchestrator = PrefectDbtOrchestrator(
                settings=settings,
                executor=executor,
                manifest_path=manifest_path,
                create_summary_artifact=False,
            )
            orchestrator.run_build()
            mock_create.assert_not_called()
class TestOrchestratorWriteRunResults:
    """Test that run_build() writes run_results.json when enabled."""

    def test_run_results_written(self, tmp_path, caplog):
        # Write manifest into target/ so _resolve_target_path returns tmp_path/target
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        manifest_path = write_manifest(
            target_dir,
            {
                "nodes": {
                    "model.test.m1": {
                        "name": "m1",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                    },
                },
                "sources": {},
            },
        )
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        # Stub the parser so run_build() executes a single mocked node.
        with patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser:
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.m1": _make_node("model.test.m1", "m1"),
            }
            parser_instance.all_nodes = parser_instance.filter_nodes.return_value
            parser_instance.adapter_type = "duckdb"
            parser_instance.project_name = "test"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[_make_node("model.test.m1", "m1")]),
            ]
            parser_instance.get_macro_paths.return_value = {}
            orchestrator = PrefectDbtOrchestrator(
                settings=settings,
                executor=executor,
                manifest_path=manifest_path,
                write_run_results=True,
                create_summary_artifact=False,
            )
            with caplog.at_level("INFO"):
                orchestrator.run_build()
            # The file lands alongside the manifest under target/.
            run_results_path = tmp_path / "target" / "run_results.json"
            assert run_results_path.exists()
            data = json.loads(run_results_path.read_text())
            assert len(data["results"]) == 1
            assert data["results"][0]["unique_id"] == "model.test.m1"
            assert data["results"][0]["status"] == "success"
            # The destination path should be logged.
            assert any(
                "run_results.json" in msg and str(run_results_path) in msg
                for msg in caplog.messages
            )

    def test_run_results_not_written_by_default(self, tmp_path):
        manifest_path = write_manifest(
            tmp_path,
            {"nodes": {}, "sources": {}},
        )
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser:
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {}
            parser_instance.all_nodes = {}
            parser_instance.adapter_type = None
            parser_instance.project_name = None
            parser_instance.compute_execution_waves.return_value = []
            # write_run_results is not passed, so it keeps its False default.
            orchestrator = PrefectDbtOrchestrator(
                settings=settings,
                executor=executor,
                manifest_path=manifest_path,
                create_summary_artifact=False,
            )
            orchestrator.run_build()
            assert not (tmp_path / "target" / "run_results.json").exists()
# ============================================================
# Orchestrator MaterializingTask integration (PER_NODE)
# ============================================================
class _ThreadDelegatingRunner:
"""Lightweight task runner stand-in for unit tests.
Delegates `submit()` to `task.submit()` (Prefect's default
thread-based execution) so that mock executors (which are
not picklable) work without a real process pool.
"""
def __init__(self, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, *args):
pass
def submit(self, task, parameters=None, wait_for=None):
return task.submit(**(parameters or {}))
class TestOrchestratorAssetTracking:
    """Verify MaterializingTask is used for asset-eligible nodes in PER_NODE mode."""

    def test_materializing_task_created_for_asset_nodes(self, tmp_path):
        """When adapter_type is available and nodes have relation_name,
        _build_asset_task should return a MaterializingTask."""
        manifest_path = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        # Patch both asset helpers so the test can observe whether the
        # orchestrator chose the asset-tracking code path.
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_asset_for_node"
            ) as mock_create_asset,
            patch(
                "prefect_dbt.core._orchestrator.get_upstream_assets_for_node"
            ) as mock_get_upstream,
        ):
            from prefect.assets import Asset, AssetProperties

            mock_asset = Asset(
                key="postgres://main/public/root",
                properties=AssetProperties(name="root"),
            )
            mock_create_asset.return_value = mock_asset
            mock_get_upstream.return_value = []
            root_node = _make_node(
                "model.test.root",
                "root",
                materialization="table",
            )
            # We need to set relation_name but _make_node doesn't support it.
            # Create a proper DbtNode instead.
            root_node = DbtNode(
                unique_id="model.test.root",
                name="root",
                resource_type=NodeType.Model,
                materialization="table",
                relation_name='"main"."public"."root"',
                description="Root model",
                original_file_path="models/root.sql",
            )
            # Single-node, single-wave execution plan.
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.root": root_node,
            }
            parser_instance.all_nodes = {
                "model.test.root": root_node,
            }
            parser_instance.adapter_type = "postgres"
            parser_instance.project_name = "test_project"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[root_node]),
            ]
            parser_instance.get_macro_paths.return_value = {}

            # PER_NODE mode requires a real flow context, so run inside one.
            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    create_summary_artifact=False,
                )
                return orchestrator.run_build()

            results = test_flow()
            assert results["model.test.root"]["status"] == "success"
            mock_create_asset.assert_called_once()

    def test_no_asset_for_nodes_without_relation_name(self, tmp_path):
        """Nodes without relation_name should NOT get MaterializingTask."""
        manifest = {
            "metadata": {"adapter_type": "postgres", "project_name": "test"},
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    # No relation_name!
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_asset_for_node"
            ) as mock_create_asset,
        ):
            # _make_node produces a node without relation_name.
            node = _make_node("model.test.m1", "m1")
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.m1": node,
            }
            parser_instance.all_nodes = {"model.test.m1": node}
            parser_instance.adapter_type = "postgres"
            parser_instance.project_name = "test"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[node]),
            ]
            parser_instance.get_macro_paths.return_value = {}

            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    create_summary_artifact=False,
                )
                return orchestrator.run_build()

            results = test_flow()
            assert results["model.test.m1"]["status"] == "success"
            # create_asset_for_node should NOT be called since no relation_name
            mock_create_asset.assert_not_called()

    def test_no_asset_without_adapter_type(self, tmp_path):
        """When adapter_type is None, no assets should be created."""
        manifest = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "relation_name": '"main"."public"."m1"',
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_asset_for_node"
            ) as mock_create_asset,
        ):
            # Node has a relation_name, but the parser reports no adapter type.
            node = DbtNode(
                unique_id="model.test.m1",
                name="m1",
                resource_type=NodeType.Model,
                materialization="table",
                relation_name='"main"."public"."m1"',
            )
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {"model.test.m1": node}
            parser_instance.all_nodes = {"model.test.m1": node}
            parser_instance.adapter_type = None  # No adapter type!
            parser_instance.project_name = None
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[node]),
            ]
            parser_instance.get_macro_paths.return_value = {}

            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    create_summary_artifact=False,
                )
                return orchestrator.run_build()

            results = test_flow()
            assert results["model.test.m1"]["status"] == "success"
            mock_create_asset.assert_not_called()
class TestOrchestratorDisableAssets:
    """Verify disable_assets flag suppresses asset creation."""

    def test_disable_assets_skips_materializing_task(self, tmp_path):
        """When disable_assets=True, create_asset_for_node should NOT be called
        even for nodes that would normally produce assets."""
        manifest_path = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_asset_for_node"
            ) as mock_create_asset,
        ):
            # Fully asset-eligible node: relation_name, description, file path.
            root_node = DbtNode(
                unique_id="model.test.root",
                name="root",
                resource_type=NodeType.Model,
                materialization="table",
                relation_name='"main"."public"."root"',
                description="Root model",
                original_file_path="models/root.sql",
            )
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.root": root_node,
            }
            parser_instance.all_nodes = {"model.test.root": root_node}
            parser_instance.adapter_type = "postgres"
            parser_instance.project_name = "test_project"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[root_node]),
            ]
            parser_instance.get_macro_paths.return_value = {}

            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    create_summary_artifact=False,
                    disable_assets=True,
                )
                return orchestrator.run_build()

            results = test_flow()
            assert results["model.test.root"]["status"] == "success"
            # disable_assets must win over asset eligibility.
            mock_create_asset.assert_not_called()

    def test_disable_assets_defaults_false(self, tmp_path):
        """When disable_assets is not set, assets should be created as usual."""
        manifest_path = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            patch(
                "prefect_dbt.core._orchestrator.create_asset_for_node"
            ) as mock_create_asset,
            patch(
                "prefect_dbt.core._orchestrator.get_upstream_assets_for_node"
            ) as mock_get_upstream,
        ):
            from prefect.assets import Asset, AssetProperties

            mock_asset = Asset(
                key="postgres://main/public/root",
                properties=AssetProperties(name="root"),
            )
            mock_create_asset.return_value = mock_asset
            mock_get_upstream.return_value = []
            root_node = DbtNode(
                unique_id="model.test.root",
                name="root",
                resource_type=NodeType.Model,
                materialization="table",
                relation_name='"main"."public"."root"',
                description="Root model",
                original_file_path="models/root.sql",
            )
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.root": root_node,
            }
            parser_instance.all_nodes = {"model.test.root": root_node}
            parser_instance.adapter_type = "postgres"
            parser_instance.project_name = "test_project"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[root_node]),
            ]
            parser_instance.get_macro_paths.return_value = {}

            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    create_summary_artifact=False,
                )
                return orchestrator.run_build()

            results = test_flow()
            assert results["model.test.root"]["status"] == "success"
            # Omitting disable_assets keeps the default (assets enabled).
            mock_create_asset.assert_called_once()
class TestOrchestratorCompiledCode:
    """Test include_compiled_code option."""
    def test_compiled_code_included_in_asset(self, tmp_path):
        """When include_compiled_code=True, compiled SQL should be in asset description."""
        manifest_path = write_manifest(tmp_path, MANIFEST_WITH_ASSETS)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node()
        with (
            patch("prefect_dbt.core._orchestrator.ManifestParser") as MockParser,
            # Stub out compiled-code retrieval; the test only checks that the
            # orchestrator asks for it, not how it is rendered.
            patch(
                "prefect_dbt.core._orchestrator.get_compiled_code_for_node",
                return_value="\n### Compiled code\n```sql\nSELECT 1\n```",
            ) as mock_compiled,
            patch(
                "prefect_dbt.core._orchestrator.get_upstream_assets_for_node",
                return_value=[],
            ),
        ):
            root_node = DbtNode(
                unique_id="model.test.root",
                name="root",
                resource_type=NodeType.Model,
                materialization="table",
                relation_name='"main"."public"."root"',
                description="Root model",
                original_file_path="models/root.sql",
            )
            # Single-node, single-wave plan through the mocked parser.
            parser_instance = MockParser.return_value
            parser_instance.filter_nodes.return_value = {
                "model.test.root": root_node,
            }
            parser_instance.all_nodes = {"model.test.root": root_node}
            parser_instance.adapter_type = "postgres"
            parser_instance.project_name = "test_project"
            parser_instance.compute_execution_waves.return_value = [
                MagicMock(nodes=[root_node]),
            ]
            parser_instance.get_macro_paths.return_value = {}
            @flow
            def test_flow():
                orchestrator = PrefectDbtOrchestrator(
                    settings=settings,
                    executor=executor,
                    manifest_path=manifest_path,
                    execution_mode=ExecutionMode.PER_NODE,
                    task_runner_type=_ThreadDelegatingRunner,
                    include_compiled_code=True,
                    create_summary_artifact=False,
                )
                return orchestrator.run_build()
            results = test_flow()
            assert results["model.test.root"]["status"] == "success"
            # The option must cause exactly one compiled-code lookup for the node.
            mock_compiled.assert_called_once()
# ============================================================
# ASSET_NODE_TYPES constant
# ============================================================
class TestAssetNodeTypes:
    """Sanity checks on the ASSET_NODE_TYPES constant."""
    def test_model_seed_snapshot_included(self):
        # Every materializable resource type must be treated as asset-producing.
        for node_type in (NodeType.Model, NodeType.Seed, NodeType.Snapshot):
            assert node_type in ASSET_NODE_TYPES
    def test_test_not_included(self):
        # Tests and sources do not materialize anything, so they never yield assets.
        for node_type in (NodeType.Test, NodeType.Source):
            assert node_type not in ASSET_NODE_TYPES
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_artifacts.py",
"license": "Apache License 2.0",
"lines": 1102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_test_strategies.py | """Tests for dbt test strategy execution in PrefectDbtOrchestrator."""
from unittest.mock import MagicMock, patch
import pytest
from conftest import (
_make_mock_executor,
_make_mock_executor_per_node,
_make_mock_settings,
write_manifest,
)
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.core._executor import DbtExecutor, ExecutionResult
from prefect_dbt.core._manifest import ManifestParser
from prefect_dbt.core._orchestrator import (
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
TestStrategy,
)
from prefect import flow
from prefect.task_runners import ThreadPoolTaskRunner
# -- Manifest data with tests -----------------------------------------------
# One model plus a single schema (data) test that depends on it.
SINGLE_MODEL_WITH_TEST = {
    "nodes": {
        "model.test.m1": {
            "name": "m1",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "test.test.not_null_m1_id": {
            "name": "not_null_m1_id",
            "resource_type": "test",
            "depends_on": {"nodes": ["model.test.m1"]},
            "config": {},
        },
    },
    "sources": {},
}
# One model plus a single unit test that depends on it.
# NOTE(review): not referenced in the visible portion of this file —
# presumably used by tests further down; confirm before removing.
SINGLE_MODEL_WITH_UNIT_TEST = {
    "nodes": {
        "model.test.m1": {
            "name": "m1",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "unit_test.test.ut_m1": {
            "name": "ut_m1",
            "resource_type": "unit_test",
            "depends_on": {"nodes": ["model.test.m1"]},
            "config": {},
        },
    },
    "sources": {},
}
# One model carrying both a schema test and a unit test.
# NOTE(review): likewise not referenced in the visible portion of this file.
MIXED_TEST_TYPES = {
    "nodes": {
        "model.test.m1": {
            "name": "m1",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "test.test.not_null_m1_id": {
            "name": "not_null_m1_id",
            "resource_type": "test",
            "depends_on": {"nodes": ["model.test.m1"]},
            "config": {},
        },
        "unit_test.test.ut_m1": {
            "name": "ut_m1",
            "resource_type": "unit_test",
            "depends_on": {"nodes": ["model.test.m1"]},
            "config": {},
        },
    },
    "sources": {},
}
# Manifest with no nodes at all — used where only constructor behavior matters.
EMPTY_MANIFEST = {"nodes": {}, "sources": {}}
# -- Fixtures ----------------------------------------------------------------
@pytest.fixture
def per_node_orch(tmp_path):
    """Factory for PER_NODE orchestrator with mock executor."""
    def _factory(manifest_data, *, executor=None, **kwargs):
        # Serialize the manifest dict to disk so the orchestrator can parse it.
        manifest = write_manifest(tmp_path, manifest_data)
        if executor is None:
            # Pop executor_kwargs so they are not forwarded to the orchestrator.
            executor = _make_mock_executor_per_node(**kwargs.pop("executor_kwargs", {}))
        defaults = {
            "settings": _make_mock_settings(),
            "manifest_path": manifest,
            "executor": executor,
            "execution_mode": ExecutionMode.PER_NODE,
            "task_runner_type": ThreadPoolTaskRunner,
        }
        # Caller-supplied kwargs override the defaults above.
        defaults.update(kwargs)
        # Return the executor too so tests can assert on its mock calls.
        return PrefectDbtOrchestrator(**defaults), executor
    return _factory
@pytest.fixture
def per_wave_orch(tmp_path):
    """Factory for PER_WAVE orchestrator with mock executor."""
    def _factory(manifest_data, *, executor=None, **kwargs):
        manifest = write_manifest(tmp_path, manifest_data)
        if executor is None:
            # Pop executor_kwargs so they are not forwarded to the orchestrator.
            executor = _make_mock_executor(**kwargs.pop("executor_kwargs", {}))
        defaults = {
            "settings": _make_mock_settings(),
            "manifest_path": manifest,
            "executor": executor,
            "execution_mode": ExecutionMode.PER_WAVE,
        }
        # Caller-supplied kwargs override the defaults above.
        defaults.update(kwargs)
        # Return the executor too so tests can assert on its mock calls.
        return PrefectDbtOrchestrator(**defaults), executor
    return _factory
# =============================================================================
# TestTestStrategyInit
# =============================================================================
class TestTestStrategyInit:
    """Constructor handling of the test_strategy argument."""
    @staticmethod
    def _build(tmp_path, **kwargs):
        # Shared construction path: empty manifest, mock settings and executor.
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        return PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
            **kwargs,
        )
    def test_default_is_immediate(self, tmp_path):
        assert self._build(tmp_path)._test_strategy == TestStrategy.IMMEDIATE
    def test_immediate_accepted(self, tmp_path):
        orch = self._build(tmp_path, test_strategy=TestStrategy.IMMEDIATE)
        assert orch._test_strategy == TestStrategy.IMMEDIATE
    def test_deferred_accepted(self, tmp_path):
        orch = self._build(tmp_path, test_strategy=TestStrategy.DEFERRED)
        assert orch._test_strategy == TestStrategy.DEFERRED
    def test_skip_accepted(self, tmp_path):
        orch = self._build(tmp_path, test_strategy=TestStrategy.SKIP)
        assert orch._test_strategy == TestStrategy.SKIP
    def test_invalid_raises_value_error(self, tmp_path):
        with pytest.raises(ValueError, match="Invalid test_strategy"):
            self._build(tmp_path, test_strategy="invalid")
    def test_string_value_accepted(self, tmp_path):
        """String enum values are auto-coerced."""
        orch = self._build(tmp_path, test_strategy="immediate")
        assert orch._test_strategy == TestStrategy.IMMEDIATE
# =============================================================================
# TestSkipStrategy
# =============================================================================
class TestSkipStrategy:
    """SKIP strategy: test nodes are excluded from execution entirely."""
    def test_per_wave_excludes_tests(self, per_wave_orch):
        """SKIP + PER_WAVE: test nodes do not appear in results."""
        orch, executor = per_wave_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.SKIP,
        )
        results = orch.run_build()
        assert "model.test.m1" in results
        assert "test.test.not_null_m1_id" not in results
    def test_per_node_excludes_tests(self, per_node_orch):
        """SKIP + PER_NODE: test nodes do not appear in results."""
        orch, executor = per_node_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.SKIP,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert "model.test.m1" in results
        assert "test.test.not_null_m1_id" not in results
    def test_executor_not_called_for_tests_per_node(self, per_node_orch):
        """SKIP: executor is only called for model, not test."""
        orch, executor = per_node_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.SKIP,
        )
        @flow
        def test_flow():
            return orch.run_build()
        test_flow()
        # Exactly one node execution: the model; the test was never scheduled.
        assert executor.execute_node.call_count == 1
# =============================================================================
# TestImmediatePerNode
# =============================================================================
class TestImmediatePerNode:
    """IMMEDIATE strategy in PER_NODE mode: tests run as soon as their
    parent models complete, and test failures skip downstream models."""
    def test_test_runs_after_parent_model(
        self, per_node_orch, diamond_with_tests_manifest_data
    ):
        """IMMEDIATE + PER_NODE: tests appear in results after their parent models."""
        orch, executor = per_node_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # All 4 models + 3 tests should be in results
        assert len(results) == 7
        for node_id in results:
            assert results[node_id]["status"] == "success"
        # Test on root ran after root
        root_completed = results["model.test.root"]["timing"]["completed_at"]
        test_root_started = results["test.test.not_null_root_id"]["timing"][
            "started_at"
        ]
        assert root_completed <= test_root_started
    def test_multi_model_test_waits_for_all_parents(
        self, per_node_orch, diamond_with_tests_manifest_data
    ):
        """IMMEDIATE + PER_NODE: multi-model test waits for all parents."""
        orch, _ = per_node_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # rel_leaf_to_left depends on leaf and left.
        # Both must complete before the test starts.
        leaf_completed = results["model.test.leaf"]["timing"]["completed_at"]
        left_completed = results["model.test.left"]["timing"]["completed_at"]
        test_started = results["test.test.rel_leaf_to_left"]["timing"]["started_at"]
        assert leaf_completed <= test_started
        assert left_completed <= test_started
    def test_uses_test_command(self, per_node_orch):
        """IMMEDIATE + PER_NODE: test nodes use the 'test' command."""
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert results["test.test.not_null_m1_id"]["invocation"]["command"] == "test"
    def test_test_failure_skips_downstream_models(self, per_node_orch):
        """IMMEDIATE: a test failure on root skips downstream leaf.
        Matches `dbt build` semantics: a failing test on model M causes
        all downstream dependents of M to be skipped.
        """
        # Use a simple chain: root -> leaf, test on root
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "test.test.not_null_root_id": {
                    "name": "not_null_root_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        # Configure the mock executor to fail only the test node.
        orch, _ = per_node_orch(
            data,
            test_strategy=TestStrategy.IMMEDIATE,
            executor_kwargs={
                "fail_nodes": {"test.test.not_null_root_id"},
                "error": RuntimeError("test failed"),
            },
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert results["model.test.root"]["status"] == "success"
        assert results["test.test.not_null_root_id"]["status"] == "error"
        # leaf is downstream of root whose test failed -> skipped
        assert results["model.test.leaf"]["status"] == "skipped"
        assert (
            "test.test.not_null_root_id"
            in results["model.test.leaf"]["failed_upstream"]
        )
    def test_test_failure_transitive_cascade(self, per_node_orch):
        """IMMEDIATE: test failure cascades transitively through the DAG.
        root -> mid -> leaf, test on root fails.
        Both mid and leaf should be skipped.
        """
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.mid": {
                    "name": "mid",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.mid"]},
                    "config": {"materialized": "table"},
                },
                "test.test.not_null_root_id": {
                    "name": "not_null_root_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        orch, _ = per_node_orch(
            data,
            test_strategy=TestStrategy.IMMEDIATE,
            executor_kwargs={
                "fail_nodes": {"test.test.not_null_root_id"},
                "error": RuntimeError("test failed"),
            },
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert results["model.test.root"]["status"] == "success"
        assert results["test.test.not_null_root_id"]["status"] == "error"
        assert results["model.test.mid"]["status"] == "skipped"
        assert results["model.test.leaf"]["status"] == "skipped"
        # All selected nodes must appear in results
        assert len(results) == 4
    def test_relationship_test_spanning_chain_no_cycle(self, per_node_orch):
        """IMMEDIATE: a relationship test spanning a parent-child chain does not cause cycles.
        DAG: root -> mid -> leaf
        Test: rel_leaf_to_root depends on (root, leaf)
        The augmentation must NOT add rel_leaf_to_root as a dependency of
        mid, because that would create mid -> rel_leaf_to_root -> leaf -> mid.
        """
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.mid": {
                    "name": "mid",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.mid"]},
                    "config": {"materialized": "table"},
                },
                "test.test.rel_leaf_to_root": {
                    "name": "rel_leaf_to_root",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root", "model.test.leaf"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        orch, _ = per_node_orch(
            data,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # All nodes should execute successfully — no cycle error
        assert results["model.test.root"]["status"] == "success"
        assert results["model.test.mid"]["status"] == "success"
        assert results["model.test.leaf"]["status"] == "success"
        assert results["test.test.rel_leaf_to_root"]["status"] == "success"
    def test_all_selected_nodes_in_results(self, per_node_orch):
        """IMMEDIATE: every selected node appears in results even on test failure."""
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "test.test.not_null_root_id": {
                    "name": "not_null_root_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        orch, _ = per_node_orch(
            data,
            test_strategy=TestStrategy.IMMEDIATE,
            executor_kwargs={
                "fail_nodes": {"test.test.not_null_root_id"},
                "error": RuntimeError("test failed"),
            },
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # Every node must be present — none silently dropped
        assert "model.test.root" in results
        assert "test.test.not_null_root_id" in results
        assert "model.test.leaf" in results
    def test_model_failure_skips_dependent_tests(self, per_node_orch):
        """IMMEDIATE: model failure skips tests that depend on that model."""
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
            executor_kwargs={
                "fail_nodes": {"model.test.m1"},
                "error": RuntimeError("model failed"),
            },
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert results["model.test.m1"]["status"] == "error"
        assert results["test.test.not_null_m1_id"]["status"] == "skipped"
        assert "model.test.m1" in results["test.test.not_null_m1_id"]["failed_upstream"]
# =============================================================================
# TestImmediatePerWave
# =============================================================================
class TestImmediatePerWave:
    """IMMEDIATE strategy in PER_WAVE mode: tests are interleaved into the
    wave schedule and test failures cascade to downstream waves."""
    def test_default_strategy_includes_tests(self, per_wave_orch):
        """Default (no test_strategy arg) includes tests (IMMEDIATE)."""
        orch, _ = per_wave_orch(SINGLE_MODEL_WITH_TEST)
        results = orch.run_build()
        assert "model.test.m1" in results
        assert "test.test.not_null_m1_id" in results
        assert results["test.test.not_null_m1_id"]["status"] == "success"
    def test_tests_interleaved_in_waves(
        self, per_wave_orch, diamond_with_tests_manifest_data
    ):
        """IMMEDIATE + PER_WAVE: tests appear in correct waves.
        With implicit test-to-downstream edges the wave order is:
        Wave 0: root
        Wave 1: not_null_root_id (test on root)
        Wave 2: left, right
        Wave 3: leaf
        Wave 4: not_null_leaf_id, rel_leaf_to_left (tests on leaf)
        """
        orch, _ = per_wave_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build()
        # All 7 nodes should succeed
        assert len(results) == 7
        for r in results.values():
            assert r["status"] == "success"
    def test_single_model_test_in_results(self, per_wave_orch):
        """IMMEDIATE + PER_WAVE: test node appears in results."""
        orch, _ = per_wave_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build()
        assert "model.test.m1" in results
        assert "test.test.not_null_m1_id" in results
        assert results["test.test.not_null_m1_id"]["status"] == "success"
    def test_test_failure_skips_downstream_models(self, tmp_path):
        """IMMEDIATE + PER_WAVE: test failure cascades to downstream models.
        Graph: root -> leaf, test on root.
        With implicit edges the test runs in a wave before leaf.
        When the test fails, leaf's wave is skipped.
        """
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "test.test.not_null_root_id": {
                    "name": "not_null_root_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        manifest = write_manifest(tmp_path, data)
        fail_nodes = {"test.test.not_null_root_id"}
        # Hand-rolled wave executor: succeeds unless the wave contains a
        # designated failing node, in which case it reports per-node artifacts.
        def _execute_wave(nodes, **kwargs):
            node_ids = [n.unique_id for n in nodes]
            has_failure = any(nid in fail_nodes for nid in node_ids)
            if has_failure:
                artifacts = {}
                for n in nodes:
                    if n.unique_id in fail_nodes:
                        artifacts[n.unique_id] = {"status": "fail"}
                    else:
                        artifacts[n.unique_id] = {"status": "success"}
                return ExecutionResult(
                    success=False,
                    node_ids=node_ids,
                    error=RuntimeError("test failed"),
                    artifacts=artifacts,
                )
            return ExecutionResult(
                success=True,
                node_ids=node_ids,
            )
        executor = MagicMock(spec=DbtExecutor)
        executor.execute_wave = MagicMock(side_effect=_execute_wave)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=executor,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build()
        assert results["model.test.root"]["status"] == "success"
        assert results["test.test.not_null_root_id"]["status"] == "error"
        assert results["model.test.leaf"]["status"] == "skipped"
        assert (
            "test.test.not_null_root_id"
            in results["model.test.leaf"]["failed_upstream"]
        )
    def test_all_selected_nodes_in_results(self, tmp_path):
        """IMMEDIATE + PER_WAVE: every selected node appears in results."""
        data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "test.test.not_null_root_id": {
                    "name": "not_null_root_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        manifest = write_manifest(tmp_path, data)
        fail_nodes = {"test.test.not_null_root_id"}
        # Same hand-rolled wave executor as above.
        def _execute_wave(nodes, **kwargs):
            node_ids = [n.unique_id for n in nodes]
            has_failure = any(nid in fail_nodes for nid in node_ids)
            if has_failure:
                artifacts = {}
                for n in nodes:
                    if n.unique_id in fail_nodes:
                        artifacts[n.unique_id] = {"status": "fail"}
                    else:
                        artifacts[n.unique_id] = {"status": "success"}
                return ExecutionResult(
                    success=False,
                    node_ids=node_ids,
                    error=RuntimeError("test failed"),
                    artifacts=artifacts,
                )
            return ExecutionResult(success=True, node_ids=node_ids)
        executor = MagicMock(spec=DbtExecutor)
        executor.execute_wave = MagicMock(side_effect=_execute_wave)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=executor,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build()
        # Every node must be present — none silently dropped
        assert "model.test.root" in results
        assert "test.test.not_null_root_id" in results
        assert "model.test.leaf" in results
# =============================================================================
# TestDeferredPerNode
# =============================================================================
class TestDeferredPerNode:
    """DEFERRED strategy in PER_NODE mode: all tests run after all models."""
    def test_tests_after_all_models(
        self, per_node_orch, diamond_with_tests_manifest_data
    ):
        """DEFERRED + PER_NODE: all tests run after all models complete."""
        orch, _ = per_node_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.DEFERRED,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert len(results) == 7
        # All models must complete before any test starts
        model_ids = [k for k in results if k.startswith("model.")]
        test_ids = [k for k in results if k.startswith("test.")]
        latest_model_completed = max(
            results[m]["timing"]["completed_at"] for m in model_ids
        )
        earliest_test_started = min(
            results[t]["timing"]["started_at"] for t in test_ids
        )
        assert latest_model_completed <= earliest_test_started
    def test_all_tests_parallel_in_deferred_wave(
        self, per_node_orch, diamond_with_tests_manifest_data
    ):
        """DEFERRED + PER_NODE: tests have no inter-dependencies, all in one wave."""
        orch, _ = per_node_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.DEFERRED,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        test_ids = [k for k in results if k.startswith("test.")]
        assert len(test_ids) == 3
        for tid in test_ids:
            assert results[tid]["status"] == "success"
# =============================================================================
# TestDeferredPerWave
# =============================================================================
class TestDeferredPerWave:
    """DEFERRED strategy in PER_WAVE mode: a single test wave follows the model waves."""
    def test_test_wave_after_model_waves(
        self, per_wave_orch, diamond_with_tests_manifest_data
    ):
        """DEFERRED + PER_WAVE: test wave comes after all model waves."""
        orch, executor = per_wave_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.DEFERRED,
        )
        results = orch.run_build()
        # All 7 nodes should succeed
        assert len(results) == 7
        for r in results.values():
            assert r["status"] == "success"
    def test_tests_in_results(self, per_wave_orch):
        """DEFERRED + PER_WAVE: test nodes appear in results."""
        orch, _ = per_wave_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.DEFERRED,
        )
        results = orch.run_build()
        assert "model.test.m1" in results
        assert "test.test.not_null_m1_id" in results
# =============================================================================
# TestTestNodeCaching
# =============================================================================
class TestTestNodeCaching:
    """Cache options must never be applied to test nodes."""
    @patch.object(PrefectDbtOrchestrator, "_build_cache_options_for_node")
    def test_tests_never_get_cache_policy(self, mock_cache_opts, per_node_orch):
        """Tests never get cache_policy even when caching is enabled.
        We verify by patching _build_cache_options_for_node and asserting
        it is only called for model nodes, never for test nodes.
        """
        mock_cache_opts.return_value = {"persist_result": True}
        orch, executor = per_node_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
            # Caching enabled so the cache-options path is actually exercised.
            cache=CacheConfig(
                result_storage="/tmp/test_results",
                key_storage="/tmp/test_keys",
            ),
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # Both nodes should succeed
        assert results["model.test.m1"]["status"] == "success"
        assert results["test.test.not_null_m1_id"]["status"] == "success"
        # _build_cache_options_for_node should only be called for the model
        assert mock_cache_opts.call_count == 1
        called_node = mock_cache_opts.call_args[0][0]
        assert called_node.resource_type == NodeType.Model
# =============================================================================
# TestTestsWithSelectors
# =============================================================================
class TestTestsWithSelectors:
    """Node selection: a test runs only when ALL of its parents are selected."""
    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_only_tests_with_all_parents_selected(
        self, mock_resolve, per_wave_orch, diamond_with_tests_manifest_data
    ):
        """Only tests whose parents are ALL in the selected set are included.
        Select root + left only. The test on root (not_null_root_id) should
        be included, but test on leaf (not_null_leaf_id) and the
        relationship test (rel_leaf_to_left) should be excluded because
        leaf is not selected.
        """
        mock_resolve.return_value = {
            "model.test.root",
            "model.test.left",
            # Include the test IDs too since dbt ls --resource-type all returns them
            "test.test.not_null_root_id",
            "test.test.not_null_leaf_id",
            "test.test.rel_leaf_to_left",
        }
        orch, _ = per_wave_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build(select="root left")
        # Models: root and left
        assert "model.test.root" in results
        assert "model.test.left" in results
        assert "model.test.right" not in results
        assert "model.test.leaf" not in results
        # Tests: only not_null_root_id (parent root is selected)
        assert "test.test.not_null_root_id" in results
        # not_null_leaf_id excluded: parent leaf not in executable nodes
        assert "test.test.not_null_leaf_id" not in results
        # rel_leaf_to_left excluded: parent leaf not in executable nodes
        assert "test.test.rel_leaf_to_left" not in results
    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_multi_model_test_included_when_all_parents_selected(
        self, mock_resolve, per_wave_orch
    ):
        """Multi-model test is included when all its parents are in executable set."""
        data = {
            "nodes": {
                "model.test.a": {
                    "name": "a",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.b": {
                    "name": "b",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "test.test.rel_a_b": {
                    "name": "rel_a_b",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.a", "model.test.b"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        # Both parents selected -> the relationship test qualifies.
        mock_resolve.return_value = {
            "model.test.a",
            "model.test.b",
            "test.test.rel_a_b",
        }
        orch, _ = per_wave_orch(data, test_strategy=TestStrategy.IMMEDIATE)
        results = orch.run_build(select="a b")
        assert "test.test.rel_a_b" in results
    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_multi_model_test_excluded_when_one_parent_missing(
        self, mock_resolve, per_wave_orch
    ):
        """Multi-model test excluded when one parent not selected."""
        data = {
            "nodes": {
                "model.test.a": {
                    "name": "a",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.b": {
                    "name": "b",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "test.test.rel_a_b": {
                    "name": "rel_a_b",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.a", "model.test.b"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        # Only select model a, not b
        mock_resolve.return_value = {
            "model.test.a",
            "test.test.rel_a_b",
        }
        orch, _ = per_wave_orch(data, test_strategy=TestStrategy.IMMEDIATE)
        results = orch.run_build(select="a")
        assert "model.test.a" in results
        assert "model.test.b" not in results
        # Test excluded because model.test.b not in executable nodes
        assert "test.test.rel_a_b" not in results
# =============================================================================
# TestIndirectSelectionSuppressed
# =============================================================================
class TestIndirectSelectionSuppressed:
    """Verify that PER_WAVE suppresses dbt's indirect test selection
    when the orchestrator is managing test scheduling.

    Fix: each test previously iterated ``executor.execute_wave.call_args_list``
    without checking it was non-empty, so a regression in which no waves were
    executed at all would have passed vacuously. Each test now asserts that
    ``execute_wave`` was called at least once before inspecting the calls.
    """
    def test_immediate_passes_indirect_selection_empty(self, per_wave_orch):
        """IMMEDIATE + PER_WAVE: execute_wave receives indirect_selection='empty'."""
        orch, executor = per_wave_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        orch.run_build()
        # Guard against a vacuous pass: at least one wave must have executed.
        assert executor.execute_wave.call_args_list
        for call_args in executor.execute_wave.call_args_list:
            # Accept either keyword or positional passing of indirect_selection.
            assert call_args.kwargs.get("indirect_selection") == "empty" or (
                len(call_args.args) > 2 and call_args.args[2] == "empty"
            )
    def test_deferred_passes_indirect_selection_empty(
        self, per_wave_orch, diamond_with_tests_manifest_data
    ):
        """DEFERRED + PER_WAVE: all waves receive indirect_selection='empty'."""
        orch, executor = per_wave_orch(
            diamond_with_tests_manifest_data,
            test_strategy=TestStrategy.DEFERRED,
        )
        orch.run_build()
        # Guard against a vacuous pass: at least one wave must have executed.
        assert executor.execute_wave.call_args_list
        for call_obj in executor.execute_wave.call_args_list:
            assert call_obj.kwargs.get("indirect_selection") == "empty"
    def test_skip_also_passes_indirect_selection_empty(self, per_wave_orch):
        """SKIP + PER_WAVE: indirect_selection='empty' suppresses implicit tests."""
        orch, executor = per_wave_orch(
            SINGLE_MODEL_WITH_TEST,
            test_strategy=TestStrategy.SKIP,
        )
        orch.run_build()
        # Guard against a vacuous pass: at least one wave must have executed.
        assert executor.execute_wave.call_args_list
        for call_obj in executor.execute_wave.call_args_list:
            assert call_obj.kwargs.get("indirect_selection") == "empty"
    def test_default_also_passes_indirect_selection_empty(self, per_wave_orch):
        """Default strategy (IMMEDIATE): indirect_selection='empty' suppresses implicit tests."""
        orch, executor = per_wave_orch(SINGLE_MODEL_WITH_TEST)
        orch.run_build()
        # Guard against a vacuous pass: at least one wave must have executed.
        assert executor.execute_wave.call_args_list
        for call_obj in executor.execute_wave.call_args_list:
            assert call_obj.kwargs.get("indirect_selection") == "empty"
# =============================================================================
# TestManifestParserTestNodes
# =============================================================================
class TestManifestParserTestNodes:
    """Unit tests for get_test_nodes() and filter_test_nodes()."""
    def test_get_test_nodes_returns_only_tests(self, tmp_path):
        """get_test_nodes() returns only test nodes, typed as NodeType.Test."""
        data = SINGLE_MODEL_WITH_TEST
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        test_nodes = parser.get_test_nodes()
        assert set(test_nodes.keys()) == {"test.test.not_null_m1_id"}
        assert test_nodes["test.test.not_null_m1_id"].resource_type == NodeType.Test
    def test_get_test_nodes_resolves_through_ephemeral(self, tmp_path):
        """Test depending on ephemeral -> model resolves to model."""
        # Graph: base (table) <- eph (ephemeral) <- not_null_eph_id (test)
        data = {
            "nodes": {
                "model.test.base": {
                    "name": "base",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.eph": {
                    "name": "eph",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.base"]},
                    "config": {"materialized": "ephemeral"},
                },
                "test.test.not_null_eph_id": {
                    "name": "not_null_eph_id",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.eph"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        test_nodes = parser.get_test_nodes()
        # Test should resolve through ephemeral to base
        assert test_nodes["test.test.not_null_eph_id"].depends_on == (
            "model.test.base",
        )
    def test_get_test_nodes_caches_result(self, tmp_path):
        """Repeated calls return the same object (identity check -> cached)."""
        data = SINGLE_MODEL_WITH_TEST
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        result1 = parser.get_test_nodes()
        result2 = parser.get_test_nodes()
        assert result1 is result2
    def test_filter_test_nodes_by_selected_ids(self, tmp_path):
        """Only tests listed in selected_node_ids survive filtering."""
        # Two tests (t1, t2) on the same model; only t1 is selected.
        data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "test.test.t1": {
                    "name": "t1",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.m1"]},
                    "config": {},
                },
                "test.test.t2": {
                    "name": "t2",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.m1"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        filtered = parser.filter_test_nodes(
            selected_node_ids={"test.test.t1"},
            executable_node_ids={"model.test.m1"},
        )
        assert set(filtered.keys()) == {"test.test.t1"}
    def test_filter_test_nodes_by_executable_ids(self, tmp_path):
        """A test is dropped when any of its parents is not executable."""
        # rel_a_b depends on both models a and b.
        data = {
            "nodes": {
                "model.test.a": {
                    "name": "a",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.b": {
                    "name": "b",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "test.test.rel_a_b": {
                    "name": "rel_a_b",
                    "resource_type": "test",
                    "depends_on": {"nodes": ["model.test.a", "model.test.b"]},
                    "config": {},
                },
            },
            "sources": {},
        }
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        # Only model.test.a is executable (b was filtered out)
        filtered = parser.filter_test_nodes(
            executable_node_ids={"model.test.a"},
        )
        # rel_a_b excluded because model.test.b not in executable set
        assert filtered == {}
    def test_filter_test_nodes_none_keeps_all(self, tmp_path):
        """selected_node_ids=None disables selection filtering entirely."""
        data = SINGLE_MODEL_WITH_TEST
        manifest = write_manifest(tmp_path, data)
        parser = ManifestParser(manifest)
        filtered = parser.filter_test_nodes(
            selected_node_ids=None,
            executable_node_ids={"model.test.m1"},
        )
        assert "test.test.not_null_m1_id" in filtered
    def test_empty_manifest_returns_no_tests(self, tmp_path):
        """An empty manifest yields an empty test-node mapping."""
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        parser = ManifestParser(manifest)
        assert parser.get_test_nodes() == {}
    def test_get_test_nodes_includes_unit_tests(self, tmp_path):
        """get_test_nodes() returns NodeType.Unit nodes alongside NodeType.Test."""
        manifest = write_manifest(tmp_path, MIXED_TEST_TYPES)
        parser = ManifestParser(manifest)
        test_nodes = parser.get_test_nodes()
        assert "test.test.not_null_m1_id" in test_nodes
        assert "unit_test.test.ut_m1" in test_nodes
        assert test_nodes["test.test.not_null_m1_id"].resource_type == NodeType.Test
        assert test_nodes["unit_test.test.ut_m1"].resource_type == NodeType.Unit
    def test_get_test_nodes_unit_test_only(self, tmp_path):
        """get_test_nodes() works when manifest has only unit_test nodes."""
        manifest = write_manifest(tmp_path, SINGLE_MODEL_WITH_UNIT_TEST)
        parser = ManifestParser(manifest)
        test_nodes = parser.get_test_nodes()
        assert set(test_nodes.keys()) == {"unit_test.test.ut_m1"}
        assert test_nodes["unit_test.test.ut_m1"].resource_type == NodeType.Unit
    def test_filter_test_nodes_includes_unit_tests(self, tmp_path):
        """filter_test_nodes() keeps unit_test nodes when parents are executable."""
        manifest = write_manifest(tmp_path, MIXED_TEST_TYPES)
        parser = ManifestParser(manifest)
        filtered = parser.filter_test_nodes(
            executable_node_ids={"model.test.m1"},
        )
        assert "test.test.not_null_m1_id" in filtered
        assert "unit_test.test.ut_m1" in filtered
# =============================================================================
# TestUnitTestNodes
# =============================================================================
class TestUnitTestNodes:
    """Tests for dbt unit_test (NodeType.Unit) support."""
    def test_immediate_per_node_includes_unit_tests(self, per_node_orch):
        """IMMEDIATE + PER_NODE: unit_test nodes appear in results."""
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        # PER_NODE execution is exercised inside a Prefect flow context.
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert "model.test.m1" in results
        assert "unit_test.test.ut_m1" in results
        assert results["unit_test.test.ut_m1"]["status"] == "success"
    def test_immediate_per_wave_includes_unit_tests(self, per_wave_orch):
        """IMMEDIATE + PER_WAVE: unit_test nodes appear in results."""
        orch, _ = per_wave_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        results = orch.run_build()
        assert "unit_test.test.ut_m1" in results
        assert results["unit_test.test.ut_m1"]["status"] == "success"
    def test_deferred_per_node_includes_unit_tests(self, per_node_orch):
        """DEFERRED + PER_NODE: unit_test nodes run after all models."""
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.DEFERRED,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert "unit_test.test.ut_m1" in results
        assert results["unit_test.test.ut_m1"]["status"] == "success"
        # Unit test must start after model completes
        model_completed = results["model.test.m1"]["timing"]["completed_at"]
        ut_started = results["unit_test.test.ut_m1"]["timing"]["started_at"]
        assert model_completed <= ut_started
    def test_skip_excludes_unit_tests(self, per_wave_orch):
        """SKIP: unit_test nodes do not appear in results."""
        orch, _ = per_wave_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.SKIP,
        )
        results = orch.run_build()
        assert "model.test.m1" in results
        assert "unit_test.test.ut_m1" not in results
    def test_unit_test_uses_test_command(self, per_node_orch):
        """PER_NODE: unit_test nodes use the 'test' command."""
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        assert results["unit_test.test.ut_m1"]["invocation"]["command"] == "test"
    def test_mixed_test_types_all_included(self, per_node_orch):
        """IMMEDIATE: both NodeType.Test and NodeType.Unit appear in results."""
        orch, _ = per_node_orch(
            MIXED_TEST_TYPES,
            test_strategy=TestStrategy.IMMEDIATE,
        )
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        # MIXED_TEST_TYPES contains one model, one schema test, one unit test.
        assert len(results) == 3
        assert results["model.test.m1"]["status"] == "success"
        assert results["test.test.not_null_m1_id"]["status"] == "success"
        assert results["unit_test.test.ut_m1"]["status"] == "success"
    @patch.object(PrefectDbtOrchestrator, "_build_cache_options_for_node")
    def test_unit_tests_never_cached(self, mock_cache_opts, per_node_orch):
        """Unit tests are excluded from caching like schema tests."""
        mock_cache_opts.return_value = {"persist_result": True}
        orch, _ = per_node_orch(
            SINGLE_MODEL_WITH_UNIT_TEST,
            test_strategy=TestStrategy.IMMEDIATE,
            cache=CacheConfig(
                result_storage="/tmp/test_results",
                key_storage="/tmp/test_keys",
            ),
        )
        @flow
        def test_flow():
            return orch.run_build()
        test_flow()
        # Only the model should trigger caching, not the unit test
        assert mock_cache_opts.call_count == 1
        called_node = mock_cache_opts.call_args[0][0]
        assert called_node.resource_type == NodeType.Model
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_test_strategies.py",
"license": "Apache License 2.0",
"lines": 1140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_freshness.py | """
Source freshness logic for per-node dbt orchestration.
This module provides:
- SourceFreshnessResult: Parsed result from dbt source freshness
- run_source_freshness: Run `dbt source freshness` and parse results
- get_source_ancestors: Walk the DAG to find source ancestors of a node
- compute_freshness_expiration: Compute cache expiration from upstream freshness
- filter_stale_nodes: Remove nodes with stale upstream sources
"""
import json
from collections import deque
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any
from dbt.artifacts.resources.types import NodeType
from dbt.cli.main import dbtRunner
from prefect.logging import get_logger
from prefect_dbt.core._manifest import DbtNode
logger = get_logger(__name__)
@dataclass(frozen=True)
class SourceFreshnessResult:
"""Parsed result for a single source from dbt's sources.json.
Attributes:
unique_id: Source unique_id (e.g. "source.project.raw.customers")
status: Freshness status ("pass", "warn", "error", "runtime error")
max_loaded_at: When the source was last loaded
snapshotted_at: When the freshness check ran
max_loaded_at_time_ago_in_s: Seconds since last load
warn_after: Threshold for warning status
error_after: Threshold for error status
"""
unique_id: str
status: str
max_loaded_at: datetime | None = None
snapshotted_at: datetime | None = None
max_loaded_at_time_ago_in_s: float | None = None
warn_after: timedelta | None = None
error_after: timedelta | None = None
def _period_to_timedelta(count: int, period: str) -> timedelta:
"""Convert a dbt freshness period specification to a timedelta.
Args:
count: Number of periods
period: Period type ("minute", "hour", "day")
Returns:
Corresponding timedelta
Raises:
ValueError: If the period type is unrecognized
"""
if period == "minute":
return timedelta(minutes=count)
elif period == "hour":
return timedelta(hours=count)
elif period == "day":
return timedelta(days=count)
raise ValueError(f"Unrecognized freshness period: {period!r}")
def _parse_threshold(threshold_data: dict[str, Any] | None) -> timedelta | None:
"""Parse a dbt freshness threshold dict into a timedelta."""
if not threshold_data:
return None
count = threshold_data.get("count")
period = threshold_data.get("period")
if count is None or period is None:
return None
return _period_to_timedelta(count, period)
def parse_source_freshness_results(
sources_json_path: Path,
) -> dict[str, SourceFreshnessResult]:
"""Parse dbt's sources.json into SourceFreshnessResult objects.
Args:
sources_json_path: Path to the sources.json file
Returns:
Dict mapping source unique_id to SourceFreshnessResult
"""
with open(sources_json_path) as f:
data = json.load(f)
results: dict[str, SourceFreshnessResult] = {}
for entry in data.get("results", []):
unique_id = entry.get("unique_id", "")
status = entry.get("status", "")
# Parse timestamps
max_loaded_at = None
snapshotted_at = None
max_loaded_at_time_ago_in_s = entry.get("max_loaded_at_time_ago_in_s")
max_loaded_at_str = entry.get("max_loaded_at")
if max_loaded_at_str:
try:
max_loaded_at = datetime.fromisoformat(
max_loaded_at_str.replace("Z", "+00:00")
)
except (ValueError, AttributeError):
logger.debug(
"Could not parse max_loaded_at timestamp %r for source %s; "
"freshness-based cache expiration will not apply to "
"downstream nodes of this source",
max_loaded_at_str,
unique_id,
)
snapshotted_at_str = entry.get("snapshotted_at")
if snapshotted_at_str:
try:
snapshotted_at = datetime.fromisoformat(
snapshotted_at_str.replace("Z", "+00:00")
)
except (ValueError, AttributeError):
logger.debug(
"Could not parse snapshotted_at timestamp %r for source %s; "
"elapsed-time adjustment will be skipped for this source",
snapshotted_at_str,
unique_id,
)
# Parse thresholds from criteria
criteria = entry.get("criteria", {})
warn_after = _parse_threshold(criteria.get("warn_after"))
error_after = _parse_threshold(criteria.get("error_after"))
results[unique_id] = SourceFreshnessResult(
unique_id=unique_id,
status=status,
max_loaded_at=max_loaded_at,
snapshotted_at=snapshotted_at,
max_loaded_at_time_ago_in_s=max_loaded_at_time_ago_in_s,
warn_after=warn_after,
error_after=error_after,
)
return results
def run_source_freshness(
    settings: Any,
    target_path: Path | None = None,
    target: str | None = None,
) -> dict[str, SourceFreshnessResult]:
    """Run `dbt source freshness` and parse the results.
    Returns an empty dict on failure (graceful degradation).
    Args:
        settings: PrefectDbtSettings instance
        target_path: Optional override for the target directory
        target: dbt target name (`--target` / `-t`)
    Returns:
        Dict mapping source unique_id to SourceFreshnessResult
    """
    # dbt writes sources.json under <project_dir>/<target_path>/.
    output_target = target_path or settings.target_path
    output_path = settings.project_dir / output_target / "sources.json"
    # Remove any pre-existing sources.json so a failed run cannot
    # silently reuse stale data from a previous invocation.
    if output_path.exists():
        output_path.unlink()
    # resolve_profiles_yml() yields a profiles dir usable for this invocation.
    with settings.resolve_profiles_yml() as resolved_profiles_dir:
        args = [
            "source",
            "freshness",
            "--project-dir",
            str(settings.project_dir),
            "--profiles-dir",
            resolved_profiles_dir,
        ]
        # Pin the target path so the output lands where we look for it.
        args.extend(["--target-path", str(output_target)])
        if target is not None:
            args.extend(["--target", target])
        try:
            dbtRunner().invoke(args)
        except Exception:
            # Deliberate best-effort: freshness is an optimization, so a
            # failing invocation must not break the build.
            logger.warning(
                "dbt source freshness command raised an exception",
                exc_info=True,
            )
            return {}
    # dbt source freshness returns success=False when sources are stale
    # but still writes sources.json. Only treat it as failure if the
    # output file was not written.
    if not output_path.exists():
        logger.warning(
            "dbt source freshness did not produce %s; "
            "freshness features will be disabled for this run",
            output_path,
        )
        return {}
    try:
        return parse_source_freshness_results(output_path)
    except Exception:
        # Same graceful degradation on unparseable output.
        logger.warning(
            "Failed to parse source freshness results from %s",
            output_path,
            exc_info=True,
        )
        return {}
def get_source_ancestors(
    node_id: str,
    all_nodes: "dict[str, DbtNode]",
) -> set[str]:
    """Walk the dependency graph to find all Source ancestors of a node.

    Traces through ephemeral models and all intermediate nodes to find
    every source that feeds into the given node. Uses an explicit stack
    instead of recursion so very deep DAGs cannot hit Python's recursion
    limit.

    Args:
        node_id: The unique_id of the node to find ancestors for
        all_nodes: All parsed nodes including sources and ephemeral models

    Returns:
        Set of unique_ids for source ancestors
    """
    sources: set[str] = set()
    root = all_nodes.get(node_id)
    if root is None:
        return sources
    # Iterative DFS starting from the node's direct dependencies
    # (the node itself is never classified as its own ancestor).
    stack: list[str] = list(root.depends_on)
    visited: set[str] = set()
    while stack:
        current_id = stack.pop()
        if current_id in visited:
            continue
        visited.add(current_id)
        node = all_nodes.get(current_id)
        if node is None:
            continue
        if node.resource_type == NodeType.Source:
            # Sources terminate the walk; their own deps are not traversed.
            sources.add(current_id)
            continue
        stack.extend(node.depends_on)
    return sources
def compute_freshness_expiration(
    node_id: str,
    all_nodes: dict[str, DbtNode],
    freshness_results: dict[str, SourceFreshnessResult],
) -> timedelta | None:
    """Compute cache expiration for a node based on upstream source freshness.

    For each upstream source with freshness data, the remaining time
    before its threshold is reached is computed, preferring `warn_after`
    and falling back to `error_after`. Elapsed time since the freshness
    check ran (`snapshotted_at`) is deducted so later-wave nodes receive
    accurate TTLs. The minimum over all sources is returned so the cache
    expires as soon as the first source goes stale.

    Args:
        node_id: The node to compute expiration for
        all_nodes: All parsed nodes including sources
        freshness_results: Parsed source freshness results

    Returns:
        Minimum remaining time before any upstream source goes stale,
        or None if no freshness data is available
    """
    upstream_sources = get_source_ancestors(node_id, all_nodes)
    if not upstream_sources:
        return None

    check_time = datetime.now(timezone.utc)
    candidates: list[timedelta] = []
    for source_id in upstream_sources:
        result = freshness_results.get(source_id)
        if result is None:
            continue
        if result.warn_after is not None:
            threshold = result.warn_after
        else:
            threshold = result.error_after
        if threshold is None or result.max_loaded_at_time_ago_in_s is None:
            continue
        age = timedelta(seconds=result.max_loaded_at_time_ago_in_s)
        if result.snapshotted_at is not None:
            # The source has aged further since the freshness check ran;
            # fold that elapsed time into its effective age.
            age += check_time - result.snapshotted_at
        # Clamp at zero: an already-stale source expires the cache now.
        candidates.append(max(threshold - age, timedelta(0)))
    return min(candidates) if candidates else None
def filter_stale_nodes(
    nodes: dict[str, DbtNode],
    all_nodes: dict[str, DbtNode],
    freshness_results: dict[str, SourceFreshnessResult],
) -> tuple[dict[str, DbtNode], dict[str, Any]]:
    """Remove nodes whose upstream sources are stale.

    A source counts as stale when its freshness status is "error" or
    "runtime error". Any node that depends (directly or transitively) on
    a stale source is dropped, along with everything downstream of it.

    Args:
        nodes: Executable nodes to filter (from ManifestParser)
        all_nodes: All parsed nodes including sources and ephemeral models
        freshness_results: Parsed source freshness results

    Returns:
        Tuple of (remaining_nodes, skipped_results) where skipped_results
        maps node unique_id to a result dict with status="skipped" and
        a reason indicating the stale source.
    """
    stale_sources = {
        source_id
        for source_id, result in freshness_results.items()
        if result.status in ("error", "runtime error")
    }
    if not stale_sources:
        # Nothing stale; hand back the input untouched.
        return nodes, {}

    # Pass 1: nodes whose source ancestry contains a stale source.
    direct_reasons: dict[str, str] = {}
    for node_id in nodes:
        bad_sources = get_source_ancestors(node_id, all_nodes) & stale_sources
        if bad_sources:
            direct_reasons[node_id] = (
                f"stale source: {', '.join(sorted(bad_sources))}"
            )

    # Pass 2: cascade to everything downstream of a directly-stale node.
    # Build a child map restricted to the executable node set.
    children: dict[str, list[str]] = {nid: [] for nid in nodes}
    for nid, node in nodes.items():
        for parent_id in node.depends_on:
            if parent_id in children:
                children[parent_id].append(nid)

    skipped_ids = set(direct_reasons)
    cascade_reasons: dict[str, str] = {}
    frontier = deque(direct_reasons)
    while frontier:
        current_id = frontier.popleft()
        for child_id in children.get(current_id, []):
            if child_id in skipped_ids:
                continue
            skipped_ids.add(child_id)
            cascade_reasons[child_id] = "upstream skipped due to stale source"
            frontier.append(child_id)

    remaining = {
        nid: node for nid, node in nodes.items() if nid not in skipped_ids
    }
    # Direct staleness takes precedence over cascade in the reported reason.
    skipped: dict[str, Any] = {
        nid: {
            "status": "skipped",
            "reason": direct_reasons.get(nid)
            or cascade_reasons.get(nid, "stale source"),
        }
        for nid in skipped_ids
    }
    return remaining, skipped
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_freshness.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_freshness.py | """Unit tests for the source freshness module."""
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from conftest import (
_make_node,
_make_source_node,
write_manifest,
write_sources_json,
)
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.core._freshness import (
SourceFreshnessResult,
_period_to_timedelta,
compute_freshness_expiration,
filter_stale_nodes,
get_source_ancestors,
parse_source_freshness_results,
run_source_freshness,
)
from prefect_dbt.core._manifest import DbtNode
class TestPeriodToTimedelta:
    """Unit tests for _period_to_timedelta period-name handling."""

    def test_minute(self):
        """'minute' maps to timedelta minutes."""
        assert _period_to_timedelta(5, "minute") == timedelta(seconds=300)

    def test_hour(self):
        """'hour' maps to timedelta hours."""
        assert _period_to_timedelta(12, "hour") == timedelta(seconds=12 * 3600)

    def test_day(self):
        """'day' maps to timedelta days."""
        assert _period_to_timedelta(3, "day") == timedelta(hours=72)

    def test_unknown_period_raises(self):
        """An unrecognized period name raises ValueError."""
        with pytest.raises(ValueError, match="Unrecognized freshness period"):
            _period_to_timedelta(1, "week")

    def test_zero_count(self):
        """A zero count yields a zero timedelta."""
        assert _period_to_timedelta(0, "hour") == timedelta()
class TestParseSourceFreshnessResults:
    """Unit tests for parse_source_freshness_results() over sources.json."""
    def test_basic_pass(self, tmp_path):
        """A fully-populated passing entry parses timestamps and thresholds."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.users",
                    "status": "pass",
                    "max_loaded_at": "2024-01-15T10:00:00Z",
                    "snapshotted_at": "2024-01-15T12:00:00Z",
                    "max_loaded_at_time_ago_in_s": 7200.0,
                    "criteria": {
                        "warn_after": {"count": 12, "period": "hour"},
                        "error_after": {"count": 24, "period": "hour"},
                    },
                }
            ],
        )
        results = parse_source_freshness_results(path)
        assert len(results) == 1
        r = results["source.proj.raw.users"]
        assert r.status == "pass"
        assert r.max_loaded_at_time_ago_in_s == 7200.0
        assert r.warn_after == timedelta(hours=12)
        assert r.error_after == timedelta(hours=24)
        assert r.max_loaded_at is not None
        assert r.snapshotted_at is not None
    def test_error_status(self, tmp_path):
        """The 'error' status is carried through unchanged."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.events",
                    "status": "error",
                    "max_loaded_at": "2024-01-01T00:00:00Z",
                    "snapshotted_at": "2024-01-15T12:00:00Z",
                    "max_loaded_at_time_ago_in_s": 1252800.0,
                    "criteria": {
                        "warn_after": {"count": 1, "period": "day"},
                        "error_after": {"count": 2, "period": "day"},
                    },
                }
            ],
        )
        results = parse_source_freshness_results(path)
        assert results["source.proj.raw.events"].status == "error"
    def test_multiple_sources(self, tmp_path):
        """Every entry in the results list yields its own parsed record."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.a",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 100.0,
                    "criteria": {},
                },
                {
                    "unique_id": "source.proj.raw.b",
                    "status": "warn",
                    "max_loaded_at_time_ago_in_s": 200.0,
                    "criteria": {},
                },
            ],
        )
        results = parse_source_freshness_results(path)
        assert len(results) == 2
        assert "source.proj.raw.a" in results
        assert "source.proj.raw.b" in results
    def test_no_criteria(self, tmp_path):
        """Empty criteria leaves both thresholds as None."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.x",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 50.0,
                    "criteria": {},
                }
            ],
        )
        results = parse_source_freshness_results(path)
        r = results["source.proj.raw.x"]
        assert r.warn_after is None
        assert r.error_after is None
    def test_empty_results(self, tmp_path):
        """An empty results list parses to an empty dict."""
        path = write_sources_json(tmp_path, [])
        results = parse_source_freshness_results(path)
        assert results == {}
    def test_runtime_error_status(self, tmp_path):
        """The 'runtime error' status is preserved verbatim."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.broken",
                    "status": "runtime error",
                    "criteria": {},
                }
            ],
        )
        results = parse_source_freshness_results(path)
        assert results["source.proj.raw.broken"].status == "runtime error"
    def test_missing_timestamps_handled(self, tmp_path):
        """Absent timestamp fields degrade to None rather than raising."""
        path = write_sources_json(
            tmp_path,
            [
                {
                    "unique_id": "source.proj.raw.no_ts",
                    "status": "pass",
                    "criteria": {},
                }
            ],
        )
        results = parse_source_freshness_results(path)
        r = results["source.proj.raw.no_ts"]
        assert r.max_loaded_at is None
        assert r.snapshotted_at is None
        assert r.max_loaded_at_time_ago_in_s is None
class TestGetSourceAncestors:
    """Unit tests for get_source_ancestors() DAG traversal."""
    def _build_all_nodes(self) -> dict[str, DbtNode]:
        """Build a simple graph: source -> staging -> mart."""
        return {
            "source.test.raw.customers": _make_source_node(
                unique_id="source.test.raw.customers",
                name="customers",
            ),
            "source.test.raw.orders": _make_source_node(
                unique_id="source.test.raw.orders",
                name="orders",
            ),
            "model.test.stg_customers": _make_node(
                unique_id="model.test.stg_customers",
                name="stg_customers",
                depends_on=("source.test.raw.customers",),
                materialization="view",
            ),
            "model.test.stg_orders": _make_node(
                unique_id="model.test.stg_orders",
                name="stg_orders",
                depends_on=("source.test.raw.orders",),
                materialization="view",
            ),
            "model.test.mart": _make_node(
                unique_id="model.test.mart",
                name="mart",
                depends_on=(
                    "model.test.stg_customers",
                    "model.test.stg_orders",
                ),
                materialization="table",
            ),
        }
    def test_direct_source_dependency(self):
        """A staging model's only ancestor is its direct source."""
        all_nodes = self._build_all_nodes()
        ancestors = get_source_ancestors("model.test.stg_customers", all_nodes)
        assert ancestors == {"source.test.raw.customers"}
    def test_transitive_source_through_models(self):
        """A mart collects sources reached through intermediate models."""
        all_nodes = self._build_all_nodes()
        ancestors = get_source_ancestors("model.test.mart", all_nodes)
        assert ancestors == {
            "source.test.raw.customers",
            "source.test.raw.orders",
        }
    def test_no_sources(self):
        """A model-only chain yields an empty ancestor set."""
        all_nodes = {
            "model.test.a": _make_node(
                unique_id="model.test.a", name="a", depends_on=()
            ),
            "model.test.b": _make_node(
                unique_id="model.test.b",
                name="b",
                depends_on=("model.test.a",),
            ),
        }
        ancestors = get_source_ancestors("model.test.b", all_nodes)
        assert ancestors == set()
    def test_ephemeral_chain(self):
        """Source -> ephemeral -> model should trace through ephemeral."""
        all_nodes = {
            "source.test.raw.data": _make_source_node(
                unique_id="source.test.raw.data",
                name="data",
            ),
            "model.test.ephemeral_step": DbtNode(
                unique_id="model.test.ephemeral_step",
                name="ephemeral_step",
                resource_type=NodeType.Model,
                depends_on=("source.test.raw.data",),
                materialization="ephemeral",
            ),
            "model.test.final": _make_node(
                unique_id="model.test.final",
                name="final",
                depends_on=("model.test.ephemeral_step",),
                materialization="table",
            ),
        }
        ancestors = get_source_ancestors("model.test.final", all_nodes)
        assert ancestors == {"source.test.raw.data"}
    def test_diamond_graph(self):
        """Diamond: source -> A, B -> C. C should find the single source."""
        all_nodes = {
            "source.test.raw.s": _make_source_node(
                unique_id="source.test.raw.s", name="s"
            ),
            "model.test.a": _make_node(
                unique_id="model.test.a",
                name="a",
                depends_on=("source.test.raw.s",),
            ),
            "model.test.b": _make_node(
                unique_id="model.test.b",
                name="b",
                depends_on=("source.test.raw.s",),
            ),
            "model.test.c": _make_node(
                unique_id="model.test.c",
                name="c",
                depends_on=("model.test.a", "model.test.b"),
            ),
        }
        ancestors = get_source_ancestors("model.test.c", all_nodes)
        assert ancestors == {"source.test.raw.s"}
    def test_unknown_node(self):
        """An unknown node id yields an empty set rather than raising."""
        ancestors = get_source_ancestors("model.test.nonexistent", {})
        assert ancestors == set()
class TestComputeFreshnessExpiration:
def _build_graph_and_results(
self, time_ago_seconds: float, warn_hours: int = 12
) -> tuple[dict[str, DbtNode], dict[str, SourceFreshnessResult]]:
all_nodes = {
"source.test.raw.s": _make_source_node(
unique_id="source.test.raw.s", name="s"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.s",),
),
}
freshness_results = {
"source.test.raw.s": SourceFreshnessResult(
unique_id="source.test.raw.s",
status="pass",
max_loaded_at_time_ago_in_s=time_ago_seconds,
warn_after=timedelta(hours=warn_hours),
),
}
return all_nodes, freshness_results
def test_within_threshold(self):
# 2 hours ago, warn at 12 hours -> 10 hours remaining
all_nodes, results = self._build_graph_and_results(7200.0, 12)
exp = compute_freshness_expiration("model.test.m", all_nodes, results)
assert exp == timedelta(hours=12) - timedelta(seconds=7200)
def test_past_threshold_clamps_to_zero(self):
# 24 hours ago, warn at 12 hours -> 0 remaining
all_nodes, results = self._build_graph_and_results(86400.0, 12)
exp = compute_freshness_expiration("model.test.m", all_nodes, results)
assert exp == timedelta(0)
def test_multiple_sources_takes_min(self):
all_nodes = {
"source.test.raw.a": _make_source_node(
unique_id="source.test.raw.a", name="a"
),
"source.test.raw.b": _make_source_node(
unique_id="source.test.raw.b", name="b"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.a", "source.test.raw.b"),
),
}
freshness_results = {
"source.test.raw.a": SourceFreshnessResult(
unique_id="source.test.raw.a",
status="pass",
max_loaded_at_time_ago_in_s=3600.0, # 1 hour ago
warn_after=timedelta(hours=6), # 5 hours remaining
),
"source.test.raw.b": SourceFreshnessResult(
unique_id="source.test.raw.b",
status="pass",
max_loaded_at_time_ago_in_s=3600.0, # 1 hour ago
warn_after=timedelta(hours=3), # 2 hours remaining
),
}
exp = compute_freshness_expiration("model.test.m", all_nodes, freshness_results)
# Should be min(5h, 2h) = 2h
assert exp == timedelta(hours=3) - timedelta(seconds=3600)
def test_no_freshness_data_returns_none(self):
all_nodes = {
"source.test.raw.s": _make_source_node(
unique_id="source.test.raw.s", name="s"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.s",),
),
}
exp = compute_freshness_expiration("model.test.m", all_nodes, {})
assert exp is None
def test_no_source_ancestors_returns_none(self):
all_nodes = {
"model.test.a": _make_node(
unique_id="model.test.a", name="a", depends_on=()
),
}
exp = compute_freshness_expiration("model.test.a", all_nodes, {})
assert exp is None
def test_source_without_any_threshold_skipped(self):
all_nodes = {
"source.test.raw.s": _make_source_node(
unique_id="source.test.raw.s", name="s"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.s",),
),
}
freshness_results = {
"source.test.raw.s": SourceFreshnessResult(
unique_id="source.test.raw.s",
status="pass",
max_loaded_at_time_ago_in_s=100.0,
warn_after=None,
error_after=None,
),
}
exp = compute_freshness_expiration("model.test.m", all_nodes, freshness_results)
assert exp is None
def test_falls_back_to_error_after_when_no_warn_after(self):
"""Sources with only error_after still drive cache expiration."""
all_nodes = {
"source.test.raw.s": _make_source_node(
unique_id="source.test.raw.s", name="s"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.s",),
),
}
freshness_results = {
"source.test.raw.s": SourceFreshnessResult(
unique_id="source.test.raw.s",
status="pass",
max_loaded_at_time_ago_in_s=3600.0, # 1 hour ago
warn_after=None,
error_after=timedelta(hours=6),
),
}
exp = compute_freshness_expiration("model.test.m", all_nodes, freshness_results)
assert exp == timedelta(hours=6) - timedelta(seconds=3600)
def test_zero_warn_after_uses_warn_not_error(self):
"""warn_after=timedelta(0) is valid and must not fall back to error_after."""
all_nodes = {
"source.test.raw.s": _make_source_node(
unique_id="source.test.raw.s", name="s"
),
"model.test.m": _make_node(
unique_id="model.test.m",
name="m",
depends_on=("source.test.raw.s",),
),
}
freshness_results = {
"source.test.raw.s": SourceFreshnessResult(
unique_id="source.test.raw.s",
status="pass",
max_loaded_at_time_ago_in_s=60.0,
warn_after=timedelta(0),
error_after=timedelta(hours=24),
),
}
exp = compute_freshness_expiration("model.test.m", all_nodes, freshness_results)
# Should use warn_after=0, not error_after=24h; clamped to zero
assert exp == timedelta(0)
    def test_subtracts_elapsed_time_since_snapshotted_at(self):
        """Elapsed time since the freshness check is deducted from remaining TTL."""
        # Minimal graph: one source feeding one model.
        all_nodes = {
            "source.test.raw.s": _make_source_node(
                unique_id="source.test.raw.s", name="s"
            ),
            "model.test.m": _make_node(
                unique_id="model.test.m",
                name="m",
                depends_on=("source.test.raw.s",),
            ),
        }
        # Source was 1 hour old when checked 30 minutes ago; warn at 6 hours.
        # Without elapsed adjustment: 6h - 1h = 5h remaining.
        # With elapsed adjustment: 6h - 1h - 0.5h = 4.5h remaining.
        snapshotted = datetime.now(timezone.utc) - timedelta(minutes=30)
        freshness_results = {
            "source.test.raw.s": SourceFreshnessResult(
                unique_id="source.test.raw.s",
                status="pass",
                max_loaded_at_time_ago_in_s=3600.0,
                warn_after=timedelta(hours=6),
                snapshotted_at=snapshotted,
            ),
        }
        exp = compute_freshness_expiration("model.test.m", all_nodes, freshness_results)
        assert exp is not None
        # Allow 5 seconds tolerance for test execution time
        assert abs(exp - timedelta(hours=4, minutes=30)) < timedelta(seconds=5)
class TestFilterStaleNodes:
    """filter_stale_nodes removes executable nodes whose upstream sources
    failed freshness, cascading the removal to transitive dependents."""
    def _build_graph(self):
        """Source -> stg -> mart, plus an independent model."""
        all_nodes = {
            "source.test.raw.customers": _make_source_node(
                unique_id="source.test.raw.customers", name="customers"
            ),
            "source.test.raw.orders": _make_source_node(
                unique_id="source.test.raw.orders", name="orders"
            ),
            "model.test.stg_customers": _make_node(
                unique_id="model.test.stg_customers",
                name="stg_customers",
                depends_on=("source.test.raw.customers",),
                materialization="view",
            ),
            "model.test.stg_orders": _make_node(
                unique_id="model.test.stg_orders",
                name="stg_orders",
                depends_on=("source.test.raw.orders",),
                materialization="view",
            ),
            "model.test.mart": _make_node(
                unique_id="model.test.mart",
                name="mart",
                depends_on=(
                    "model.test.stg_customers",
                    "model.test.stg_orders",
                ),
                materialization="table",
            ),
            "model.test.independent": _make_node(
                unique_id="model.test.independent",
                name="independent",
                depends_on=(),
                materialization="table",
            ),
        }
        # Executable nodes (no sources)
        nodes = {
            k: v for k, v in all_nodes.items() if v.resource_type != NodeType.Source
        }
        return nodes, all_nodes
    def test_no_stale_sources_returns_all(self):
        """All sources fresh -> nothing filtered, nothing skipped."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="pass"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="pass"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        assert remaining == nodes
        assert skipped == {}
    def test_stale_source_removes_dependent(self):
        """A model directly downstream of a stale source is skipped with a reason."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="error"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="pass"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        # stg_customers depends on stale source -> removed
        assert "model.test.stg_customers" not in remaining
        assert "model.test.stg_customers" in skipped
        assert skipped["model.test.stg_customers"]["status"] == "skipped"
        assert "stale source" in skipped["model.test.stg_customers"]["reason"]
        # stg_orders is fine -> kept
        assert "model.test.stg_orders" in remaining
    def test_downstream_cascade(self):
        """Skipping propagates transitively: mart is removed via stg_customers."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="error"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="pass"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        # mart depends on stg_customers (stale) -> cascaded removal
        assert "model.test.mart" not in remaining
        assert "model.test.mart" in skipped
    def test_independent_nodes_unaffected(self):
        """Nodes with no source ancestry survive even when all sources are stale."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="error"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="error"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        # independent has no source dependencies -> still present
        assert "model.test.independent" in remaining
        assert "model.test.independent" not in skipped
    def test_runtime_error_treated_as_stale(self):
        """A 'runtime error' freshness status also marks the source stale."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="runtime error"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="pass"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        assert "model.test.stg_customers" in skipped
    def test_empty_freshness_results_returns_all(self):
        """No freshness data at all -> no filtering."""
        nodes, all_nodes = self._build_graph()
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, {})
        assert remaining == nodes
        assert skipped == {}
    def test_all_sources_stale(self):
        """Every source stale -> only the source-free node remains."""
        nodes, all_nodes = self._build_graph()
        freshness = {
            "source.test.raw.customers": SourceFreshnessResult(
                unique_id="source.test.raw.customers", status="error"
            ),
            "source.test.raw.orders": SourceFreshnessResult(
                unique_id="source.test.raw.orders", status="error"
            ),
        }
        remaining, skipped = filter_stale_nodes(nodes, all_nodes, freshness)
        # Only independent should remain
        assert set(remaining.keys()) == {"model.test.independent"}
        # All source-dependent nodes should be skipped
        assert "model.test.stg_customers" in skipped
        assert "model.test.stg_orders" in skipped
        assert "model.test.mart" in skipped
class TestRunSourceFreshness:
    """run_source_freshness behavior with dbtRunner fully mocked: success
    parsing, graceful degradation, and stale-artifact cleanup."""
    def _make_settings(self, tmp_path):
        """Build a MagicMock settings object exposing project/target paths and a
        context-manager-style resolve_profiles_yml (as the real settings do)."""
        settings = MagicMock()
        settings.project_dir = tmp_path
        settings.target_path = Path("target")
        settings.resolve_profiles_yml = MagicMock()
        # resolve_profiles_yml() is used via `with ...`; stub __enter__/__exit__.
        settings.resolve_profiles_yml.return_value.__enter__ = MagicMock(
            return_value="/profiles"
        )
        settings.resolve_profiles_yml.return_value.__exit__ = MagicMock(
            return_value=False
        )
        return settings
    def test_successful_invocation(self, tmp_path):
        """run_source_freshness invokes dbtRunner and parses results."""
        settings = self._make_settings(tmp_path)
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        # Simulate dbtRunner writing sources.json on invoke
        def fake_invoke(args):
            write_sources_json(
                target_dir,
                [
                    {
                        "unique_id": "source.proj.raw.users",
                        "status": "pass",
                        "max_loaded_at_time_ago_in_s": 100.0,
                        "criteria": {
                            "warn_after": {"count": 12, "period": "hour"},
                        },
                    }
                ],
            )
            return MagicMock(success=True)
        with patch("prefect_dbt.core._freshness.dbtRunner") as mock_runner_cls:
            mock_runner = MagicMock()
            mock_runner.invoke.side_effect = fake_invoke
            mock_runner_cls.return_value = mock_runner
            results = run_source_freshness(settings)
        assert "source.proj.raw.users" in results
        assert results["source.proj.raw.users"].status == "pass"
    def test_graceful_degradation_on_exception(self, tmp_path):
        """Returns empty dict when dbt source freshness raises."""
        settings = self._make_settings(tmp_path)
        with patch("prefect_dbt.core._freshness.dbtRunner") as mock_runner_cls:
            mock_runner = MagicMock()
            mock_runner.invoke.side_effect = RuntimeError("dbt crashed")
            mock_runner_cls.return_value = mock_runner
            results = run_source_freshness(settings)
        assert results == {}
    def test_graceful_degradation_when_no_output(self, tmp_path):
        """Returns empty dict when sources.json is not produced."""
        settings = self._make_settings(tmp_path)
        with patch("prefect_dbt.core._freshness.dbtRunner") as mock_runner_cls:
            mock_runner = MagicMock()
            mock_runner.invoke.return_value = MagicMock(success=False)
            mock_runner_cls.return_value = mock_runner
            results = run_source_freshness(settings)
        assert results == {}
    def test_stale_sources_json_not_reused_on_failure(self, tmp_path):
        """Pre-existing sources.json is deleted so failures don't reuse stale data."""
        settings = self._make_settings(tmp_path)
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        # Write a pre-existing sources.json from a previous run
        write_sources_json(
            target_dir,
            [
                {
                    "unique_id": "source.proj.raw.old",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 50.0,
                    "criteria": {},
                }
            ],
        )
        assert (target_dir / "sources.json").exists()
        # dbtRunner raises -- should NOT fall back to the old file
        with patch("prefect_dbt.core._freshness.dbtRunner") as mock_runner_cls:
            mock_runner = MagicMock()
            mock_runner.invoke.side_effect = RuntimeError("adapter error")
            mock_runner_cls.return_value = mock_runner
            results = run_source_freshness(settings)
        assert results == {}
        assert not (target_dir / "sources.json").exists()
class TestOrchestratorFreshnessIntegration:
    """Test freshness features via the orchestrator with mocked dbt execution."""
    def _make_freshness_results(self, sources_json_path):
        """Helper to parse freshness results from a sources.json file."""
        return parse_source_freshness_results(sources_json_path)
    def test_only_fresh_sources_skips_stale(self, tmp_path, source_manifest_data):
        """only_fresh_sources=True skips nodes with stale upstream sources."""
        from conftest import _make_mock_executor, _make_mock_settings
        from prefect_dbt.core._orchestrator import (
            ExecutionMode,
            PrefectDbtOrchestrator,
        )
        manifest_path = write_manifest(tmp_path, source_manifest_data)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor(success=True)
        # Write sources.json with one stale source
        target_dir = manifest_path.parent
        write_sources_json(
            target_dir,
            [
                {
                    "unique_id": "source.test.raw.customers",
                    "status": "error",
                    "max_loaded_at_time_ago_in_s": 999999.0,
                    "criteria": {
                        "warn_after": {"count": 1, "period": "day"},
                        "error_after": {"count": 2, "period": "day"},
                    },
                },
                {
                    "unique_id": "source.test.raw.orders",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 100.0,
                    "criteria": {
                        "warn_after": {"count": 1, "period": "day"},
                    },
                },
            ],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=manifest_path,
            executor=executor,
            execution_mode=ExecutionMode.PER_WAVE,
        )
        freshness_data = self._make_freshness_results(target_dir / "sources.json")
        # Patch run_source_freshness so the orchestrator sees our parsed data
        # instead of invoking dbt for real.
        with patch(
            "prefect_dbt.core._orchestrator.run_source_freshness",
            return_value=freshness_data,
        ):
            results = orch.run_build(only_fresh_sources=True)
        # stg_src_customers should be skipped (depends on stale customers source)
        assert results["model.test.stg_src_customers"]["status"] == "skipped"
        assert "stale source" in results["model.test.stg_src_customers"]["reason"]
        # src_customer_summary should be skipped (cascaded from stg_src_customers)
        assert results["model.test.src_customer_summary"]["status"] == "skipped"
        # stg_src_orders should succeed (orders source is fresh)
        assert results["model.test.stg_src_orders"]["status"] == "success"
    def test_use_source_freshness_expiration(self, tmp_path, source_manifest_data):
        """use_source_freshness_expiration computes per-node cache expiration."""
        from conftest import (
            _make_mock_executor_per_node,
            _make_mock_settings,
            write_sql_files,
        )
        from prefect_dbt.core._orchestrator import (
            CacheConfig,
            ExecutionMode,
            PrefectDbtOrchestrator,
        )
        manifest_path = write_manifest(tmp_path, source_manifest_data)
        settings = _make_mock_settings(project_dir=tmp_path)
        executor = _make_mock_executor_per_node(success=True)
        # Write SQL files so cache policy can hash them
        write_sql_files(
            tmp_path,
            {
                "models/stg_src_customers.sql": "select * from {{ source('raw', 'customers') }}",
                "models/stg_src_orders.sql": "select * from {{ source('raw', 'orders') }}",
                "models/src_customer_summary.sql": "select * from {{ ref('stg_src_customers') }}",
            },
        )
        # Write sources.json
        target_dir = manifest_path.parent
        write_sources_json(
            target_dir,
            [
                {
                    "unique_id": "source.test.raw.customers",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 3600.0,  # 1 hour ago
                    "criteria": {
                        "warn_after": {"count": 6, "period": "hour"},
                    },
                },
                {
                    "unique_id": "source.test.raw.orders",
                    "status": "pass",
                    "max_loaded_at_time_ago_in_s": 1800.0,  # 30 min ago
                    "criteria": {
                        "warn_after": {"count": 12, "period": "hour"},
                    },
                },
            ],
        )
        from prefect.task_runners import ThreadPoolTaskRunner
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=manifest_path,
            executor=executor,
            execution_mode=ExecutionMode.PER_NODE,
            cache=CacheConfig(
                use_source_freshness_expiration=True,
                result_storage=tmp_path / "results",
                key_storage=str(tmp_path / "keys"),
            ),
            task_runner_type=ThreadPoolTaskRunner,
        )
        (tmp_path / "results").mkdir()
        (tmp_path / "keys").mkdir()
        from prefect import flow
        freshness_data = self._make_freshness_results(target_dir / "sources.json")
        with patch(
            "prefect_dbt.core._orchestrator.run_source_freshness",
            return_value=freshness_data,
        ):
            # PER_NODE caching requires a flow context for task submission.
            @flow
            def test_flow():
                return orch.run_build()
            results = test_flow()
        # All nodes should succeed
        for node_id, result in results.items():
            assert result["status"] == "success", f"{node_id}: {result.get('error')}"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_freshness.py",
"license": "Apache License 2.0",
"lines": 796,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_freshness_integration.py | """Integration tests for source freshness features against a real DuckDB dbt project.
These tests exercise source freshness filtering and expiration with real
dbtRunner, real DuckDB, and real manifest parsing.
"""
import shutil
from pathlib import Path
import pytest
import yaml
from dbt.cli.main import dbtRunner
duckdb = pytest.importorskip("duckdb", reason="duckdb required for integration tests")
pytest.importorskip(
"dbt.adapters.duckdb", reason="dbt-duckdb required for integration tests"
)
from prefect_dbt.core._orchestrator import ( # noqa: E402
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
)
from prefect_dbt.core.settings import PrefectDbtSettings # noqa: E402
pytestmark = pytest.mark.integration
# Path to the bundled dbt test project
DBT_TEST_PROJECT = Path(__file__).resolve().parent.parent / "dbt_test_project"
@pytest.fixture(scope="session")
def dbt_project(tmp_path_factory):
    """Session-scoped dbt project with DuckDB and parsed manifest."""
    project_dir = tmp_path_factory.mktemp("dbt_freshness_project")
    # Copy the bundled dbt test project into a writable temp directory.
    for item in DBT_TEST_PROJECT.iterdir():
        dest = project_dir / item.name
        if item.is_dir():
            shutil.copytree(item, dest)
        else:
            shutil.copy2(item, dest)
    # Point the "test" profile at a DuckDB file inside the temp project.
    profiles = {
        "test": {
            "target": "dev",
            "outputs": {
                "dev": {
                    "type": "duckdb",
                    "path": str(project_dir / "warehouse.duckdb"),
                    "schema": "main",
                    "threads": 1,
                }
            },
        }
    }
    (project_dir / "profiles.yml").write_text(yaml.dump(profiles))
    # Parse once so target/manifest.json exists for downstream fixtures.
    runner = dbtRunner()
    result = runner.invoke(
        ["parse", "--project-dir", str(project_dir), "--profiles-dir", str(project_dir)]
    )
    assert result.success, f"dbt parse failed: {result.exception}"
    return {
        "project_dir": project_dir,
        "profiles_dir": project_dir,
        "manifest_path": project_dir / "target" / "manifest.json",
    }
@pytest.fixture
def freshness_dbt_project(dbt_project, tmp_path):
    """Factory fixture that creates a dbt project with source freshness config.
    Returns a factory accepting (warn_count, warn_period, error_count, error_period).
    Default: huge thresholds (always fresh).
    """
    # Mutable counter so each factory call gets a unique project directory.
    call_count = [0]
    def _factory(
        warn_count=99999,
        warn_period="day",
        error_count=99999,
        error_period="day",
    ):
        call_count[0] += 1
        project_dir = tmp_path / f"freshness_{call_count[0]}"
        shutil.copytree(dbt_project["project_dir"], project_dir)
        # Write profiles.yml with new DuckDB path
        profiles = {
            "test": {
                "target": "dev",
                "outputs": {
                    "dev": {
                        "type": "duckdb",
                        "path": str(project_dir / "warehouse.duckdb"),
                        "schema": "main",
                        "threads": 1,
                    }
                },
            }
        }
        (project_dir / "profiles.yml").write_text(yaml.dump(profiles))
        # Remove any existing DuckDB files
        for f in project_dir.glob("warehouse.duckdb*"):
            f.unlink()
        # Run dbt seed first to populate the base tables
        runner = dbtRunner()
        seed_result = runner.invoke(
            [
                "seed",
                "--project-dir",
                str(project_dir),
                "--profiles-dir",
                str(project_dir),
            ]
        )
        assert seed_result.success, f"dbt seed failed: {seed_result.exception}"
        # Create source tables with proper timestamp columns for freshness checks.
        # dbt source freshness requires timestamp columns, but seed CSVs produce
        # date columns. We create separate "src_*" tables with casted timestamps.
        db_path = project_dir / "warehouse.duckdb"
        conn = duckdb.connect(str(db_path))
        try:
            conn.execute("""
                CREATE OR REPLACE TABLE main.src_customers AS
                SELECT customer_id, name,
                       created_at::timestamp AS loaded_at
                FROM main.customers
            """)
            conn.execute("""
                CREATE OR REPLACE TABLE main.src_orders AS
                SELECT order_id, customer_id, amount,
                       order_date::timestamp AS loaded_at
                FROM main.orders
            """)
        finally:
            conn.close()
        # Write models/sources.yml defining sources on the src_* tables
        sources_yml = {
            "version": 2,
            "sources": [
                {
                    "name": "raw",
                    "schema": "main",
                    "freshness": {
                        "warn_after": {"count": warn_count, "period": warn_period},
                        "error_after": {"count": error_count, "period": error_period},
                    },
                    "loaded_at_field": "loaded_at",
                    "tables": [
                        {
                            "name": "src_customers",
                            "identifier": "src_customers",
                        },
                        {
                            "name": "src_orders",
                            "identifier": "src_orders",
                        },
                    ],
                }
            ],
        }
        sources_dir = project_dir / "models"
        sources_dir.mkdir(exist_ok=True)
        (sources_dir / "sources.yml").write_text(yaml.dump(sources_yml))
        # Write source staging models
        source_staging_dir = project_dir / "models" / "source_staging"
        source_staging_dir.mkdir(parents=True, exist_ok=True)
        (source_staging_dir / "stg_src_customers.sql").write_text(
            "select customer_id, name, loaded_at\n"
            "from {{ source('raw', 'src_customers') }}\n"
        )
        (source_staging_dir / "stg_src_orders.sql").write_text(
            "select order_id, customer_id, amount, loaded_at\n"
            "from {{ source('raw', 'src_orders') }}\n"
        )
        # Run dbt parse to update manifest with source definitions
        parse_result = runner.invoke(
            [
                "parse",
                "--project-dir",
                str(project_dir),
                "--profiles-dir",
                str(project_dir),
            ]
        )
        assert parse_result.success, f"dbt parse failed: {parse_result.exception}"
        manifest_path = project_dir / "target" / "manifest.json"
        assert manifest_path.exists(), "manifest.json not generated"
        return {
            "project_dir": project_dir,
            "profiles_dir": project_dir,
            "manifest_path": manifest_path,
        }
    return _factory
class TestFreshnessIntegration:
    """End-to-end freshness behavior against a real DuckDB-backed dbt project:
    skipping of stale-source models, downstream cascades, opt-out, and
    freshness-driven cache expiration."""
    def test_fresh_sources_all_models_execute(self, freshness_dbt_project):
        """With huge warn_after, all sources pass freshness -> all models run."""
        proj = freshness_dbt_project(
            warn_count=99999,
            warn_period="day",
            error_count=99999,
            error_period="day",
        )
        settings = PrefectDbtSettings(
            project_dir=proj["project_dir"],
            profiles_dir=proj["profiles_dir"],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=proj["manifest_path"],
            execution_mode=ExecutionMode.PER_WAVE,
        )
        results = orch.run_build(only_fresh_sources=True)
        # Find source staging models
        src_models = {k: v for k, v in results.items() if "stg_src" in k}
        assert len(src_models) >= 2
        for node_id, result in src_models.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
    def test_stale_sources_skip_dependent_models(self, freshness_dbt_project):
        """With tiny warn_after, sources are stale -> source_staging models skipped."""
        proj = freshness_dbt_project(
            warn_count=1,
            warn_period="minute",
            error_count=1,
            error_period="minute",
        )
        settings = PrefectDbtSettings(
            project_dir=proj["project_dir"],
            profiles_dir=proj["profiles_dir"],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=proj["manifest_path"],
            execution_mode=ExecutionMode.PER_WAVE,
        )
        results = orch.run_build(only_fresh_sources=True)
        # Source staging models should be skipped
        src_models = {k: v for k, v in results.items() if "stg_src" in k}
        assert len(src_models) >= 2
        for node_id, result in src_models.items():
            assert result["status"] == "skipped", (
                f"{node_id} should be skipped but got: {result}"
            )
            assert "stale source" in result["reason"]
        # Non-source models (seeds, ref-based staging, marts) should still execute
        # Seeds should succeed
        seed_results = {k: v for k, v in results.items() if k.startswith("seed.")}
        for node_id, result in seed_results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
    def test_stale_sources_cascade_downstream(self, freshness_dbt_project):
        """Models downstream of stale-skipped models are also skipped."""
        proj = freshness_dbt_project(
            warn_count=1,
            warn_period="minute",
            error_count=1,
            error_period="minute",
        )
        # Add a mart model that depends on stg_src_customers
        mart_dir = proj["project_dir"] / "models" / "marts"
        mart_dir.mkdir(parents=True, exist_ok=True)
        (mart_dir / "src_based_mart.sql").write_text(
            "select customer_id, name\nfrom {{ ref('stg_src_customers') }}\n"
        )
        # Also write a stg_src_customers model in source_staging for the mart to ref
        source_staging_dir = proj["project_dir"] / "models" / "source_staging"
        source_staging_dir.mkdir(parents=True, exist_ok=True)
        # (Already exists from fixture, just need the mart model)
        # Re-parse to pick up the new model
        runner = dbtRunner()
        parse_result = runner.invoke(
            [
                "parse",
                "--project-dir",
                str(proj["project_dir"]),
                "--profiles-dir",
                str(proj["profiles_dir"]),
            ]
        )
        assert parse_result.success, f"dbt parse failed: {parse_result.exception}"
        settings = PrefectDbtSettings(
            project_dir=proj["project_dir"],
            profiles_dir=proj["profiles_dir"],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=proj["manifest_path"],
            execution_mode=ExecutionMode.PER_WAVE,
        )
        results = orch.run_build(only_fresh_sources=True)
        # The new mart should be skipped due to cascade
        mart_result = results.get("model.test_project.src_based_mart")
        assert mart_result is not None, (
            f"src_based_mart not found in results. Keys: {list(results.keys())}"
        )
        assert mart_result["status"] == "skipped"
    def test_only_fresh_sources_false_runs_everything(self, freshness_dbt_project):
        """Default only_fresh_sources=False executes all models regardless."""
        proj = freshness_dbt_project(
            warn_count=1,
            warn_period="minute",
            error_count=1,
            error_period="minute",
        )
        settings = PrefectDbtSettings(
            project_dir=proj["project_dir"],
            profiles_dir=proj["profiles_dir"],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=proj["manifest_path"],
            execution_mode=ExecutionMode.PER_WAVE,
        )
        # only_fresh_sources=False (default) -> run everything
        results = orch.run_build(only_fresh_sources=False)
        src_models = {k: v for k, v in results.items() if "stg_src" in k}
        for node_id, result in src_models.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
    def test_freshness_expiration_with_caching(self, freshness_dbt_project):
        """use_source_freshness_expiration computes cache_expiration from freshness data."""
        from prefect import flow
        from prefect.task_runners import ThreadPoolTaskRunner
        proj = freshness_dbt_project(
            warn_count=99999,
            warn_period="day",
            error_count=99999,
            error_period="day",
        )
        result_dir = proj["project_dir"] / "result_storage"
        result_dir.mkdir()
        key_dir = proj["project_dir"] / "cache_key_storage"
        key_dir.mkdir()
        settings = PrefectDbtSettings(
            project_dir=proj["project_dir"],
            profiles_dir=proj["profiles_dir"],
        )
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=proj["manifest_path"],
            execution_mode=ExecutionMode.PER_NODE,
            cache=CacheConfig(
                use_source_freshness_expiration=True,
                result_storage=result_dir,
                key_storage=str(key_dir),
            ),
            concurrency=1,
            task_runner_type=ThreadPoolTaskRunner,
        )
        # PER_NODE caching requires a flow context for task submission.
        @flow
        def test_flow():
            return orch.run_build()
        results = test_flow()
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_freshness_integration.py",
"license": "Apache License 2.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_cache.py | """
Cache policy for per-node dbt orchestration.
Provides DbtNodeCachePolicy (a CachePolicy subclass) and a factory function
that builds policies from DbtNode metadata. When enabled, unchanged nodes
are skipped on subsequent runs — cache keys incorporate SQL file content,
node config, and upstream cache keys so that changes cascade downstream.
"""
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from prefect.cache_policies import CachePolicy
from prefect.context import TaskRunContext
from prefect.filesystems import WritableFileSystem
from prefect.logging import get_logger
from prefect.utilities.hashing import hash_objects, stable_hash
from prefect_dbt.core._manifest import DbtNode
logger = get_logger(__name__)
@dataclass
class DbtNodeCachePolicy(CachePolicy):
    """Cache policy for a single dbt node.
    All data is baked in at construction time (pre-computed hashes) so the
    policy is pickle-safe across process boundaries and does not hold
    references to `ManifestParser` or `Path` objects.
    Fields:
        node_unique_id: Ensures distinct keys per node.
        file_content_hash: Hash of the source SQL/CSV file (None if missing).
        config_hash: Hash of the node config dict (None if empty).
        full_refresh: Separates full_refresh vs normal cache entries.
        upstream_cache_keys: Sorted upstream node_id -> key pairs for
            deterministic hashing.
        macro_content_hash: Hash of macro dependency file contents
            (None if no macro dependencies).
        relation_name: Database relation name included in the cache key so
            that changes to the materialized relation invalidate the cache.
    """
    node_unique_id: str = ""
    file_content_hash: str | None = None
    config_hash: str | None = None
    full_refresh: bool = False
    upstream_cache_keys: tuple[tuple[str, str], ...] = ()
    macro_content_hash: str | None = None
    relation_name: str | None = None
    def compute_key(
        self,
        task_ctx: TaskRunContext,
        inputs: dict[str, Any],
        flow_parameters: dict[str, Any],
        **kwargs: Any,
    ) -> str | None:
        """Compute a cache key from pre-baked node metadata.
        `task_ctx`, `inputs`, and `flow_parameters` are ignored -- all
        data needed for the key is stored directly on the policy instance.
        """
        # All fields are plain str/bool/tuple values; presumably hash_objects
        # returns a deterministic digest for them (None only if unhashable --
        # not expected here). TODO confirm against prefect.utilities.hashing.
        return hash_objects(
            self.node_unique_id,
            self.file_content_hash,
            self.config_hash,
            self.full_refresh,
            self.upstream_cache_keys,
            self.macro_content_hash,
            self.relation_name,
        )
def _hash_node_file(node: DbtNode, project_dir: Path) -> str | None:
    """Hash the source file for *node* (SQL for models/snapshots, CSV for seeds).

    Args:
        node: Manifest node whose ``original_file_path`` locates the file
            relative to the project root.
        project_dir: dbt project root joined with the node's relative path.

    Returns:
        A stable hash of the file bytes, or ``None`` when the node records
        no file path or the file cannot be read from disk.
    """
    if not node.original_file_path:
        return None
    file_path = project_dir / node.original_file_path
    try:
        content = file_path.read_bytes()
    # IOError has been an alias of OSError since Python 3.3, so catching
    # the tuple (OSError, IOError) was redundant.
    except OSError:
        logger.warning(
            "Could not read source file for %s at %s; "
            "cache key will not reflect file content changes",
            node.unique_id,
            file_path,
        )
        return None
    return stable_hash(content)
def _hash_node_config(node: DbtNode) -> str | None:
    """Return a hash of *node*'s config dict, or `None` when it is empty."""
    # Empty/missing config hashes to None so it never perturbs the cache key.
    return hash_objects(node.config) if node.config else None
def _hash_macro_dependencies(
    node: DbtNode,
    project_dir: Path,
    macro_paths: dict[str, str | None],
) -> str | None:
    """Hash macro dependencies for *node*.

    For each macro in `node.depends_on_macros` (sorted for determinism):
    - If the macro has an `original_file_path` and the file exists on
      disk, hash the file contents.
    - Otherwise, use the macro unique_id as a stable fallback (external
      packages, builtins, etc.).

    Args:
        node: Manifest node whose macro dependencies are hashed.
        project_dir: dbt project root used to resolve macro file paths.
        macro_paths: Mapping of macro unique_id -> project-relative file
            path (or None when the macro has no file on disk).

    Returns:
        A stable combined hash, or `None` when the node has no macro
        dependencies.
    """
    if not node.depends_on_macros:
        return None
    parts: list[str] = []
    for macro_id in sorted(node.depends_on_macros):
        file_path = macro_paths.get(macro_id)
        if file_path is not None:
            full_path = project_dir / file_path
            try:
                content = full_path.read_bytes()
                parts.append(stable_hash(content))
                continue
            # IOError is an alias of OSError since Python 3.3; catching the
            # pair was redundant. Unreadable files fall through to the
            # macro-ID fallback below.
            except OSError:
                pass
        # Fallback: use macro ID itself (stable across runs)
        parts.append(macro_id)
    return stable_hash("|".join(parts).encode())
def build_cache_policy_for_node(
    node: DbtNode,
    project_dir: Path,
    full_refresh: bool,
    upstream_cache_keys: dict[str, str],
    key_storage: WritableFileSystem | str | Path | None = None,
    macro_paths: dict[str, str | None] | None = None,
) -> DbtNodeCachePolicy:
    """Construct a :class:`DbtNodeCachePolicy` for *node*.

    Pre-computes every hash the policy needs: the on-disk source file
    (SQL/CSV), the node config dict, and macro dependency contents, plus a
    deterministically sorted tuple of upstream cache keys. When *key_storage*
    is given, the policy is re-configured to persist keys there via
    :meth:`CachePolicy.configure`.
    """
    policy = DbtNodeCachePolicy(
        node_unique_id=node.unique_id,
        file_content_hash=_hash_node_file(node, project_dir),
        config_hash=_hash_node_config(node),
        full_refresh=full_refresh,
        # Sort upstream entries so equal mappings always hash identically.
        upstream_cache_keys=tuple(sorted(upstream_cache_keys.items())),
        macro_content_hash=_hash_macro_dependencies(
            node, project_dir, macro_paths or {}
        ),
        relation_name=node.relation_name,
    )
    return policy if key_storage is None else policy.configure(key_storage=key_storage)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_cache.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_cache.py | """Tests for DbtNodeCachePolicy and caching integration."""
import pickle
from dataclasses import replace
from datetime import timedelta
from unittest.mock import MagicMock, patch
import pytest
from conftest import (
_make_mock_executor_per_node,
_make_mock_settings,
_make_node,
write_manifest,
write_sql_files,
)
from dbt.artifacts.resources.types import NodeType
from prefect_dbt.core._cache import (
DbtNodeCachePolicy,
_hash_macro_dependencies,
_hash_node_config,
_hash_node_file,
build_cache_policy_for_node,
)
from prefect_dbt.core._manifest import ManifestParser
from prefect_dbt.core._orchestrator import (
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
)
from prefect import flow
from prefect.task_runners import ThreadPoolTaskRunner
# =============================================================================
# TestDbtNodeCachePolicy
# =============================================================================
class TestDbtNodeCachePolicy:
    """compute_key determinism and sensitivity: the key must change when any
    baked-in field changes, ignore runtime context, and survive pickling."""
    def test_deterministic_key(self):
        """Same inputs produce the same key."""
        kwargs = dict(
            node_unique_id="model.test.m1",
            file_content_hash="abc123",
            config_hash="def456",
            full_refresh=False,
            upstream_cache_keys=(("model.test.root", "key1"),),
        )
        p1 = DbtNodeCachePolicy(**kwargs)
        p2 = DbtNodeCachePolicy(**kwargs)
        assert p1.compute_key(None, {}, {}) == p2.compute_key(None, {}, {})
    def test_key_changes_on_file_content(self):
        """Different file_content_hash produces a different key."""
        base = dict(
            node_unique_id="model.test.m1",
            config_hash="cfg",
            full_refresh=False,
            upstream_cache_keys=(),
        )
        p1 = DbtNodeCachePolicy(file_content_hash="aaa", **base)
        p2 = DbtNodeCachePolicy(file_content_hash="bbb", **base)
        assert p1.compute_key(None, {}, {}) != p2.compute_key(None, {}, {})
    def test_key_changes_on_config(self):
        """Different config_hash produces a different key."""
        base = dict(
            node_unique_id="model.test.m1",
            file_content_hash="file",
            full_refresh=False,
            upstream_cache_keys=(),
        )
        p1 = DbtNodeCachePolicy(config_hash="c1", **base)
        p2 = DbtNodeCachePolicy(config_hash="c2", **base)
        assert p1.compute_key(None, {}, {}) != p2.compute_key(None, {}, {})
    def test_key_changes_on_upstream(self):
        """Different upstream_cache_keys produces a different key."""
        base = dict(
            node_unique_id="model.test.m1",
            file_content_hash="file",
            config_hash="cfg",
            full_refresh=False,
        )
        p1 = DbtNodeCachePolicy(upstream_cache_keys=(("a", "k1"),), **base)
        p2 = DbtNodeCachePolicy(upstream_cache_keys=(("a", "k2"),), **base)
        assert p1.compute_key(None, {}, {}) != p2.compute_key(None, {}, {})
    def test_key_changes_on_macro_hash(self):
        """Different macro_content_hash produces a different key."""
        base = dict(
            node_unique_id="model.test.m1",
            file_content_hash="file",
            config_hash="cfg",
            full_refresh=False,
            upstream_cache_keys=(),
        )
        p1 = DbtNodeCachePolicy(macro_content_hash="macro_a", **base)
        p2 = DbtNodeCachePolicy(macro_content_hash="macro_b", **base)
        assert p1.compute_key(None, {}, {}) != p2.compute_key(None, {}, {})
    def test_key_changes_on_full_refresh(self):
        """full_refresh=True vs False produces different keys."""
        base = dict(
            node_unique_id="model.test.m1",
            file_content_hash="file",
            config_hash="cfg",
            upstream_cache_keys=(),
        )
        p1 = DbtNodeCachePolicy(full_refresh=False, **base)
        p2 = DbtNodeCachePolicy(full_refresh=True, **base)
        assert p1.compute_key(None, {}, {}) != p2.compute_key(None, {}, {})
    def test_key_ignores_task_context(self):
        """Different task_ctx values produce the same key."""
        policy = DbtNodeCachePolicy(
            node_unique_id="model.test.m1",
            file_content_hash="f",
            config_hash="c",
            full_refresh=False,
            upstream_cache_keys=(),
        )
        k1 = policy.compute_key(None, {}, {})
        k2 = policy.compute_key(MagicMock(), {"x": 1}, {"y": 2})
        assert k1 == k2
    def test_none_hashes_still_produce_key(self):
        """Policy with None file/config hash still produces a valid key."""
        policy = DbtNodeCachePolicy(
            node_unique_id="model.test.m1",
            file_content_hash=None,
            config_hash=None,
            full_refresh=False,
            upstream_cache_keys=(),
        )
        key = policy.compute_key(None, {}, {})
        assert key is not None
        assert isinstance(key, str)
        assert len(key) > 0
    def test_pickle_roundtrip(self):
        """DbtNodeCachePolicy survives pickle roundtrip."""
        policy = DbtNodeCachePolicy(
            node_unique_id="model.test.m1",
            file_content_hash="abc",
            config_hash="def",
            full_refresh=True,
            upstream_cache_keys=(("x", "y"),),
        )
        key_before = policy.compute_key(None, {}, {})
        restored = pickle.loads(pickle.dumps(policy))
        assert restored.compute_key(None, {}, {}) == key_before
# =============================================================================
# TestBuildCachePolicyForNode
# =============================================================================
class TestBuildCachePolicyForNode:
    """Tests for build_cache_policy_for_node: file hashing, upstream keys, storage."""

    def test_reads_sql_file(self, tmp_path):
        """Real file on disk is hashed into the policy."""
        write_sql_files(tmp_path, {"models/my_model.sql": "SELECT 1"})
        base = _make_node(
            unique_id="model.test.m1",
            name="m1",
            resource_type=NodeType.Model,
        )
        # The node dataclass is frozen, so attach the path via a copy.
        node = replace(base, original_file_path="models/my_model.sql")
        policy = build_cache_policy_for_node(node, tmp_path, False, {})
        assert policy.file_content_hash is not None

    def test_missing_file_graceful(self, tmp_path):
        """Missing file results in None file hash, no crash."""
        node = replace(
            _make_node(unique_id="model.test.m1", name="m1"),
            original_file_path="models/nonexistent.sql",
        )
        policy = build_cache_policy_for_node(node, tmp_path, False, {})
        assert policy.file_content_hash is None
        # A valid key is still produced despite the missing file.
        assert policy.compute_key(None, {}, {}) is not None

    def test_no_original_file_path(self, tmp_path):
        """Node without original_file_path gets None file hash."""
        node = _make_node(unique_id="model.test.m1", name="m1")
        policy = build_cache_policy_for_node(node, tmp_path, False, {})
        assert policy.file_content_hash is None

    def test_seed_csv_hashed(self, tmp_path):
        """CSV content is hashed for seed nodes."""
        write_sql_files(tmp_path, {"seeds/users.csv": "id,name\n1,Alice"})
        seed = _make_node(
            unique_id="seed.test.users",
            name="users",
            resource_type=NodeType.Seed,
        )
        seed = replace(seed, original_file_path="seeds/users.csv")
        policy = build_cache_policy_for_node(seed, tmp_path, False, {})
        assert policy.file_content_hash is not None

    def test_upstream_keys_sorted(self, tmp_path):
        """Upstream keys are sorted for determinism."""
        node = _make_node(unique_id="model.test.leaf", name="leaf")
        unsorted_upstream = {"z_model": "key_z", "a_model": "key_a", "m_model": "key_m"}
        policy = build_cache_policy_for_node(node, tmp_path, False, unsorted_upstream)
        assert policy.upstream_cache_keys == (
            ("a_model", "key_a"),
            ("m_model", "key_m"),
            ("z_model", "key_z"),
        )

    def test_key_storage_configured(self, tmp_path):
        """key_storage is applied via configure()."""
        node = _make_node(unique_id="model.test.m1", name="m1")
        storage = str(tmp_path / "keys")
        policy = build_cache_policy_for_node(
            node, tmp_path, False, {}, key_storage=storage
        )
        assert policy.key_storage == storage
# =============================================================================
# TestHashHelpers
# =============================================================================
class TestHashHelpers:
    """Tests for the _hash_node_file and _hash_node_config helpers."""

    def test_hash_node_file_returns_none_for_no_path(self, tmp_path):
        bare = _make_node(unique_id="model.test.m1", name="m1")
        assert _hash_node_file(bare, tmp_path) is None

    def test_hash_node_file_returns_hash_for_existing_file(self, tmp_path):
        write_sql_files(tmp_path, {"models/m.sql": "SELECT 1"})
        node = replace(
            _make_node(unique_id="model.test.m1", name="m1"),
            original_file_path="models/m.sql",
        )
        digest = _hash_node_file(node, tmp_path)
        assert digest is not None
        assert isinstance(digest, str)

    def test_hash_node_file_different_content_different_hash(self, tmp_path):
        write_sql_files(
            tmp_path, {"models/a.sql": "SELECT 1", "models/b.sql": "SELECT 2"}
        )
        first = replace(
            _make_node(unique_id="model.test.a", name="a"),
            original_file_path="models/a.sql",
        )
        second = replace(
            _make_node(unique_id="model.test.b", name="b"),
            original_file_path="models/b.sql",
        )
        assert _hash_node_file(first, tmp_path) != _hash_node_file(second, tmp_path)

    def test_hash_node_config_none_for_empty(self):
        node = replace(
            _make_node(unique_id="model.test.m1", name="m1"),
            config={},
        )
        assert _hash_node_config(node) is None

    def test_hash_node_config_returns_hash(self):
        node = replace(
            _make_node(unique_id="model.test.m1", name="m1"),
            config={"materialized": "table", "schema": "raw"},
        )
        digest = _hash_node_config(node)
        assert digest is not None
        assert isinstance(digest, str)
# =============================================================================
# TestHashMacroDependencies
# =============================================================================
class TestHashMacroDependencies:
    """Tests for _hash_macro_dependencies: content hashing and ID fallbacks."""

    def test_no_macros_returns_none(self, tmp_path):
        """Node with no macro dependencies returns None."""
        node = _make_node(unique_id="model.test.m1", name="m1")
        assert _hash_macro_dependencies(node, tmp_path, {}) is None

    def test_project_macro_hashed(self, tmp_path):
        """Project-local macro file content is hashed."""
        macros_dir = tmp_path / "macros"
        macros_dir.mkdir()
        (macros_dir / "my_macro.sql").write_text(
            "{% macro my_macro() %}1{% endmacro %}"
        )
        node = _make_node(
            unique_id="model.test.m1",
            name="m1",
            depends_on_macros=("macro.proj.my_macro",),
        )
        digest = _hash_macro_dependencies(
            node, tmp_path, {"macro.proj.my_macro": "macros/my_macro.sql"}
        )
        assert digest is not None
        assert isinstance(digest, str)

    def test_different_content_different_hash(self, tmp_path):
        """Editing a macro file changes the hash."""
        (tmp_path / "macros").mkdir()
        macro_file = tmp_path / "macros/my_macro.sql"
        node = _make_node(
            unique_id="model.test.m1",
            name="m1",
            depends_on_macros=("macro.proj.my_macro",),
        )
        paths = {"macro.proj.my_macro": "macros/my_macro.sql"}
        macro_file.write_text("{% macro my_macro() %}v1{% endmacro %}")
        before = _hash_macro_dependencies(node, tmp_path, paths)
        macro_file.write_text("{% macro my_macro() %}v2{% endmacro %}")
        after = _hash_macro_dependencies(node, tmp_path, paths)
        assert before != after

    def test_external_macro_uses_id_fallback(self, tmp_path):
        """Macro with no file path falls back to macro ID."""
        node = _make_node(
            unique_id="model.test.m1",
            name="m1",
            depends_on_macros=("macro.dbt.run_query",),
        )
        digest = _hash_macro_dependencies(
            node, tmp_path, {"macro.dbt.run_query": None}
        )
        assert digest is not None

    def test_unknown_macro_uses_id_fallback(self, tmp_path):
        """Macro not in macro_paths falls back to macro ID."""
        node = _make_node(
            unique_id="model.test.m1",
            name="m1",
            depends_on_macros=("macro.unknown.foo",),
        )
        assert _hash_macro_dependencies(node, tmp_path, {}) is not None
# =============================================================================
# TestManifestMacroParsing
# =============================================================================
class TestManifestMacroParsing:
    """Macro metadata extraction from manifest.json via ManifestParser."""

    def test_depends_on_macros_parsed(self, tmp_path):
        """depends_on.macros from manifest is parsed into DbtNode."""
        payload = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {
                        "nodes": [],
                        "macros": ["macro.proj.my_macro", "macro.dbt.run_query"],
                    },
                    "config": {"materialized": "table"},
                }
            },
            "sources": {},
        }
        parser = ManifestParser(write_manifest(tmp_path, payload))
        parsed = parser.get_executable_nodes()["model.test.m1"]
        assert parsed.depends_on_macros == (
            "macro.proj.my_macro",
            "macro.dbt.run_query",
        )

    def test_get_macro_paths(self, tmp_path):
        """ManifestParser.get_macro_paths extracts macro file paths."""
        payload = {
            "nodes": {},
            "sources": {},
            "macros": {
                "macro.proj.my_macro": {
                    "name": "my_macro",
                    "original_file_path": "macros/my_macro.sql",
                },
                "macro.dbt.run_query": {
                    "name": "run_query",
                },
            },
        }
        parser = ManifestParser(write_manifest(tmp_path, payload))
        # Macros without original_file_path map to None (external/builtin macros).
        assert parser.get_macro_paths() == {
            "macro.proj.my_macro": "macros/my_macro.sql",
            "macro.dbt.run_query": None,
        }
# =============================================================================
# TestOrchestratorCachingIntegration
# =============================================================================
# Manifest data with original_file_path for cache tests
# Single file-backed model — the minimal fixture for cache-hit tests.
SINGLE_MODEL_WITH_FILE = {
    "nodes": {
        "model.test.m1": {
            "name": "m1",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "original_file_path": "models/m1.sql",
        }
    },
    "sources": {},
}

# Diamond DAG: root -> (left, right) -> leaf. Every node is file-backed so
# cache keys incorporate real SQL file content.
DIAMOND_WITH_FILES = {
    "nodes": {
        "model.test.root": {
            "name": "root",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "original_file_path": "models/root.sql",
        },
        "model.test.left": {
            "name": "left",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/left.sql",
        },
        "model.test.right": {
            "name": "right",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/right.sql",
        },
        "model.test.leaf": {
            "name": "leaf",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.left", "model.test.right"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/leaf.sql",
        },
    },
    "sources": {},
}

# The diamond above plus an unrelated "independent" root model — used to
# verify that cache invalidation cascades only to true descendants.
DIAMOND_WITH_INDEPENDENT = {
    "nodes": {
        "model.test.root": {
            "name": "root",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "original_file_path": "models/root.sql",
        },
        "model.test.left": {
            "name": "left",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/left.sql",
        },
        "model.test.right": {
            "name": "right",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/right.sql",
        },
        "model.test.leaf": {
            "name": "leaf",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.left", "model.test.right"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/leaf.sql",
        },
        "model.test.independent": {
            "name": "independent",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "original_file_path": "models/independent.sql",
        },
    },
    "sources": {},
}

# Sentinel marking "argument not supplied" in the cache_orch factory, so an
# explicit None (or other falsy value) can be distinguished from omission.
_UNSET = object()
@pytest.fixture
def cache_orch(tmp_path):
    """Factory fixture for PER_NODE orchestrators with caching enabled.

    The result and cache-key storage directories are created once per test
    and shared by every orchestrator the factory builds, so cache entries
    written by one run (or one orchestrator instance) are visible to the
    next — enabling cross-run and cross-instance cache tests. Each factory
    call gets its own fresh project directory.

    Returns a callable producing (orchestrator, executor, project_dir).
    """
    results_root = tmp_path / "result_storage"
    results_root.mkdir()
    keys_root = tmp_path / "cache_key_storage"
    keys_root.mkdir()
    project_counter = 0

    def _factory(
        manifest_data,
        sql_files=None,
        *,
        executor=None,
        cache=_UNSET,
        result_storage=_UNSET,
        cache_key_storage=_UNSET,
        **kwargs,
    ):
        nonlocal project_counter
        project_dir = tmp_path / f"project_{project_counter}"
        project_dir.mkdir(exist_ok=True)
        project_counter += 1

        if sql_files:
            write_sql_files(project_dir, sql_files)
        manifest = write_manifest(project_dir, manifest_data)

        if executor is None:
            executor = _make_mock_executor_per_node(**kwargs.pop("executor_kwargs", {}))

        # Prefect needs a Path (not str) for result_storage so it builds a
        # LocalFileSystem instead of attempting Block.load() on a string.
        chosen_results = results_root if result_storage is _UNSET else result_storage
        chosen_keys = str(keys_root) if cache_key_storage is _UNSET else cache_key_storage
        cache_config = (
            CacheConfig(result_storage=chosen_results, key_storage=chosen_keys)
            if cache is _UNSET
            else cache
        )

        options = {
            "settings": _make_mock_settings(project_dir=project_dir),
            "manifest_path": manifest,
            "executor": executor,
            "execution_mode": ExecutionMode.PER_NODE,
            "task_runner_type": ThreadPoolTaskRunner,
            "cache": cache_config,
            **kwargs,
        }
        return PrefectDbtOrchestrator(**options), executor, project_dir

    return _factory
class TestOrchestratorCachingInit:
    """Constructor-level validation of the cache configuration."""

    def test_caching_disabled_by_default(self, tmp_path):
        """cache=None (default) disables caching."""
        manifest_path = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest_path,
            executor=_make_mock_executor_per_node(),
            execution_mode=ExecutionMode.PER_NODE,
            task_runner_type=ThreadPoolTaskRunner,
        )
        assert orchestrator._cache is None

    def test_caching_rejected_in_per_wave(self, tmp_path):
        """Caching with PER_WAVE raises ValueError."""
        manifest_path = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        with pytest.raises(ValueError, match="Caching is only supported in PER_NODE"):
            PrefectDbtOrchestrator(
                settings=_make_mock_settings(),
                manifest_path=manifest_path,
                executor=_make_mock_executor_per_node(),
                execution_mode=ExecutionMode.PER_WAVE,
                cache=CacheConfig(),
            )

    def test_caching_params_stored(self, tmp_path):
        """CacheConfig is stored on the orchestrator."""
        manifest_path = write_manifest(tmp_path, {"nodes": {}, "sources": {}})
        config = CacheConfig(
            expiration=timedelta(hours=1),
            result_storage="/tmp/results",
            key_storage="/tmp/keys",
        )
        orchestrator = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest_path,
            executor=_make_mock_executor_per_node(),
            execution_mode=ExecutionMode.PER_NODE,
            task_runner_type=ThreadPoolTaskRunner,
            cache=config,
        )
        # The exact object is retained, with all fields intact.
        assert orchestrator._cache is config
        assert orchestrator._cache.expiration == timedelta(hours=1)
        assert orchestrator._cache.result_storage == "/tmp/results"
        assert orchestrator._cache.key_storage == "/tmp/keys"
class TestOrchestratorCachingOutcomes:
    """Outcome-based integration tests for cross-run caching.

    These tests validate real caching behavior by running builds multiple
    times and observing whether the executor is invoked (cache miss) or
    skipped (cache hit). No internals like ``with_options`` or
    ``build_cache_policy_for_node`` are patched or inspected.
    """

    def test_second_run_skips_unchanged_nodes(self, cache_orch):
        """Second run with identical files hits cache; executor is not invoked."""
        sql_files = {"models/m1.sql": "SELECT 1"}
        orch, executor, _ = cache_orch(SINGLE_MODEL_WITH_FILE, sql_files)

        # Both builds run inside one parent flow so they share the cache scope.
        @flow
        def run_twice():
            r1 = orch.run_build()
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_twice()
        # First run executes; second run is a cache hit
        assert r1["model.test.m1"]["status"] == "success"
        assert r2["model.test.m1"]["status"] == "cached"
        # Executor was only called once (first run); second was a cache hit
        assert executor.execute_node.call_count == 1

    def test_cache_invalidates_downstream_on_root_change(self, cache_orch):
        """Changing root SQL invalidates downstream nodes but not independent ones."""
        sql_files = {
            "models/root.sql": "SELECT 1",
            "models/left.sql": "SELECT * FROM root",
            "models/right.sql": "SELECT * FROM root",
            "models/leaf.sql": "SELECT * FROM left JOIN right",
            "models/independent.sql": "SELECT 42",
        }
        orch, executor, project_dir = cache_orch(DIAMOND_WITH_INDEPENDENT, sql_files)

        @flow
        def run_then_change():
            r1 = orch.run_build()
            # Change root SQL to invalidate its cache key (and all downstream)
            (project_dir / "models/root.sql").write_text("SELECT 2")
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_then_change()
        # All nodes succeed in first run
        for node_id in DIAMOND_WITH_INDEPENDENT["nodes"]:
            assert r1[node_id]["status"] == "success"
        # Second run: root changed so root + downstream re-execute,
        # independent node is a cache hit
        re_executed = {
            "model.test.root",
            "model.test.left",
            "model.test.right",
            "model.test.leaf",
        }
        for node_id in DIAMOND_WITH_INDEPENDENT["nodes"]:
            expected = "success" if node_id in re_executed else "cached"
            assert r2[node_id]["status"] == expected
        # Run 1: 5 nodes executed.
        # Run 2: 4 re-executed (root changed + downstream cascade),
        # independent cached.
        # Total: 9
        assert executor.execute_node.call_count == 9
        # Verify independent was only executed once (cached on run 2)
        executed_nodes = [
            call.args[0].unique_id for call in executor.execute_node.call_args_list
        ]
        assert executed_nodes.count("model.test.independent") == 1

    def test_full_refresh_bypasses_cache(self, cache_orch):
        """full_refresh=True produces different cache keys, bypassing cache."""
        sql_files = {"models/m1.sql": "SELECT 1"}
        orch, executor, _ = cache_orch(SINGLE_MODEL_WITH_FILE, sql_files)

        @flow
        def run_then_refresh():
            r1 = orch.run_build()
            r2 = orch.run_build(full_refresh=True)
            return r1, r2

        r1, r2 = run_then_refresh()
        assert r1["model.test.m1"]["status"] == "success"
        assert r2["model.test.m1"]["status"] == "success"
        # Both runs execute because full_refresh changes the cache key
        assert executor.execute_node.call_count == 2

    def test_cache_persists_across_orchestrator_instances(self, cache_orch):
        """A new orchestrator instance reuses cached results from a prior run."""
        sql_files = {
            "models/root.sql": "SELECT 1",
            "models/left.sql": "SELECT * FROM root",
            "models/right.sql": "SELECT * FROM root",
            "models/leaf.sql": "SELECT * FROM left JOIN right",
        }
        # The fixture shares result/key storage between factory calls, so
        # orch2 sees cache entries written by orch1.
        orch1, exec1, _ = cache_orch(DIAMOND_WITH_FILES, sql_files)
        orch2, exec2, _ = cache_orch(DIAMOND_WITH_FILES, sql_files)

        @flow
        def run_cross_instance():
            r1 = orch1.run_build()
            r2 = orch2.run_build()
            return r1, r2

        r1, r2 = run_cross_instance()
        # First run executes; second run is all cache hits
        for node_id in DIAMOND_WITH_FILES["nodes"]:
            assert r1[node_id]["status"] == "success"
            assert r2[node_id]["status"] == "cached"
        # Instance 1 executed all nodes
        assert exec1.execute_node.call_count == 4
        # Instance 2 hit cache for all nodes
        assert exec2.execute_node.call_count == 0

    def test_full_refresh_always_executes(self, cache_orch):
        """Repeated full_refresh=True runs always execute (never cached)."""
        sql_files = {"models/m1.sql": "SELECT 1"}
        orch, executor, _ = cache_orch(SINGLE_MODEL_WITH_FILE, sql_files)

        @flow
        def run_full_refresh_twice():
            r1 = orch.run_build(full_refresh=True)
            r2 = orch.run_build(full_refresh=True)
            return r1, r2

        r1, r2 = run_full_refresh_twice()
        assert r1["model.test.m1"]["status"] == "success"
        assert r2["model.test.m1"]["status"] == "success"
        # Both runs execute — full_refresh forces re-execution
        assert executor.execute_node.call_count == 2

    def test_normal_run_after_full_refresh_uses_own_cache(self, cache_orch):
        """Normal run, full_refresh, normal again → third run hits normal cache."""
        sql_files = {"models/m1.sql": "SELECT 1"}
        orch, executor, _ = cache_orch(SINGLE_MODEL_WITH_FILE, sql_files)

        @flow
        def run_three_ways():
            r1 = orch.run_build()  # normal → cache miss
            r2 = orch.run_build(full_refresh=True)  # full_refresh → always executes
            r3 = orch.run_build()  # normal → cache hit from r1
            return r1, r2, r3

        r1, r2, r3 = run_three_ways()
        assert r1["model.test.m1"]["status"] == "success"
        assert r2["model.test.m1"]["status"] == "success"
        assert r3["model.test.m1"]["status"] == "cached"
        # r1 executes (miss), r2 executes (refresh), r3 cached (hit from r1)
        assert executor.execute_node.call_count == 2

    def test_macro_change_invalidates_cache(self, cache_orch):
        """Editing a macro file between runs invalidates the cache for dependent nodes."""
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {
                        "nodes": [],
                        "macros": ["macro.proj.my_macro"],
                    },
                    "config": {"materialized": "table"},
                    "original_file_path": "models/m1.sql",
                }
            },
            "sources": {},
            "macros": {
                "macro.proj.my_macro": {
                    "name": "my_macro",
                    "original_file_path": "macros/my_macro.sql",
                },
            },
        }
        sql_files = {
            "models/m1.sql": "SELECT {{ my_macro() }}",
            "macros/my_macro.sql": "{% macro my_macro() %}1{% endmacro %}",
        }
        orch, executor, project_dir = cache_orch(manifest_data, sql_files)

        @flow
        def run_then_edit_macro():
            r1 = orch.run_build()
            # Edit the macro file
            (project_dir / "macros/my_macro.sql").write_text(
                "{% macro my_macro() %}2{% endmacro %}"
            )
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_then_edit_macro()
        assert r1["model.test.m1"]["status"] == "success"
        assert r2["model.test.m1"]["status"] == "success"
        # Both runs execute — macro content changed
        assert executor.execute_node.call_count == 2
# =============================================================================
# TestPrecomputeAllCacheKeys
# =============================================================================
def _make_precompute_orch(tmp_path, manifest_path):
    """Create an orchestrator configured for _precompute_all_cache_keys tests."""
    cache_config = CacheConfig(
        result_storage=tmp_path / "results",
        key_storage=str(tmp_path / "keys"),
    )
    return PrefectDbtOrchestrator(
        settings=_make_mock_settings(project_dir=tmp_path),
        manifest_path=manifest_path,
        executor=_make_mock_executor_per_node(),
        execution_mode=ExecutionMode.PER_NODE,
        task_runner_type=ThreadPoolTaskRunner,
        cache=cache_config,
    )
class TestPrecomputeAllCacheKeys:
    """Unit tests for _precompute_all_cache_keys()."""

    def test_linear_chain_computes_all_keys(self, tmp_path):
        """All nodes in a linear chain (a -> b -> c) get cache keys."""
        sql_files = {
            "models/a.sql": "SELECT 1",
            "models/b.sql": "SELECT * FROM a",
            "models/c.sql": "SELECT * FROM b",
        }
        write_sql_files(tmp_path, sql_files)
        manifest_data = {
            "nodes": {
                "model.test.a": {
                    "name": "a",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/a.sql",
                },
                "model.test.b": {
                    "name": "b",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.a"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/b.sql",
                },
                "model.test.c": {
                    "name": "c",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.b"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/c.sql",
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest_data)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        assert "model.test.a" in keys
        assert "model.test.b" in keys
        assert "model.test.c" in keys
        # All keys are non-empty strings
        for v in keys.values():
            assert isinstance(v, str) and len(v) > 0

    def test_diamond_computes_all_keys(self, tmp_path):
        """All nodes in a diamond DAG get cache keys."""
        sql_files = {
            "models/root.sql": "SELECT 1",
            "models/left.sql": "SELECT * FROM root",
            "models/right.sql": "SELECT * FROM root",
            "models/leaf.sql": "SELECT * FROM left JOIN right",
        }
        write_sql_files(tmp_path, sql_files)
        manifest_path = write_manifest(tmp_path, DIAMOND_WITH_FILES)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        for node_id in DIAMOND_WITH_FILES["nodes"]:
            assert node_id in keys

    def test_keys_are_deterministic(self, tmp_path):
        """Calling _precompute_all_cache_keys twice produces identical keys."""
        sql_files = {
            "models/root.sql": "SELECT 1",
            "models/left.sql": "SELECT * FROM root",
            "models/right.sql": "SELECT * FROM root",
            "models/leaf.sql": "SELECT * FROM left JOIN right",
        }
        write_sql_files(tmp_path, sql_files)
        manifest_path = write_manifest(tmp_path, DIAMOND_WITH_FILES)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys1 = orch._precompute_all_cache_keys(all_exec, False, {})
        keys2 = orch._precompute_all_cache_keys(all_exec, False, {})
        assert keys1 == keys2

    def test_full_refresh_produces_different_keys(self, tmp_path):
        """full_refresh=True produces different keys than full_refresh=False."""
        sql_files = {"models/m1.sql": "SELECT 1"}
        write_sql_files(tmp_path, sql_files)
        manifest_path = write_manifest(tmp_path, SINGLE_MODEL_WITH_FILE)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys_normal = orch._precompute_all_cache_keys(all_exec, False, {})
        keys_refresh = orch._precompute_all_cache_keys(all_exec, True, {})
        assert keys_normal["model.test.m1"] != keys_refresh["model.test.m1"]

    def test_upstream_change_cascades(self, tmp_path):
        """Changing root SQL content changes keys for root and all downstream."""
        sql_files = {
            "models/root.sql": "SELECT 1",
            "models/left.sql": "SELECT * FROM root",
            "models/right.sql": "SELECT * FROM root",
            "models/leaf.sql": "SELECT * FROM left JOIN right",
            "models/independent.sql": "SELECT 42",
        }
        write_sql_files(tmp_path, sql_files)
        manifest_path = write_manifest(tmp_path, DIAMOND_WITH_INDEPENDENT)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys_before = orch._precompute_all_cache_keys(all_exec, False, {})
        # Modify root SQL
        (tmp_path / "models/root.sql").write_text("SELECT 2")
        keys_after = orch._precompute_all_cache_keys(all_exec, False, {})
        # Root and downstream should change
        assert keys_before["model.test.root"] != keys_after["model.test.root"]
        assert keys_before["model.test.left"] != keys_after["model.test.left"]
        assert keys_before["model.test.right"] != keys_after["model.test.right"]
        assert keys_before["model.test.leaf"] != keys_after["model.test.leaf"]
        # Independent node is unaffected
        assert (
            keys_before["model.test.independent"]
            == keys_after["model.test.independent"]
        )

    def test_source_dependencies_handled(self, tmp_path):
        """Nodes depending on sources (outside executable set) still get keys."""
        sql_files = {
            "models/stg.sql": "SELECT * FROM raw.customers",
        }
        write_sql_files(tmp_path, sql_files)
        manifest_data = {
            "nodes": {
                "model.test.stg": {
                    "name": "stg",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["source.test.raw.customers"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/stg.sql",
                },
            },
            "sources": {
                "source.test.raw.customers": {
                    "name": "customers",
                    "resource_type": "source",
                    "fqn": ["test", "raw", "customers"],
                    "relation_name": '"main"."raw"."customers"',
                    "config": {},
                },
            },
        }
        manifest_path = write_manifest(tmp_path, manifest_data)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        # Source is not in executable nodes, but stg should still get a key
        assert "model.test.stg" in keys

    def test_unreadable_file_blocks_key_and_downstream(self, tmp_path):
        """Node with original_file_path but missing file gets no key, nor do dependents."""
        # Write only leaf's file; root's file is declared but missing on disk.
        write_sql_files(tmp_path, {"models/leaf.sql": "SELECT * FROM root"})
        manifest_data = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/root.sql",  # missing on disk
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/leaf.sql",
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest_data)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        # root has no key because its file is unreadable
        assert "model.test.root" not in keys
        # leaf has no key because its upstream (root) has no key
        assert "model.test.leaf" not in keys

    def test_permission_denied_file_blocks_key(self, tmp_path):
        """Node whose source file exists but is unreadable gets no key."""
        write_sql_files(tmp_path, {"models/root.sql": "SELECT 1"})
        # Remove read permission
        root_file = tmp_path / "models/root.sql"
        root_file.chmod(0o000)
        try:
            manifest_data = {
                "nodes": {
                    "model.test.root": {
                        "name": "root",
                        "resource_type": "model",
                        "depends_on": {"nodes": []},
                        "config": {"materialized": "table"},
                        "original_file_path": "models/root.sql",
                    },
                },
                "sources": {},
            }
            manifest_path = write_manifest(tmp_path, manifest_data)
            parser = ManifestParser(manifest_path)
            all_exec = parser.get_executable_nodes()
            orch = _make_precompute_orch(tmp_path, manifest_path)
            keys = orch._precompute_all_cache_keys(all_exec, False, {})
            assert "model.test.root" not in keys
        finally:
            # Restore permissions so tmp_path cleanup can remove the file.
            root_file.chmod(0o644)

    def test_no_original_file_path_still_gets_key(self, tmp_path):
        """Node without original_file_path (e.g. ephemeral placeholder) still gets a key."""
        manifest_data = {
            "nodes": {
                "model.test.m1": {
                    "name": "m1",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    # no original_file_path
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest_data)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        # No original_file_path means no file to read — that's fine,
        # the key just won't incorporate file content.
        assert "model.test.m1" in keys

    def test_excluded_materialization_still_gets_precomputed_key(self, tmp_path):
        """Excluded nodes still get precomputed keys for downstream invalidation."""
        write_sql_files(
            tmp_path,
            {
                "models/inc.sql": "SELECT 1",
                "models/downstream.sql": "SELECT * FROM inc",
            },
        )
        manifest_data = {
            "nodes": {
                "model.test.inc": {
                    "name": "inc",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "incremental"},
                    "original_file_path": "models/inc.sql",
                },
                "model.test.downstream": {
                    "name": "downstream",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.inc"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/downstream.sql",
                },
            },
            "sources": {},
        }
        manifest_path = write_manifest(tmp_path, manifest_data)
        parser = ManifestParser(manifest_path)
        all_exec = parser.get_executable_nodes()
        orch = _make_precompute_orch(tmp_path, manifest_path)
        keys = orch._precompute_all_cache_keys(all_exec, False, {})
        # Both nodes get precomputed keys — exclusion only affects execution
        assert "model.test.inc" in keys
        assert "model.test.downstream" in keys
# =============================================================================
# TestCachingWithIsolatedSelection
# =============================================================================
class TestCachingWithIsolatedSelection:
    """Integration tests for caching when select= excludes upstream nodes.
    This is the core bug fix: previously, selecting a downstream node
    without its upstream dependencies (e.g. ``select="leaf"`` instead
    of ``select="+leaf"``) silently disabled caching because upstream
    cache keys were not available. With pre-computation, cache keys
    for ALL executable nodes are computed upfront from manifest
    metadata, so caching works regardless of the select= filter.
    """

    # Three-node linear chain (root -> mid -> leaf), all table models,
    # each backed by a SQL file so file-content hashing participates
    # in cache-key computation.
    CHAIN_WITH_FILES = {
        "nodes": {
            "model.test.root": {
                "name": "root",
                "resource_type": "model",
                "depends_on": {"nodes": []},
                "config": {"materialized": "table"},
                "original_file_path": "models/root.sql",
            },
            "model.test.mid": {
                "name": "mid",
                "resource_type": "model",
                "depends_on": {"nodes": ["model.test.root"]},
                "config": {"materialized": "table"},
                "original_file_path": "models/mid.sql",
            },
            "model.test.leaf": {
                "name": "leaf",
                "resource_type": "model",
                "depends_on": {"nodes": ["model.test.mid"]},
                "config": {"materialized": "table"},
                "original_file_path": "models/leaf.sql",
            },
        },
        "sources": {},
    }

    # SQL bodies matching CHAIN_WITH_FILES' original_file_path entries.
    SQL_FILES = {
        "models/root.sql": "SELECT 1",
        "models/mid.sql": "SELECT * FROM root",
        "models/leaf.sql": "SELECT * FROM mid",
    }

    def test_isolated_node_gets_cached_on_second_run(self, cache_orch):
        """Selecting only 'leaf' (no upstream) still enables caching."""
        orch, executor, _ = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_selected_twice():
            # Mock resolve_selection to return only the leaf node
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r1 = orch.run_build(select="leaf")
                r2 = orch.run_build(select="leaf")
            return r1, r2

        r1, r2 = run_selected_twice()
        # Only leaf should be in results (root and mid are not selected)
        assert "model.test.leaf" in r1
        assert "model.test.root" not in r1
        assert "model.test.mid" not in r1
        # First run executes, second run is a cache hit
        assert r1["model.test.leaf"]["status"] == "success"
        assert r2["model.test.leaf"]["status"] == "cached"
        assert executor.execute_node.call_count == 1

    def test_isolated_mid_node_gets_cached(self, cache_orch):
        """Selecting only 'mid' (upstream root not selected) still enables caching."""
        orch, executor, _ = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_mid_twice():
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.mid"},
            ):
                r1 = orch.run_build(select="mid")
                r2 = orch.run_build(select="mid")
            return r1, r2

        r1, r2 = run_mid_twice()
        assert r1["model.test.mid"]["status"] == "success"
        assert r2["model.test.mid"]["status"] == "cached"
        assert executor.execute_node.call_count == 1

    def test_upstream_file_change_invalidates_isolated_node(self, cache_orch):
        """Changing an unselected upstream's SQL file invalidates the selected node."""
        orch, executor, project_dir = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_then_change_upstream():
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r1 = orch.run_build(select="leaf")
                # Change root SQL — leaf's cache key should change because
                # root's key cascades through mid to leaf via pre-computation
                (project_dir / "models/root.sql").write_text("SELECT 2")
                r2 = orch.run_build(select="leaf")
            return r1, r2

        r1, r2 = run_then_change_upstream()
        assert r1["model.test.leaf"]["status"] == "success"
        # Leaf re-executes because upstream root changed
        assert r2["model.test.leaf"]["status"] == "success"
        assert executor.execute_node.call_count == 2

    def test_selective_run_does_not_poison_full_build_cache(self, cache_orch):
        """Selective run's cache entry must not be reused by a subsequent full build.
        Scenario:
        1. Full build → all nodes execute, cache populated.
        2. root.sql changes.
        3. ``select="leaf"`` → leaf re-executes (cache miss due to new
           upstream key) against OLD root/mid warehouse tables.
        4. Full build → root and mid re-execute with new data; leaf must
           also re-execute because its prior result was computed against
           stale upstream data.
        Before this fix, step 4 would cache-hit on leaf using the
        result from step 3 (computed against old warehouse data).
        """
        orch, executor, project_dir = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_scenario():
            # Step 1: full build → populate cache
            r1 = orch.run_build()
            # Step 2: change root SQL
            (project_dir / "models/root.sql").write_text("SELECT 2")
            # Step 3: selective run → only leaf
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r2 = orch.run_build(select="leaf")
            # Step 4: full build → root/mid rebuild, leaf must NOT cache-hit
            r3 = orch.run_build()
            return r1, r2, r3

        r1, r2, r3 = run_scenario()
        # Step 1: all succeed
        for nid in self.CHAIN_WITH_FILES["nodes"]:
            assert r1[nid]["status"] == "success"
        # Step 3: leaf re-executes (cache miss — upstream key changed)
        assert r2["model.test.leaf"]["status"] == "success"
        # Step 4: root/mid re-execute (new file content); leaf must also
        # re-execute (NOT cached) because its step-3 result used stale data.
        assert r3["model.test.root"]["status"] == "success"
        assert r3["model.test.mid"]["status"] == "success"
        assert r3["model.test.leaf"]["status"] == "success"

    def test_upstream_only_rebuild_invalidates_selective_cache(self, cache_orch):
        """Rebuilding upstream between two selective runs invalidates downstream cache.
        Scenario:
        1. root.sql changes.
        2. ``select=leaf`` → leaf executes against OLD root/mid tables.
        3. ``select="root mid"`` → root and mid rebuild in the warehouse.
        4. ``select=leaf`` → must NOT cache-hit from step 2 because the
           upstream warehouse data changed.
        This works because the execution state file tracks when each
        node was last executed with its current file state. After
        step 3, the state records root and mid as current, so step 4
        uses an unsalted key (different from step 2's salted key).
        """
        orch, executor, project_dir = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_scenario():
            # Step 1: change root SQL
            (project_dir / "models/root.sql").write_text("SELECT 2")
            # Step 2: selective run → only leaf
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r1 = orch.run_build(select="leaf")
            # Step 3: selective run → only root and mid (rebuild upstream)
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.root", "model.test.mid"},
            ):
                r2 = orch.run_build(select="root mid")
            # Step 4: selective run → only leaf again
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r3 = orch.run_build(select="leaf")
            return r1, r2, r3

        r1, r2, r3 = run_scenario()
        # Step 2: leaf executes (cache miss)
        assert r1["model.test.leaf"]["status"] == "success"
        # Step 3: root and mid execute
        assert r2["model.test.root"]["status"] == "success"
        assert r2["model.test.mid"]["status"] == "success"
        # Step 4: leaf must re-execute (NOT cache-hit from step 2)
        assert r3["model.test.leaf"]["status"] == "success"

    def test_selective_execution_records_salted_key_in_state(self, cache_orch):
        """Execution state must record the actual (possibly salted) key.
        Scenario (A -> B -> C -> D chain):
        1. Run full build so all nodes have execution state.
        2. Change A's SQL.
        3. ``select=C`` → C executes with salted upstream keys because
           A and B weren't re-executed after the file change.
        4. ``select=D`` → D should NOT cache-hit because C ran against
           stale upstream data. If the execution state incorrectly
           recorded C's unsalted precomputed key (instead of the actual
           salted key used in step 3), D would see the state as
           "current" and incorrectly reuse a stale cached result.
        """
        four_node_chain = {
            "nodes": {
                "model.test.a": {
                    "name": "a",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/a.sql",
                },
                "model.test.b": {
                    "name": "b",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.a"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/b.sql",
                },
                "model.test.c": {
                    "name": "c",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.b"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/c.sql",
                },
                "model.test.d": {
                    "name": "d",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.c"]},
                    "config": {"materialized": "table"},
                    "original_file_path": "models/d.sql",
                },
            },
            "sources": {},
        }
        sql_files = {
            "models/a.sql": "SELECT 1",
            "models/b.sql": "SELECT * FROM a",
            "models/c.sql": "SELECT * FROM b",
            "models/d.sql": "SELECT * FROM c",
        }
        orch, executor, project_dir = cache_orch(four_node_chain, sql_files)

        @flow
        def run_scenario():
            # Step 1: full build → populates execution state for all nodes
            r1 = orch.run_build()
            # Step 2: change A's SQL
            (project_dir / "models/a.sql").write_text("SELECT 2")
            # Step 3: selective run → only C
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.c"},
            ):
                r2 = orch.run_build(select="c")
            # Step 4: selective run → only D
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.d"},
            ):
                r3 = orch.run_build(select="d")
            return r1, r2, r3

        r1, r2, r3 = run_scenario()
        # Step 1: all succeed
        assert r1["model.test.d"]["status"] == "success"
        # Step 3: C executes (salted upstream keys since A changed)
        assert r2["model.test.c"]["status"] == "success"
        # Step 4: D must re-execute, NOT cache-hit, because C was
        # built against stale upstream data.
        assert r3["model.test.d"]["status"] == "success"

    def test_failed_node_key_removed_during_execution(self, cache_orch):
        """When a node fails, its key is removed so downstream caching is disabled."""
        orch, executor, _ = cache_orch(
            self.CHAIN_WITH_FILES,
            self.SQL_FILES,
            executor_kwargs={"fail_nodes": {"model.test.mid"}},
        )

        @flow
        def run_with_failure():
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.mid", "model.test.leaf"},
            ):
                return orch.run_build(select="mid leaf")

        results = run_with_failure()
        assert results["model.test.mid"]["status"] == "error"
        # leaf is skipped due to upstream failure
        assert results["model.test.leaf"]["status"] == "skipped"

    def test_writable_filesystem_execution_state(self, cache_orch, tmp_path):
        """Execution state persists through WritableFileSystem-backed storage."""
        from prefect.filesystems import LocalFileSystem

        fs_storage = LocalFileSystem(basepath=str(tmp_path / "fs_keys"))
        (tmp_path / "fs_keys").mkdir()
        orch, executor, project_dir = cache_orch(
            self.CHAIN_WITH_FILES,
            self.SQL_FILES,
            cache_key_storage=fs_storage,
        )

        @flow
        def run_scenario():
            # Full build → populates execution state via WritableFileSystem
            r1 = orch.run_build()
            # Selective run → only leaf; upstream state should be loaded
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r2 = orch.run_build(select="leaf")
                # Second selective run → should cache-hit
                r3 = orch.run_build(select="leaf")
            return r1, r2, r3

        r1, r2, r3 = run_scenario()
        assert r1["model.test.leaf"]["status"] == "success"
        # After full build, execution state matches precomputed keys,
        # so selective runs use unsalted keys and cache-hit from the
        # full build result.
        assert r2["model.test.leaf"]["status"] == "cached"
        assert r3["model.test.leaf"]["status"] == "cached"

    def test_failure_clears_execution_state(self, cache_orch):
        """A node that previously succeeded then fails has its state cleared.
        If a node's execution state survives a failure, downstream
        selective runs could treat it as "current" and reuse stale
        cached results built against pre-failure warehouse data.
        """
        orch, executor, project_dir = cache_orch(self.CHAIN_WITH_FILES, self.SQL_FILES)

        @flow
        def run_scenario():
            # Step 1: full build → all succeed, execution state populated
            r1 = orch.run_build()
            # Step 2: change mid's SQL so the cache key changes,
            # then make mid fail on re-execution.
            (project_dir / "models/mid.sql").write_text("SELECT 999 FROM root")
            from prefect_dbt.core._executor import ExecutionResult

            def _fail_mid(node, command, **kwargs):
                if node.unique_id == "model.test.mid":
                    return ExecutionResult(
                        success=False,
                        node_ids=[node.unique_id],
                        error=RuntimeError("mid failed"),
                    )
                return ExecutionResult(success=True, node_ids=[node.unique_id])

            executor.execute_node.side_effect = _fail_mid
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_scenario()
        assert r1["model.test.mid"]["status"] == "success"
        assert r2["model.test.mid"]["status"] == "error"
        # Verify execution state was cleared for the failed node
        state = orch._load_execution_state()
        assert "model.test.mid" not in state
        # root still succeeded in r2, so its state should remain
        assert "model.test.root" in state

    def test_no_key_storage_falls_back_to_result_storage(self, cache_orch, tmp_path):
        """Execution state persists via result_storage when key_storage is None.
        By default Prefect co-locates cache metadata with results, so
        execution state should fall back to result_storage when no explicit
        key_storage is configured.
        """
        result_dir = tmp_path / "fallback_results"
        result_dir.mkdir()
        orch, executor, project_dir = cache_orch(
            self.CHAIN_WITH_FILES,
            self.SQL_FILES,
            result_storage=result_dir,
            cache_key_storage=None,
        )

        @flow
        def run_scenario():
            r1 = orch.run_build()
            with patch(
                "prefect_dbt.core._orchestrator.resolve_selection",
                return_value={"model.test.leaf"},
            ):
                r2 = orch.run_build(select="leaf")
                r3 = orch.run_build(select="leaf")
            return r1, r2, r3

        r1, r2, r3 = run_scenario()
        assert r1["model.test.leaf"]["status"] == "success"
        # Execution state was persisted to result_storage, so the
        # selective runs should still benefit from the guard.
        assert r2["model.test.leaf"]["status"] == "cached"
        assert r3["model.test.leaf"]["status"] == "cached"
        # Execution state file should live in result_storage
        assert (result_dir / ".execution_state.json").exists()

    def test_block_slug_execution_state(self, cache_orch, tmp_path):
        """Execution state persists through block-slug key_storage.
        When key_storage is a string like "local-file-system/my-block",
        Prefect resolves it as a block slug rather than a filesystem path.
        The execution state methods must resolve it the same way instead of
        treating the string as a local directory path.
        """
        from prefect.filesystems import LocalFileSystem

        fs_storage = LocalFileSystem(basepath=str(tmp_path / "slug_keys"))
        (tmp_path / "slug_keys").mkdir()
        # Simulate a block-slug string by patching _resolve_storage to return
        # the block. We can't use a real block slug without a running server,
        # so we verify the resolution logic directly.
        orch, executor, project_dir = cache_orch(
            self.CHAIN_WITH_FILES,
            self.SQL_FILES,
            cache_key_storage="local-file-system/my-keys",
        )
        # Patch resolve_result_storage so the slug resolves to our local fs
        with patch(
            "prefect.results.resolve_result_storage",
            return_value=fs_storage,
        ) as mock_resolve:

            @flow
            def run_scenario():
                r1 = orch.run_build()
                with patch(
                    "prefect_dbt.core._orchestrator.resolve_selection",
                    return_value={"model.test.leaf"},
                ):
                    r2 = orch.run_build(select="leaf")
                    r3 = orch.run_build(select="leaf")
                return r1, r2, r3

            r1, r2, r3 = run_scenario()
            assert r1["model.test.leaf"]["status"] == "success"
            assert r2["model.test.leaf"]["status"] == "cached"
            assert r3["model.test.leaf"]["status"] == "cached"
            # Verify the block slug was resolved via resolve_result_storage
            mock_resolve.assert_called()
# Shared fixture data: root (table) -> inc (incremental) -> downstream (table).
# Used by TestCacheExcludeMaterializations to exercise materialization-based
# cache exclusion.
INCREMENTAL_CHAIN = {
    "nodes": {
        "model.test.root": {
            "name": "root",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
            "original_file_path": "models/root.sql",
        },
        "model.test.inc": {
            "name": "inc",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.root"]},
            "config": {"materialized": "incremental"},
            "original_file_path": "models/inc.sql",
        },
        "model.test.downstream": {
            "name": "downstream",
            "resource_type": "model",
            "depends_on": {"nodes": ["model.test.inc"]},
            "config": {"materialized": "table"},
            "original_file_path": "models/downstream.sql",
        },
    },
    "sources": {},
}

# SQL bodies matching INCREMENTAL_CHAIN's original_file_path entries.
INCREMENTAL_CHAIN_SQL = {
    "models/root.sql": "SELECT 1 AS id",
    "models/inc.sql": "SELECT * FROM root WHERE updated_at > '2024-01-01'",
    "models/downstream.sql": "SELECT * FROM inc",
}
class TestCacheExcludeMaterializations:
    """Tests for materialization-based cache exclusion."""

    def test_incremental_not_cached_on_second_run(self, cache_orch):
        """Incremental model re-executes every run by default."""
        orch, executor, _ = cache_orch(INCREMENTAL_CHAIN, INCREMENTAL_CHAIN_SQL)

        @flow
        def run_twice():
            r1 = orch.run_build()
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_twice()
        assert r1["model.test.inc"]["status"] == "success"
        # Incremental model must NOT be cached — it re-executes
        assert r2["model.test.inc"]["status"] == "success"

    def test_table_model_still_cached(self, cache_orch):
        """Table models are still cached alongside excluded incrementals."""
        orch, executor, _ = cache_orch(INCREMENTAL_CHAIN, INCREMENTAL_CHAIN_SQL)

        @flow
        def run_twice():
            r1 = orch.run_build()
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_twice()
        assert r1["model.test.root"]["status"] == "success"
        assert r2["model.test.root"]["status"] == "cached"

    def test_downstream_of_incremental_still_cached(self, cache_orch):
        """Downstream table benefits from caching even with excluded upstream.
        The incremental's precomputed cache key still flows into the
        downstream node's upstream_cache_keys, so if SQL is unchanged
        the downstream cache key matches.
        """
        orch, executor, _ = cache_orch(INCREMENTAL_CHAIN, INCREMENTAL_CHAIN_SQL)

        @flow
        def run_twice():
            r1 = orch.run_build()
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_twice()
        assert r1["model.test.downstream"]["status"] == "success"
        assert r2["model.test.downstream"]["status"] == "cached"

    def test_incremental_sql_change_invalidates_downstream(self, cache_orch):
        """Changing incremental SQL invalidates downstream cache."""
        orch, executor, project_dir = cache_orch(
            INCREMENTAL_CHAIN, INCREMENTAL_CHAIN_SQL
        )

        @flow
        def run_then_change():
            r1 = orch.run_build()
            (project_dir / "models" / "inc.sql").write_text(
                "SELECT *, 'new' AS col FROM root"
            )
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_then_change()
        assert r1["model.test.downstream"]["status"] == "success"
        # Downstream must re-execute because upstream incremental SQL changed
        assert r2["model.test.downstream"]["status"] == "success"

    def test_incremental_cached_when_exclusion_overridden(self, cache_orch, tmp_path):
        """Empty exclude_materializations caches incrementals normally."""
        result_dir = tmp_path / "override_results"
        result_dir.mkdir()
        key_dir = tmp_path / "override_keys"
        key_dir.mkdir()
        orch, executor, _ = cache_orch(
            INCREMENTAL_CHAIN,
            INCREMENTAL_CHAIN_SQL,
            cache=CacheConfig(
                exclude_materializations=frozenset(),
                result_storage=result_dir,
                key_storage=str(key_dir),
            ),
        )

        @flow
        def run_twice():
            r1 = orch.run_build()
            r2 = orch.run_build()
            return r1, r2

        r1, r2 = run_twice()
        assert r1["model.test.inc"]["status"] == "success"
        # With empty exclusion set, incremental IS cached
        assert r2["model.test.inc"]["status"] == "cached"
class TestIsBlockSlug:
    """Unit tests for _is_block_slug detection."""

    def test_simple_slug(self):
        # A single "type/name" pair is treated as a block slug.
        assert PrefectDbtOrchestrator._is_block_slug("local-file-system/my-block")

    def test_path_not_slug(self):
        # Absolute paths are filesystem paths, not slugs.
        assert not PrefectDbtOrchestrator._is_block_slug("/tmp/keys")

    def test_nested_path_not_slug(self):
        assert not PrefectDbtOrchestrator._is_block_slug("/tmp/a/b/c")

    def test_relative_path_not_slug(self):
        # No slash at all → plain relative directory name.
        assert not PrefectDbtOrchestrator._is_block_slug("relative_dir")

    def test_deeper_slash_path(self):
        # More than one slash separator disqualifies a slug.
        assert not PrefectDbtOrchestrator._is_block_slug("a/b/c")
class TestCacheConfig:
    """Unit tests for the CacheConfig settings object."""

    def test_defaults(self):
        """CacheConfig() has sensible defaults."""
        cfg = CacheConfig()
        assert cfg.expiration is None
        assert cfg.result_storage is None
        assert cfg.key_storage is None
        assert cfg.use_source_freshness_expiration is False
        # Incrementals are excluded from caching by default; tests and
        # snapshots are excluded by resource type.
        assert cfg.exclude_materializations == frozenset({"incremental"})
        assert NodeType.Test in cfg.exclude_resource_types
        assert NodeType.Snapshot in cfg.exclude_resource_types

    def test_custom_exclude_materializations(self):
        """Users can override excluded materializations."""
        cfg = CacheConfig(
            exclude_materializations=frozenset({"incremental", "snapshot"})
        )
        assert cfg.exclude_materializations == frozenset({"incremental", "snapshot"})

    def test_empty_exclusions_cache_everything(self):
        """Empty sets mean nothing is excluded."""
        cfg = CacheConfig(
            exclude_materializations=frozenset(),
            exclude_resource_types=frozenset(),
        )
        assert cfg.exclude_materializations == frozenset()
        assert cfg.exclude_resource_types == frozenset()

    def test_frozen(self):
        """CacheConfig is immutable."""
        cfg = CacheConfig()
        # Frozen pydantic/dataclass models raise on attribute assignment.
        with pytest.raises(AttributeError):
            cfg.expiration = timedelta(hours=1)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_cache.py",
"license": "Apache License 2.0",
"lines": 1552,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/deployment/test_yamllint.py | import shutil
from pathlib import Path
import pytest
from yamllint import linter
from yamllint.config import YamlLintConfig
import prefect
from prefect.deployments.base import (
configure_project_by_recipe,
create_default_prefect_yaml,
initialize_project,
)
from prefect.utilities.filesystem import tmpchdir
TEST_PROJECTS_DIR = prefect.__development_base_path__ / "tests" / "test-projects"
RECIPES_DIR = (
prefect.__development_base_path__ / "src" / "prefect" / "deployments" / "recipes"
)
TEMPLATES_DIR = (
prefect.__development_base_path__ / "src" / "prefect" / "deployments" / "templates"
)
# yamllint configuration for static template and recipe files: extends default
# rules with relaxed indentation to accept both indented and non-indented
# sequence styles, since Python's yaml.dump() does not indent list items by
# default.
YAMLLINT_CONFIG = YamlLintConfig(
"""\
extends: default
rules:
indentation:
indent-sequences: whatever
"""
)
# yamllint configuration for generated files: same as above but with
# line-length disabled, because generated files may contain user-provided
# values (e.g. directory paths) that can exceed 80 characters.
YAMLLINT_GENERATED_CONFIG = YamlLintConfig(
"""\
extends: default
rules:
indentation:
indent-sequences: whatever
line-length: disable
"""
)
def _assert_yamllint_passes(
    content: str,
    filepath: str = "<string>",
    config: YamlLintConfig = YAMLLINT_CONFIG,
) -> None:
    """Assert that the given YAML content passes yamllint with no errors
    or warnings."""
    problems = list(linter.run(content, config))
    if not problems:
        return
    # One indented line per problem so the failure message is scannable.
    problem_lines = [
        f" line {p.line}: [{p.level}] {p.message} ({p.rule})" for p in problems
    ]
    details = "\n".join(problem_lines)
    pytest.fail(f"yamllint found issues in {filepath}:\n{details}")
@pytest.fixture(autouse=True)
def project_dir(tmp_path):
    """Copy the sample projects into a fresh temp dir and chdir into it.
    Autouse so every test in this module runs inside an isolated copy of
    the test projects with a private .prefect directory.
    """
    with tmpchdir(tmp_path):
        shutil.copytree(TEST_PROJECTS_DIR, tmp_path, dirs_exist_ok=True)
        # 0o700: keep the local Prefect state directory private to the owner.
        (tmp_path / ".prefect").mkdir(exist_ok=True, mode=0o0700)
        yield tmp_path
class TestStaticYamlFilesPassYamllint:
    """Validate that all static YAML files (recipes and templates) conform
    to yamllint standards."""

    # Parametrize over every recipe directory shipped with the package.
    @pytest.mark.parametrize(
        "recipe",
        [d.name for d in sorted(RECIPES_DIR.iterdir()) if d.is_dir()],
    )
    def test_recipe_yaml_passes_yamllint(self, recipe):
        yaml_path = RECIPES_DIR / recipe / "prefect.yaml"
        content = yaml_path.read_text()
        _assert_yamllint_passes(content, filepath=str(yaml_path))

    @pytest.mark.parametrize(
        "template",
        [f.name for f in sorted(TEMPLATES_DIR.glob("*.yaml"))],
    )
    def test_template_yaml_passes_yamllint(self, template):
        yaml_path = TEMPLATES_DIR / template
        content = yaml_path.read_text()
        _assert_yamllint_passes(content, filepath=str(yaml_path))
class TestGeneratedYamlPassesYamllint:
    """Validate that generated prefect.yaml files conform to yamllint
    standards.
    Uses a relaxed line-length config because generated files contain
    user-provided values (e.g. directory paths from the current working
    directory) that may exceed 80 characters depending on the environment.
    """

    def test_default_generated_yaml_passes_yamllint(self):
        contents = configure_project_by_recipe(
            "local", directory="/tmp/test", name="test"
        )
        create_default_prefect_yaml(".", name="test-project", contents=contents)
        # The autouse project_dir fixture has chdir'd us into a temp dir,
        # so the generated file lands there.
        generated = Path("prefect.yaml").read_text()
        _assert_yamllint_passes(
            generated,
            filepath="generated prefect.yaml (local)",
            config=YAMLLINT_GENERATED_CONFIG,
        )

    @pytest.mark.parametrize(
        "recipe",
        [d.name for d in sorted(RECIPES_DIR.iterdir()) if d.is_dir()],
    )
    def test_initialized_project_yaml_passes_yamllint(self, recipe):
        files = initialize_project(name="test-project", recipe=recipe)
        # Some recipes may not emit a prefect.yaml; only lint when present.
        if "prefect.yaml" in files:
            generated = Path("prefect.yaml").read_text()
            _assert_yamllint_passes(
                generated,
                filepath=f"generated prefect.yaml ({recipe})",
                config=YAMLLINT_GENERATED_CONFIG,
            )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/deployment/test_yamllint.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_suspend.py | import asyncio
import os
import subprocess
from typing import Any
import pytest
from prefect import get_client
from prefect.states import StateType, Suspended
from prefect_kubernetes_integration_tests.utils import display, k8s, prefect_core
# Base job variables for flow runs created by this integration test.
DEFAULT_JOB_VARIABLES: dict[str, Any] = {
    "image": "prefecthq/prefect:3.2.11-python3.12",
}
# NOTE(review): 172.17.0.1 is presumably the Docker bridge gateway so jobs
# inside the kind cluster can reach the API server running on the CI host —
# confirm against the CI network setup.
if os.environ.get("CI", False):
    DEFAULT_JOB_VARIABLES["env"] = {"PREFECT_API_URL": "http://172.17.0.1:4200/api"}

# Remote flow source and entrypoint: a long-sleeping flow used so the run
# stays RUNNING long enough to be suspended.
DEFAULT_FLOW_SOURCE = "https://gist.github.com/772d095672484b76da40a4e6158187f0.git"
DEFAULT_FLOW_ENTRYPOINT = "sleeping.py:sleepy"
@pytest.mark.usefixtures("kind_cluster")
async def test_suspended_flow_not_crashed_by_observer(
    work_pool_name: str,
):
    """Test that suspended flow runs are not marked as crashed when their K8s job fails.
    This verifies the observer correctly skips paused/suspended flow runs
    instead of incorrectly marking them as crashed.
    """
    # backoff_limit=1 so the job fails quickly once its pods are evicted.
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="suspend-no-crash",
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES | {"backoff_limit": 1},
        parameters={"n": 300},
        flow_run_name="suspend-no-crash-test",
    )
    display.print_flow_run_created(flow_run)
    # Run a real worker as a subprocess so the flow run actually starts.
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.RUNNING, timeout=120
            )
            # Suspend the running flow; force=True bypasses orchestration rules.
            async with get_client() as client:
                await client.set_flow_run_state(
                    flow_run.id, Suspended(timeout_seconds=300), force=True
                )
            state_type, _ = prefect_core.get_flow_run_state(flow_run.id)
            assert state_type == StateType.PAUSED, (
                f"Expected flow run to be PAUSED after suspend, got {state_type}"
            )
            # Evict the job's pods repeatedly until the job completes/fails,
            # simulating infrastructure failure while the run is suspended.
            job = k8s.get_job_for_flow_run(flow_run.name, timeout=30)
            while job.status and job.status.completion_time is None:
                try:
                    pod_name = k8s.wait_for_pod(job.metadata.name, timeout=15)
                except TimeoutError:
                    # No new pod appeared — the job has stopped retrying.
                    break
                k8s.evict_pod(pod_name)
                await asyncio.sleep(1)
            # Give the observer time to (incorrectly) react to the job failure.
            await asyncio.sleep(15)
            state_type, message = prefect_core.get_flow_run_state(flow_run.id)
            assert state_type == StateType.PAUSED, (
                f"Expected flow run to remain PAUSED after K8s job failure, "
                f"but got {state_type}: {message}"
            )
            # Resuming a suspended run reschedules it for pickup by a worker.
            async with get_client() as client:
                await client.resume_flow_run(flow_run.id)
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.SCHEDULED, timeout=30
            )
        finally:
            worker_process.terminate()
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                display.print_flow_run_result(updated_flow_run)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_suspend.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:ui-v2/e2e/helpers/run_flows.py | import argparse
import asyncio
import json
import sys
from uuid import uuid4
from prefect import flow, task
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import (
FlowRunFilter,
FlowRunFilterId,
FlowRunFilterParentTaskRunId,
)
async def get_task_runs_for_flow(flow_run_id, expected_count=1):
    """Query task runs for a flow, retrying until they are persisted.
    After a flow completes, task run data may not be immediately visible
    in the API due to async persistence. This polls until the expected
    number of task runs appear.
    """
    max_attempts = 20
    async with get_client() as client:
        task_runs = []
        attempt = 0
        while attempt < max_attempts:
            run_filter = FlowRunFilter(id=FlowRunFilterId(any_=[flow_run_id]))
            task_runs = await client.read_task_runs(flow_run_filter=run_filter)
            if len(task_runs) >= expected_count:
                break
            attempt += 1
            await asyncio.sleep(0.5)
        # May return fewer than expected_count if persistence never catches up.
        return task_runs
def run_simple_task(prefix):
    """Run a one-task flow and return its run IDs/names as a dict."""
    tag = uuid4().hex[:8]
    task_name = f"{prefix}task-{tag}"
    flow_name = f"{prefix}flow-{tag}"

    @task(task_run_name=task_name)
    def simple_task():
        return "done"

    @flow(flow_run_name=flow_name)
    def simple_flow():
        simple_task()

    final_state = simple_flow(return_state=True)
    run_id = final_state.state_details.flow_run_id
    # Poll until the task run is persisted so the IDs below are complete.
    task_runs = asyncio.run(get_task_runs_for_flow(run_id))
    return {
        "flow_run_id": str(run_id),
        "flow_run_name": flow_name,
        "task_run_ids": [str(tr.id) for tr in task_runs],
        "task_run_names": [tr.name for tr in task_runs],
    }
def run_parent_child(prefix):
    """Run a parent flow that triggers a child flow via a task.
    Returns a dict with the parent/child flow run IDs and names plus the
    connecting task run IDs, for assertion in E2E tests.
    """
    suffix = uuid4().hex[:8]

    @flow(flow_run_name=f"{prefix}child-flow-{suffix}")
    def child_flow():
        return "child done"

    # Calling a flow from inside a task creates a child flow run whose
    # parent_task_run_id points at this task run.
    @task(task_run_name=f"{prefix}trigger-task-{suffix}")
    def trigger_task():
        return child_flow()

    @flow(flow_run_name=f"{prefix}parent-flow-{suffix}")
    def parent_flow():
        trigger_task()

    state = parent_flow(return_state=True)
    parent_flow_run_id = state.state_details.flow_run_id

    async def get_data():
        async with get_client() as client:
            # Wait for task runs to be persisted before querying child flows
            task_runs = []
            for _ in range(20):
                task_runs = await client.read_task_runs(
                    flow_run_filter=FlowRunFilter(
                        id=FlowRunFilterId(any_=[parent_flow_run_id])
                    )
                )
                if task_runs:
                    break
                await asyncio.sleep(0.5)
            # Child flow runs are linked via their parent task run IDs;
            # skip the query entirely if no task runs ever appeared.
            flow_runs = (
                await client.read_flow_runs(
                    flow_run_filter=FlowRunFilter(
                        parent_task_run_id=FlowRunFilterParentTaskRunId(
                            any_=[tr.id for tr in task_runs]
                        )
                    )
                )
                if task_runs
                else []
            )
            return task_runs, flow_runs

    task_runs, child_flow_runs = asyncio.run(get_data())
    child_flow_run = child_flow_runs[0] if child_flow_runs else None
    return {
        "parent_flow_run_id": str(parent_flow_run_id),
        "parent_flow_run_name": f"{prefix}parent-flow-{suffix}",
        "child_flow_run_id": str(child_flow_run.id) if child_flow_run else None,
        "child_flow_run_name": (child_flow_run.name if child_flow_run else None),
        "task_run_ids": [str(tr.id) for tr in task_runs],
        "task_run_names": [tr.name for tr in task_runs],
    }
def run_flow_with_tasks(prefix):
    """Run a flow containing two tasks and return its run IDs/names."""
    tag = uuid4().hex[:8]
    name_a = f"{prefix}task-a-{tag}"
    name_b = f"{prefix}task-b-{tag}"
    flow_name = f"{prefix}graph-flow-{tag}"

    @task(task_run_name=name_a)
    def task_a():
        return "a"

    @task(task_run_name=name_b)
    def task_b():
        return "b"

    @flow(flow_run_name=flow_name)
    def graph_flow():
        task_a()
        task_b()

    final_state = graph_flow(return_state=True)
    run_id = final_state.state_details.flow_run_id
    # Two tasks ran, so wait until both task runs are visible in the API.
    task_runs = asyncio.run(get_task_runs_for_flow(run_id, expected_count=2))
    return {
        "flow_run_id": str(run_id),
        "flow_run_name": flow_name,
        "task_run_ids": [str(tr.id) for tr in task_runs],
        "task_run_names": [tr.name for tr in task_runs],
    }
# CLI scenario name -> runner function; each runner takes a name prefix
# and returns a JSON-serializable dict of run IDs/names.
SCENARIOS = {
    "simple-task": run_simple_task,
    "parent-child": run_parent_child,
    "flow-with-tasks": run_flow_with_tasks,
}
def main():
    """CLI entry point: run one scenario and print its result as JSON on stdout."""
    parser = argparse.ArgumentParser(
        description="Run Prefect flow scenarios for E2E tests"
    )
    parser.add_argument(
        "--scenario",
        required=True,
        choices=SCENARIOS.keys(),
        help="Scenario to run",
    )
    parser.add_argument(
        "--prefix",
        required=True,
        help="Prefix for run names (for test isolation)",
    )
    opts = parser.parse_args()

    runner = SCENARIOS[opts.scenario]
    try:
        output = runner(opts.prefix)
    except Exception as e:
        # Surface the failure on stderr and a non-zero exit code for the harness.
        print(f"Error running scenario '{opts.scenario}': {e}", file=sys.stderr)
        sys.exit(1)

    print(json.dumps(output))
# Script entry point: python run_flows.py --scenario <name> --prefix <prefix>
if __name__ == "__main__":
    main()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "ui-v2/e2e/helpers/run_flows.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/_server_utils.py | """Shared server utilities used by both typer and cyclopts CLI implementations."""
from __future__ import annotations
import ipaddress
import os
import shlex
import subprocess
import sys
import textwrap
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Callable
import uvicorn
import prefect
import prefect.settings
from prefect.cli._cloud_utils import prompt_select_from_list
from prefect.cli._prompts import prompt
from prefect.logging import get_logger
from prefect.settings import (
PREFECT_API_URL,
PREFECT_HOME,
PREFECT_SERVER_API_BASE_PATH,
Profile,
get_current_settings,
load_current_profile,
load_profiles,
save_profiles,
update_current_profile,
)
from prefect.settings.context import temporary_settings
if TYPE_CHECKING:
from rich.console import Console
logger = get_logger(__name__)
SERVER_PID_FILE_NAME = "server.pid"
SERVICES_PID_FILE = Path(PREFECT_HOME.value()) / "services.pid"
def _format_host_for_url(host: str) -> str:
"""Format a host for use in a URL, adding brackets for IPv6 addresses."""
try:
ip = ipaddress.ip_address(host)
if ip.version == 6:
return f"[{host}]"
except ValueError:
pass # not an IP address (e.g., hostname)
return host
def generate_welcome_blurb(base_url: str, ui_enabled: bool) -> str:
    """Build the multi-line welcome banner printed when the server starts.

    Args:
        base_url: The root URL the API server is listening on.
        ui_enabled: Whether the dashboard UI is enabled via settings.

    Returns:
        Banner text with API/docs URLs and a dashboard status message.
    """
    # Respect a custom API base path if configured; default to /api.
    if PREFECT_SERVER_API_BASE_PATH:
        suffix = PREFECT_SERVER_API_BASE_PATH.value()
    else:
        suffix = "/api"
    blurb = textwrap.dedent(
        r"""
         ___ ___ ___ ___ ___ ___ _____
        | _ \ _ \ __| __| __/ __|_ _|
        | _/ / _|| _|| _| (__ | |
        |_| |_|_\___|_| |___\___| |_|

        Configure Prefect to communicate with the server with:

            prefect config set PREFECT_API_URL={api_url}

        View the API reference documentation at {docs_url}
        """
    ).format(api_url=base_url + suffix, docs_url=base_url + "/docs")
    visit_dashboard = textwrap.dedent(
        f"""
        Check out the dashboard at {base_url}
        """
    )
    dashboard_not_built = textwrap.dedent(
        """
        The dashboard is not built. It looks like you're on a development version.
        See `prefect dev` for development commands.
        """
    )
    dashboard_disabled = textwrap.dedent(
        """
        The dashboard is disabled. Set `PREFECT_UI_ENABLED=1` to re-enable it.
        """
    )
    # Pick exactly one dashboard status line: missing build beats disabled.
    if not os.path.exists(prefect.__ui_static_path__):
        blurb += dashboard_not_built
    elif not ui_enabled:
        blurb += dashboard_disabled
    else:
        blurb += visit_dashboard
    return blurb
def prestart_check(console: "Console", base_url: str) -> None:
    """Check if PREFECT_API_URL is set in the current profile. If not, prompt the user to set it.

    Args:
        console: Rich console for output
        base_url: The base URL the server will be running on
    """
    api_url = f"{base_url}/api"
    current_profile = load_current_profile()
    profiles = load_profiles()
    # Only intervene when the active profile has no PREFECT_API_URL at all.
    if current_profile and PREFECT_API_URL not in current_profile.settings:
        # Profiles that already point at this server's API URL.
        profiles_with_matching_url = [
            name
            for name, profile in profiles.items()
            if profile.settings.get(PREFECT_API_URL) == api_url
        ]
        if len(profiles_with_matching_url) == 1:
            # Exactly one match: switch to it without prompting.
            profiles.set_active(profiles_with_matching_url[0])
            save_profiles(profiles)
            console.print(
                f"Switched to profile {profiles_with_matching_url[0]!r}",
                style="green",
            )
            return
        elif len(profiles_with_matching_url) > 1:
            # Several matches: let the user pick which profile to activate.
            console.print(
                "Your current profile doesn't have `PREFECT_API_URL` set to the address"
                " of the server that's running. Some of your other profiles do."
            )
            selected_profile = prompt_select_from_list(
                console,
                "Which profile would you like to switch to?",
                sorted(
                    [profile for profile in profiles_with_matching_url],
                ),
            )
            profiles.set_active(selected_profile)
            save_profiles(profiles)
            console.print(f"Switched to profile {selected_profile!r}", style="green")
            return
        # No matching profile: offer to create one or update the current one.
        console.print(
            "The `PREFECT_API_URL` setting for your current profile doesn't match the"
            " address of the server that's running. You need to set it to communicate"
            " with the server.",
            style="yellow",
        )
        choice = prompt_select_from_list(
            console,
            "How would you like to proceed?",
            [
                (
                    "create",
                    "Create a new profile with `PREFECT_API_URL` set and switch to it",
                ),
                (
                    "set",
                    f"Set `PREFECT_API_URL` in the current profile: {current_profile.name!r}",
                ),
            ],
        )
        if choice == "create":
            # Keep prompting until the user supplies an unused profile name.
            while True:
                profile_name = prompt("Enter a new profile name")
                if profile_name in profiles:
                    console.print(
                        f"Profile {profile_name!r} already exists. Please choose a different name.",
                        style="red",
                    )
                else:
                    break
            profiles.add_profile(
                Profile(
                    name=profile_name, settings={PREFECT_API_URL: f"{base_url}/api"}
                )
            )
            profiles.set_active(profile_name)
            save_profiles(profiles)
            console.print(f"Switched to new profile {profile_name!r}", style="green")
        elif choice == "set":
            api_url = prompt(
                "Enter the `PREFECT_API_URL` value", default="http://127.0.0.1:4200/api"
            )
            update_current_profile({PREFECT_API_URL: api_url})
            console.print(
                f"Set `PREFECT_API_URL` to {api_url!r} in the current profile {current_profile.name!r}",
                style="green",
            )
def _validate_multi_worker(workers: int, exit_fn: Callable[[str], object]) -> None:
    """Validate configuration for multi-worker mode.

    Args:
        workers: Number of worker processes.
        exit_fn: Called with an error message when validation fails (e.g.
            ``exit_with_error``). Assumed not to return — otherwise names
            assigned in the ``try`` blocks below could be unbound.
    """
    from prefect.server.utilities.database import get_dialect

    # Single-worker mode has no special requirements.
    if workers == 1:
        return
    if workers < 1:
        exit_fn("Number of workers must be >= 1")
    settings = get_current_settings()
    try:
        dialect = get_dialect(
            settings.server.database.connection_url.get_secret_value()
        )
    except Exception as e:
        exit_fn(f"Unable to validate database configuration: {e}")
    # SQLite cannot safely serve multiple API worker processes.
    if dialect.name != "postgresql":
        exit_fn(
            "Multi-worker mode (--workers > 1) is not supported with SQLite database."
        )
    try:
        messaging_cache = settings.server.events.messaging_cache
        messaging_broker = settings.server.events.messaging_broker
        causal_ordering = settings.server.events.causal_ordering
        lease_storage = settings.server.concurrency.lease_storage
    except Exception as e:
        exit_fn(f"Unable to validate messaging configuration: {e}")
    # In-memory messaging/ordering/lease backends are process-local, so any of
    # them being configured means multiple worker processes cannot coordinate.
    if (
        messaging_cache == "prefect.server.utilities.messaging.memory"
        or messaging_broker == "prefect.server.utilities.messaging.memory"
        or causal_ordering == "prefect.server.events.ordering.memory"
        or lease_storage == "prefect.server.concurrency.lease_storage.memory"
    ):
        error_message = textwrap.dedent(
            """
            Multi-worker mode (--workers > 1) requires Redis for messaging and lease storage.

            Please configure the following settings to use Redis:

            prefect config set PREFECT_MESSAGING_BROKER="prefect_redis.messaging"
            prefect config set PREFECT_MESSAGING_CACHE="prefect_redis.messaging"
            prefect config set PREFECT_SERVER_EVENTS_CAUSAL_ORDERING="prefect_redis.ordering"
            prefect config set PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE="prefect_redis.lease_storage"

            You'll also need to configure your Redis connection:

            export PREFECT_REDIS_MESSAGING_HOST="your-redis-host"
            export PREFECT_REDIS_MESSAGING_PORT="6379"
            export PREFECT_REDIS_MESSAGING_DB="0"

            For complete setup instructions, see:
            https://docs.prefect.io/v3/how-to-guides/self-hosted/server-cli#multi-worker-api-server
            https://docs.prefect.io/v3/advanced/self-hosted#redis-setup
            """
        ).strip()
        exit_fn(error_message)
def _run_in_background(
    console: "Console",
    pid_file: Path,
    server_settings: dict[str, str],
    host: str,
    port: int,
    keep_alive_timeout: int,
    no_services: bool,
    workers: int,
) -> None:
    """Launch the API server as a detached uvicorn subprocess.

    Records the child's PID in ``pid_file`` so `prefect server stop` can
    find and terminate it later.

    Args:
        console: Rich console for output.
        pid_file: File to record the spawned process's PID in.
        server_settings: Setting name/value pairs exported into the child env.
        host: Interface for uvicorn to bind.
        port: Port for uvicorn to bind.
        keep_alive_timeout: Uvicorn keep-alive timeout in seconds.
        no_services: When True, run the webserver only (no background services).
        workers: Number of uvicorn worker processes.
    """
    command = [
        sys.executable,
        "-m",
        "uvicorn",
        "--app-dir",
        str(prefect.__module_path__.parent),
        "--factory",
        "prefect.server.api.server:create_app",
        "--host",
        str(host),
        "--port",
        str(port),
        "--timeout-keep-alive",
        str(keep_alive_timeout),
        "--workers",
        str(workers),
    ]
    logger.debug("Opening server process with command: %s", shlex.join(command))
    # PREFECT__SERVER_FINAL marks this as the real server process (not a reloader).
    env = {**os.environ, **server_settings, "PREFECT__SERVER_FINAL": "1"}
    if no_services:
        env["PREFECT__SERVER_WEBSERVER_ONLY"] = "1"
    # NOTE(review): stdout/stderr are PIPEd but never read; a very chatty
    # child could fill the pipe buffer and stall — confirm output volume.
    process = subprocess.Popen(
        command,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    process_id = process.pid
    pid_file.write_text(str(process_id))
    console.print(
        "The Prefect server is running in the background. Run `prefect"
        " server stop` to stop it."
    )
def _run_in_foreground(
    console: "Console",
    server_settings: dict[str, str],
    host: str,
    port: int,
    keep_alive_timeout: int,
    no_services: bool,
    workers: int,
) -> None:
    """Run the API server in the current process (blocks until shutdown).

    Args:
        console: Rich console for output.
        server_settings: Setting names/values applied for the server's lifetime.
        host: Interface for uvicorn to bind.
        port: Port for uvicorn to bind.
        keep_alive_timeout: Uvicorn keep-alive timeout in seconds.
        no_services: When True, run the webserver only (no background services).
        workers: Number of uvicorn worker processes; >1 uses the app factory.
    """
    from prefect.server.api.server import create_app

    try:
        with temporary_settings(
            {getattr(prefect.settings, k): v for k, v in server_settings.items()}
        ):
            if workers == 1:
                # Single worker: build the app in-process so temporary_settings apply.
                uvicorn.run(
                    app=create_app(final=True, webserver_only=no_services),
                    app_dir=str(prefect.__module_path__.parent),
                    host=host,
                    port=port,
                    timeout_keep_alive=keep_alive_timeout,
                    log_level=server_settings.get(
                        "PREFECT_SERVER_LOGGING_LEVEL", "info"
                    ).lower(),
                )
            else:
                # Multi-worker: uvicorn forks, so settings must flow via env vars.
                os.environ["PREFECT__SERVER_FINAL"] = "1"
                # NOTE(review): WEBSERVER_ONLY is set regardless of
                # `no_services` in this branch — confirm that is intentional.
                os.environ["PREFECT__SERVER_WEBSERVER_ONLY"] = "1"
                uvicorn.run(
                    app="prefect.server.api.server:create_app",
                    factory=True,
                    host=host,
                    port=port,
                    timeout_keep_alive=keep_alive_timeout,
                    log_level=server_settings.get(
                        "PREFECT_SERVER_LOGGING_LEVEL", "info"
                    ).lower(),
                    workers=workers,
                )
    finally:
        console.print("Server stopped!")
def _is_process_running(pid: int) -> bool:
"""Check if a process is running by attempting to send signal 0."""
try:
os.kill(pid, 0)
return True
except (ProcessLookupError, OSError):
return False
def _read_pid_file(path: Path) -> int | None:
"""Read and validate a PID from a file."""
try:
return int(path.read_text())
except (ValueError, OSError, FileNotFoundError):
return None
def _write_pid_file(path: Path, pid: int) -> None:
"""Write a PID to a file, creating parent directories if needed."""
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(str(pid))
def _cleanup_pid_file(path: Path) -> None:
"""Remove PID file and try to cleanup empty parent directory."""
path.unlink(missing_ok=True)
try:
path.parent.rmdir()
except OSError:
pass
async def _run_all_services() -> None:
    """Run Service-based services and docket-based perpetual services.

    Blocks until the services shut down. Connects to the docket backend at
    the configured ``server.docket.url``.
    """
    from docket import Docket

    from prefect.server.api.background_workers import background_worker
    from prefect.server.services.base import Service
    from prefect.settings.context import get_current_settings

    docket_url = get_current_settings().server.docket.url
    # execution_ttl=0: completed executions are not retained by the docket.
    async with Docket(
        name="prefect", url=docket_url, execution_ttl=timedelta(0)
    ) as docket:
        async with background_worker(docket, ephemeral=False, webserver_only=False):
            # Run Service-based services (will block until shutdown)
            await Service.run_services()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/_server_utils.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/_worker_utils.py | """Shared worker utilities used by both typer and cyclopts CLI implementations."""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Type
from prefect._internal.integrations import KNOWN_EXTRAS_FOR_PACKAGES
from prefect.client.collections import get_collections_metadata_client
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import WorkQueueFilter, WorkQueueFilterName
from prefect.exceptions import ObjectNotFound
from prefect.plugins import load_prefect_collections
from prefect.utilities.dispatch import lookup_type
from prefect.workers.base import BaseWorker
if TYPE_CHECKING:
from rich.console import Console
async def _check_work_pool_paused(work_pool_name: str) -> bool:
    """Return True when the named work pool exists and is paused."""
    try:
        async with get_client() as client:
            pool = await client.read_work_pool(work_pool_name=work_pool_name)
    except ObjectNotFound:
        # A missing pool is treated as not paused.
        return False
    return pool.is_paused
async def _check_work_queues_paused(
    work_pool_name: str, work_queues: Optional[list[str]]
) -> bool:
    """Check if all work queues in the work pool are paused.

    Args:
        work_pool_name: the name of the work pool to check
        work_queues: the names of the work queues to check
    """
    # Restrict the query to the requested queues when names were given.
    queue_filter = None
    if work_queues:
        queue_filter = WorkQueueFilter(name=WorkQueueFilterName(any_=work_queues))
    try:
        async with get_client() as client:
            queues = await client.read_work_queues(
                work_pool_name=work_pool_name, work_queue_filter=queue_filter
            )
    except ObjectNotFound:
        return False
    # No matching queues counts as "not paused".
    if not queues:
        return False
    return all(queue.is_paused for queue in queues)
async def _retrieve_worker_type_from_pool(
    console: "Console",
    exit_fn: object,
    work_pool_name: Optional[str] = None,
) -> str:
    """Discover the worker type for a work pool.

    Args:
        console: Rich console for output
        exit_fn: Called with an error message when the pool is push/managed;
            assumed not to return (e.g. ``exit_with_error``)
        work_pool_name: Name of the work pool

    Returns:
        The pool's worker type, or "process" when the pool does not exist.
    """
    try:
        async with get_client() as client:
            work_pool = await client.read_work_pool(work_pool_name=work_pool_name)
            worker_type = work_pool.type
            console.print(
                f"Discovered type {worker_type!r} for work pool {work_pool.name!r}."
            )
            # Push/managed pools are run by Prefect Cloud; no local worker needed.
            if work_pool.is_push_pool or work_pool.is_managed_pool:
                exit_fn(
                    "Workers are not required for push work pools. "
                    "See https://docs.prefect.io/latest/deploy/infrastructure-examples/serverless "
                    "for more details."
                )
    except ObjectNotFound:
        # Missing pool: fall back to a local process worker.
        console.print(
            (
                f"Work pool {work_pool_name!r} does not exist and no worker type was"
                " provided. Starting a process worker..."
            ),
            style="yellow",
        )
        worker_type = "process"
    return worker_type
def _load_worker_class(worker_type: str) -> Optional[Type[BaseWorker]]:
    """Resolve the worker class registered for ``worker_type``, or None."""
    worker_cls: Optional[Type[BaseWorker]]
    try:
        # Collections may register additional worker types on import.
        load_prefect_collections()
        worker_cls = lookup_type(BaseWorker, worker_type)
    except KeyError:
        worker_cls = None
    return worker_cls
async def _install_package(
    console: "Console",
    package: str,
    upgrade: bool = False,
) -> None:
    """Install (or upgrade) ``package``, mapping known packages to their extras."""
    from prefect._internal.installation import ainstall_packages

    console.print(f"Installing {package}...")
    # Some packages are installed via a prefect extra rather than directly.
    target = KNOWN_EXTRAS_FOR_PACKAGES.get(package, package)
    await ainstall_packages([target], stream_output=True, upgrade=upgrade)
async def _find_package_for_worker_type(
    console: "Console",
    worker_type: str,
) -> Optional[str]:
    """Look up which collection package provides ``worker_type``.

    Returns the package name, or None (after printing a warning) when no
    package advertises that worker type.
    """
    async with get_collections_metadata_client() as client:
        worker_metadata = await client.read_worker_metadata()

    # Invert package -> worker-types into worker-type -> package; later
    # packages overwrite earlier ones, matching dict-comprehension semantics.
    packages_by_type: dict[str, str] = {}
    for package_name, worker_dict in worker_metadata.items():
        for wt in worker_dict:
            if wt != "prefect-agent":
                packages_by_type[wt] = package_name

    package = packages_by_type.get(worker_type)
    if package is None:
        console.print(
            f"Could not find a package for worker type {worker_type!r}.",
            style="yellow",
        )
    return package
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/_worker_utils.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_per_node.py | """Tests for PrefectDbtOrchestrator PER_NODE mode."""
import pickle
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from conftest import (
_make_mock_executor,
_make_mock_executor_per_node,
_make_mock_settings,
write_manifest,
)
from prefect_dbt.core._executor import DbtCoreExecutor, DbtExecutor, ExecutionResult
from prefect_dbt.core._orchestrator import (
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
_dbt_global_log_dedupe_processor_factory,
_DbtNodeError,
)
from prefect import flow
from prefect.task_runners import ProcessPoolTaskRunner, ThreadPoolTaskRunner
# -- Common manifest snippets ------------------------------------------------
# Minimal manifest: one table model with no dependencies.
SINGLE_MODEL = {
    "nodes": {
        "model.test.m1": {
            "name": "m1",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        }
    },
    "sources": {},
}

# Manifest containing a single seed node.
SEED_MANIFEST = {
    "nodes": {
        "seed.test.users": {
            "name": "users",
            "resource_type": "seed",
            "depends_on": {"nodes": []},
            "config": {"materialized": "seed"},
            "original_file_path": "seeds/users.csv",
        }
    },
    "sources": {},
}

# Manifest containing a single snapshot node.
SNAPSHOT_MANIFEST = {
    "nodes": {
        "snapshot.test.snap_users": {
            "name": "snap_users",
            "resource_type": "snapshot",
            "depends_on": {"nodes": []},
            "config": {"materialized": "snapshot"},
            "original_file_path": "snapshots/snap_users.sql",
        }
    },
    "sources": {},
}

# Three models with no dependencies between them (all run in one wave).
INDEPENDENT_NODES = {
    "nodes": {
        "model.test.a": {
            "name": "a",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "model.test.b": {
            "name": "b",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
        "model.test.c": {
            "name": "c",
            "resource_type": "model",
            "depends_on": {"nodes": []},
            "config": {"materialized": "table"},
        },
    },
    "sources": {},
}

# Manifest with no nodes at all.
EMPTY_MANIFEST = {"nodes": {}, "sources": {}}
# -- Fixtures ----------------------------------------------------------------
@pytest.fixture
def per_node_orch(tmp_path):
    """Factory fixture for creating a PER_NODE orchestrator with mock executor.

    Returns a factory that accepts manifest data and optional overrides.
    Defaults: execution_mode=PER_NODE, task_runner_type=ThreadPoolTaskRunner,
    mock executor via _make_mock_executor_per_node().

    Usage::

        orch, executor = per_node_orch(SINGLE_MODEL)
        orch, executor = per_node_orch(data, executor_kwargs={"fail_nodes": {"model.test.a"}})
        orch, executor = per_node_orch(data, executor=custom_executor, retries=2)
    """

    def _factory(manifest_data, *, executor=None, **kwargs):
        # Each call writes its own manifest.json under the test's tmp_path.
        manifest = write_manifest(tmp_path, manifest_data)
        if executor is None:
            # executor_kwargs only applies when we build the default mock.
            executor = _make_mock_executor_per_node(**kwargs.pop("executor_kwargs", {}))
        defaults = {
            "settings": _make_mock_settings(),
            "manifest_path": manifest,
            "executor": executor,
            "execution_mode": ExecutionMode.PER_NODE,
            "task_runner_type": ThreadPoolTaskRunner,
        }
        # Remaining kwargs override the defaults (e.g. retries, task_runner_type).
        defaults.update(kwargs)
        return PrefectDbtOrchestrator(**defaults), executor

    return _factory
@pytest.fixture
def mixed_resource_manifest_data() -> dict[str, Any]:
    """Manifest with seeds, models, and a snapshot.

    Wave 0: seed_users (seed)
    Wave 1: stg_users (model, depends on seed_users)
    Wave 1: snap_users (snapshot, depends on seed_users)
    """
    # Shaped like dbt's manifest.json: node ids map to node metadata.
    return {
        "nodes": {
            "seed.test.seed_users": {
                "name": "seed_users",
                "resource_type": "seed",
                "depends_on": {"nodes": []},
                "config": {"materialized": "seed"},
                "original_file_path": "seeds/seed_users.csv",
            },
            "model.test.stg_users": {
                "name": "stg_users",
                "resource_type": "model",
                "depends_on": {"nodes": ["seed.test.seed_users"]},
                "config": {"materialized": "view"},
                "original_file_path": "models/stg_users.sql",
            },
            "snapshot.test.snap_users": {
                "name": "snap_users",
                "resource_type": "snapshot",
                "depends_on": {"nodes": ["seed.test.seed_users"]},
                "config": {"materialized": "snapshot"},
                "original_file_path": "snapshots/snap_users.sql",
            },
        },
        "sources": {},
    }
# =============================================================================
# TestPerNodeInit
# =============================================================================
class TestPerNodeInit:
    """Constructor behavior: stored options and validation for PER_NODE mode."""

    def test_execution_mode_stored(self, tmp_path):
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
            execution_mode=ExecutionMode.PER_NODE,
            task_runner_type=ThreadPoolTaskRunner,
        )
        assert orch._execution_mode == ExecutionMode.PER_NODE

    def test_retries_stored(self, tmp_path):
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
            execution_mode=ExecutionMode.PER_NODE,
            retries=3,
            retry_delay_seconds=60,
        )
        assert orch._retries == 3
        assert orch._retry_delay_seconds == 60

    def test_int_concurrency_stored(self, tmp_path):
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
            concurrency=4,
        )
        assert orch._concurrency == 4

    def test_str_concurrency_stored(self, tmp_path):
        # A string concurrency value names a global concurrency limit.
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
            concurrency="dbt-warehouse",
        )
        assert orch._concurrency == "dbt-warehouse"

    def test_no_concurrency_default(self, tmp_path):
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        assert orch._concurrency is None

    def test_default_execution_mode_is_per_wave(self, tmp_path):
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        orch = PrefectDbtOrchestrator(
            settings=_make_mock_settings(),
            manifest_path=manifest,
            executor=_make_mock_executor(),
        )
        assert orch._execution_mode == ExecutionMode.PER_WAVE

    def test_invalid_execution_mode_raises(self, tmp_path):
        # Typo'd mode strings should be rejected at construction time.
        manifest = write_manifest(tmp_path, EMPTY_MANIFEST)
        with pytest.raises(ValueError, match="Invalid execution_mode"):
            PrefectDbtOrchestrator(
                settings=_make_mock_settings(),
                manifest_path=manifest,
                executor=_make_mock_executor(),
                execution_mode="per_nod",
            )
# =============================================================================
# TestPerNodeBasic
# =============================================================================
class TestPerNodeBasic:
    """End-to-end PER_NODE behavior: scheduling, executor calls, log handling."""

    def test_empty_manifest_returns_empty_dict(self, per_node_orch):
        orch, executor = per_node_orch(EMPTY_MANIFEST)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result == {}
        executor.execute_node.assert_not_called()

    def test_single_node_success(self, per_node_orch):
        orch, executor = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert "model.test.m1" in result
        assert result["model.test.m1"]["status"] == "success"
        executor.execute_node.assert_called_once()

    def test_multi_wave_diamond_all_succeed(self, per_node_orch, diamond_manifest_data):
        # diamond_manifest_data fixture comes from conftest.
        orch, executor = per_node_orch(diamond_manifest_data)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert len(result) == 4
        for node_id in [
            "model.test.root",
            "model.test.left",
            "model.test.right",
            "model.test.leaf",
        ]:
            assert result[node_id]["status"] == "success"
        assert executor.execute_node.call_count == 4

    def test_execute_node_called_not_execute_wave(self, per_node_orch):
        """PER_NODE uses execute_node, not execute_wave."""
        orch, executor = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        executor.execute_node.assert_called_once()
        executor.execute_wave.assert_not_called()

    def test_full_refresh_forwarded(self, per_node_orch):
        orch, executor = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build(full_refresh=True)

        test_flow()
        # Accept either positional or keyword forwarding of full_refresh.
        args, kwargs = executor.execute_node.call_args
        assert args[2] is True or kwargs.get("full_refresh") is True

    def test_target_forwarded_to_executor(self, per_node_orch):
        orch, executor = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build(target="prod")

        test_flow()
        _, kwargs = executor.execute_node.call_args
        assert kwargs["target"] == "prod"

    def test_target_none_by_default(self, per_node_orch):
        orch, executor = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        _, kwargs = executor.execute_node.call_args
        assert kwargs["target"] is None

    def test_global_log_messages_emitted_across_nodes_for_in_process_runner(
        self, per_node_orch
    ):
        # The "" key routes messages to the global (non-node) logger.
        orch, _ = per_node_orch(
            INDEPENDENT_NODES,
            executor_kwargs={
                "log_messages": {
                    "": [("info", "Running with dbt=1.x")],
                }
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        mock_run_logger = MagicMock()
        mock_global_logger = MagicMock()
        mock_run_logger.getChild.return_value = mock_global_logger
        with patch(
            "prefect_dbt.core._orchestrator.get_run_logger",
            return_value=mock_run_logger,
        ):
            test_flow()
        # One global message per node (3 independent nodes).
        global_calls = [
            call.args[0]
            for call in mock_global_logger.info.call_args_list
            if call.args and call.args[0] == "Running with dbt=1.x"
        ]
        assert len(global_calls) == 3

    def test_dbt_global_dedupe_processor_drops_duplicates(self):
        processor = _dbt_global_log_dedupe_processor_factory()
        payload_1 = {
            "flow_run_id": "flow-1",
            "task_run_id": "task-1",
            "name": "prefect.task_runs.dbt_orchestrator_global",
            "level": 20,
            "message": "Running with dbt=1.x",
        }
        # Same flow/message but different task run: still a duplicate.
        payload_2 = {**payload_1, "task_run_id": "task-2"}
        assert processor("log", payload_1) == ("log", payload_1)
        assert processor("log", payload_2) is None

    def test_dbt_global_dedupe_processor_only_applies_to_target_loggers(self):
        processor = _dbt_global_log_dedupe_processor_factory()
        non_target_payload = {
            "flow_run_id": "flow-1",
            "task_run_id": "task-1",
            "name": "prefect.task_runs",
            "level": 20,
            "message": "keep me",
        }
        # Non-global logger names pass through unchanged, even repeated.
        assert processor("log", non_target_payload) == ("log", non_target_payload)
        assert processor("log", non_target_payload) == ("log", non_target_payload)

    def test_dbt_global_dedupe_processor_lfu_retains_frequent_keys(self):
        with patch(
            "prefect_dbt.core._orchestrator._GLOBAL_LOG_DEDUPE_MAX_KEYS",
            2,
        ):
            processor = _dbt_global_log_dedupe_processor_factory()
            payload_a_1 = {
                "flow_run_id": "flow-1",
                "task_run_id": "task-1",
                "name": "prefect.task_runs.dbt_orchestrator_global",
                "level": 20,
                "message": "A",
            }
            payload_a_2 = {**payload_a_1, "task_run_id": "task-2"}
            payload_a_3 = {**payload_a_1, "task_run_id": "task-3"}
            payload_b_1 = {**payload_a_1, "message": "B"}
            payload_b_2 = {**payload_b_1, "task_run_id": "task-4"}
            payload_c_1 = {**payload_a_1, "message": "C"}
            assert processor("log", payload_a_1) == ("log", payload_a_1)
            assert processor("log", payload_a_2) is None
            assert processor("log", payload_b_1) == ("log", payload_b_1)
            # Adding C should evict B (low frequency) while retaining A (high frequency).
            assert processor("log", payload_c_1) == ("log", payload_c_1)
            assert processor("log", payload_b_2) == ("log", payload_b_2)
            assert processor("log", payload_a_3) is None

    def test_serializing_non_process_runner_does_not_capture_thread_lock(
        self, per_node_orch
    ):
        """Task closures should stay picklable for non-process runners."""

        class _SerializingThreadRunner(ThreadPoolTaskRunner):
            def submit(self, task, *args, **kwargs):
                import types
                from threading import Lock

                # Walk the closure graph of the task fn and assert no lock
                # objects are captured (locks are not picklable).
                lock_type = type(Lock())
                to_visit = [task.fn]
                seen: set[int] = set()
                while to_visit:
                    fn = to_visit.pop()
                    if id(fn) in seen:
                        continue
                    seen.add(id(fn))
                    for cell in fn.__closure__ or ():
                        value = cell.cell_contents
                        assert not isinstance(value, lock_type)
                        if isinstance(value, types.FunctionType):
                            to_visit.append(value)
                return super().submit(task, *args, **kwargs)

        orch, _ = per_node_orch(
            SINGLE_MODEL,
            task_runner_type=_SerializingThreadRunner,
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "success"

    def test_process_pool_subclass_without_processor_kw_still_runs(self, per_node_orch):
        class _SyncFuture:
            # Minimal future stand-in: resolves synchronously.
            def __init__(self, result):
                self._result = result

            def result(self):
                return self._result

            def add_done_callback(self, fn):
                fn(self)

        class _CompatProcessPoolRunner(ProcessPoolTaskRunner):
            init_calls: list[dict[str, Any]] = []

            def __init__(self, max_workers=None):
                self.init_calls.append({"max_workers": max_workers})
                super().__init__(max_workers=max_workers)

            def __enter__(self):
                self._started = True
                return self

            def __exit__(self, exc_type, exc_value, traceback):
                self._started = False

            def submit(self, task, parameters, wait_for=None, dependencies=None):
                return _SyncFuture(task.fn(**parameters))

        orch, _ = per_node_orch(
            SINGLE_MODEL,
            task_runner_type=_CompatProcessPoolRunner,
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "success"
        assert _CompatProcessPoolRunner.init_calls == [{"max_workers": 1}]

    def test_process_pool_subclass_without_processor_config_api_still_runs(
        self, per_node_orch
    ):
        class _SyncFuture:
            def __init__(self, result):
                self._result = result

            def result(self):
                return self._result

            def add_done_callback(self, fn):
                fn(self)

        class _LegacyProcessPoolRunner(ProcessPoolTaskRunner):
            init_calls: list[dict[str, Any]] = []
            set_subprocess_message_processor_factories = None

            @property
            def subprocess_message_processor_factories(self):
                return ()

            @subprocess_message_processor_factories.setter
            def subprocess_message_processor_factories(self, value):
                # Simulate a runner whose processor config is read-only.
                raise AttributeError(
                    "Legacy runner does not support message processor configuration."
                )

            def __init__(self, max_workers=None):
                self.init_calls.append({"max_workers": max_workers})
                super().__init__(max_workers=max_workers)

            def __enter__(self):
                self._started = True
                return self

            def __exit__(self, exc_type, exc_value, traceback):
                self._started = False

            def submit(self, task, parameters, wait_for=None, dependencies=None):
                return _SyncFuture(task.fn(**parameters))

        orch, _ = per_node_orch(
            SINGLE_MODEL,
            task_runner_type=_LegacyProcessPoolRunner,
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "success"
        assert _LegacyProcessPoolRunner.init_calls == [{"max_workers": 1}]

    def test_process_pool_preserves_existing_subprocess_processors(self, per_node_orch):
        class _SyncFuture:
            def __init__(self, result):
                self._result = result

            def result(self):
                return self._result

            def add_done_callback(self, fn):
                fn(self)

        def _existing_processor_factory():
            def _processor(message_type: str, message_payload: Any):
                return message_type, message_payload

            return _processor

        class _PreconfiguredProcessPoolRunner(ProcessPoolTaskRunner):
            configured_factories: tuple[Any, ...] | None = None
            _processor_factories: tuple[Any, ...]

            @property
            def subprocess_message_processor_factories(self):
                return self._processor_factories

            @subprocess_message_processor_factories.setter
            def subprocess_message_processor_factories(self, value):
                self._processor_factories = tuple(value or ())

            def __init__(self, max_workers=None):
                super().__init__(max_workers=max_workers)
                self.subprocess_message_processor_factories = [
                    _existing_processor_factory
                ]

            def __enter__(self):
                self._started = True
                # Record what the orchestrator configured by entry time.
                type(
                    self
                ).configured_factories = self.subprocess_message_processor_factories
                return self

            def __exit__(self, exc_type, exc_value, traceback):
                self._started = False

            def submit(self, task, parameters, wait_for=None, dependencies=None):
                return _SyncFuture(task.fn(**parameters))

        orch, _ = per_node_orch(
            SINGLE_MODEL,
            task_runner_type=_PreconfiguredProcessPoolRunner,
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "success"
        # Pre-existing factory preserved; dedupe factory appended after it.
        assert _PreconfiguredProcessPoolRunner.configured_factories == (
            _existing_processor_factory,
            _dbt_global_log_dedupe_processor_factory,
        )
# =============================================================================
# TestPerNodeCommandMapping
# =============================================================================
class TestPerNodeCommandMapping:
    """Each dbt resource type maps to its corresponding dbt command."""

    def test_model_uses_run_command(self, per_node_orch):
        orch, _ = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["invocation"]["command"] == "run"

    def test_seed_uses_seed_command(self, per_node_orch):
        orch, _ = per_node_orch(SEED_MANIFEST)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["seed.test.users"]["invocation"]["command"] == "seed"

    def test_snapshot_uses_snapshot_command(self, per_node_orch):
        orch, _ = per_node_orch(SNAPSHOT_MANIFEST)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["snapshot.test.snap_users"]["invocation"]["command"] == "snapshot"

    def test_mixed_resource_types(self, per_node_orch, mixed_resource_manifest_data):
        """Each resource type gets the correct dbt command."""
        orch, _ = per_node_orch(mixed_resource_manifest_data)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["seed.test.seed_users"]["invocation"]["command"] == "seed"
        assert result["model.test.stg_users"]["invocation"]["command"] == "run"
        assert result["snapshot.test.snap_users"]["invocation"]["command"] == "snapshot"

    def test_executor_receives_correct_command(self, per_node_orch):
        """Verify execute_node is called with the right command string."""
        orch, executor = per_node_orch(SEED_MANIFEST)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        # Accept either positional or keyword forwarding of the command.
        args, kwargs = executor.execute_node.call_args
        assert args[1] == "seed" or kwargs.get("command") == "seed"
# =============================================================================
# TestPerNodeFailure
# =============================================================================
class TestPerNodeFailure:
    """Failure handling in PER_NODE mode: error reporting and skip propagation."""

    def test_failed_node_marked_as_error(self, per_node_orch):
        orch, _ = per_node_orch(
            SINGLE_MODEL,
            executor_kwargs={"success": False, "error": RuntimeError("dbt failed")},
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        assert "dbt failed" in result["model.test.m1"]["error"]["message"]
        assert result["model.test.m1"]["error"]["type"] == "RuntimeError"

    def test_downstream_skip_on_failure(self, per_node_orch, linear_manifest_data):
        """In a linear chain a->b->c, if a fails, b and c are skipped."""
        orch, _ = per_node_orch(
            linear_manifest_data,
            executor_kwargs={
                "fail_nodes": {"model.test.a"},
                "error": RuntimeError("a failed"),
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.a"]["status"] == "error"
        assert result["model.test.b"]["status"] == "skipped"
        assert result["model.test.b"]["reason"] == "upstream failure"
        assert "model.test.a" in result["model.test.b"]["failed_upstream"]
        assert result["model.test.c"]["status"] == "skipped"

    def test_partial_wave_failure_diamond(self, per_node_orch, diamond_manifest_data):
        """In diamond graph, if 'right' fails, 'left' succeeds and 'leaf' is skipped."""
        orch, _ = per_node_orch(
            diamond_manifest_data,
            executor_kwargs={
                "fail_nodes": {"model.test.right"},
                "error": RuntimeError("right failed"),
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.root"]["status"] == "success"
        assert result["model.test.left"]["status"] == "success"
        assert result["model.test.right"]["status"] == "error"
        assert result["model.test.leaf"]["status"] == "skipped"
        assert "model.test.right" in result["model.test.leaf"]["failed_upstream"]

    def test_independent_nodes_not_affected(self, per_node_orch):
        """Nodes in the same wave are independent -- failure of one doesn't affect others."""
        orch, _ = per_node_orch(
            INDEPENDENT_NODES,
            executor_kwargs={
                "fail_nodes": {"model.test.b"},
                "error": RuntimeError("b failed"),
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.a"]["status"] == "success"
        assert result["model.test.b"]["status"] == "error"
        assert result["model.test.c"]["status"] == "success"

    def test_error_without_exception_no_artifacts(self, per_node_orch):
        """Node failure with no exception and no artifacts falls back to unknown error."""
        executor = MagicMock(spec=DbtExecutor)
        executor.execute_node.return_value = ExecutionResult(
            success=False, node_ids=["model.test.m1"], error=None
        )
        orch, _ = per_node_orch(SINGLE_MODEL, executor=executor)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        assert result["model.test.m1"]["error"]["message"] == "unknown error"
        assert result["model.test.m1"]["error"]["type"] == "UnknownError"

    def test_error_without_exception_uses_artifact_message(self, per_node_orch):
        """Node failure with no exception extracts error from per-node artifacts."""
        executor = MagicMock(spec=DbtExecutor)
        executor.execute_node.return_value = ExecutionResult(
            success=False,
            node_ids=["model.test.m1"],
            error=None,
            artifacts={
                "model.test.m1": {
                    "status": "error",
                    "message": 'relation "raw.nonexistent_table" does not exist',
                    "execution_time": 0.5,
                }
            },
        )
        orch, _ = per_node_orch(SINGLE_MODEL, executor=executor)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        assert (
            result["model.test.m1"]["error"]["message"]
            == 'relation "raw.nonexistent_table" does not exist'
        )

    def test_error_artifact_message_preferred_over_exception(self, per_node_orch):
        """Per-node artifact message takes precedence over execution-level exception."""
        executor = MagicMock(spec=DbtExecutor)
        executor.execute_node.return_value = ExecutionResult(
            success=False,
            node_ids=["model.test.m1"],
            error=RuntimeError("generic error"),
            artifacts={
                "model.test.m1": {
                    "status": "error",
                    "message": 'Database Error: relation "raw.missing" does not exist',
                    "execution_time": 0.3,
                }
            },
        )
        orch, _ = per_node_orch(SINGLE_MODEL, executor=executor)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        assert (
            result["model.test.m1"]["error"]["message"]
            == 'Database Error: relation "raw.missing" does not exist'
        )
        # type still comes from the exception when present
        assert result["model.test.m1"]["error"]["type"] == "RuntimeError"

    def test_transitive_skip_propagation(self, per_node_orch, linear_manifest_data):
        """Skipped nodes also cause their dependents to be skipped."""
        orch, _ = per_node_orch(
            linear_manifest_data,
            executor_kwargs={
                "fail_nodes": {"model.test.a"},
                "error": RuntimeError("a failed"),
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.a"]["status"] == "error"
        assert result["model.test.b"]["status"] == "skipped"
        assert result["model.test.c"]["status"] == "skipped"
        assert "model.test.b" in result["model.test.c"]["failed_upstream"]

    def test_executor_not_called_for_skipped_nodes(
        self, per_node_orch, linear_manifest_data
    ):
        """Skipped nodes don't invoke the executor."""
        orch, executor = per_node_orch(
            linear_manifest_data,
            executor_kwargs={
                "fail_nodes": {"model.test.a"},
                "error": RuntimeError("a failed"),
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        # Only the failing root node should have been executed.
        assert executor.execute_node.call_count == 1

    def test_dbt_node_error_pickle_roundtrip(self):
        """_DbtNodeError survives pickle roundtrip across process boundaries."""
        result = ExecutionResult(
            success=False,
            node_ids=["model.test.m1"],
            error=RuntimeError("relation does not exist"),
        )
        timing = {
            "started_at": "2026-01-01T00:00:00+00:00",
            "completed_at": "2026-01-01T00:00:01+00:00",
            "duration_seconds": 1.0,
        }
        invocation = {"command": "run", "args": ["model.test.m1"]}
        err = _DbtNodeError(result, timing, invocation)
        restored = pickle.loads(pickle.dumps(err))
        assert str(restored) == str(err)
        assert restored.timing == timing
        assert restored.invocation == invocation
        assert restored.execution_result.success is False
        assert restored.execution_result.node_ids == ["model.test.m1"]
        assert str(restored.execution_result.error) == "relation does not exist"
# =============================================================================
# TestPerNodeResults
# =============================================================================
class TestPerNodeResults:
    """Shape of per-node result entries: timing, invocation, and artifacts."""

    def test_result_has_timing_fields(self, per_node_orch):
        orch, _ = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        timing = result["model.test.m1"]["timing"]
        assert "started_at" in timing
        assert "completed_at" in timing
        assert "duration_seconds" in timing
        assert isinstance(timing["duration_seconds"], float)

    def test_result_has_per_node_invocation(self, per_node_orch):
        """PER_NODE invocation shows the specific command, not 'build'."""
        orch, _ = per_node_orch(SINGLE_MODEL)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        invocation = result["model.test.m1"]["invocation"]
        assert invocation["command"] == "run"
        assert "model.test.m1" in invocation["args"]

    def test_artifacts_enrich_timing(self, per_node_orch):
        # Artifact-reported execution_time should be merged into the timing dict.
        orch, _ = per_node_orch(
            SINGLE_MODEL,
            executor_kwargs={
                "artifacts": {
                    "model.test.m1": {
                        "status": "success",
                        "message": "OK",
                        "execution_time": 2.71,
                    }
                },
            },
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["timing"]["execution_time"] == 2.71

    def test_failed_node_has_timing_and_invocation(self, per_node_orch):
        """Error results include timing and invocation from the last attempt."""
        orch, _ = per_node_orch(
            SINGLE_MODEL,
            executor_kwargs={"success": False, "error": RuntimeError("boom")},
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        assert "timing" in result["model.test.m1"]
        assert "invocation" in result["model.test.m1"]
# =============================================================================
# TestPerNodeRetries
# =============================================================================
class TestPerNodeRetries:
    """Retry behavior of individual node tasks in PER_NODE mode."""

    def test_retry_succeeds_on_second_attempt(self, per_node_orch):
        """Node fails once, then succeeds on retry."""
        call_count = 0

        def _execute_node(
            node, command, full_refresh=False, target=None, extra_cli_args=None
        ):
            nonlocal call_count
            call_count += 1
            # First call fails with a transient error; subsequent calls succeed.
            if call_count == 1:
                return ExecutionResult(
                    success=False,
                    node_ids=[node.unique_id],
                    error=RuntimeError("transient error"),
                )
            return ExecutionResult(
                success=True,
                node_ids=[node.unique_id],
            )

        executor = MagicMock(spec=DbtExecutor)
        executor.execute_node = MagicMock(side_effect=_execute_node)
        orch, _ = per_node_orch(
            SINGLE_MODEL, executor=executor, retries=1, retry_delay_seconds=0
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "success"
        assert executor.execute_node.call_count == 2

    def test_retries_exhausted_marks_error(self, per_node_orch):
        """Node fails after all retries and is marked as error."""
        orch, executor = per_node_orch(
            SINGLE_MODEL,
            executor_kwargs={
                "success": False,
                "error": RuntimeError("persistent error"),
            },
            retries=2,
            retry_delay_seconds=0,
        )

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert result["model.test.m1"]["status"] == "error"
        # 1 original + 2 retries = 3 calls
        assert executor.execute_node.call_count == 3
# =============================================================================
# TestPerNodeConcurrency
# =============================================================================
class TestPerNodeConcurrency:
    """Worker-count inference and explicit concurrency handling in PER_NODE mode."""

    def test_int_concurrency_sets_max_workers(self, per_node_orch):
        """With concurrency=2, task runner is created with max_workers=2."""
        data = {
            "nodes": {
                f"model.test.m{i}": {
                    "name": f"m{i}",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                }
                for i in range(4)
            },
            "sources": {},
        }
        captured_kwargs: list[dict] = []

        class _TrackingRunner(ThreadPoolTaskRunner):
            # Records the kwargs the orchestrator constructs the runner with.
            def __init__(self, **kwargs):
                captured_kwargs.append(kwargs)
                super().__init__(**kwargs)

        orch, _ = per_node_orch(data, task_runner_type=_TrackingRunner, concurrency=2)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        assert len(captured_kwargs) == 1
        assert captured_kwargs[0]["max_workers"] == 2
        for node_id in data["nodes"]:
            assert result[node_id]["status"] == "success"

    def test_no_concurrency_uses_wave_size(self, per_node_orch, diamond_manifest_data):
        """Without int concurrency, max_workers = max wave size."""
        captured_kwargs: list[dict] = []

        class _TrackingRunner(ThreadPoolTaskRunner):
            def __init__(self, **kwargs):
                captured_kwargs.append(kwargs)
                super().__init__(**kwargs)

        orch, _ = per_node_orch(diamond_manifest_data, task_runner_type=_TrackingRunner)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        # max wave size is 2 (left + right)
        assert len(captured_kwargs) == 1
        assert captured_kwargs[0]["max_workers"] == 2

    def test_process_pool_default_is_capped_by_cpu_count(self, per_node_orch):
        """Inferred ProcessPool max_workers is bounded by 2× available CPUs."""
        orch, _ = per_node_orch(SINGLE_MODEL, task_runner_type=None)
        with patch("prefect_dbt.core._orchestrator.os.cpu_count", return_value=2):
            max_workers = orch._determine_per_node_max_workers(
                task_runner_type=ProcessPoolTaskRunner,
                largest_wave=10,
            )
        assert max_workers == 4

    def test_process_pool_explicit_concurrency_is_respected(self, per_node_orch):
        """User-provided concurrency should not be clamped internally."""
        orch, _ = per_node_orch(SINGLE_MODEL, task_runner_type=None, concurrency=10)
        with patch("prefect_dbt.core._orchestrator.os.cpu_count", return_value=2):
            max_workers = orch._determine_per_node_max_workers(
                task_runner_type=ProcessPoolTaskRunner,
                largest_wave=10,
            )
        assert max_workers == 10

    def test_process_pool_default_respects_windows_cap(self, per_node_orch):
        """On Windows, inferred ProcessPool max_workers never exceeds 61."""
        orch, _ = per_node_orch(SINGLE_MODEL, task_runner_type=None)
        with (
            patch("prefect_dbt.core._orchestrator.os.cpu_count", return_value=64),
            patch("prefect_dbt.core._orchestrator.sys") as mock_sys,
        ):
            mock_sys.platform = "win32"
            max_workers = orch._determine_per_node_max_workers(
                task_runner_type=ProcessPoolTaskRunner,
                largest_wave=200,
            )
        assert max_workers == 61

    def test_process_pool_default_not_capped_on_linux(self, per_node_orch):
        """On non-Windows, ProcessPool max_workers uses full 2× CPU count."""
        orch, _ = per_node_orch(SINGLE_MODEL, task_runner_type=None)
        with (
            patch("prefect_dbt.core._orchestrator.os.cpu_count", return_value=64),
            patch("prefect_dbt.core._orchestrator.sys") as mock_sys,
        ):
            mock_sys.platform = "linux"
            max_workers = orch._determine_per_node_max_workers(
                task_runner_type=ProcessPoolTaskRunner,
                largest_wave=200,
            )
        assert max_workers == 128

    def test_thread_pool_concurrency_not_capped_by_cpu_count(self, per_node_orch):
        """Non-ProcessPool runners preserve explicit int concurrency."""
        orch, _ = per_node_orch(SINGLE_MODEL, concurrency=10)
        with patch("prefect_dbt.core._orchestrator.os.cpu_count", return_value=2):
            max_workers = orch._determine_per_node_max_workers(
                task_runner_type=ThreadPoolTaskRunner,
                largest_wave=10,
            )
        assert max_workers == 10

    def test_cached_per_node_does_not_eagerly_resolve_profiles(self, tmp_path):
        """Cached PER_NODE runs can complete without resolving profiles.yml."""
        manifest = write_manifest(tmp_path, SINGLE_MODEL)
        settings = _make_mock_settings()
        # Any attempt to resolve profiles should blow up the test.
        settings.resolve_profiles_yml = MagicMock(
            side_effect=RuntimeError("resolve_profiles_yml should not be called")
        )
        executor = DbtCoreExecutor(settings)
        orch = PrefectDbtOrchestrator(
            settings=settings,
            manifest_path=manifest,
            executor=executor,
            execution_mode=ExecutionMode.PER_NODE,
            cache=CacheConfig(),
            task_runner_type=ThreadPoolTaskRunner,
        )
        orch._execute_per_node = MagicMock(
            return_value={"model.test.m1": {"status": "cached"}}
        )
        result = orch.run_build()
        settings.resolve_profiles_yml.assert_not_called()
        assert result["model.test.m1"]["status"] == "cached"
# =============================================================================
# TestPerNodeTaskRunNames
# =============================================================================
class TestPerNodeTaskRunNames:
    """Task run names follow the '{resource_type} {name}' convention."""

    def test_model_task_run_name(self, per_node_orch):
        """Model node gets task run name 'model m1'."""
        task_names = []

        class _CapturingRunner(ThreadPoolTaskRunner):
            # Captures each submitted task's run name before delegating.
            def submit(self, task, *args, **kwargs):
                task_names.append(task.task_run_name)
                return super().submit(task, *args, **kwargs)

        orch, _ = per_node_orch(SINGLE_MODEL, task_runner_type=_CapturingRunner)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        assert "model m1" in task_names

    def test_seed_task_run_name(self, per_node_orch):
        """Seed node gets task run name 'seed users'."""
        task_names = []

        class _CapturingRunner(ThreadPoolTaskRunner):
            def submit(self, task, *args, **kwargs):
                task_names.append(task.task_run_name)
                return super().submit(task, *args, **kwargs)

        orch, _ = per_node_orch(SEED_MANIFEST, task_runner_type=_CapturingRunner)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        assert "seed users" in task_names

    def test_snapshot_task_run_name(self, per_node_orch):
        """Snapshot node gets task run name 'snapshot snap_users'."""
        task_names = []

        class _CapturingRunner(ThreadPoolTaskRunner):
            def submit(self, task, *args, **kwargs):
                task_names.append(task.task_run_name)
                return super().submit(task, *args, **kwargs)

        orch, _ = per_node_orch(SNAPSHOT_MANIFEST, task_runner_type=_CapturingRunner)

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        assert "snapshot snap_users" in task_names

    def test_mixed_resource_task_run_names(
        self, per_node_orch, mixed_resource_manifest_data
    ):
        """Each resource type gets the correct '{type} {name}' task run name."""
        task_names = []

        class _CapturingRunner(ThreadPoolTaskRunner):
            def submit(self, task, *args, **kwargs):
                task_names.append(task.task_run_name)
                return super().submit(task, *args, **kwargs)

        orch, _ = per_node_orch(
            mixed_resource_manifest_data, task_runner_type=_CapturingRunner
        )

        @flow
        def test_flow():
            return orch.run_build()

        test_flow()
        assert "seed seed_users" in task_names
        assert "model stg_users" in task_names
        assert "snapshot snap_users" in task_names
# =============================================================================
# TestPerNodeEagerScheduling
# =============================================================================
class TestPerNodeEagerScheduling:
    """Tests that verify the eager DAG scheduler submits nodes as soon as
    their individual dependencies complete, rather than waiting for wave
    barriers.
    """

    def test_diamond_fast_branch_does_not_wait_for_slow_branch(self, per_node_orch):
        """In a diamond graph (root -> left/right -> leaf), if 'left' is fast
        and 'right' is slow, 'leaf' should NOT start until both finish — but
        'left' should complete well before 'right'. This confirms there are
        no artificial wave barriers: each node starts as soon as its own
        dependencies finish.
        """
        import threading
        import time

        # Track submission and completion timestamps per node.
        submit_times: dict[str, float] = {}
        complete_times: dict[str, float] = {}
        lock = threading.Lock()
        DIAMOND = {
            "nodes": {
                "model.test.root": {
                    "name": "root",
                    "resource_type": "model",
                    "depends_on": {"nodes": []},
                    "config": {"materialized": "table"},
                },
                "model.test.left": {
                    "name": "left",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "model.test.right": {
                    "name": "right",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.root"]},
                    "config": {"materialized": "table"},
                },
                "model.test.leaf": {
                    "name": "leaf",
                    "resource_type": "model",
                    "depends_on": {"nodes": ["model.test.left", "model.test.right"]},
                    "config": {"materialized": "table"},
                },
            },
            "sources": {},
        }
        # Custom executor: 'right' sleeps 0.3s; everything else is instant.
        from prefect_dbt.core._executor import ExecutionResult

        def _timed_execute_node(
            node, command, full_refresh=False, target=None, extra_cli_args=None
        ):
            nid = node.unique_id
            with lock:
                submit_times[nid] = time.monotonic()
            if nid == "model.test.right":
                time.sleep(0.3)
            with lock:
                complete_times[nid] = time.monotonic()
            return ExecutionResult(success=True, node_ids=[nid])

        executor = MagicMock()
        executor.execute_node = MagicMock(side_effect=_timed_execute_node)
        orch, _ = per_node_orch(DIAMOND, executor=executor)

        @flow
        def test_flow():
            return orch.run_build()

        result = test_flow()
        # All four nodes should succeed.
        assert set(result.keys()) == {
            "model.test.root",
            "model.test.left",
            "model.test.right",
            "model.test.leaf",
        }
        for nid in result:
            assert result[nid]["status"] == "success"
        # 'left' should complete before 'right' (no wave barrier).
        assert complete_times["model.test.left"] < complete_times["model.test.right"]
        # 'leaf' should start only after both 'left' and 'right' complete.
        leaf_submit = submit_times["model.test.leaf"]
        assert leaf_submit >= complete_times["model.test.left"]
        assert leaf_submit >= complete_times["model.test.right"]
class TestPerNodeWithSelectors:
    """dbt node selection (``select=``) limits which nodes are executed."""

    @patch("prefect_dbt.core._orchestrator.resolve_selection")
    def test_select_filters_nodes(
        self, mock_resolve, per_node_orch, diamond_manifest_data
    ):
        # Only the resolved selection should appear in the build results.
        mock_resolve.return_value = {"model.test.root", "model.test.left"}
        orch, _ = per_node_orch(diamond_manifest_data)

        @flow
        def test_flow():
            return orch.run_build(select="tag:daily")

        result = test_flow()
        assert "model.test.root" in result
        assert "model.test.left" in result
        assert "model.test.right" not in result
        assert "model.test.leaf" not in result
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_per_node.py",
"license": "Apache License 2.0",
"lines": 1128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_postgres_integration.py | """Integration tests for PrefectDbtOrchestrator PER_NODE mode against Postgres.
These tests exercise real concurrent execution (concurrency > 1) which is not
possible with DuckDB's single-writer limitation. A Postgres service must be
available (default: localhost:5432, user=prefect, password=prefect, db=dbt_test).
Connection settings can be overridden with environment variables:
PG_HOST, PG_PORT, PG_USER, PG_PASSWORD, PG_DATABASE
Run locally:
docker run --rm -d -p 5433:5432 \
-e POSTGRES_USER=prefect -e POSTGRES_PASSWORD=prefect \
-e POSTGRES_DB=dbt_test --name dbt-test-pg postgres:16
PG_PORT=5433 uv run --group integration pytest tests/core/test_orchestrator_postgres_integration.py -m integration -v
docker stop dbt-test-pg
"""
import os
import shutil
from pathlib import Path
from uuid import uuid4
import pytest
import yaml
from dbt.cli.main import dbtRunner
psycopg2 = pytest.importorskip(
"psycopg2", reason="psycopg2 required for Postgres integration tests"
)
pytest.importorskip(
"dbt.adapters.postgres",
reason="dbt-postgres required for Postgres integration tests",
)
from prefect_dbt.core._orchestrator import ( # noqa: E402
ExecutionMode,
PrefectDbtOrchestrator,
TestStrategy,
)
from prefect_dbt.core.settings import PrefectDbtSettings # noqa: E402
pytestmark = pytest.mark.integration

# Location of the checked-in dbt project used as the fixture template.
DBT_TEST_PROJECT = Path(__file__).resolve().parent.parent / "dbt_test_project"

# Postgres connection settings; overridable via environment variables.
PG_HOST = os.environ.get("PG_HOST", "localhost")
PG_PORT = int(os.environ.get("PG_PORT", "5432"))
PG_USER = os.environ.get("PG_USER", "prefect")
PG_PASSWORD = os.environ.get("PG_PASSWORD", "prefect")
PG_DATABASE = os.environ.get("PG_DATABASE", "dbt_test")

# Unique IDs of the nodes in the test project's manifest.
SEED_CUSTOMERS = "seed.test_project.customers"
SEED_ORDERS = "seed.test_project.orders"
STG_CUSTOMERS = "model.test_project.stg_customers"
STG_ORDERS = "model.test_project.stg_orders"
CUSTOMER_SUMMARY = "model.test_project.customer_summary"
INT_ORDERS_ENRICHED = "model.test_project.int_orders_enriched"

# Every node expected to execute (ephemeral models are excluded).
ALL_EXECUTABLE = {
    SEED_CUSTOMERS,
    SEED_ORDERS,
    STG_CUSTOMERS,
    STG_ORDERS,
    CUSTOMER_SUMMARY,
}
def _pg_connect():
    """Open a fresh connection to the integration-test Postgres database."""
    connection_kwargs = {
        "host": PG_HOST,
        "port": PG_PORT,
        "user": PG_USER,
        "password": PG_PASSWORD,
        "dbname": PG_DATABASE,
    }
    return psycopg2.connect(**connection_kwargs)
@pytest.fixture(scope="session")
def pg_dbt_project(tmp_path_factory):
    """Set up a real dbt project with Postgres and a parsed manifest.

    Session-scoped: copies the test project to a temp directory, creates a
    unique schema in Postgres, writes profiles.yml, and runs `dbt parse`.
    """
    # Unique schema per session so parallel runs don't collide.
    schema_name = f"dbt_test_{uuid4().hex[:8]}"
    project_dir = tmp_path_factory.mktemp("dbt_project_pg")
    # Copy the template project (dirs and files) into the temp directory.
    for item in DBT_TEST_PROJECT.iterdir():
        dest = project_dir / item.name
        if item.is_dir():
            shutil.copytree(item, dest)
        else:
            shutil.copy2(item, dest)
    profiles = {
        "test": {
            "target": "dev",
            "outputs": {
                "dev": {
                    "type": "postgres",
                    "host": PG_HOST,
                    "port": PG_PORT,
                    "user": PG_USER,
                    "password": PG_PASSWORD,
                    "dbname": PG_DATABASE,
                    "schema": schema_name,
                    "threads": 4,
                }
            },
        }
    }
    profiles_path = project_dir / "profiles.yml"
    profiles_path.write_text(yaml.dump(profiles))
    # Create the session schema up front so dbt can write into it.
    conn = _pg_connect()
    conn.autocommit = True
    try:
        with conn.cursor() as cur:
            cur.execute(f"CREATE SCHEMA IF NOT EXISTS {schema_name}")
    finally:
        conn.close()
    dbt_args = [
        "--project-dir",
        str(project_dir),
        "--profiles-dir",
        str(project_dir),
    ]
    # Parse once so a manifest.json exists for the orchestrator to load.
    runner = dbtRunner()
    result = runner.invoke(["parse", *dbt_args])
    assert result.success, f"dbt parse failed: {result.exception}"
    manifest_path = project_dir / "target" / "manifest.json"
    assert manifest_path.exists(), "manifest.json not generated"
    yield {
        "project_dir": project_dir,
        "profiles_dir": project_dir,
        "manifest_path": manifest_path,
        "schema": schema_name,
    }
    # Teardown: drop the session schema and everything inside it.
    conn = _pg_connect()
    conn.autocommit = True
    try:
        with conn.cursor() as cur:
            cur.execute(f"DROP SCHEMA IF EXISTS {schema_name} CASCADE")
    finally:
        conn.close()
@pytest.fixture
def orchestrator(pg_dbt_project):
    """Factory fixture that creates a PrefectDbtOrchestrator for the Postgres project.

    Defaults to `test_strategy=TestStrategy.SKIP` so that concurrency tests
    get deterministic results containing only model/seed nodes.
    """

    def _factory(**overrides):
        overrides.setdefault("test_strategy", TestStrategy.SKIP)
        project_settings = PrefectDbtSettings(
            project_dir=pg_dbt_project["project_dir"],
            profiles_dir=pg_dbt_project["profiles_dir"],
        )
        return PrefectDbtOrchestrator(
            settings=project_settings,
            manifest_path=pg_dbt_project["manifest_path"],
            **overrides,
        )

    return _factory
class TestPerNodePostgresConcurrency:
    """Integration tests for PER_NODE mode with real concurrent execution on Postgres."""

    def test_concurrent_full_build(self, orchestrator):
        """All 5 nodes succeed with concurrency=4."""
        from prefect import flow

        orch = orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=4,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        assert set(results.keys()) == ALL_EXECUTABLE
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )

    def test_concurrency_limit_serializes(self, orchestrator):
        """With concurrency=1, independent nodes do not overlap.

        We verify serialization by checking that no two independent nodes
        have overlapping [started_at, completed_at] intervals. With
        concurrency=1, each node must complete before the next starts.
        """
        from datetime import datetime

        from prefect import flow

        orch = orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()

        def _no_overlap(node_ids):
            """Check that no pair of nodes has overlapping execution."""
            timings = [results[nid]["timing"] for nid in node_ids]
            intervals = sorted(
                (
                    datetime.fromisoformat(t["started_at"]),
                    datetime.fromisoformat(t["completed_at"]),
                )
                for t in timings
            )
            # Adjacent sorted intervals must not intersect.
            for i in range(len(intervals) - 1):
                if intervals[i][1] > intervals[i + 1][0]:
                    return False
            return True

        seed_serial = _no_overlap([SEED_CUSTOMERS, SEED_ORDERS])
        stg_serial = _no_overlap([STG_CUSTOMERS, STG_ORDERS])
        assert seed_serial and stg_serial, (
            f"Expected serial execution (no overlap) with concurrency=1. "
            f"Seed timings: {results[SEED_CUSTOMERS]['timing']} vs {results[SEED_ORDERS]['timing']}, "
            f"Staging timings: {results[STG_CUSTOMERS]['timing']} vs {results[STG_ORDERS]['timing']}"
        )

    def test_concurrent_creates_correct_data(self, orchestrator, pg_dbt_project):
        """Postgres has correct row counts and aggregation values after concurrent build."""
        from prefect import flow

        orch = orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=4,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
        schema = pg_dbt_project["schema"]
        conn = _pg_connect()
        try:
            with conn.cursor() as cur:
                cur.execute(f"SELECT count(*) FROM {schema}.customers")
                assert cur.fetchone()[0] == 5
                cur.execute(f"SELECT count(*) FROM {schema}.orders")
                assert cur.fetchone()[0] == 10
                cur.execute(f"SELECT count(*) FROM {schema}.stg_customers")
                assert cur.fetchone()[0] == 5
                cur.execute(f"SELECT count(*) FROM {schema}.stg_orders")
                assert cur.fetchone()[0] == 10
                cur.execute(f"SELECT count(*) FROM {schema}.customer_summary")
                assert cur.fetchone()[0] == 5
                cur.execute(
                    f"SELECT order_count, total_amount FROM {schema}.customer_summary "
                    f"WHERE customer_name = 'alice'"
                )
                alice = cur.fetchone()
                assert alice[0] == 2  # alice has 2 orders
                assert alice[1] == 300  # 100 + 200
        finally:
            conn.close()

    def test_per_node_ephemeral_not_in_results(self, orchestrator):
        """Ephemeral models are not executed in PER_NODE mode."""
        from prefect import flow

        orch = orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=4,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        assert INT_ORDERS_ENRICHED not in results
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_postgres_integration.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/test_server_status.py | import json
from unittest.mock import AsyncMock, patch
import pytest
from prefect.settings import PREFECT_API_URL
from prefect.settings.context import temporary_settings
from prefect.testing.cli import invoke_and_assert
_SERVER_MOD = "prefect.cli.server"
@pytest.fixture(autouse=True)
def set_api_url():
    """Point every test at a fixed local API URL so output assertions are stable."""
    with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
        yield
def _mock_client(healthy: bool = True, server_version: str = "3.0.0"):
mock = AsyncMock()
if healthy:
mock.api_healthcheck = AsyncMock(return_value=None)
mock.api_version = AsyncMock(return_value=server_version)
else:
mock.api_healthcheck = AsyncMock(
return_value=ConnectionError("Connection refused")
)
return mock
def _patch_get_client(mock):
"""Patch get_client for the server status command."""
mock_ctx = AsyncMock(
__aenter__=AsyncMock(return_value=mock),
__aexit__=AsyncMock(return_value=False),
)
return patch("prefect.client.orchestration.get_client", return_value=mock_ctx)
class TestServerStatus:
def test_status_healthy_server(self):
    """Healthy server: exit 0 and report availability, URL, and version."""
    mock = _mock_client(healthy=True, server_version="3.0.0")
    with _patch_get_client(mock):
        invoke_and_assert(
            command=["server", "status"],
            expected_output_contains=[
                "Server is available",
                "http://localhost:4200/api",
                "3.0.0",
            ],
            expected_code=0,
        )
def test_status_unhealthy_server(self):
    """Unreachable server: exit 1 with an 'unavailable' message."""
    mock = _mock_client(healthy=False)
    with _patch_get_client(mock):
        invoke_and_assert(
            command=["server", "status"],
            expected_output_contains="Server is not available",
            expected_code=1,
        )
def test_status_healthy_server_json_output(self):
    """--output json emits a machine-readable status document."""
    mock = _mock_client(healthy=True, server_version="3.0.0")
    with _patch_get_client(mock):
        result = invoke_and_assert(
            command=["server", "status", "--output", "json"],
            expected_code=0,
        )
    output = json.loads(result.stdout.strip())
    assert output["status"] == "available"
    assert output["api_url"] == "http://localhost:4200/api"
    assert output["server_version"] == "3.0.0"
def test_status_unhealthy_server_json_output(self):
    """--output json on an unhealthy server still exits 1 but includes the error."""
    mock = _mock_client(healthy=False)
    with _patch_get_client(mock):
        result = invoke_and_assert(
            command=["server", "status", "--output", "json"],
            expected_code=1,
        )
    output = json.loads(result.stdout.strip())
    assert output["status"] == "unavailable"
    assert "error" in output
def test_status_invalid_output_format(self):
    """Unsupported --output values are rejected with a helpful message."""
    mock = _mock_client(healthy=True)
    with _patch_get_client(mock):
        invoke_and_assert(
            command=["server", "status", "--output", "xml"],
            expected_output_contains="Only 'json' output format is supported.",
            expected_code=1,
        )
def test_status_wait_succeeds_immediately(self):
    """--wait returns immediately when the first healthcheck passes."""
    mock = _mock_client(healthy=True, server_version="3.0.0")
    with _patch_get_client(mock):
        invoke_and_assert(
            command=["server", "status", "--wait"],
            expected_output_contains="Server is available",
            expected_code=0,
        )
def test_status_wait_succeeds_after_retries(self):
mock = _mock_client(healthy=True, server_version="3.0.0")
healthcheck_results = iter(
[
ConnectionError("Connection refused"),
ConnectionError("Connection refused"),
None,
]
)
async def healthcheck_side_effect():
return next(healthcheck_results)
mock.api_healthcheck = AsyncMock(side_effect=healthcheck_side_effect)
with _patch_get_client(mock):
invoke_and_assert(
command=["server", "status", "--wait", "--timeout", "30"],
expected_output_contains="Server is available",
expected_code=0,
)
def test_status_wait_timeout(self):
mock = _mock_client(healthy=False)
monotonic_values = iter([0.0, 0.5, 5.1])
async def fake_sleep(seconds):
pass
with (
_patch_get_client(mock),
patch(f"{_SERVER_MOD}.asyncio.sleep", side_effect=fake_sleep),
patch(f"{_SERVER_MOD}._monotonic", side_effect=monotonic_values),
):
invoke_and_assert(
command=["server", "status", "--wait", "--timeout", "5"],
expected_output_contains="Timed out after 5 seconds",
expected_code=1,
)
def test_status_wait_timeout_json_output(self):
mock = _mock_client(healthy=False)
monotonic_values = iter([0.0, 0.5, 5.1])
async def fake_sleep(seconds):
pass
with (
_patch_get_client(mock),
patch(f"{_SERVER_MOD}.asyncio.sleep", side_effect=fake_sleep),
patch(f"{_SERVER_MOD}._monotonic", side_effect=monotonic_values),
):
result = invoke_and_assert(
command=[
"server",
"status",
"--wait",
"--timeout",
"5",
"--output",
"json",
],
expected_code=1,
)
output = json.loads(result.stdout.strip())
assert output["status"] == "timed_out"
assert output["timeout"] == 5
def test_status_version_fetch_failure(self):
mock = _mock_client(healthy=True)
mock.api_version = AsyncMock(side_effect=Exception("version error"))
with _patch_get_client(mock):
invoke_and_assert(
command=["server", "status"],
expected_output_contains="Server is available",
expected_output_does_not_contain="Server version",
expected_code=0,
)
def test_status_no_api_url_configured(self):
mock = _mock_client(healthy=True)
with (
_patch_get_client(mock),
temporary_settings({PREFECT_API_URL: None}),
):
invoke_and_assert(
command=["server", "status"],
expected_output_contains="No API URL configured",
expected_code=1,
)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/test_server_status.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/test_cyclopts_runner.py | """Tests for the CycloptsCliRunner.
These tests validate that the runner correctly captures output, handles
interactive prompts, isolates global state, and returns proper exit codes.
"""
import os
import sys
import pytest
@pytest.fixture
def runner():
    """Provide a fresh CycloptsCliRunner instance for each test."""
    from prefect.testing.cli import CycloptsCliRunner

    cli_runner = CycloptsCliRunner()
    return cli_runner
class TestOutputCapture:
    """The runner must capture stdout/stderr and report success correctly."""

    def test_captures_stdout(self, runner):
        res = runner.invoke(["config", "view"])
        assert res.exit_code == 0
        assert "PREFECT_PROFILE" in res.stdout

    def test_captures_stderr_separately(self, runner):
        res = runner.invoke(["config", "view"])
        assert isinstance(res.stderr, str)

    def test_exit_code_zero_on_success(self, runner):
        res = runner.invoke(["version"])
        assert res.exception is None
        assert res.exit_code == 0
class TestInteractiveMode:
    """Prompts must render and consume provided input correctly."""

    def test_confirm_prompt_with_yes(self, runner):
        """Confirm.ask renders its prompt and accepts a 'y' answer."""
        res = runner.invoke(
            ["profile", "create", "test-runner-profile"],
            input="y\n",
        )
        # Supplying input switches the runner into interactive mode, so the
        # prompt is rendered to stdout and the command completes cleanly.
        assert res.exit_code == 0

    def test_noninteractive_without_input(self, runner):
        """With no input attached, stdin is not a TTY and no prompt appears."""
        res = runner.invoke(["config", "view"])
        assert res.exit_code == 0
class TestGlobalStateIsolation:
    """Invocations must restore every piece of process-global state they touch."""

    def test_stdout_restored(self, runner):
        saved = sys.stdout
        runner.invoke(["config", "view"])
        assert sys.stdout is saved

    def test_stderr_restored(self, runner):
        saved = sys.stderr
        runner.invoke(["config", "view"])
        assert sys.stderr is saved

    def test_stdin_restored(self, runner):
        saved = sys.stdin
        runner.invoke(["config", "view"], input="y\n")
        assert sys.stdin is saved

    def test_columns_env_restored(self, runner):
        saved = os.environ.get("COLUMNS")
        runner.invoke(["config", "view"])
        assert os.environ.get("COLUMNS") == saved

    def test_console_restored(self, runner):
        import prefect.cli._app as _cli

        saved = _cli.console
        runner.invoke(["config", "view"])
        assert _cli.console is saved

    def test_multiple_invocations_isolated(self, runner):
        """Sequential invocations must not leak state into one another."""
        first = runner.invoke(["config", "view"])
        second = runner.invoke(["config", "view"])
        for res in (first, second):
            assert res.exit_code == 0
            # Leaked capture state would leave one of these outputs empty.
            assert "PREFECT_PROFILE" in res.stdout
class TestExitCodes:
    """Exit codes must mirror command success or failure."""

    def test_successful_command(self, runner):
        assert runner.invoke(["config", "view"]).exit_code == 0

    def test_exception_returns_exit_code_1(self, runner):
        """An unhandled exception surfaces as exit code 1."""
        res = runner.invoke(["config", "set", "NONEXISTENT_SETTING=value"])
        assert res.exit_code == 1
class TestResultInterface:
    """CycloptsResult must expose the same surface as typer's Result."""

    def test_has_stdout(self, runner):
        res = runner.invoke(["version"])
        assert hasattr(res, "stdout") and isinstance(res.stdout, str)

    def test_has_stderr(self, runner):
        res = runner.invoke(["version"])
        assert hasattr(res, "stderr") and isinstance(res.stderr, str)

    def test_has_output(self, runner):
        res = runner.invoke(["version"])
        assert hasattr(res, "output")
        # `output` is an alias of `stdout` on typer's Result.
        assert res.output == res.stdout

    def test_has_exit_code(self, runner):
        res = runner.invoke(["version"])
        assert hasattr(res, "exit_code") and isinstance(res.exit_code, int)

    def test_has_exception(self, runner):
        res = runner.invoke(["version"])
        assert hasattr(res, "exception")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/test_cyclopts_runner.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_orchestrator.py | """
Orchestrator for per-node and per-wave dbt execution.
This module provides:
- ExecutionMode: Constants for execution mode selection
- PrefectDbtOrchestrator: Executes dbt builds with wave or per-node execution
"""
import argparse
import dataclasses
import json as _json
import os
import sys
import threading
from collections import deque
from contextlib import ExitStack, nullcontext
from datetime import datetime, timedelta, timezone
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Any
from uuid import uuid4
from cachetools import LFUCache
from dbt.artifacts.resources.types import NodeType
from prefect import task as prefect_task
from prefect.artifacts import create_markdown_artifact
from prefect.concurrency.sync import concurrency as prefect_concurrency
from prefect.context import AssetContext, FlowRunContext
from prefect.logging import get_logger, get_run_logger
from prefect.settings import PREFECT_CLIENT_SERVER_VERSION_CHECK_ENABLED
from prefect.settings.context import temporary_settings
from prefect.task_runners import ProcessPoolTaskRunner
from prefect.tasks import MaterializingTask
from prefect_dbt.core._artifacts import (
ASSET_NODE_TYPES,
create_asset_for_node,
create_summary_markdown,
get_compiled_code_for_node,
get_upstream_assets_for_node,
write_run_results_json,
)
from prefect_dbt.core._cache import build_cache_policy_for_node
from prefect_dbt.core._executor import DbtCoreExecutor, DbtExecutor, ExecutionResult
from prefect_dbt.core._freshness import (
compute_freshness_expiration,
filter_stale_nodes,
run_source_freshness,
)
from prefect_dbt.core._manifest import (
DbtNode,
ExecutionWave,
ManifestParser,
resolve_selection,
)
from prefect_dbt.core.settings import PrefectDbtSettings
from prefect_dbt.utilities import format_resource_id
logger = get_logger(__name__)
class ExecutionMode(Enum):
    """Execution mode for dbt orchestration.

    PER_WAVE: Each wave is a single `dbt build` invocation containing all
        nodes in the wave. Lower overhead, but a single failure marks the
        entire wave as failed and retries are not per-node.

    PER_NODE: Each node is a separate Prefect task with individual retries
        and concurrency control. Requires `run_build()` to be called
        inside a `@flow`.
    """

    # String values are accepted directly by PrefectDbtOrchestrator, which
    # coerces raw strings through ExecutionMode(...) in __init__.
    PER_NODE = "per_node"
    PER_WAVE = "per_wave"
class TestStrategy(Enum):
    """Strategy for executing dbt test nodes.

    IMMEDIATE: Tests are interleaved with models in the execution DAG.
        Kahn's algorithm naturally places each test in the wave after
        all of its parent models complete. This is the default,
        matching `dbt build` semantics.

    DEFERRED: All model waves execute first, then all tests execute
        together in a final wave.

    SKIP: Tests are excluded from execution.
    """

    # The class name starts with "Test", so tell pytest not to collect it.
    __test__ = False  # prevent pytest collection

    IMMEDIATE = "immediate"
    DEFERRED = "deferred"
    SKIP = "skip"
# ---------------------------------------------------------------
# extra_cli_args validation tables
# ---------------------------------------------------------------
# Consulted by _validate_extra_cli_args: a flag found in _BLOCKED_FLAGS or
# _FIRST_CLASS_FLAGS raises ValueError with the message below; a flag in
# _CAVEAT_FLAGS only logs a warning. Keys are the canonical long-form flag
# spellings registered in _build_extra_cli_args_parser.
_BLOCKED_FLAGS: dict[str, str] = {
    "--select": (
        "The orchestrator resolves selection at the manifest level and passes "
        "nodes as unique-ID selectors; a second CLI-level --select conflicts. "
        "Use the 'select' parameter of run_build() instead."
    ),
    "--models": (
        "Alias for --select. The orchestrator resolves selection at the "
        "manifest level. Use the 'select' parameter of run_build() instead."
    ),
    "--exclude": (
        "The orchestrator resolves exclusion at the manifest level; a CLI-level "
        "--exclude conflicts. Use the 'exclude' parameter of run_build() instead."
    ),
    "--selector": (
        "References a YAML selector that would override the orchestrator's "
        "resolved node set."
    ),
    "--indirect-selection": (
        "Hardcoded to 'empty' in PER_WAVE mode so the orchestrator controls "
        "test scheduling via TestStrategy; a user override would break "
        "IMMEDIATE/DEFERRED behaviour."
    ),
    "--project-dir": (
        "Set from settings.project_dir in the executor; overriding "
        "desynchronizes the orchestrator's path handling."
    ),
    "--target-path": (
        "Set from settings.target_path and used for manifest resolution; "
        "overriding desynchronizes manifest resolution."
    ),
    "--profiles-dir": (
        "Managed via settings.resolve_profiles_yml(); bypassing breaks "
        "temporary profile-file resolution."
    ),
    "--log-level": (
        "Set to 'none' for console output in the executor; the orchestrator "
        "deliberately silences dbt's console output and captures logs via "
        "callbacks."
    ),
}

# Flags that have dedicated constructor/run_build parameters; the message
# value names the supported alternative shown in the raised error.
_FIRST_CLASS_FLAGS: dict[str, str] = {
    "--full-refresh": "run_build(full_refresh=True)",
    "--target": "run_build(target='...')",
    "--threads": "DbtCoreExecutor(threads=N)",
    "--defer": "DbtCoreExecutor(defer=True)",
    "--defer-state": "DbtCoreExecutor(defer_state_path=Path(...))",
    "--favor-state": "DbtCoreExecutor(favor_state=True)",
    "--state": "DbtCoreExecutor(state_path=Path(...))",
}

# Flags that are allowed through but logged as warnings because they can
# interact badly with the orchestrator's scheduling.
_CAVEAT_FLAGS: dict[str, str] = {
    "--resource-type": (
        "Filters resource types at the CLI level; passing '--resource-type model' "
        "to a wave that includes tests (via TestStrategy.IMMEDIATE) would "
        "silently drop those tests."
    ),
    "--exclude-resource-type": (
        "Filters resource types at the CLI level; may silently drop tests "
        "scheduled by TestStrategy.IMMEDIATE."
    ),
    "--fail-fast": (
        "In PER_WAVE mode dbt stops the wave on first failure, potentially "
        "leaving nodes in a state the orchestrator hasn't tracked. Safe in "
        "PER_NODE mode since each invocation is a single node."
    ),
}
def _build_extra_cli_args_parser() -> tuple[
    argparse.ArgumentParser, dict[str, tuple[str, str]]
]:
    """Build an ArgumentParser that classifies blocked/first-class/caveat flags.

    argparse natively understands the `--flag=value`, `--flag value`, and
    `-s value` spellings, so no hand-rolled tokenising is needed.

    Returns:
        The parser plus a mapping from each argparse dest to
        `(canonical_flag, category)`, used for error/warning lookups.
    """
    parser = argparse.ArgumentParser(add_help=False)
    dest_info: dict[str, tuple[str, str]] = {}

    def _register(flags: list[str], dest: str, category: str) -> None:
        # nargs="?" + const=True makes a bare flag parse as True while a
        # flag with a value parses as that value; default=None means
        # "not supplied".
        parser.add_argument(*flags, dest=dest, nargs="?", const=True, default=None)
        dest_info[dest] = (flags[0], category)

    # Short aliases are grouped with their long forms; the first entry in
    # each flags list is the canonical spelling reported to users.
    blocked: list[tuple[list[str], str]] = [
        (["--select", "-s"], "select"),
        (["--models", "-m"], "models"),
        (["--exclude"], "exclude"),
        (["--selector"], "selector"),
        (["--indirect-selection"], "indirect_selection"),
        (["--project-dir"], "project_dir"),
        (["--target-path"], "target_path"),
        (["--profiles-dir"], "profiles_dir"),
        (["--log-level"], "log_level"),
    ]
    first_class: list[tuple[list[str], str]] = [
        (["--full-refresh"], "full_refresh"),
        (["--target", "-t"], "target"),
        (["--threads"], "threads"),
        (["--defer"], "defer"),
        (["--defer-state"], "defer_state"),
        (["--favor-state"], "favor_state"),
        (["--state"], "state"),
    ]
    caveat: list[tuple[list[str], str]] = [
        (["--resource-type"], "resource_type"),
        (["--exclude-resource-type"], "exclude_resource_type"),
        (["--fail-fast", "-x"], "fail_fast"),
    ]

    for category, entries in (
        ("blocked", blocked),
        ("first_class", first_class),
        ("caveat", caveat),
    ):
        for flags, dest in entries:
            _register(flags, dest, category)

    return parser, dest_info
# Build the parser once at import time; _validate_extra_cli_args reuses it
# for every run_build() call.
_EXTRA_CLI_ARGS_PARSER, _DEST_TO_INFO = _build_extra_cli_args_parser()
def _validate_extra_cli_args(extra_cli_args: list[str]) -> None:
    """Validate extra_cli_args against blocked, first-class, and caveat flags.

    `argparse.parse_known_args` is used so `--flag=value`, `--flag value`,
    and short-flag spellings are all recognised.

    Raises:
        ValueError: If any blocked or first-class flag is found.
    """
    parsed, _unknown = _EXTRA_CLI_ARGS_PARSER.parse_known_args(extra_cli_args)
    for dest, value in vars(parsed).items():
        if value is None:
            # Flag not supplied at all.
            continue
        flag, category = _DEST_TO_INFO[dest]
        if category == "blocked":
            raise ValueError(
                f"Cannot pass '{flag}' via extra_cli_args: {_BLOCKED_FLAGS[flag]}"
            )
        if category == "first_class":
            raise ValueError(
                f"Cannot pass '{flag}' via extra_cli_args; use "
                f"{_FIRST_CLASS_FLAGS[flag]} instead."
            )
        if category == "caveat":
            # Allowed, but loudly flagged.
            logger.warning(
                "extra_cli_args contains '%s': %s", flag, _CAVEAT_FLAGS[flag]
            )
# Map executable node types to their dbt CLI commands.
_NODE_COMMAND = {
    NodeType.Model: "run",
    NodeType.Seed: "seed",
    NodeType.Snapshot: "snapshot",
    NodeType.Test: "test",
}
# NodeType.Unit was added in dbt-core 1.8; guard for older versions.
# Unit tests are executed via `dbt test`, same as data tests.
_UNIT_TYPE = getattr(NodeType, "Unit", None)
if _UNIT_TYPE is not None:
    _NODE_COMMAND[_UNIT_TYPE] = "test"
# Resource types excluded from caching by default (see CacheConfig).
_DEFAULT_EXCLUDE_RESOURCE_TYPES: frozenset[NodeType] = frozenset(
    t for t in (NodeType.Test, NodeType.Snapshot, _UNIT_TYPE) if t is not None
)
# Keep _TEST_NODE_TYPES for failure-propagation logic (not user-configurable).
_TEST_NODE_TYPES = frozenset(t for t in (NodeType.Test, _UNIT_TYPE) if t is not None)
@dataclasses.dataclass(frozen=True)
class CacheConfig:
    """Configuration for cross-run caching in PER_NODE execution mode.

    Pass an instance to ``PrefectDbtOrchestrator(cache=CacheConfig(...))``
    to enable caching. ``None`` (the default) disables caching entirely.
    """

    # Time-based cache expiry; None means no time-based expiration.
    expiration: timedelta | None = None
    # Where cached node results are stored (storage object, slug, or path).
    result_storage: Any | str | Path | None = None
    # Where cache keys are stored (storage object, slug, or path).
    key_storage: Any | str | Path | None = None
    # When True, derive expiry from dbt source-freshness information
    # (see compute_freshness_expiration).
    use_source_freshness_expiration: bool = False
    # Materializations never cached; incremental models are excluded by
    # default since each run mutates warehouse state.
    exclude_materializations: frozenset[str] = frozenset({"incremental"})
    # Resource types never cached; defaults to tests/snapshots/unit tests.
    exclude_resource_types: frozenset[NodeType] = _DEFAULT_EXCLUDE_RESOURCE_TYPES
@dataclasses.dataclass(frozen=True)
class BuildPlan:
    """Result of a dry-run plan showing what `run_build()` would execute.

    Returned by `PrefectDbtOrchestrator.plan`. All fields are
    read-only so the plan can be safely logged, serialised, or compared
    across invocations.

    Attributes:
        waves: Execution waves in topological order. Each wave contains
            nodes that can execute in parallel.
        node_count: Total number of nodes across all waves.
        cache_predictions: Per-node cache prediction when caching is
            configured. Maps `node.unique_id` to `"hit"`,
            `"miss"`, or `"excluded"`. `None` when caching is
            not configured.
        skipped_nodes: Nodes that were filtered out by selectors or
            source-freshness checks. Maps `node.unique_id` to a
            result dict with `status` and `reason` keys.
        estimated_parallelism: Width of the largest wave — the maximum
            number of nodes that could execute concurrently.
    """

    waves: tuple[ExecutionWave, ...]
    node_count: int
    cache_predictions: dict[str, str] | None
    skipped_nodes: dict[str, dict[str, Any]]
    estimated_parallelism: int

    def __str__(self) -> str:
        """Render a human-readable, multi-line summary of the plan."""
        lines: list[str] = []
        lines.append(
            f"BuildPlan: {self.node_count} node(s) in {len(self.waves)} wave(s)"
            f" | max parallelism = {self.estimated_parallelism}"
        )
        lines.append("")
        # Wave breakdown: one line per node with optional type/materialization
        # tags and its cache prediction.
        for wave in self.waves:
            lines.append(f"  Wave {wave.wave_number} ({len(wave.nodes)} node(s)):")
            for node in wave.nodes:
                parts: list[str] = [f"    - {node.unique_id}"]
                tag_parts: list[str] = []
                if node.resource_type is not None:
                    tag_parts.append(node.resource_type.value)
                if node.materialization:
                    tag_parts.append(node.materialization)
                if tag_parts:
                    parts.append(f"[{', '.join(tag_parts)}]")
                if self.cache_predictions and node.unique_id in self.cache_predictions:
                    prediction = self.cache_predictions[node.unique_id]
                    parts.append(f"(cache: {prediction})")
                lines.append(" ".join(parts))
        # Cache summary (only when caching is configured).
        if self.cache_predictions:
            hits = sum(1 for v in self.cache_predictions.values() if v == "hit")
            misses = sum(1 for v in self.cache_predictions.values() if v == "miss")
            excluded = sum(
                1 for v in self.cache_predictions.values() if v == "excluded"
            )
            lines.append("")
            lines.append(
                f"  Cache: {hits} hit(s), {misses} miss(es), {excluded} excluded"
            )
        # Skipped nodes with their filter reasons.
        if self.skipped_nodes:
            lines.append("")
            lines.append(f"  Skipped ({len(self.skipped_nodes)}):")
            for nid, info in self.skipped_nodes.items():
                reason = info.get("reason", "unknown")
                lines.append(f"    - {nid}: {reason}")
        return "\n".join(lines)
# Dispatch table from a captured log-level name to the matching logger
# method; _emit_log_messages falls back to "info" for unknown levels.
_LOG_EMITTERS = {
    "debug": lambda log, msg: log.debug(msg),
    "info": lambda log, msg: log.info(msg),
    "warning": lambda log, msg: log.warning(msg),
    "error": lambda log, msg: log.error(msg),
}

# Bound process-pool dedupe state to avoid unbounded growth while retaining
# frequently repeated global messages.
_GLOBAL_LOG_DEDUPE_MAX_KEYS = 10_000

# Logger names whose messages are subject to cross-process deduplication.
_DBT_GLOBAL_LOGGER_NAMES = frozenset(
    {
        "prefect.task_runs.dbt_orchestrator_global",
    }
)
def _emit_log_messages(
    log_messages: dict[str, list[tuple[str, str]]] | None,
    node_id: str,
    target_logger: Any,
) -> None:
    """Replay captured dbt log messages for *node_id* on a Prefect logger.

    Messages recorded under other node ids are ignored. Each message is
    emitted at the level it was captured at; unrecognised levels fall
    back to INFO.
    """
    if not log_messages:
        return
    for level, msg in log_messages.get(node_id, []):
        _LOG_EMITTERS.get(level, _LOG_EMITTERS["info"])(target_logger, msg)
def _dbt_global_log_dedupe_processor_factory():
    """Build a process-pool message processor that drops duplicate dbt global logs."""
    # LFU keeps frequently repeated messages resident while bounding memory.
    seen: LFUCache[tuple[str, str, int, str], bool] = LFUCache(
        maxsize=_GLOBAL_LOG_DEDUPE_MAX_KEYS
    )

    def _processor(message_type: str, message_payload: Any):
        # Only well-formed log payloads are candidates for deduplication.
        if message_type != "log" or not isinstance(message_payload, dict):
            return message_type, message_payload

        logger_name = message_payload.get("name")
        flow_run_id = message_payload.get("flow_run_id")
        level = message_payload.get("level")
        message = message_payload.get("message")

        well_formed = (
            isinstance(logger_name, str)
            and logger_name in _DBT_GLOBAL_LOGGER_NAMES
            and isinstance(flow_run_id, str)
            and isinstance(level, int)
            and isinstance(message, str)
        )
        if not well_formed:
            # Pass through anything that is not a dbt global log message.
            return message_type, message_payload

        dedupe_key = (flow_run_id, logger_name, level, message)
        if seen.get(dedupe_key):
            # Duplicate: drop the message entirely.
            return None
        seen[dedupe_key] = True
        return message_type, message_payload

    return _processor
def _configure_process_pool_subprocess_message_processors(
    task_runner: ProcessPoolTaskRunner,
    processor_factories: list[Any],
) -> bool:
    """Configure process-pool message processors when the runner supports it.

    Tries attribute assignment first, then a setter method; returns False
    only when neither hook exists on the runner.
    """
    try:
        task_runner.subprocess_message_processor_factories = processor_factories
        return True
    except (AttributeError, TypeError):
        pass
    try:
        task_runner.set_subprocess_message_processor_factories(processor_factories)
        return True
    except (AttributeError, TypeError):
        return False
class _DbtNodeError(Exception):
    """Raised inside per-node tasks to trigger Prefect retries.

    Carries the underlying execution details so the orchestrator can
    assemble a proper error result once all retries are exhausted.
    Defines `__reduce__` so instances survive pickle round-trips across
    the `ProcessPoolTaskRunner` process boundary.
    """

    def __init__(
        self,
        execution_result: ExecutionResult,
        timing: dict[str, Any],
        invocation: dict[str, Any],
    ):
        self.execution_result = execution_result
        self.timing = timing
        self.invocation = invocation
        if execution_result.error:
            message = str(execution_result.error)
        else:
            message = "dbt node failed"
        super().__init__(message)

    def __reduce__(self):
        # Re-create from the same three constructor arguments on unpickle.
        return (type(self), (self.execution_result, self.timing, self.invocation))
class PrefectDbtOrchestrator:
"""Orchestrate dbt builds wave-by-wave or per-node.
Wires together ManifestParser (Phase 1), resolve_selection (Phase 2),
and DbtExecutor (Phase 3) to execute a full dbt build in topological
wave order.
Supports two execution modes:
- **PER_WAVE** (default): Each wave is a single `dbt build` invocation.
Lower overhead but coarser failure granularity.
- **PER_NODE**: Each node is a separate Prefect task with individual
retries and concurrency control. Requires `run_build()` to be
called inside a `@flow`.
Args:
settings: PrefectDbtSettings instance (created with defaults if None)
manifest_path: Explicit path to manifest.json (auto-detected if None)
executor: DbtExecutor implementation (DbtCoreExecutor created if None)
threads: Number of dbt threads (forwarded to DbtCoreExecutor)
state_path: Path for --state flag
defer: Whether to pass --defer flag
defer_state_path: Path for --defer-state flag
favor_state: Whether to pass --favor-state flag
execution_mode: `ExecutionMode.PER_WAVE` or `ExecutionMode.PER_NODE`.
Raises `ValueError` for unrecognized values.
retries: Number of retries per node (PER_NODE mode only)
retry_delay_seconds: Delay between retries in seconds
concurrency: Concurrency limit. A string names an existing Prefect
global concurrency limit; an int sets the max_workers on the
ProcessPoolTaskRunner used for parallel node execution.
task_runner_type: Task runner class to use for PER_NODE execution.
Defaults to `ProcessPoolTaskRunner`.
cache: A `CacheConfig` instance to enable cross-run caching for
PER_NODE mode. When not None, unchanged nodes are skipped on
subsequent runs. ``None`` (default) disables caching entirely.
Only supported with `execution_mode=ExecutionMode.PER_NODE`.
test_strategy: Controls when dbt test nodes execute.
`TestStrategy.IMMEDIATE` (default) interleaves tests with
models in the DAG (each test runs in the wave after its
parent models), matching `dbt build` semantics.
`TestStrategy.DEFERRED` runs all tests after all model waves.
`TestStrategy.SKIP` excludes tests entirely.
create_summary_artifact: When True, create a Prefect markdown
artifact summarising the build results at the end of
`run_build()`. Requires an active flow run context.
include_compiled_code: When True, include compiled SQL in
asset descriptions (PER_NODE mode only).
write_run_results: When True, write a dbt-compatible
`run_results.json` to the target directory after
`run_build()`.
disable_assets: Global override to suppress Prefect asset
creation for dbt node runs. When True, no
`MaterializingTask` instances are created regardless of
per-node configuration. Defaults to False for backwards
compatibility.
Example:
```python
@flow
def run_dbt_build():
orchestrator = PrefectDbtOrchestrator(
execution_mode=ExecutionMode.PER_NODE,
retries=2,
concurrency=4,
)
return orchestrator.run_build()
```
"""
def __init__(
self,
settings: PrefectDbtSettings | None = None,
manifest_path: Path | None = None,
executor: DbtExecutor | None = None,
threads: int | None = None,
state_path: Path | None = None,
defer: bool = False,
defer_state_path: Path | None = None,
favor_state: bool = False,
execution_mode: ExecutionMode = ExecutionMode.PER_WAVE,
retries: int = 0,
retry_delay_seconds: int = 30,
concurrency: str | int | None = None,
task_runner_type: type | None = None,
cache: CacheConfig | None = None,
test_strategy: TestStrategy = TestStrategy.IMMEDIATE,
create_summary_artifact: bool = True,
include_compiled_code: bool = False,
write_run_results: bool = False,
disable_assets: bool = False,
):
self._settings = (settings or PrefectDbtSettings()).model_copy()
self._manifest_path = manifest_path
try:
self._execution_mode = ExecutionMode(execution_mode)
except ValueError:
raise ValueError(
f"Invalid execution_mode {execution_mode!r}. "
f"Must be one of: {', '.join(m.value for m in ExecutionMode)}"
) from None
try:
self._test_strategy = TestStrategy(test_strategy)
except ValueError:
raise ValueError(
f"Invalid test_strategy {test_strategy!r}. "
f"Must be one of: {', '.join(s.value for s in TestStrategy)}"
) from None
self._retries = retries
self._retry_delay_seconds = retry_delay_seconds
self._concurrency = concurrency
self._task_runner_type = task_runner_type
self._cache = cache
self._create_summary_artifact = create_summary_artifact
self._include_compiled_code = include_compiled_code
self._write_run_results = write_run_results
self._disable_assets = disable_assets
if retries and self._execution_mode != ExecutionMode.PER_NODE:
raise ValueError(
"Retries are only supported in PER_NODE execution mode. "
"Set execution_mode=ExecutionMode.PER_NODE to use retries."
)
if cache is not None and self._execution_mode != ExecutionMode.PER_NODE:
raise ValueError(
"Caching is only supported in PER_NODE execution mode. "
"Set execution_mode=ExecutionMode.PER_NODE to use caching."
)
# When the caller provides an explicit manifest_path that lives
# outside the default target dir, align settings.target_path so
# that both resolve_selection() and the executor use the same
# target directory. Without this, selection and execution could
# resolve against different target dirs.
if manifest_path is not None:
self._settings.target_path = self._resolve_target_path()
if executor is not None:
self._executor = executor
else:
self._executor = DbtCoreExecutor(
self._settings,
threads=threads,
state_path=state_path,
defer=defer,
defer_state_path=defer_state_path,
favor_state=favor_state,
)
@staticmethod
def _build_node_result(
status: str,
timing: dict[str, Any] | None = None,
invocation: dict[str, Any] | None = None,
error: dict[str, Any] | None = None,
reason: str | None = None,
failed_upstream: list[str] | None = None,
) -> dict[str, Any]:
result: dict[str, Any] = {"status": status}
if timing is not None:
result["timing"] = timing
if invocation is not None:
result["invocation"] = invocation
if error is not None:
result["error"] = error
if reason is not None:
result["reason"] = reason
if failed_upstream is not None:
result["failed_upstream"] = failed_upstream
return result
def _create_artifacts(
self,
results: dict[str, Any],
elapsed_time: float,
) -> None:
"""Create post-execution artifacts (summary markdown, run_results.json)."""
if self._create_summary_artifact:
if FlowRunContext.get() is not None:
try:
markdown = create_summary_markdown(results)
create_markdown_artifact(
markdown=markdown,
key="dbt-orchestrator-summary",
_sync=True,
)
except Exception as e:
logger.warning("Failed to create dbt summary artifact: %s", e)
else:
logger.info(
"Summary artifact created: key='dbt-orchestrator-summary'"
)
if self._write_run_results:
target_dir = self._settings.project_dir / self._settings.target_path
out_path = write_run_results_json(results, elapsed_time, target_dir)
logger.info("run_results.json written to %s", out_path)
def _resolve_target_path(self) -> Path:
"""Resolve the target directory path.
When `manifest_path` is explicitly set, normalizes it to an absolute
path (relative to `settings.project_dir`) and returns its parent
directory so that `resolve_selection` uses the same target directory
as the manifest. Otherwise, returns `settings.target_path`.
"""
if self._manifest_path is not None:
if self._manifest_path.is_absolute():
return self._manifest_path.parent
return (self._settings.project_dir / self._manifest_path).resolve().parent
return self._settings.target_path
def _resolve_manifest_path(self) -> Path:
"""Resolve the path to manifest.json.
Resolution order:
1. Explicit `manifest_path` (relative paths resolved against
`settings.project_dir`). If the file does not exist, returns
the path as-is β `ManifestParser` will raise a clear error.
2. Delegates to the executor's `resolve_manifest_path()`. The
executor is responsible for locating or generating the manifest
(e.g. running `dbt parse` locally or fetching from dbt Cloud).
Returns:
Resolved `Path` to the `manifest.json` file.
"""
if self._manifest_path is not None:
if self._manifest_path.is_absolute():
return self._manifest_path
return (self._settings.project_dir / self._manifest_path).resolve()
path = self._executor.resolve_manifest_path()
self._manifest_path = path
self._settings.target_path = path.parent
return path
    @staticmethod
    def _augment_immediate_test_edges(
        merged_nodes: dict[str, DbtNode],
        test_nodes: dict[str, DbtNode],
    ) -> dict[str, DbtNode]:
        """Add implicit edges so downstream models depend on upstream tests.

        Under `dbt build`, a test failure on model M causes all downstream
        models of M to be skipped. In the orchestrator's DAG, both the
        test on M and a downstream model D originally share the same
        dependency (M), placing them in the same wave. That means the
        test cannot block D.

        This method adds an implicit dependency: for every test T that
        depends on model M, every non-test node D whose `depends_on`
        includes M also gains a dependency on T. Kahn's algorithm then
        places T in an earlier wave than D, allowing test-failure
        cascading to work correctly in both PER_WAVE and PER_NODE modes.

        Args:
            merged_nodes: Combined dict of model + test nodes.
            test_nodes: Subset containing only test nodes.

        Returns:
            A new dict of nodes with augmented dependencies. Only nodes
            whose `depends_on` changed are replaced; all others are the
            original objects.
        """
        # Build a mapping: model_id -> list of test unique_ids that test it.
        model_to_tests: dict[str, list[str]] = {}
        for test_id, test_node in test_nodes.items():
            for parent_id in test_node.depends_on:
                model_to_tests.setdefault(parent_id, []).append(test_id)
        if not model_to_tests:
            # No tests anywhere: the graph is returned unchanged.
            return merged_nodes
        # Build a reverse adjacency (parent -> children) for descendant
        # lookups used by the cycle guard below.
        children_of: dict[str, list[str]] = {}
        for nid, node_ in merged_nodes.items():
            for dep_id in node_.depends_on:
                children_of.setdefault(dep_id, []).append(nid)

        def _descendants(start: str) -> set[str]:
            """Return all transitive descendants of `start` (iterative DFS)."""
            visited: set[str] = set()
            stack = list(children_of.get(start, ()))
            while stack:
                nid = stack.pop()
                if nid in visited:
                    continue
                visited.add(nid)
                stack.extend(children_of.get(nid, ()))
            return visited

        # For each non-test node, check if any of its dependencies have
        # tests. If so, add those test IDs as extra dependencies.
        #
        # Cycle guard: adding D -> T (D depends on test T) would create a
        # cycle if T transitively reaches D through its other parents.
        # This happens when a multi-parent test (e.g. a relationship test)
        # depends on a model that is a descendant of D. We detect this by
        # checking whether *any* parent of T is D itself or a descendant
        # of D in the original graph.
        result = dict(merged_nodes)
        # Descendant sets are computed lazily and memoised per node.
        descendants_cache: dict[str, set[str]] = {}
        for node_id, node in merged_nodes.items():
            if node_id in test_nodes:
                continue
            extra_deps: list[str] = []
            for dep_id in node.depends_on:
                if dep_id in model_to_tests:
                    for tid in model_to_tests[dep_id]:
                        # Check if any of the test's parents is node_id
                        # itself or a transitive descendant of node_id.
                        if node_id not in descendants_cache:
                            descendants_cache[node_id] = _descendants(node_id)
                        test_parents = set(test_nodes[tid].depends_on)
                        if not test_parents & (descendants_cache[node_id] | {node_id}):
                            extra_deps.append(tid)
            if extra_deps:
                # dict.fromkeys de-duplicates while preserving edge order.
                new_depends_on = node.depends_on + tuple(dict.fromkeys(extra_deps))
                result[node_id] = dataclasses.replace(node, depends_on=new_depends_on)
        return result
def _prepare_build(
    self,
    select: str | None = None,
    exclude: str | None = None,
    full_refresh: bool = False,
    only_fresh_sources: bool = False,
    target: str | None = None,
    extra_cli_args: list[str] | None = None,
    *,
    _resolved_profiles_dir: str | None = None,
) -> tuple[
    list[ExecutionWave],
    list[dict[str, DbtNode]],
    dict[str, DbtNode],
    dict[str, Any],
    dict,
    ManifestParser,
]:
    """Execute steps 1-6 of the build pipeline without running anything.

    Shared by `run_build` and `plan`.

    Args:
        select: dbt selector expression (e.g. `"tag:daily"`)
        exclude: dbt exclude expression
        full_refresh: Accepted for signature parity with `run_build`;
            preparation itself does not consult it.
        only_fresh_sources: When True, drop models whose upstream
            sources are stale (and record them as skipped).
        target: dbt target name override
        extra_cli_args: Additional dbt CLI flags; validated before use.
        _resolved_profiles_dir: When provided by the caller (e.g.
            `run_build`), reuse this already-resolved profiles
            directory instead of opening a new temporary context.
            This avoids duplicate Prefect API calls for block /
            variable resolution.

    Returns:
        A tuple of `(waves, phases, filtered_nodes, skipped_results,
        freshness_results, parser)`. ``phases`` is a list of
        node-dicts for eager per-node scheduling.

    Raises:
        ValueError: If `extra_cli_args` contains a blocked flag.
    """
    if extra_cli_args:
        _validate_extra_cli_args(extra_cli_args)

    # 1. Parse manifest
    manifest_path = self._resolve_manifest_path()
    parser = ManifestParser(manifest_path)

    def _select_nodes(profiles_dir: str) -> set[str]:
        """Run dbt selector resolution against *profiles_dir*."""
        return resolve_selection(
            project_dir=self._settings.project_dir,
            profiles_dir=Path(profiles_dir),
            select=select,
            exclude=exclude,
            target_path=self._resolve_target_path(),
            target=target,
        )

    # 2. Resolve selectors if provided
    selected_ids: set[str] | None = None
    if select is not None or exclude is not None:
        if _resolved_profiles_dir is not None:
            # Caller already resolved profiles -> reuse directly.
            selected_ids = _select_nodes(_resolved_profiles_dir)
        else:
            # Standalone call (e.g. from plan()) -> resolve in a
            # local context that is cleaned up immediately.
            with self._settings.resolve_profiles_yml() as rpd:
                if isinstance(self._executor, DbtCoreExecutor):
                    # Pin the temp profiles dir on the executor while
                    # `dbt ls` runs.
                    with self._executor.use_resolved_profiles_dir(rpd):
                        selected_ids = _select_nodes(rpd)
                else:
                    selected_ids = _select_nodes(rpd)

    # 3. Filter nodes
    filtered_nodes = parser.filter_nodes(selected_node_ids=selected_ids)

    # 4. Source freshness integration
    freshness_results: dict = {}
    skipped_results: dict[str, Any] = {}
    if only_fresh_sources or (
        self._cache is not None and self._cache.use_source_freshness_expiration
    ):
        freshness_results = run_source_freshness(
            self._settings,
            target_path=self._resolve_target_path(),
            target=target,
        )
    if only_fresh_sources and freshness_results:
        filtered_nodes, skipped_results = filter_stale_nodes(
            filtered_nodes, parser.all_nodes, freshness_results
        )

    # 5. Collect test nodes when strategy != SKIP
    test_nodes: dict = {}
    if self._test_strategy != TestStrategy.SKIP:
        test_nodes = parser.filter_test_nodes(
            selected_node_ids=selected_ids,
            executable_node_ids=set(filtered_nodes.keys()),
        )

    # 6. Compute waves from remaining nodes
    if self._test_strategy == TestStrategy.IMMEDIATE and test_nodes:
        # Tests run inline; add implicit model->test edges so a test
        # failure can block downstream models (dbt build semantics).
        merged = {**filtered_nodes, **test_nodes}
        augmented = self._augment_immediate_test_edges(merged, test_nodes)
        waves = parser.compute_execution_waves(nodes=augmented)
        phases: list[dict[str, DbtNode]] = [augmented]
    elif self._test_strategy == TestStrategy.DEFERRED and test_nodes:
        # All tests run after all models; renumber test waves so they
        # follow the last model wave.
        model_waves = parser.compute_execution_waves(nodes=filtered_nodes)
        test_waves = parser.compute_execution_waves(nodes=test_nodes)
        next_wave_num = (model_waves[-1].wave_number + 1) if model_waves else 0
        for tw in test_waves:
            tw.wave_number = next_wave_num
            next_wave_num += 1
        waves = model_waves + test_waves
        phases = [filtered_nodes, test_nodes]
    else:
        waves = parser.compute_execution_waves(nodes=filtered_nodes)
        phases = [filtered_nodes]

    return waves, phases, filtered_nodes, skipped_results, freshness_results, parser
def plan(
    self,
    select: str | None = None,
    exclude: str | None = None,
    full_refresh: bool = False,
    only_fresh_sources: bool = False,
    target: str | None = None,
    extra_cli_args: list[str] | None = None,
) -> BuildPlan:
    """Dry-run: preview what `run_build` would execute.

    Runs the same preparation pipeline as `run_build` (manifest parse,
    selector resolution, node filtering, source freshness, test
    scheduling, wave computation) but stops short of execution. The
    only dbt commands invoked are `dbt ls` (selector resolution) and
    `dbt source freshness` (when `only_fresh_sources` or
    freshness-based cache expiration is enabled).

    Args:
        select: dbt selector expression (e.g. `"tag:daily"`)
        exclude: dbt exclude expression
        full_refresh: Whether `--full-refresh` would be passed
        only_fresh_sources: When True, filter out models with stale
            upstream sources
        target: dbt target name override
        extra_cli_args: Additional dbt CLI flags (validated the same
            way as in `run_build`)

    Returns:
        A `BuildPlan` describing the waves, node count, cache
        predictions, skipped nodes, and estimated parallelism.
    """
    waves, _phases, _filtered, skipped, _freshness, parser = self._prepare_build(
        select=select,
        exclude=exclude,
        full_refresh=full_refresh,
        only_fresh_sources=only_fresh_sources,
        target=target,
        extra_cli_args=extra_cli_args,
    )
    total_nodes = sum(len(wave.nodes) for wave in waves)
    widest_wave = max((len(wave.nodes) for wave in waves), default=0)

    # Per-node cache predictions, only when caching is configured.
    predictions: dict[str, str] | None = None
    if self._cache is not None:
        keys = self._precompute_all_cache_keys(
            parser.get_executable_nodes(),
            full_refresh,
            parser.get_macro_paths(),
        )
        prior_state = self._load_execution_state()
        predictions = {}
        for wave in waves:
            for node in wave.nodes:
                nid = node.unique_id
                if (
                    node.resource_type in self._cache.exclude_resource_types
                    or node.materialization in self._cache.exclude_materializations
                ):
                    verdict = "excluded"
                elif full_refresh:
                    # --full-refresh always bypasses the cache.
                    verdict = "miss"
                elif nid in keys and keys[nid] == prior_state.get(nid):
                    # Current key matches the key recorded at the last
                    # successful execution.
                    verdict = "hit"
                else:
                    verdict = "miss"
                predictions[nid] = verdict

    return BuildPlan(
        waves=tuple(waves),
        node_count=total_nodes,
        cache_predictions=predictions,
        skipped_nodes=skipped,
        estimated_parallelism=widest_wave,
    )
def run_build(
    self,
    select: str | None = None,
    exclude: str | None = None,
    full_refresh: bool = False,
    only_fresh_sources: bool = False,
    target: str | None = None,
    extra_cli_args: list[str] | None = None,
) -> dict[str, Any]:
    """Execute a dbt build wave-by-wave or per-node.

    Pipeline:
        1. Parse the manifest
        2. Optionally resolve selectors to filter nodes
        3. Filter nodes
        4. Optionally run source freshness and filter stale nodes
        5. Compute execution waves (topological order)
        6. Execute (per-wave or per-node depending on mode)

    In **PER_NODE** mode, each node becomes a separate Prefect task with
    individual retries. This requires `run_build()` to be called
    inside a `@flow`.

    Args:
        select: dbt selector expression (e.g. `"tag:daily"`)
        exclude: dbt exclude expression
        full_refresh: Whether to pass `--full-refresh` to dbt
        only_fresh_sources: When True, skip models whose upstream
            sources are stale (freshness status "error" or "runtime
            error"). Downstream dependents are also skipped.
        target: dbt target name to override the default from
            profiles.yml (maps to `--target` / `-t`)
        extra_cli_args: Additional dbt CLI flags to pass through
            to every dbt invocation. Useful for flags the
            orchestrator does not expose as first-class parameters
            (e.g. `["--store-failures", "--vars",
            "{'my_var': 'value'}"]`). Flags that conflict with
            orchestrator-managed settings are rejected with a
            `ValueError`.

    Returns:
        Dict mapping node unique_id to result dict. Each result has:
            - `status`: `"success"`, `"cached"`, `"error"`, or `"skipped"`
            - `timing`: `{started_at, completed_at, duration_seconds}`
              (not present for skipped nodes)
            - `invocation`: `{command, args}` (not present for skipped)
            - `error`: `{message, type}` (only for error status)
            - `reason`: reason string (only for skipped status)
            - `failed_upstream`: list of failed node IDs (only for skipped)

    Raises:
        ValueError: If `extra_cli_args` contains a blocked flag or
            a flag that has a first-class parameter equivalent.
    """
    # The ExitStack owns the temporary resolved-profiles directory (and
    # the executor pin) for the whole build, so selector resolution and
    # execution share one temp dir and it is cleaned up on exit.
    with ExitStack() as stack:
        resolved_profiles_dir: str | None = None

        def _ensure_resolved_profiles_dir() -> str:
            """Resolve profiles lazily (at most once) and pin them to the executor."""
            nonlocal resolved_profiles_dir
            if resolved_profiles_dir is None:
                resolved_profiles_dir = stack.enter_context(
                    self._settings.resolve_profiles_yml()
                )
                if isinstance(self._executor, DbtCoreExecutor):
                    # Keep the executor pointed at the temp profiles dir
                    # for the lifetime of the stack.
                    stack.enter_context(
                        self._executor.use_resolved_profiles_dir(
                            resolved_profiles_dir
                        )
                    )
            return resolved_profiles_dir

        # Eagerly resolve profiles when selectors will need them so
        # the same temp dir is reused for execution later.
        if select is not None or exclude is not None:
            _ensure_resolved_profiles_dir()

        (
            waves,
            phases,
            filtered_nodes,
            skipped_results,
            freshness_results,
            parser,
        ) = self._prepare_build(
            select=select,
            exclude=exclude,
            full_refresh=full_refresh,
            only_fresh_sources=only_fresh_sources,
            target=target,
            extra_cli_args=extra_cli_args,
            _resolved_profiles_dir=resolved_profiles_dir,
        )

        # 7. Execute
        build_started = datetime.now(timezone.utc)
        # Ensure profiles are resolved for execution modes that
        # invoke dbt directly.
        if isinstance(self._executor, DbtCoreExecutor) and (
            self._execution_mode == ExecutionMode.PER_WAVE or self._cache is None
        ):
            _ensure_resolved_profiles_dir()
        execution_results = self._run_execution(
            waves,
            phases,
            full_refresh,
            freshness_results,
            parser,
            target=target,
            extra_cli_args=extra_cli_args,
        )
        build_completed = datetime.now(timezone.utc)
        elapsed_time = (build_completed - build_started).total_seconds()

        # Merge skipped (stale-source) results with execution results.
        if skipped_results:
            execution_results.update(skipped_results)

        # 8. Post-execution: artifacts
        self._create_artifacts(execution_results, elapsed_time)
        return execution_results
def _run_execution(
    self,
    waves: list[ExecutionWave],
    phases: list[dict[str, DbtNode]],
    full_refresh: bool,
    freshness_results: dict,
    parser: ManifestParser,
    target: str | None = None,
    extra_cli_args: list[str] | None = None,
) -> dict[str, Any]:
    """Route the prepared build to the PER_NODE or PER_WAVE executor."""
    if self._execution_mode != ExecutionMode.PER_NODE:
        return self._execute_per_wave(
            waves,
            full_refresh,
            target=target,
            extra_cli_args=extra_cli_args,
        )

    caching = self._cache is not None
    # Macro paths are only needed for cache-key computation.
    macro_paths = parser.get_macro_paths() if caching else {}
    widest_wave = max((len(w.nodes) for w in waves), default=1)
    # Freshness results only matter when freshness-based expiration is on.
    freshness = (
        freshness_results
        if caching and self._cache.use_source_freshness_expiration
        else None
    )
    return self._execute_per_node(
        phases,
        widest_wave,
        full_refresh,
        macro_paths,
        freshness_results=freshness,
        all_nodes=parser.all_nodes,
        adapter_type=parser.adapter_type,
        project_name=parser.project_name,
        target=target,
        extra_cli_args=extra_cli_args,
        all_executable_nodes=parser.get_executable_nodes(),
    )
# ------------------------------------------------------------------
# PER_WAVE execution
# ------------------------------------------------------------------
def _execute_per_wave(
    self,
    waves,
    full_refresh,
    target: str | None = None,
    extra_cli_args: list[str] | None = None,
):
    """Execute waves one at a time, each as a single dbt invocation.

    Args:
        waves: Topologically ordered execution waves.
        full_refresh: Whether `--full-refresh` is passed to dbt.
        target: Optional dbt target name override.
        extra_cli_args: Extra dbt CLI flags appended to each invocation.

    Returns:
        Mapping of node unique_id to a per-node result dict (status,
        timing, invocation, plus error / skip details).
    """
    results: dict[str, Any] = {}
    # IDs of failed nodes; non-empty means all later waves are skipped.
    failed_nodes: list[str] = []
    # Always suppress dbt's automatic indirect test selection in
    # PER_WAVE mode. The orchestrator owns test scheduling:
    #   SKIP               -> no tests at all (indirect selection would leak them)
    #   IMMEDIATE/DEFERRED -> tests only in orchestrator-placed waves
    indirect_selection = "empty"
    try:
        run_logger = get_run_logger()
    except Exception:
        # No active Prefect run context -- fall back to the module logger.
        run_logger = logger
    for wave in waves:
        if failed_nodes:
            # Skip this wave -- upstream failure
            for node in wave.nodes:
                results[node.unique_id] = self._build_node_result(
                    status="skipped",
                    reason="upstream failure",
                    failed_upstream=list(failed_nodes),
                )
            continue
        # Execute the wave
        started_at = datetime.now(timezone.utc)
        try:
            wave_result: ExecutionResult = self._executor.execute_wave(
                wave.nodes,
                full_refresh=full_refresh,
                indirect_selection=indirect_selection,
                target=target,
                extra_cli_args=extra_cli_args,
            )
        except Exception as exc:
            # Executor crash: treat as a whole-wave failure result.
            wave_result = ExecutionResult(
                success=False,
                node_ids=[n.unique_id for n in wave.nodes],
                error=exc,
            )
        completed_at = datetime.now(timezone.utc)
        # Forward node-scoped dbt log lines per node, then the
        # wave-level (unscoped) lines once.
        for node in wave.nodes:
            _emit_log_messages(wave_result.log_messages, node.unique_id, run_logger)
        _emit_log_messages(wave_result.log_messages, "", run_logger)
        # Wave-level timing/invocation shared by every node's result.
        timing = {
            "started_at": started_at.isoformat(),
            "completed_at": completed_at.isoformat(),
            "duration_seconds": (completed_at - started_at).total_seconds(),
        }
        invocation = {
            "command": "build",
            "args": [n.unique_id for n in wave.nodes],
        }
        if wave_result.success:
            for node in wave.nodes:
                node_result = self._build_node_result(
                    status="success",
                    timing=dict(timing),
                    invocation=dict(invocation),
                )
                # Enrich with per-node execution_time from artifacts
                if (
                    wave_result.artifacts
                    and node.unique_id in wave_result.artifacts
                ):
                    artifact = wave_result.artifacts[node.unique_id]
                    if "execution_time" in artifact:
                        node_result["timing"]["execution_time"] = artifact[
                            "execution_time"
                        ]
                results[node.unique_id] = node_result
        else:
            # PER_WAVE failure: use per-node artifact status when
            # available so that test failures don't incorrectly
            # cascade to sibling models or downstream waves.
            for node in wave.nodes:
                # Check per-node artifact status to distinguish
                # individually successful nodes from truly failed ones.
                node_artifact = (
                    wave_result.artifacts.get(node.unique_id)
                    if wave_result.artifacts
                    else None
                )
                node_succeeded = node_artifact is not None and node_artifact.get(
                    "status"
                ) in ("success", "pass")
                if node_succeeded:
                    node_result = self._build_node_result(
                        status="success",
                        timing=dict(timing),
                        invocation=dict(invocation),
                    )
                    if "execution_time" in node_artifact:
                        node_result["timing"]["execution_time"] = node_artifact[
                            "execution_time"
                        ]
                    results[node.unique_id] = node_result
                else:
                    # Prefer per-node artifact message (the real dbt
                    # error) over the wave-level exception which may
                    # be None when dbt records failures as node
                    # results rather than Python exceptions.
                    artifact_msg = (
                        node_artifact.get("message") if node_artifact else None
                    ) or None
                    error_info = {
                        "message": artifact_msg
                        or (
                            str(wave_result.error)
                            if wave_result.error
                            else "unknown error"
                        ),
                        "type": type(wave_result.error).__name__
                        if wave_result.error
                        else "UnknownError",
                    }
                    results[node.unique_id] = self._build_node_result(
                        status="error",
                        timing=dict(timing),
                        invocation=dict(invocation),
                        error=error_info,
                    )
                    # Propagate failures to downstream waves.
                    # Under IMMEDIATE, test failures also cascade
                    # to match `dbt build` semantics.
                    if (
                        node.resource_type not in _TEST_NODE_TYPES
                        or self._test_strategy == TestStrategy.IMMEDIATE
                    ):
                        failed_nodes.append(node.unique_id)
    return results
# ------------------------------------------------------------------
# PER_NODE execution
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Execution state -- persistent record of each node's precomputed
# cache key at the time it was last successfully executed. Used to
# decide whether an unexecuted upstream's warehouse data matches
# the current file state (see _build_cache_options_for_node).
# ------------------------------------------------------------------
# File name, relative to the key-storage root, of the persisted
# execution-state JSON document.
_EXECUTION_STATE_KEY: str = ".execution_state.json"
@staticmethod
def _resolve_maybe_coro(result):
    """Unwrap *result*: run it to completion if awaitable, else return as-is.

    Lets callers treat sync and async implementations uniformly.
    """
    import inspect

    if not inspect.isawaitable(result):
        return result
    from prefect.utilities.asyncutils import run_coro_as_sync

    return run_coro_as_sync(result)
@staticmethod
def _is_block_slug(value: str) -> bool:
    """Return True if *value* has the two-part ``type/name`` slug shape."""
    # Exactly one "/" separator <=> splitting yields exactly two parts.
    return value.count("/") == 1
def _resolve_storage(self) -> tuple[Path | None, Any]:
    """Resolve ``CacheConfig.key_storage`` into a local path or filesystem block.

    Returns ``(path, None)`` for local paths and ``(None, block)`` for
    ``WritableFileSystem`` instances or block-slug strings. Returns
    ``(None, None)`` when caching is disabled or both key storage and
    result storage are unconfigured.

    When ``key_storage`` is ``None`` we fall back to
    ``result_storage`` because Prefect co-locates cache metadata with
    results by default, so execution state should live there too.
    """
    storage = self._cache.key_storage if self._cache else None
    if storage is None and self._cache is not None:
        # Fall back to result_storage -- cache keys are co-located
        # with results by default, so execution state should be too.
        storage = self._cache.result_storage
    if storage is None:
        return None, None
    if isinstance(storage, Path):
        return storage, None
    if not isinstance(storage, str):
        # Already a WritableFileSystem block instance.
        return None, storage
    if not self._is_block_slug(storage):
        # Plain string path.
        return Path(storage), None
    from prefect.results import resolve_result_storage

    return None, resolve_result_storage(storage, _sync=True)
def _load_execution_state(self) -> dict[str, str]:
    """Best-effort load of the persisted ``{node_id: cache_key}`` mapping."""
    try:
        local_path, fs_block = self._resolve_storage()
        if local_path is not None:
            raw = (local_path / self._EXECUTION_STATE_KEY).read_text()
            return _json.loads(raw)
        if fs_block is not None:
            raw = self._resolve_maybe_coro(
                fs_block.read_path(self._EXECUTION_STATE_KEY)
            )
            return _json.loads(raw)
    except Exception:
        # Missing or corrupt state is normal on a first run -- fall
        # through and treat it as empty.
        pass
    return {}
def _save_execution_state(self, state: dict[str, str]) -> None:
    """Best-effort persist of the execution-state mapping; failures only log."""
    payload = _json.dumps(state).encode()
    try:
        local_path, fs_block = self._resolve_storage()
        if local_path is not None:
            (local_path / self._EXECUTION_STATE_KEY).write_bytes(payload)
        elif fs_block is not None:
            self._resolve_maybe_coro(
                fs_block.write_path(self._EXECUTION_STATE_KEY, payload)
            )
    except Exception as exc:
        # Execution state is an optimization -- never fail the build
        # over it.
        logger.debug("Could not save execution state: %s", exc)
def _build_cache_options_for_node(
    self,
    node,
    full_refresh,
    computed_cache_keys,
    macro_paths=None,
    freshness_results=None,
    all_nodes=None,
    precomputed_cache_keys=None,
    execution_state=None,
):
    """Build cache-related `with_options` kwargs and record the eager key.

    Returns a dict of extra kwargs to merge into `with_options`
    (empty when caching must be disabled for this node). As a
    side-effect, stores the pre-computed cache key in
    *computed_cache_keys* so downstream nodes can incorporate it.

    When *precomputed_cache_keys* is provided, upstream dependencies
    that were not executed in this run (absent from
    *computed_cache_keys*) can still be resolved from the
    pre-computed dict.

    *execution_state* maps each node to the precomputed key it had
    when last successfully executed. When an upstream's persisted
    state matches its current precomputed key the warehouse is
    assumed current and the upstream key is used unsalted (same
    cache namespace as a full build). Otherwise the key is salted
    with ``":unexecuted"`` so independent upstream rebuilds
    invalidate the downstream cache entry.
    """
    known_keys = precomputed_cache_keys or {}
    last_executed = execution_state or {}

    # Resolve a cache key for every upstream dependency.
    upstream_keys = {}
    for dep_id in node.depends_on:
        if dep_id in computed_cache_keys:
            # Upstream ran earlier in this build -- use its live key.
            upstream_keys[dep_id] = computed_cache_keys[dep_id]
            continue
        if dep_id not in known_keys:
            # No key at all for this upstream (e.g. its source file is
            # missing from disk). We cannot guarantee the cached result
            # is still valid, so disable caching for this node.
            logger.debug(
                "Disabling cache for %s: upstream %s has no cache key",
                node.unique_id,
                dep_id,
            )
            return {}
        current_key = known_keys[dep_id]
        if last_executed.get(dep_id) == current_key:
            # Upstream previously executed with the same file state it
            # has now -- warehouse data should be current. Use the
            # unsalted key so this selective run shares the full-build
            # cache namespace.
            upstream_keys[dep_id] = current_key
        else:
            # Upstream never executed with the current file state.
            # Salt the key so the cache entry is distinct and will be
            # invalidated once upstream is eventually rebuilt.
            upstream_keys[dep_id] = current_key + ":unexecuted"

    policy = build_cache_policy_for_node(
        node,
        self._settings.project_dir,
        full_refresh,
        upstream_keys,
        self._cache.key_storage if self._cache else None,
        macro_paths=macro_paths,
    )
    # Eagerly compute and publish this node's key for its dependents.
    key = policy.compute_key(None, {}, {})
    if key is not None:
        computed_cache_keys[node.unique_id] = key

    opts: dict[str, Any] = {"cache_policy": policy, "persist_result": True}
    if full_refresh:
        opts["refresh_cache"] = True

    # Cache expiration: freshness-based when enabled, else the default.
    expiration = self._cache.expiration if self._cache else None
    if (
        self._cache is not None
        and self._cache.use_source_freshness_expiration
        and freshness_results
        and all_nodes
    ):
        freshness_expiration = compute_freshness_expiration(
            node.unique_id, all_nodes, freshness_results
        )
        if freshness_expiration is not None:
            expiration = freshness_expiration
    if expiration is not None:
        opts["cache_expiration"] = expiration
    if self._cache is not None and self._cache.result_storage is not None:
        opts["result_storage"] = self._cache.result_storage
    return opts
def _precompute_all_cache_keys(
    self,
    all_executable_nodes: dict[str, DbtNode],
    full_refresh: bool,
    macro_paths: dict[str, str | None],
) -> dict[str, str]:
    """Pre-compute cache keys for all executable nodes in topological order.

    Walks *all_executable_nodes* using Kahn's algorithm so that each
    node's upstream keys are available before its own key is computed.
    This ensures nodes whose upstream dependencies are outside the
    current ``select=`` filter still get valid cache keys, since cache
    keys are pure functions of manifest metadata and file contents --
    they don't require execution.

    Args:
        all_executable_nodes: Every executable node in the manifest.
        full_refresh: Whether ``--full-refresh`` applies; folded into
            each key by the cache policy.
        macro_paths: Macro name -> file path mapping for key hashing.

    Returns:
        Mapping of ``node.unique_id`` to its computed cache key string.
        Nodes whose key could not be computed (e.g. missing file on
        disk) are omitted from the dict, as are their descendants
        (their keys would depend on the missing one).
    """
    computed: dict[str, str] = {}
    nodes = all_executable_nodes

    # Build in-degree map scoped to *nodes* (same logic as
    # ManifestParser.compute_execution_waves).
    in_degree: dict[str, int] = {}
    dependents: dict[str, list[str]] = {nid: [] for nid in nodes}
    for nid, node in nodes.items():
        deps_in_graph = [d for d in node.depends_on if d in nodes]
        in_degree[nid] = len(deps_in_graph)
        for dep_id in deps_in_graph:
            dependents[dep_id].append(nid)

    current_wave = [nid for nid, deg in in_degree.items() if deg == 0]
    while current_wave:
        next_wave: list[str] = []

        def _release(nid: str) -> None:
            # Mark *nid* processed: unblock any dependent whose last
            # remaining in-graph dependency this was.
            for dependent_id in dependents[nid]:
                in_degree[dependent_id] -= 1
                if in_degree[dependent_id] == 0:
                    next_wave.append(dependent_id)

        for nid in current_wave:
            node = nodes[nid]
            # Gather upstream keys (only those within all_executable_nodes).
            upstream_keys: dict[str, str] = {}
            skip = False
            for dep_id in node.depends_on:
                if dep_id in computed:
                    upstream_keys[dep_id] = computed[dep_id]
                elif dep_id in nodes:
                    # Dependency is in the graph but has no key (its
                    # own computation failed). Skip this node too.
                    skip = True
                    break
                # else: dependency is outside executable nodes (e.g.
                # a source) -- not an error, just not in upstream_keys.
            if skip:
                _release(nid)
                continue
            # Guard: if the node declares a source file but we cannot
            # read it, the resulting key would not track file-content
            # changes. Refuse to record a key so that downstream nodes
            # fall back to uncached execution.
            if node.original_file_path:
                file_path = self._settings.project_dir / node.original_file_path
                try:
                    # Cheap readability probe; the policy re-reads the
                    # file itself when hashing, so don't slurp it here.
                    with file_path.open("rb") as fh:
                        fh.read(1)
                except OSError:
                    logger.debug(
                        "Skipping cache key for %s: source file "
                        "unreadable at %s",
                        nid,
                        file_path,
                    )
                    _release(nid)
                    continue
            policy = build_cache_policy_for_node(
                node,
                self._settings.project_dir,
                full_refresh,
                upstream_keys,
                macro_paths=macro_paths,
            )
            key = policy.compute_key(None, {}, {})
            if key is not None:
                computed[nid] = key
            _release(nid)
        current_wave = next_wave
    return computed
def _execute_per_node(
self,
phases,
largest_wave,
full_refresh,
macro_paths=None,
freshness_results=None,
all_nodes=None,
adapter_type=None,
project_name=None,
target: str | None = None,
extra_cli_args: list[str] | None = None,
all_executable_nodes=None,
):
"""Execute each node as an individual Prefect task.
Creates a separate Prefect task per node with individual retries.
Nodes are submitted eagerly as soon as all their individual
dependencies complete, maximizing concurrency without artificial
wave barriers. Failed nodes cause their downstream dependents
to be skipped.
For models, seeds, and snapshots with a `relation_name`, the
task is wrapped in a `MaterializingTask` that tracks asset
lineage in Prefect's asset graph.
Each subprocess gets its own dbt adapter registry (`FACTORY`
singleton), so there is no shared mutable state. Adapter
pooling is enabled so connections survive across invocations
within the same worker process.
Requires an active Prefect flow run context (call inside a `@flow`).
"""
if self._task_runner_type is None:
task_runner_type = ProcessPoolTaskRunner
else:
task_runner_type = self._task_runner_type
executor = self._executor
if issubclass(task_runner_type, ProcessPoolTaskRunner) and isinstance(
executor, DbtCoreExecutor
):
executor._pool_adapters = True
concurrency_name = (
self._concurrency if isinstance(self._concurrency, str) else None
)
build_result = self._build_node_result
all_nodes_map = all_nodes or {}
# Compute max_workers for the task runner. For ProcessPool-based
# execution, cap worker count to 2Γ local CPUs β dbt nodes are
# mostly I/O-bound (waiting on the database), so moderate
# oversubscription improves throughput without excessive overhead.
max_workers = self._determine_per_node_max_workers(
task_runner_type=task_runner_type,
largest_wave=largest_wave,
)
task_runner = task_runner_type(max_workers=max_workers)
is_process_pool_task_runner = isinstance(task_runner, ProcessPoolTaskRunner)
if is_process_pool_task_runner:
try:
existing_processor_factories = tuple(
task_runner.subprocess_message_processor_factories or ()
)
except (AttributeError, TypeError):
existing_processor_factories = ()
processor_factories = existing_processor_factories
if _dbt_global_log_dedupe_processor_factory not in processor_factories:
processor_factories = (
*processor_factories,
_dbt_global_log_dedupe_processor_factory,
)
if not _configure_process_pool_subprocess_message_processors(
task_runner, list(processor_factories)
):
logger.debug(
"Task runner %s does not support subprocess message processor "
"configuration; process-pool global-log dedupe injection disabled.",
type(task_runner).__name__,
)
# Unique token for this build invocation. Every result dict
# produced by `_run_dbt_node` carries this token under
# `_build_run_id`. On a cache hit Prefect returns the *stored*
# result from a prior run whose token differs, so comparing the
# token after `future.result()` reliably distinguishes fresh
# executions from cache hits β even across process boundaries
# (ProcessPoolTaskRunner).
build_run_id = uuid4().hex
if is_process_pool_task_runner:
# Process-pool runs dedupe dbt global logs in the parent-process
# message forwarder so task subprocesses can emit raw captured logs.
def _emit_global_log_messages(task_logger, result) -> None:
global_logger = task_logger.getChild("dbt_orchestrator_global")
_emit_log_messages(result.log_messages, "", global_logger)
else:
# In-process task runners emit captured global dbt logs directly.
def _emit_global_log_messages(task_logger, result) -> None:
global_logger = task_logger.getChild("dbt_orchestrator_global")
_emit_log_messages(result.log_messages, "", global_logger)
# The core task function. Shared by both regular Task and
# MaterializingTask paths; the only difference is how the task
# object wrapping this function is constructed.
def _run_dbt_node(
node,
command,
full_refresh,
target=None,
asset_key=None,
extra_cli_args=None,
):
# Acquire named concurrency slot if configured
if concurrency_name:
ctx = prefect_concurrency(concurrency_name, strict=True)
else:
ctx = nullcontext()
started_at = datetime.now(timezone.utc)
with ctx:
result = executor.execute_node(
node,
command,
full_refresh,
target=target,
extra_cli_args=extra_cli_args,
)
completed_at = datetime.now(timezone.utc)
try:
task_logger = get_run_logger()
_emit_log_messages(result.log_messages, node.unique_id, task_logger)
_emit_global_log_messages(task_logger, result)
except Exception:
pass
timing = {
"started_at": started_at.isoformat(),
"completed_at": completed_at.isoformat(),
"duration_seconds": (completed_at - started_at).total_seconds(),
}
invocation = {
"command": command,
"args": [node.unique_id],
}
if result.success:
node_result = build_result(
status="success", timing=timing, invocation=invocation
)
if result.artifacts and node.unique_id in result.artifacts:
artifact = result.artifacts[node.unique_id]
if "execution_time" in artifact:
node_result["timing"]["execution_time"] = artifact[
"execution_time"
]
# Add asset metadata when running inside a MaterializingTask.
if asset_key:
try:
asset_ctx = AssetContext.get()
if asset_ctx:
metadata: dict[str, Any] = {"status": "success"}
if result.artifacts and node.unique_id in result.artifacts:
metadata.update(result.artifacts[node.unique_id])
asset_ctx.add_asset_metadata(asset_key, metadata)
except Exception:
pass
node_result["_build_run_id"] = build_run_id
return node_result
# Ensure the error is pickle-safe before raising across processes.
# dbt exceptions may not be picklable, so convert to RuntimeError.
if result.error:
safe_error = RuntimeError(str(result.error))
result = ExecutionResult(
success=result.success,
node_ids=result.node_ids,
error=safe_error,
artifacts=result.artifacts,
)
raise _DbtNodeError(result, timing, invocation)
# Create a base task for non-asset nodes.
base_task = prefect_task(_run_dbt_node)
def _mark_failed(node_id):
failed_nodes.add(node_id)
computed_cache_keys.pop(node_id, None)
def _build_asset_task(node, with_opts):
"""Create a MaterializingTask for nodes that produce assets."""
if (
adapter_type
and node.resource_type in ASSET_NODE_TYPES
and node.relation_name
):
description_suffix = ""
if self._include_compiled_code and project_name:
description_suffix = get_compiled_code_for_node(
node,
self._settings.project_dir,
self._settings.target_path,
project_name,
)
asset = create_asset_for_node(node, adapter_type, description_suffix)
upstream_assets = get_upstream_assets_for_node(
node, all_nodes_map, adapter_type
)
asset_key = format_resource_id(adapter_type, node.relation_name)
return (
MaterializingTask(
fn=_run_dbt_node,
assets=[asset],
materialized_by="dbt",
asset_deps=upstream_assets or None,
**with_opts,
),
asset_key,
)
return None, None
results: dict[str, Any] = {}
failed_nodes: set[str] = set()
if self._cache is not None and all_executable_nodes:
precomputed_cache_keys = self._precompute_all_cache_keys(
all_executable_nodes,
full_refresh,
macro_paths or {},
)
execution_state = self._load_execution_state()
else:
precomputed_cache_keys: dict[str, str] = {}
execution_state: dict[str, str] = {}
computed_cache_keys: dict[str, str] = {}
        def _submit_node(node, runner):
            """Build task options, submit a node, and register the done callback.

            Returns the Prefect future for the submitted node.
            """
            # Resource types without an explicit mapping fall back to `run`.
            command = _NODE_COMMAND.get(node.resource_type, "run")
            node_type_label = node.resource_type.value
            node_label = node.name if node.name else node.unique_id
            task_run_name = f"{node_type_label} {node_label}"
            # Per-node Prefect task options (name, retries, and optionally
            # cache policy below).
            with_opts: dict[str, Any] = {
                "name": task_run_name,
                "task_run_name": task_run_name,
                "retries": self._retries,
                "retry_delay_seconds": self._retry_delay_seconds,
            }
            # Attach cache options only when caching is enabled and the node
            # is not excluded by resource type or materialization.
            if (
                self._cache is not None
                and node.resource_type not in self._cache.exclude_resource_types
                and node.materialization not in self._cache.exclude_materializations
            ):
                with_opts.update(
                    self._build_cache_options_for_node(
                        node,
                        full_refresh,
                        computed_cache_keys,
                        macro_paths,
                        freshness_results=freshness_results,
                        all_nodes=all_nodes,
                        precomputed_cache_keys=precomputed_cache_keys,
                        execution_state=execution_state,
                    )
                )
            elif self._cache is not None:
                logger.debug(
                    "Skipping cache for %s: excluded by %s",
                    node.unique_id,
                    "resource_type"
                    if node.resource_type in self._cache.exclude_resource_types
                    else "materialization",
                )
            # Try to create a MaterializingTask for asset-eligible nodes.
            if self._disable_assets:
                asset_task, asset_key = None, None
            else:
                asset_task, asset_key = _build_asset_task(node, with_opts)
            if asset_task is not None:
                node_task = asset_task
            else:
                # Non-asset path: apply the per-node options to the shared
                # base task and pass no asset key to the runner.
                asset_key = None
                node_task = base_task.with_options(**with_opts)
            future = runner.submit(
                node_task,
                parameters={
                    "node": node,
                    "command": command,
                    "full_refresh": full_refresh,
                    "target": target,
                    "asset_key": asset_key,
                    "extra_cli_args": extra_cli_args,
                },
            )
            return future
        def _process_future_result(node_id, future):
            """Process a completed future — same logic as the old wave collector.

            Cache hits are detected via the ``_build_run_id`` token that
            _run_dbt_node stamps into its result: a token from a different
            run means the value came from the cache, not this execution.
            """
            try:
                node_result = future.result()
                result_token = node_result.get("_build_run_id")
                if result_token != build_run_id:
                    # Cache hit — copy before mutating to avoid corrupting
                    # the stored value.
                    node_result = {
                        k: v for k, v in node_result.items() if k != "_build_run_id"
                    }
                    node_result["status"] = "cached"
                else:
                    node_result.pop("_build_run_id", None)
                    # Freshly executed: persist the node's cache key so a
                    # future run can treat it as up to date.
                    if node_id in computed_cache_keys:
                        execution_state[node_id] = computed_cache_keys[node_id]
                results[node_id] = node_result
            except _DbtNodeError as exc:
                # dbt-reported failure: prefer the artifact's message for the
                # node, falling back to the (pickle-safe) wrapped error.
                artifact_msg = (
                    (exc.execution_result.artifacts or {})
                    .get(node_id, {})
                    .get("message")
                ) or None
                error_info = {
                    "message": artifact_msg
                    or (
                        str(exc.execution_result.error)
                        if exc.execution_result.error
                        else "unknown error"
                    ),
                    "type": type(exc.execution_result.error).__name__
                    if exc.execution_result.error
                    else "UnknownError",
                }
                results[node_id] = build_result(
                    status="error",
                    timing=exc.timing,
                    invocation=exc.invocation,
                    error=error_info,
                )
                execution_state.pop(node_id, None)
                _mark_failed(node_id)
            except Exception as exc:
                # Non-dbt failure (e.g. infrastructure) — record a generic
                # error result and mark the node failed.
                results[node_id] = build_result(
                    status="error",
                    error={
                        "message": str(exc),
                        "type": type(exc).__name__,
                    },
                )
                execution_state.pop(node_id, None)
                _mark_failed(node_id)
        with (
            temporary_settings(
                updates={PREFECT_CLIENT_SERVER_VERSION_CHECK_ENABLED: False}
            ),
            task_runner as runner,
        ):
            for phase_nodes in phases:
                # Build in-degree and dependents maps for this phase.
                in_degree: dict[str, int] = {}
                dependents: dict[str, list[str]] = {nid: [] for nid in phase_nodes}
                for nid, node in phase_nodes.items():
                    # Only dependencies within the current phase gate
                    # submission; cross-phase deps are handled via
                    # failed_nodes carried over from earlier phases.
                    deps_in_phase = [d for d in node.depends_on if d in phase_nodes]
                    in_degree[nid] = len(deps_in_phase)
                    for dep in deps_in_phase:
                        dependents[dep].append(nid)
                # Completion queue: callbacks append here, main thread drains.
                # active_futures is keyed by id(future), value (future, nid).
                active_futures: dict[int, tuple[object, str]] = {}
                completed_queue: deque[tuple[object, str]] = deque()
                completion_event = threading.Event()

                def _on_complete(
                    future, *, _nid, _queue=completed_queue, _event=completion_event
                ):
                    # Default args bind THIS phase's queue/event at definition
                    # time, guarding against late-binding across iterations.
                    _queue.append((future, _nid))
                    _event.set()

                def _propagate(completed_nid):
                    """Decrement in-degree of dependents; submit newly ready nodes.

                    Uses an iterative BFS to avoid recursion when cascading
                    skips propagate through long dependency chains.
                    """
                    propagation_queue: deque[str] = deque([completed_nid])
                    while propagation_queue:
                        source_nid = propagation_queue.popleft()
                        for dep_nid in dependents.get(source_nid, []):
                            in_degree[dep_nid] -= 1
                            if in_degree[dep_nid] == 0:
                                node = phase_nodes[dep_nid]
                                upstream_failures = [
                                    dep
                                    for dep in node.depends_on
                                    if dep in failed_nodes
                                ]
                                if upstream_failures:
                                    # Skip and cascade: treat the skipped node
                                    # as failed so its own dependents skip too.
                                    results[node.unique_id] = build_result(
                                        status="skipped",
                                        reason="upstream failure",
                                        failed_upstream=upstream_failures,
                                    )
                                    failed_nodes.add(node.unique_id)
                                    propagation_queue.append(dep_nid)
                                else:
                                    future = _submit_node(node, runner)
                                    future.add_done_callback(
                                        partial(_on_complete, _nid=dep_nid)
                                    )
                                    active_futures[id(future)] = (future, dep_nid)
                # Submit root nodes (in_degree == 0).
                for nid, degree in in_degree.items():
                    if degree == 0:
                        node = phase_nodes[nid]
                        # Root nodes have no in-phase deps so upstream
                        # failures only matter across phases (already in
                        # failed_nodes from a prior phase).
                        upstream_failures = [
                            dep for dep in node.depends_on if dep in failed_nodes
                        ]
                        if upstream_failures:
                            results[node.unique_id] = build_result(
                                status="skipped",
                                reason="upstream failure",
                                failed_upstream=upstream_failures,
                            )
                            failed_nodes.add(node.unique_id)
                            _propagate(nid)
                        else:
                            future = _submit_node(node, runner)
                            future.add_done_callback(partial(_on_complete, _nid=nid))
                            active_futures[id(future)] = (future, nid)
                # Process completions eagerly — no wave barriers.
                while active_futures:
                    completion_event.wait()
                    completion_event.clear()
                    # Drain everything queued since the last wake-up; each
                    # completion may submit newly-ready dependents.
                    while completed_queue:
                        future, nid = completed_queue.popleft()
                        active_futures.pop(id(future), None)
                        _process_future_result(nid, future)
                        _propagate(nid)
                # Safety: verify every node in this phase was processed.
                missing = set(phase_nodes) - set(results) - failed_nodes
                if missing:
                    raise RuntimeError(
                        f"Eager scheduler failed to process {len(missing)} nodes"
                    )
        # Persist the (possibly updated) execution state for the next run.
        if self._cache is not None:
            self._save_execution_state(execution_state)
        return results
def _determine_per_node_max_workers(
self, task_runner_type: type, largest_wave: int
) -> int:
"""Determine max_workers for PER_NODE task submission."""
cpu_count = os.cpu_count()
if isinstance(self._concurrency, int):
# Respect explicit user-provided worker counts.
return max(1, self._concurrency)
elif isinstance(self._concurrency, str):
# Named concurrency limit: the server-side limit throttles
# execution, so clamp the pool to avoid spawning an excessive
# number of idle processes on large DAGs.
max_workers = min(largest_wave, cpu_count or 4)
else:
max_workers = largest_wave
if isinstance(task_runner_type, type) and issubclass(
task_runner_type, ProcessPoolTaskRunner
):
max_workers = min(max_workers, (cpu_count or 1) * 2)
# Windows ProcessPoolExecutor hard-caps max_workers at 61.
if sys.platform == "win32":
max_workers = min(max_workers, 61)
return max(1, max_workers)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_orchestrator.py",
"license": "Apache License 2.0",
"lines": 1865,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_orchestrator_integration.py | """Integration tests for PrefectDbtOrchestrator against a real DuckDB dbt project.
These tests exercise the full orchestrator pipeline (manifest parsing,
selector resolution, wave execution) with no mocks — real dbtRunner,
real DuckDB, real manifest.
"""
import shutil
from pathlib import Path
import pytest
import yaml
from dbt.cli.main import dbtRunner
duckdb = pytest.importorskip("duckdb", reason="duckdb required for integration tests")
pytest.importorskip(
"dbt.adapters.duckdb", reason="dbt-duckdb required for integration tests"
)
from prefect_dbt.core._orchestrator import ( # noqa: E402
CacheConfig,
ExecutionMode,
PrefectDbtOrchestrator,
TestStrategy,
)
from prefect_dbt.core.settings import PrefectDbtSettings # noqa: E402
pytestmark = pytest.mark.integration
# Path to the bundled dbt test project
DBT_TEST_PROJECT = Path(__file__).resolve().parent.parent / "dbt_test_project"
# Node unique_ids we expect from the test project
SEED_CUSTOMERS = "seed.test_project.customers"
SEED_ORDERS = "seed.test_project.orders"
STG_CUSTOMERS = "model.test_project.stg_customers"
STG_ORDERS = "model.test_project.stg_orders"
INT_ORDERS_ENRICHED = "model.test_project.int_orders_enriched"
CUSTOMER_SUMMARY = "model.test_project.customer_summary"
# Every node the orchestrator is expected to execute. The ephemeral
# INT_ORDERS_ENRICHED model is deliberately absent — it is resolved through
# rather than executed (see the ephemeral-model tests below).
ALL_EXECUTABLE = {
    SEED_CUSTOMERS,
    SEED_ORDERS,
    STG_CUSTOMERS,
    STG_ORDERS,
    CUSTOMER_SUMMARY,
}
# Test node IDs generated from schema.yml files in the test project.
# These are deterministic: dbt derives them from model/column/test names
# plus a short hash suffix.
TEST_NOT_NULL_STG_CUSTOMERS_ID = (
    "test.test_project.not_null_stg_customers_customer_id.e2cfb1f9aa"
)
TEST_NOT_NULL_STG_ORDERS_ORDER_ID = (
    "test.test_project.not_null_stg_orders_order_id.81cfe2fe64"
)
TEST_NOT_NULL_STG_ORDERS_CUSTOMER_ID = (
    "test.test_project.not_null_stg_orders_customer_id.af79d5e4b5"
)
TEST_UNIQUE_STG_CUSTOMERS_ID = (
    "test.test_project.unique_stg_customers_customer_id.c7614daada"
)
TEST_UNIQUE_STG_ORDERS_ID = "test.test_project.unique_stg_orders_order_id.e3b841c71a"
TEST_RELATIONSHIPS_ORDERS_CUSTOMERS = "test.test_project.relationships_stg_orders_customer_id__customer_id__ref_stg_customers_.430bf21500"
TEST_NOT_NULL_SUMMARY_ID = (
    "test.test_project.not_null_customer_summary_customer_id.a81d32eb67"
)
TEST_UNIQUE_SUMMARY_ID = (
    "test.test_project.unique_customer_summary_customer_id.2fb01e9693"
)
# All schema tests defined in the project, used by test-strategy assertions.
ALL_TESTS = {
    TEST_NOT_NULL_STG_CUSTOMERS_ID,
    TEST_NOT_NULL_STG_ORDERS_ORDER_ID,
    TEST_NOT_NULL_STG_ORDERS_CUSTOMER_ID,
    TEST_UNIQUE_STG_CUSTOMERS_ID,
    TEST_UNIQUE_STG_ORDERS_ID,
    TEST_RELATIONSHIPS_ORDERS_CUSTOMERS,
    TEST_NOT_NULL_SUMMARY_ID,
    TEST_UNIQUE_SUMMARY_ID,
}
@pytest.fixture(scope="session")
def dbt_project(tmp_path_factory):
    """Set up a real dbt project with DuckDB and a parsed manifest.

    Session-scoped: copies the test project to a temp directory, writes a
    profiles.yml pointing DuckDB at a local file, and runs `dbt parse`
    to generate `target/manifest.json`.
    """
    project_dir = tmp_path_factory.mktemp("dbt_project")

    # Copy the test project into the temp directory (mktemp already
    # created the destination, so merge into it).
    shutil.copytree(DBT_TEST_PROJECT, project_dir, dirs_exist_ok=True)

    # Write profiles.yml with DuckDB pointing at a local file.
    duckdb_output = {
        "type": "duckdb",
        "path": str(project_dir / "warehouse.duckdb"),
        "schema": "main",
        "threads": 1,
    }
    profiles = {"test": {"target": "dev", "outputs": {"dev": duckdb_output}}}
    (project_dir / "profiles.yml").write_text(yaml.dump(profiles))

    # Run dbt parse to generate manifest.json.
    parse_args = [
        "parse",
        "--project-dir",
        str(project_dir),
        "--profiles-dir",
        str(project_dir),
    ]
    result = dbtRunner().invoke(parse_args)
    assert result.success, f"dbt parse failed: {result.exception}"

    manifest_path = project_dir / "target" / "manifest.json"
    assert manifest_path.exists(), "manifest.json not generated"

    return {
        "project_dir": project_dir,
        "profiles_dir": project_dir,
        "manifest_path": manifest_path,
    }
@pytest.fixture
def orchestrator(dbt_project):
    """Factory fixture that creates a PrefectDbtOrchestrator for the test project.

    Defaults to `test_strategy=TestStrategy.SKIP` so that tests which are not
    specifically exercising test-strategy behaviour get deterministic results
    containing only model/seed nodes. Tests in `TestTestStrategyIntegration`
    override this by passing an explicit `test_strategy`.
    """

    def _factory(**kwargs):
        # Skip dbt tests by default; callers may override via kwargs.
        kwargs.setdefault("test_strategy", TestStrategy.SKIP)
        return PrefectDbtOrchestrator(
            settings=PrefectDbtSettings(
                project_dir=dbt_project["project_dir"],
                profiles_dir=dbt_project["profiles_dir"],
            ),
            manifest_path=dbt_project["manifest_path"],
            **kwargs,
        )

    return _factory
class TestOrchestratorIntegration:
    """Integration tests that run the full orchestrator against DuckDB."""

    def test_full_build_all_nodes_succeed(self, orchestrator):
        """run_build() with no selectors executes all 5 executable nodes successfully."""
        orch = orchestrator()
        results = orch.run_build()
        assert set(results.keys()) == ALL_EXECUTABLE
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )

    def test_build_wave_ordering(self, orchestrator):
        """Builds execute in 3 waves: seeds -> staging -> marts."""
        orch = orchestrator()
        results = orch.run_build()
        # All nodes succeeded — verify via timing that seeds started first,
        # then staging, then marts. We check by comparing started_at timestamps.
        seed_started = max(
            results[SEED_CUSTOMERS]["timing"]["started_at"],
            results[SEED_ORDERS]["timing"]["started_at"],
        )
        staging_started = min(
            results[STG_CUSTOMERS]["timing"]["started_at"],
            results[STG_ORDERS]["timing"]["started_at"],
        )
        mart_started = results[CUSTOMER_SUMMARY]["timing"]["started_at"]
        # Seeds wave must start before or at staging wave
        assert seed_started <= staging_started
        # Staging wave must start before or at mart wave
        assert staging_started <= mart_started

    def test_build_with_select(self, orchestrator, dbt_project):
        """run_build(select='staging') returns only staging models."""
        orch = orchestrator()
        results = orch.run_build(select="staging")
        # Should only contain the two staging models
        assert set(results.keys()) == {STG_CUSTOMERS, STG_ORDERS}
        for result in results.values():
            assert result["status"] == "success"
        # Verify the staging views were actually created in DuckDB
        db_path = dbt_project["project_dir"] / "warehouse.duckdb"
        conn = duckdb.connect(str(db_path))
        try:
            assert (
                conn.execute("select count(*) from main.stg_customers").fetchone()[0]
                == 5
            )
            assert (
                conn.execute("select count(*) from main.stg_orders").fetchone()[0] == 10
            )
        finally:
            conn.close()

    def test_build_with_graph_selector(self, orchestrator):
        """run_build(select='+customer_summary') includes all upstream nodes."""
        orch = orchestrator()
        results = orch.run_build(select="+customer_summary")
        # +customer_summary means customer_summary and all its ancestors.
        # The ephemeral int_orders_enriched is resolved through, so we get:
        # seeds + staging + customer_summary = all executable nodes
        assert set(results.keys()) == ALL_EXECUTABLE
        for result in results.values():
            assert result["status"] == "success"

    def test_build_result_has_timing_and_artifacts(self, orchestrator):
        """Results include timing fields and execution_time from artifacts."""
        orch = orchestrator()
        results = orch.run_build()
        nodes_with_execution_time = []
        for node_id, result in results.items():
            timing = result["timing"]
            assert "started_at" in timing
            assert "completed_at" in timing
            assert "duration_seconds" in timing
            assert isinstance(timing["duration_seconds"], float)
            assert timing["duration_seconds"] >= 0
            # execution_time comes from dbt artifacts — track which nodes have it
            if "execution_time" in timing:
                assert isinstance(timing["execution_time"], float)
                nodes_with_execution_time.append(node_id)
        # At least the model nodes should have execution_time from artifacts
        assert len(nodes_with_execution_time) > 0

    def test_full_refresh_build(self, orchestrator):
        """run_build(full_refresh=True) succeeds."""
        orch = orchestrator()
        results = orch.run_build(full_refresh=True)
        assert len(results) == len(ALL_EXECUTABLE)
        for result in results.values():
            assert result["status"] == "success"

    def test_build_creates_database_objects(self, orchestrator, dbt_project):
        """After build, DuckDB contains the expected tables/views with correct row counts."""
        orch = orchestrator()
        results = orch.run_build()
        # Verify the build succeeded before querying the database
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
        db_path = dbt_project["project_dir"] / "warehouse.duckdb"
        conn = duckdb.connect(str(db_path))
        try:
            # Seeds should exist as tables
            customers = conn.execute("select count(*) from main.customers").fetchone()
            assert customers[0] == 5
            orders = conn.execute("select count(*) from main.orders").fetchone()
            assert orders[0] == 10
            # Staging views
            stg_customers = conn.execute(
                "select count(*) from main.stg_customers"
            ).fetchone()
            assert stg_customers[0] == 5
            stg_orders = conn.execute("select count(*) from main.stg_orders").fetchone()
            assert stg_orders[0] == 10
            # Mart table
            summary = conn.execute(
                "select count(*) from main.customer_summary"
            ).fetchone()
            assert summary[0] == 5  # 5 distinct customers
            # Verify aggregation correctness for a specific customer
            alice = conn.execute(
                "select order_count, total_amount from main.customer_summary "
                "where customer_name = 'alice'"
            ).fetchone()
            assert alice[0] == 2  # alice has 2 orders
            assert alice[1] == 300  # 100 + 200
        finally:
            conn.close()

    def test_ephemeral_model_not_in_results(self, orchestrator):
        """The ephemeral int_orders_enriched does not appear in build results."""
        orch = orchestrator()
        results = orch.run_build()
        assert INT_ORDERS_ENRICHED not in results
@pytest.fixture
def per_node_dbt_project(dbt_project, tmp_path):
    """Function-scoped copy of the dbt project for PER_NODE tests.

    PER_WAVE tests run dbt in-process and the dbt-duckdb adapter keeps
    an OS-level file lock on the `.duckdb` file that persists even
    after `reset_adapters()` / `cleanup_connections()`. PER_NODE
    tests run dbt in subprocesses that need their own file locks.

    This fixture copies the session-scoped project to a fresh temp
    directory so each PER_NODE test gets its own `.duckdb` file with
    no lock conflicts from the parent process.
    """
    project_dir = tmp_path / "dbt_project"
    shutil.copytree(dbt_project["project_dir"], project_dir)
    # Update profiles.yml to point at the new DuckDB path
    profiles = {
        "test": {
            "target": "dev",
            "outputs": {
                "dev": {
                    "type": "duckdb",
                    "path": str(project_dir / "warehouse.duckdb"),
                    "schema": "main",
                    "threads": 1,
                }
            },
        }
    }
    (project_dir / "profiles.yml").write_text(yaml.dump(profiles))
    # Remove any existing DuckDB files from the copy so each test
    # starts with a clean database.
    for f in project_dir.glob("warehouse.duckdb*"):
        f.unlink()
    # Pre-load seed data so tests that select only models (e.g.
    # `select="staging"`) find the underlying seed tables.
    # Run in a subprocess to avoid the parent process acquiring a
    # DuckDB file lock that would block PER_NODE subprocesses.
    import subprocess
    import sys

    result = subprocess.run(
        [
            sys.executable,
            "-c",
            (
                "from dbt.cli.main import dbtRunner; "
                f'dbtRunner().invoke(["seed", "--project-dir", "{project_dir}", '
                f'"--profiles-dir", "{project_dir}"])'
            ),
        ],
        capture_output=True,
        text=True,
    )
    assert result.returncode == 0, f"dbt seed failed: {result.stderr}"
    # Same contract as the session-scoped `dbt_project` fixture.
    return {
        "project_dir": project_dir,
        "profiles_dir": project_dir,
        "manifest_path": project_dir / "target" / "manifest.json",
    }
@pytest.fixture
def per_node_orchestrator(per_node_dbt_project):
    """Factory fixture that creates a PrefectDbtOrchestrator for PER_NODE tests.

    Defaults to `test_strategy=TestStrategy.SKIP` for the same reason as
    the `orchestrator` fixture.
    """

    def _factory(**kwargs):
        # Skip dbt tests unless the caller explicitly chooses a strategy.
        if "test_strategy" not in kwargs:
            kwargs["test_strategy"] = TestStrategy.SKIP
        return PrefectDbtOrchestrator(
            settings=PrefectDbtSettings(
                project_dir=per_node_dbt_project["project_dir"],
                profiles_dir=per_node_dbt_project["profiles_dir"],
            ),
            manifest_path=per_node_dbt_project["manifest_path"],
            **kwargs,
        )

    return _factory
@pytest.fixture
def caching_orchestrator(per_node_dbt_project, tmp_path):
    """Factory fixture for PER_NODE orchestrator with caching enabled.

    Shares result_storage and key_storage across calls so
    cross-instance cache tests can verify persistence.

    Uses ThreadPoolTaskRunner to avoid ProcessPoolTaskRunner limitations
    with Prefect's cache key resolution across process boundaries.
    """
    from prefect.task_runners import ThreadPoolTaskRunner

    result_dir = tmp_path / "result_storage"
    key_dir = tmp_path / "cache_key_storage"
    result_dir.mkdir()
    key_dir.mkdir()

    def _factory(**kwargs):
        base_options = {
            "settings": PrefectDbtSettings(
                project_dir=per_node_dbt_project["project_dir"],
                profiles_dir=per_node_dbt_project["profiles_dir"],
            ),
            "manifest_path": per_node_dbt_project["manifest_path"],
            "execution_mode": ExecutionMode.PER_NODE,
            "concurrency": 1,
            "cache": CacheConfig(
                result_storage=result_dir,
                key_storage=str(key_dir),
            ),
            "task_runner_type": ThreadPoolTaskRunner,
            "test_strategy": TestStrategy.SKIP,
        }
        # Caller-supplied kwargs override the defaults.
        return PrefectDbtOrchestrator(**{**base_options, **kwargs})

    return _factory
class TestPerNodeIntegration:
    """Integration tests for PER_NODE execution mode against DuckDB.

    All tests use `concurrency=1` because DuckDB's file-based storage
    only supports a single writer at a time. Without this, concurrent
    `dbt run` invocations within the same wave would conflict on the
    database write lock. Production databases (Postgres, Snowflake, etc.)
    do not have this limitation.
    """

    def test_per_node_full_build(self, per_node_orchestrator):
        """PER_NODE run_build() executes all 5 executable nodes successfully."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        assert set(results.keys()) == ALL_EXECUTABLE
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )

    def test_per_node_wave_ordering(self, per_node_orchestrator):
        """PER_NODE builds respect wave ordering: seeds -> staging -> marts."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        # Compare the latest seed start against the earliest staging start,
        # and that against the mart start, to establish wave ordering.
        seed_started = max(
            results[SEED_CUSTOMERS]["timing"]["started_at"],
            results[SEED_ORDERS]["timing"]["started_at"],
        )
        staging_started = min(
            results[STG_CUSTOMERS]["timing"]["started_at"],
            results[STG_ORDERS]["timing"]["started_at"],
        )
        mart_started = results[CUSTOMER_SUMMARY]["timing"]["started_at"]
        assert seed_started <= staging_started
        assert staging_started <= mart_started

    def test_per_node_with_select(self, per_node_orchestrator, per_node_dbt_project):
        """PER_NODE run_build(select='staging') returns only staging models."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build(select="staging")

        results = test_flow()
        assert set(results.keys()) == {STG_CUSTOMERS, STG_ORDERS}
        for result in results.values():
            assert result["status"] == "success"
        # Verify the staging views were actually created in DuckDB
        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"
        conn = duckdb.connect(str(db_path))
        try:
            assert (
                conn.execute("select count(*) from main.stg_customers").fetchone()[0]
                == 5
            )
            assert (
                conn.execute("select count(*) from main.stg_orders").fetchone()[0] == 10
            )
        finally:
            conn.close()

    def test_per_node_uses_correct_commands(self, per_node_orchestrator):
        """PER_NODE uses 'seed' for seeds and 'run' for models."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        # Seeds should use 'seed' command
        assert results[SEED_CUSTOMERS]["invocation"]["command"] == "seed"
        assert results[SEED_ORDERS]["invocation"]["command"] == "seed"
        # Models should use 'run' command
        assert results[STG_CUSTOMERS]["invocation"]["command"] == "run"
        assert results[STG_ORDERS]["invocation"]["command"] == "run"
        assert results[CUSTOMER_SUMMARY]["invocation"]["command"] == "run"

    def test_per_node_each_node_has_individual_timing(self, per_node_orchestrator):
        """Each node has its own timing, not shared with the wave."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        for node_id, result in results.items():
            timing = result["timing"]
            assert "started_at" in timing
            assert "completed_at" in timing
            assert "duration_seconds" in timing
            assert isinstance(timing["duration_seconds"], float)
            # Each node's invocation should list only its own unique_id
            assert result["invocation"]["args"] == [node_id]

    def test_per_node_ephemeral_not_in_results(self, per_node_orchestrator):
        """Ephemeral models are not executed in PER_NODE mode."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        assert INT_ORDERS_ENRICHED not in results

    def test_per_node_creates_database_objects(
        self, per_node_orchestrator, per_node_dbt_project
    ):
        """PER_NODE mode produces the same database objects as PER_WAVE."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"
        conn = duckdb.connect(str(db_path))
        try:
            customers = conn.execute("select count(*) from main.customers").fetchone()
            assert customers[0] == 5
            summary = conn.execute(
                "select count(*) from main.customer_summary"
            ).fetchone()
            assert summary[0] == 5
        finally:
            conn.close()

    def test_per_node_with_concurrency_limit(self, per_node_orchestrator):
        """PER_NODE with an explicit concurrency limit still succeeds.

        Uses concurrency=1 because DuckDB only supports a single writer.
        The unit tests cover higher concurrency values.
        """
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        assert set(results.keys()) == ALL_EXECUTABLE
        for node_id, result in results.items():
            assert result["status"] == "success", (
                f"{node_id} failed: {result.get('error')}"
            )
def _object_exists(db_path: Path, name: str) -> bool:
    """Check whether a table or view exists in DuckDB."""
    conn = duckdb.connect(str(db_path))
    try:
        # Probing with a one-row select raises CatalogException when the
        # relation is absent — cheaper than querying the catalog tables.
        try:
            conn.execute(f"select 1 from main.{name} limit 1")
        except duckdb.CatalogException:
            return False
        return True
    finally:
        conn.close()
def _drop_object(db_path: Path, name: str, kind: str = "TABLE") -> None:
    """Drop a table or view from DuckDB (no-op when it does not exist)."""
    conn = duckdb.connect(str(db_path))
    try:
        statement = f"DROP {kind} IF EXISTS main.{name}"
        conn.execute(statement)
    finally:
        conn.close()
def _row_count(db_path: Path, name: str) -> int:
    """Return the row count of a table or view in DuckDB."""
    conn = duckdb.connect(str(db_path))
    try:
        row = conn.execute(f"select count(*) from main.{name}").fetchone()
        return row[0]
    finally:
        conn.close()
class TestPerNodeCachingIntegration:
"""Integration tests for PER_NODE caching against real DuckDB.
Cache-hit detection strategy ("drop table"):
1. Run 1: build all nodes (populates DuckDB + cache)
2. Drop a specific table/view from DuckDB
3. Run 2: build again
4. If the node was cached (not re-executed), the dropped object stays absent
5. If the node was re-executed, dbt recreates the object
"""
    def test_second_run_uses_cache(self, caching_orchestrator, per_node_dbt_project):
        """A second identical build hits the cache — dbt does not re-execute."""
        from prefect import flow

        orch = caching_orchestrator()
        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"

        @flow
        def run():
            return orch.run_build()

        # Run 1: full build
        r1 = run()
        assert all(r["status"] == "success" for r in r1.values())
        assert _object_exists(db_path, "customer_summary")
        # Drop a mart table between runs
        _drop_object(db_path, "customer_summary", "TABLE")
        assert not _object_exists(db_path, "customer_summary")
        # Run 2: should be entirely cached
        r2 = run()
        assert all(r["status"] == "cached" for r in r2.values())
        # customer_summary was NOT recreated — dbt never re-executed it
        assert not _object_exists(db_path, "customer_summary")
    def test_sql_change_invalidates_affected_nodes(
        self, caching_orchestrator, per_node_dbt_project
    ):
        """Changing a SQL file invalidates that node and cascades downstream.

        stg_customers.sql is modified between runs. The file-content hash
        changes, which invalidates stg_customers' cache key. Because
        customer_summary includes stg_customers in its upstream_cache_keys,
        its own key also changes — so it is re-executed as well.

        We verify with the "drop table" trick: drop customer_summary after
        run 1. If it reappears after run 2 the cascade invalidation worked.
        """
        from prefect import flow

        project_dir = per_node_dbt_project["project_dir"]
        db_path = project_dir / "warehouse.duckdb"
        orch = caching_orchestrator()

        @flow
        def run():
            return orch.run_build()

        # Run 1: full build
        r1 = run()
        assert all(r["status"] == "success" for r in r1.values())
        assert _object_exists(db_path, "customer_summary")
        # Modify stg_customers — changes its file_content_hash.
        # The new SQL keeps only customers 1-3, so the rebuilt view has
        # exactly 3 rows (asserted at the end).
        stg_path = project_dir / "models" / "staging" / "stg_customers.sql"
        stg_path.write_text(
            "select customer_id, name, created_at::date as created_at\n"
            "from {{ ref('customers') }}\n"
            "where customer_id <= 3\n"
        )
        # Drop customer_summary TABLE. If the downstream cascade works,
        # dbt will re-create it in run 2.
        _drop_object(db_path, "customer_summary", "TABLE")
        assert not _object_exists(db_path, "customer_summary")
        # Run 2: stg_customers cache miss (file changed) propagates to
        # customer_summary cache miss (upstream key changed) — re-executed
        r2 = run()
        assert all(r["status"] in ("success", "cached") for r in r2.values())
        # customer_summary was re-created — cascade invalidation worked
        assert _object_exists(db_path, "customer_summary")
        # stg_customers VIEW was also re-executed (file hash changed)
        assert _row_count(db_path, "stg_customers") == 3
    def test_full_refresh_bypasses_cache(
        self, caching_orchestrator, per_node_dbt_project
    ):
        """full_refresh=True forces re-execution even when cache is warm."""
        from prefect import flow

        orch = caching_orchestrator()
        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"

        @flow
        def run(**kwargs):
            return orch.run_build(**kwargs)

        # Run 1: warm cache
        r1 = run()
        assert all(r["status"] == "success" for r in r1.values())
        # Drop customer_summary
        _drop_object(db_path, "customer_summary", "TABLE")
        # Run 2 with full_refresh — cache is bypassed
        r2 = run(full_refresh=True)
        assert all(r["status"] == "success" for r in r2.values())
        # customer_summary was recreated because full_refresh bypassed cache
        assert _object_exists(db_path, "customer_summary")
    def test_cache_persists_across_orchestrator_instances(
        self, caching_orchestrator, per_node_dbt_project
    ):
        """A second orchestrator instance (sharing storage) reuses the cache."""
        from prefect import flow

        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"
        # Instance 1: build and populate cache
        orch1 = caching_orchestrator()

        @flow
        def run1():
            return orch1.run_build()

        r1 = run1()
        assert all(r["status"] == "success" for r in r1.values())
        # Drop customer_summary between instances
        _drop_object(db_path, "customer_summary", "TABLE")
        # Instance 2: shares the same storage dirs
        orch2 = caching_orchestrator()

        @flow
        def run2():
            return orch2.run_build()

        r2 = run2()
        assert all(r["status"] == "cached" for r in r2.values())
        # customer_summary was NOT recreated — orch2 used orch1's cache
        assert not _object_exists(db_path, "customer_summary")
    def test_caching_disabled_executes_every_time(
        self, caching_orchestrator, per_node_dbt_project
    ):
        """With cache=None, every run re-executes all nodes."""
        from prefect import flow

        orch = caching_orchestrator(cache=None)
        db_path = per_node_dbt_project["project_dir"] / "warehouse.duckdb"

        @flow
        def run():
            return orch.run_build()

        # Run 1
        r1 = run()
        assert all(r["status"] == "success" for r in r1.values())
        # Drop customer_summary
        _drop_object(db_path, "customer_summary", "TABLE")
        # Run 2 without caching — dbt re-executes everything
        r2 = run()
        assert all(r["status"] == "success" for r in r2.values())
        # customer_summary was recreated (no caching = always re-executes)
        assert _object_exists(db_path, "customer_summary")
def test_full_refresh_always_re_executes(
    self, caching_orchestrator, per_node_dbt_project
):
    """Repeated full_refresh=True runs always execute (never cached)."""
    from prefect import flow

    orchestrator = caching_orchestrator()
    warehouse = per_node_dbt_project["project_dir"] / "warehouse.duckdb"

    @flow
    def build(**kwargs):
        return orchestrator.run_build(**kwargs)

    # First full-refresh build populates the database.
    first = build(full_refresh=True)
    assert all(entry["status"] == "success" for entry in first.values())
    assert _object_exists(warehouse, "customer_summary")

    # Drop the table between runs so a re-execution is observable.
    _drop_object(warehouse, "customer_summary", "TABLE")
    assert not _object_exists(warehouse, "customer_summary")

    # A second full refresh must execute again rather than hit any cache...
    second = build(full_refresh=True)
    assert all(entry["status"] == "success" for entry in second.values())
    # ...which recreates the dropped table.
    assert _object_exists(warehouse, "customer_summary")
def test_normal_run_after_full_refresh_uses_cache(
    self, caching_orchestrator, per_node_dbt_project
):
    """A normal run's cache isn't poisoned by a full_refresh run."""
    from prefect import flow

    orchestrator = caching_orchestrator()
    warehouse = per_node_dbt_project["project_dir"] / "warehouse.duckdb"

    @flow
    def build(**kwargs):
        return orchestrator.run_build(**kwargs)

    # Normal build warms the cache.
    warm = build()
    assert all(entry["status"] == "success" for entry in warm.values())

    # Full refresh always executes; it must not clobber the normal cache.
    refreshed = build(full_refresh=True)
    assert all(entry["status"] == "success" for entry in refreshed.values())

    # Drop the table so a cache miss would show up as a recreation.
    _drop_object(warehouse, "customer_summary", "TABLE")
    assert not _object_exists(warehouse, "customer_summary")

    # The third, normal build should hit the cache from the first run...
    final = build()
    assert all(entry["status"] == "cached" for entry in final.values())
    # ...leaving the dropped table untouched.
    assert not _object_exists(warehouse, "customer_summary")
def test_macro_change_invalidates_cache(
    self, caching_orchestrator, per_node_dbt_project
):
    """Editing a macro file invalidates cache for dependent nodes."""
    from prefect import flow

    project_root = per_node_dbt_project["project_dir"]
    warehouse = project_root / "warehouse.duckdb"
    orchestrator = caching_orchestrator()

    @flow
    def build():
        return orchestrator.run_build()

    # Warm the cache with a fully successful build.
    first = build()
    assert all(entry["status"] == "success" for entry in first.values())
    assert _object_exists(warehouse, "customer_summary")

    # Rewrite the macro on disk, which changes its content hash.
    macro_file = project_root / "macros" / "format_amount.sql"
    macro_file.write_text(
        "{% macro format_amount(column) %}\n"
        " coalesce({{ column }}, 0)\n"
        "{% endmacro %}\n"
    )

    # Drop the table; only a cache miss would recreate it.
    _drop_object(warehouse, "customer_summary", "TABLE")
    assert not _object_exists(warehouse, "customer_summary")

    # The macro edit forces dependent nodes to re-execute.
    second = build()
    assert all(entry["status"] in ("success", "cached") for entry in second.values())
    # customer_summary is back, so its cache entry was invalidated.
    assert _object_exists(warehouse, "customer_summary")
class TestTestStrategyIntegration:
    """Integration tests for test strategies against a real DuckDB dbt project.

    The dbt test project includes schema.yml files that define not_null,
    unique, and relationship tests on staging and mart models. The expected
    test node IDs are declared as constants (ALL_TESTS) so assertions are
    deterministic.
    """

    def test_fixture_contains_test_definitions(self, dbt_project):
        """Guard: verify the manifest contains the expected test nodes.

        If this fails, the schema.yml files in dbt_test_project are missing
        or dbt parse did not generate test nodes.
        """
        import json

        manifest = json.loads(dbt_project["manifest_path"].read_text())
        # dbt stores tests alongside models under "nodes"; test unique_ids
        # carry a "test." prefix.
        manifest_test_ids = {
            uid for uid in manifest["nodes"] if uid.startswith("test.")
        }
        assert manifest_test_ids == ALL_TESTS, (
            f"Manifest test nodes don't match ALL_TESTS.\n"
            f" Missing: {ALL_TESTS - manifest_test_ids}\n"
            f" Extra: {manifest_test_ids - ALL_TESTS}"
        )

    def test_skip_produces_no_test_results(self, orchestrator):
        """SKIP strategy: no test node IDs in results."""
        orch = orchestrator(test_strategy=TestStrategy.SKIP)
        results = orch.run_build()
        test_ids = {k for k in results if k.startswith("test.")}
        assert test_ids == set()
        # Only the executable (non-test) nodes appear in the results.
        assert set(results.keys()) == ALL_EXECUTABLE

    def test_immediate_per_wave_includes_tests(self, orchestrator):
        """IMMEDIATE + PER_WAVE: all expected test nodes appear with success."""
        orch = orchestrator(test_strategy=TestStrategy.IMMEDIATE)
        results = orch.run_build()
        model_ids = {k for k in results if not k.startswith("test.")}
        test_ids = {k for k in results if k.startswith("test.")}
        assert model_ids == ALL_EXECUTABLE
        assert test_ids == ALL_TESTS
        for tid in test_ids:
            assert results[tid]["status"] == "success", (
                f"Test {tid} failed: {results[tid].get('error')}"
            )

    def test_deferred_per_wave_includes_tests(self, orchestrator):
        """DEFERRED + PER_WAVE: all expected test nodes appear with success."""
        orch = orchestrator(test_strategy=TestStrategy.DEFERRED)
        results = orch.run_build()
        model_ids = {k for k in results if not k.startswith("test.")}
        test_ids = {k for k in results if k.startswith("test.")}
        assert model_ids == ALL_EXECUTABLE
        assert test_ids == ALL_TESTS
        for tid in test_ids:
            assert results[tid]["status"] == "success", (
                f"Test {tid} failed: {results[tid].get('error')}"
            )

    def test_immediate_per_node_includes_tests(self, per_node_orchestrator):
        """IMMEDIATE + PER_NODE: all expected test nodes appear with success."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
            test_strategy=TestStrategy.IMMEDIATE,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        model_ids = {k for k in results if not k.startswith("test.")}
        test_ids = {k for k in results if k.startswith("test.")}
        assert model_ids == ALL_EXECUTABLE
        assert test_ids == ALL_TESTS
        for tid in test_ids:
            assert results[tid]["status"] == "success", (
                f"Test {tid} failed: {results[tid].get('error')}"
            )

    def test_deferred_per_node_includes_tests(self, per_node_orchestrator):
        """DEFERRED + PER_NODE: test nodes run after all models."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
            test_strategy=TestStrategy.DEFERRED,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        model_ids = {k for k in results if not k.startswith("test.")}
        test_ids = {k for k in results if k.startswith("test.")}
        assert model_ids == ALL_EXECUTABLE
        assert test_ids == ALL_TESTS
        # All models must complete before any test starts
        latest_model = max(results[m]["timing"]["completed_at"] for m in model_ids)
        earliest_test = min(results[t]["timing"]["started_at"] for t in test_ids)
        assert latest_model <= earliest_test

    def test_relationship_test_passes(self, orchestrator):
        """The relationship test between stg_orders.customer_id -> stg_customers passes."""
        orch = orchestrator(test_strategy=TestStrategy.IMMEDIATE)
        results = orch.run_build()
        assert TEST_RELATIONSHIPS_ORDERS_CUSTOMERS in results
        assert results[TEST_RELATIONSHIPS_ORDERS_CUSTOMERS]["status"] == "success", (
            f"Relationship test failed: "
            f"{results[TEST_RELATIONSHIPS_ORDERS_CUSTOMERS].get('error')}"
        )

    def test_per_node_skip_produces_no_test_results(self, per_node_orchestrator):
        """SKIP + PER_NODE: no test results, backward compatible."""
        from prefect import flow

        orch = per_node_orchestrator(
            execution_mode=ExecutionMode.PER_NODE,
            concurrency=1,
            test_strategy=TestStrategy.SKIP,
        )

        @flow
        def test_flow():
            return orch.run_build()

        results = test_flow()
        test_ids = {k for k in results if k.startswith("test.")}
        assert test_ids == set()
        assert set(results.keys()) == ALL_EXECUTABLE
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_orchestrator_integration.py",
"license": "Apache License 2.0",
"lines": 898,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_executor.py | """
Executor for running dbt commands on individual nodes or waves.
This module provides:
- ExecutionResult: Result of a dbt command execution
- DbtExecutor: Protocol for dbt execution backends
- DbtCoreExecutor: Implementation using dbt-core's dbtRunner
"""
import atexit
from contextlib import contextmanager, nullcontext
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Any, Protocol, runtime_checkable
from dbt.cli.main import dbtRunner
from dbt_common.events.base_types import EventLevel, EventMsg
from prefect.logging import get_logger
from prefect_dbt.core._manifest import DbtNode
from prefect_dbt.core.settings import PrefectDbtSettings
from prefect_dbt.utilities import kwargs_to_args
# Module-level Prefect logger used for executor diagnostics below.
logger = get_logger(__name__)
class _PoolState(Enum):
    """Lifecycle of the process-level adapter pool (see _AdapterPool)."""

    # Pooling is off; activate() moves to FIRST_CALL.
    INACTIVE = "inactive"
    # First dbt invoke is in flight; on_success() moves to POOLED.
    FIRST_CALL = "first_call"
    # Runner and adapter connections are being reused across invokes.
    POOLED = "pooled"
@contextmanager
def _setup_only_adapter_management(reset_fn):
    """First call: create adapter normally, skip teardown.

    Stands in for dbt's adapter_management(): *reset_fn* (dbt's
    reset_adapters, bound via _AdapterPool._patch) runs on entry so the
    adapter is registered fresh, but nothing runs on exit, leaving the
    adapter and its connections alive for the next invocation.
    """
    reset_fn()
    yield
@contextmanager
def _noop_adapter_management():
    """Subsequent calls: adapter already registered, do nothing.

    Replaces dbt's adapter_management() once the pool reaches POOLED
    state, so neither setup nor teardown runs around the invocation.
    """
    yield
class _AdapterPool:
    """Process-level dbt adapter pool that keeps connections alive.

    Monkey-patches three dbt mechanisms to reuse database connections
    between dbtRunner.invoke() calls in the same process:

    1. `adapter_management()` — the context manager that creates and
       destroys adapters around each dbt invocation.
    2. `BaseAdapter.cleanup_connections()` — called independently by
       `execute_with_hooks()` in its `finally` block to close all
       thread connections after each task execution.
    3. `BaseConnectionManager.get_if_exists()` — each invoke spawns
       fresh worker threads with new thread IDs; this patch transplants
       an open connection from a departed thread's key to the new
       thread's key so the same database handle is reused.

    Falls back to standard behavior if dbt internals change or any error
    occurs.
    """

    def __init__(self):
        # State machine: INACTIVE -> FIRST_CALL -> POOLED; revert() resets.
        self._state = _PoolState.INACTIVE
        # Cached dbtRunner reused while POOLED.
        self._runner: dbtRunner | None = None
        # Originals saved so every patch can be undone.
        self._original_adapter_management = None
        self._factory = None
        self._reset_adapters = None
        self._cleanup_connections = None
        self._cleanup_registered = False
        self._original_base_cleanup = None
        self._original_get_if_exists = None
        try:
            from dbt.adapters.factory import (
                FACTORY,
                adapter_management,
                cleanup_connections,
                reset_adapters,
            )

            self._original_adapter_management = adapter_management
            self._factory = FACTORY
            self._reset_adapters = reset_adapters
            self._cleanup_connections = cleanup_connections
            self._available = True
        except ImportError:
            # dbt internals moved or are missing: pooling stays disabled.
            self._available = False

    @property
    def available(self) -> bool:
        # True when the dbt internals needed for pooling were importable.
        return self._available

    def activate(self):
        """Activate pooling for this process. Called before first invoke."""
        if not self._available or self._state != _PoolState.INACTIVE:
            return
        self._state = _PoolState.FIRST_CALL
        # First call: let reset_adapters() build the adapter, skip teardown.
        self._patch(_setup_only_adapter_management, self._reset_adapters)
        self._suppress_cleanup()
        self._patch_get_if_exists()
        if not self._cleanup_registered:
            # Ensure pooled connections are closed when the process exits.
            atexit.register(self._cleanup)
            self._cleanup_registered = True

    def get_runner(self, callbacks: list) -> tuple[dbtRunner, bool]:
        """Get a dbtRunner. Returns (runner, is_pooled)."""
        if self._state == _PoolState.POOLED and self._runner is not None:
            # Reuse the cached runner, swapping in this call's callbacks.
            self._runner.callbacks = callbacks
            return self._runner, True
        runner = dbtRunner(callbacks=callbacks)
        self._runner = runner
        return runner, False

    def on_success(self):
        """Called after a successful invoke. Transitions state forward."""
        if self._state == _PoolState.FIRST_CALL:
            self._state = _PoolState.POOLED
            # From now on adapter_management() does nothing at all.
            self._patch_noop()

    def revert(self):
        """Revert to INACTIVE. Called on failure."""
        self._runner = None
        self._state = _PoolState.INACTIVE
        self._restore_cleanup()
        self._restore_get_if_exists()
        self._unpatch()
        try:
            if self._reset_adapters is not None:
                self._reset_adapters()
        except Exception:
            # Best-effort reset; never let cleanup mask the original failure.
            pass

    def _patch(self, ctx_manager_fn, *args):
        """Patch adapter_management in both dbt modules."""
        if args:
            from functools import partial

            # Bind extra args (e.g. reset_adapters) into the replacement CM.
            replacement = partial(ctx_manager_fn, *args)
        else:
            replacement = ctx_manager_fn
        try:
            import dbt.adapters.factory as factory_mod

            factory_mod.adapter_management = replacement
        except (ImportError, AttributeError):
            # Can't patch the defining module: disable pooling entirely.
            self._available = False
            return
        try:
            # dbt.cli.requires imported the name directly; patch it there too.
            import dbt.cli.requires as requires_mod

            requires_mod.adapter_management = replacement
        except (ImportError, AttributeError):
            pass

    def _patch_noop(self):
        """Switch to no-op adapter management."""
        self._patch(_noop_adapter_management)

    def _unpatch(self):
        """Restore original adapter_management."""
        if self._original_adapter_management is None:
            return
        try:
            import dbt.adapters.factory as factory_mod

            factory_mod.adapter_management = self._original_adapter_management
        except (ImportError, AttributeError):
            pass
        try:
            import dbt.cli.requires as requires_mod

            requires_mod.adapter_management = self._original_adapter_management
        except (ImportError, AttributeError):
            pass

    def _suppress_cleanup(self):
        """Patch BaseAdapter.cleanup_connections to a no-op while pooled.

        dbt's execute_with_hooks() calls adapter.cleanup_connections() in its
        finally block, which closes all thread connections independently of the
        adapter_management() context manager. Suppressing this at the class
        level keeps connections alive across dbtRunner.invoke() calls.
        """
        if self._original_base_cleanup is not None:
            return  # already patched
        try:
            from dbt.adapters.base.impl import BaseAdapter

            self._original_base_cleanup = BaseAdapter.cleanup_connections
            BaseAdapter.cleanup_connections = lambda self: None
        except (ImportError, AttributeError):
            pass

    def _restore_cleanup(self):
        """Restore BaseAdapter.cleanup_connections to the original method."""
        if self._original_base_cleanup is None:
            return
        try:
            from dbt.adapters.base.impl import BaseAdapter

            BaseAdapter.cleanup_connections = self._original_base_cleanup
        except (ImportError, AttributeError):
            pass
        self._original_base_cleanup = None

    def _patch_get_if_exists(self):
        """Patch get_if_exists to transplant connections across threads.

        Each dbtRunner.invoke() spawns fresh worker threads via DbtThreadPool.
        These new threads have different thread IDs, so get_if_exists() returns
        None even though an open connection exists under the old thread's key.
        This patch moves an existing open connection to the requesting thread's
        key so the same database handle is reused.
        """
        if self._original_get_if_exists is not None:
            return  # already patched
        try:
            from dbt.adapters.base.connections import BaseConnectionManager

            self._original_get_if_exists = BaseConnectionManager.get_if_exists

            def _transplanting_get_if_exists(conn_mgr):
                key = conn_mgr.get_thread_identifier()
                with conn_mgr.lock:
                    conn = conn_mgr.thread_connections.get(key)
                    if conn is not None:
                        return conn
                    # Transplant: move an open connection from a departed thread.
                    for old_key in list(conn_mgr.thread_connections):
                        if old_key != key:
                            old_conn = conn_mgr.thread_connections[old_key]
                            if old_conn.state == "open":
                                del conn_mgr.thread_connections[old_key]
                                conn_mgr.thread_connections[key] = old_conn
                                return old_conn
                    return None

            BaseConnectionManager.get_if_exists = _transplanting_get_if_exists
        except (ImportError, AttributeError):
            pass

    def _restore_get_if_exists(self):
        """Restore original get_if_exists."""
        if self._original_get_if_exists is None:
            return
        try:
            from dbt.adapters.base.connections import BaseConnectionManager

            BaseConnectionManager.get_if_exists = self._original_get_if_exists
        except (ImportError, AttributeError):
            pass
        self._original_get_if_exists = None

    def _cleanup(self):
        """atexit handler: close pooled connections and restore patches."""
        self._unpatch()
        self._restore_cleanup()
        self._restore_get_if_exists()
        try:
            if self._cleanup_connections is not None:
                self._cleanup_connections()
        except Exception:
            # Exit-time best effort only.
            pass
# Process-level singleton — one per worker process (spawn isolation).
# Activated lazily by DbtCoreExecutor._invoke when pool_adapters=True.
_adapter_pool = _AdapterPool()
# Map dbt event levels to logging-style level names used in captured logs.
_EVENT_LEVEL_MAP: dict[EventLevel, str] = {
    EventLevel.DEBUG: "debug",
    EventLevel.TEST: "debug",
    EventLevel.INFO: "info",
    EventLevel.WARN: "warning",
    EventLevel.ERROR: "error",
}

# Numeric ordering of dbt event levels; used to drop captured events below
# the configured minimum log level.
_EVENT_LEVEL_PRIORITY: dict[EventLevel, int] = {
    EventLevel.DEBUG: 0,
    EventLevel.TEST: 1,
    EventLevel.INFO: 2,
    EventLevel.WARN: 3,
    EventLevel.ERROR: 4,
}
@dataclass
class ExecutionResult:
    """Result of executing one or more dbt nodes.

    Attributes:
        success: Whether the execution completed successfully
        node_ids: List of unique_ids that were executed
        error: Exception captured on failure (None on success)
        artifacts: Per-node result data extracted from dbt's RunExecutionResult.
            Maps unique_id to {status, message, execution_time}.
        log_messages: Per-node captured dbt log messages.
            Maps unique_id to list of (level, message) tuples.
            Messages not associated with a specific node use an empty string as a key.
    """

    success: bool
    # default_factory avoids a shared mutable default across instances.
    node_ids: list[str] = field(default_factory=list)
    error: Exception | None = None
    artifacts: dict[str, Any] | None = None
    log_messages: dict[str, list[tuple[str, str]]] | None = None
@runtime_checkable
class DbtExecutor(Protocol):
    """Protocol for dbt execution backends.

    runtime_checkable so implementations can be verified with isinstance()
    (structural check on method names only).
    """

    def execute_node(
        self,
        node: DbtNode,
        command: str,
        full_refresh: bool = False,
        target: str | None = None,
        extra_cli_args: list[str] | None = None,
    ) -> ExecutionResult:
        """Execute a single node with the given dbt command."""
        ...

    def execute_wave(
        self,
        nodes: list[DbtNode],
        full_refresh: bool = False,
        indirect_selection: str | None = None,
        target: str | None = None,
        extra_cli_args: list[str] | None = None,
    ) -> ExecutionResult:
        """Execute a group of nodes in one dbt invocation."""
        ...

    def resolve_manifest_path(self) -> Path:
        """Return the path to the project's manifest.json."""
        ...
class DbtCoreExecutor:
    """Execute dbt commands via dbt-core's dbtRunner.

    This is a thin wrapper that constructs CLI args, invokes dbt, and
    captures results. It does not create Prefect tasks or callbacks —
    that is the orchestrator's responsibility.

    Args:
        settings: PrefectDbtSettings with project_dir, profiles_dir, etc.
        threads: Number of dbt threads (omitted if None, uses dbt default)
        state_path: Path for --state flag (deferred state comparison)
        defer: Whether to pass --defer flag
        defer_state_path: Path for --defer-state flag
        favor_state: Whether to pass --favor-state flag
        run_deps: When True (default), automatically run `dbt deps`
            before resolving the manifest. Set to False if packages
            are pre-installed or managed externally.
        pool_adapters: When True, reuse dbt adapter connections across
            invocations in the same process. Intended for PER_NODE mode
            where each worker process handles many sequential nodes.
    """

    # Commands that accept the --full-refresh flag.
    _FULL_REFRESH_COMMANDS = frozenset({"run", "build", "seed"})

    def __init__(
        self,
        settings: PrefectDbtSettings,
        threads: int | None = None,
        state_path: Path | None = None,
        defer: bool = False,
        defer_state_path: Path | None = None,
        favor_state: bool = False,
        run_deps: bool = True,
        pool_adapters: bool = False,
    ):
        self._settings = settings
        # Fail fast if the settings are unusable for orchestration.
        self._settings.validate_for_orchestrator()
        self._threads = threads
        self._state_path = state_path
        self._defer = defer
        self._defer_state_path = defer_state_path
        self._favor_state = favor_state
        self._run_deps = run_deps
        self._pool_adapters = pool_adapters
        # When set, reuse this profiles dir instead of resolving a new one
        # per invocation (see use_resolved_profiles_dir).
        self._profiles_dir_override: str | None = None

    @contextmanager
    def use_resolved_profiles_dir(self, profiles_dir: str):
        """Temporarily pin a resolved profiles dir across dbt invocations."""
        previous = self._profiles_dir_override
        self._profiles_dir_override = profiles_dir
        try:
            yield
        finally:
            # Restore (not clear) so nested uses compose correctly.
            self._profiles_dir_override = previous

    def _invoke(
        self,
        command: str,
        node_ids: list[str],
        selectors: list[str],
        full_refresh: bool = False,
        indirect_selection: str | None = None,
        target: str | None = None,
        extra_cli_args: list[str] | None = None,
    ) -> ExecutionResult:
        """Build CLI args and invoke dbt.

        Constructs a fresh kwargs dict each call (kwargs_to_args mutates it),
        resolves profiles, and runs dbt via a fresh dbtRunner instance.
        Errors are captured as data — this method does NOT raise.

        Args:
            command: dbt command to run (e.g. "build", "run", "seed")
            node_ids: List of node unique_ids for tracking in the result
            selectors: List of dbt selectors for `--select`
            full_refresh: Whether to pass --full-refresh
            indirect_selection: dbt indirect selection mode (e.g. "empty"
                to suppress automatic test inclusion)
            target: dbt target name to override the default from
                profiles.yml (maps to `--target` / `-t`)
            extra_cli_args: Additional CLI arguments to append after the
                base args built by kwargs_to_args()
        """
        invoke_kwargs: dict[str, Any] = {
            "project_dir": str(self._settings.project_dir),
            "target_path": str(self._settings.target_path),
            "log_level": "none",
            "log_level_file": str(self._settings.log_level.value),
            "select": selectors,
        }
        if indirect_selection is not None:
            invoke_kwargs["indirect_selection"] = indirect_selection
        if target is not None:
            invoke_kwargs["target"] = target
        if self._threads is not None:
            invoke_kwargs["threads"] = self._threads
        # --full-refresh is only valid for a subset of commands.
        if full_refresh and command in self._FULL_REFRESH_COMMANDS:
            invoke_kwargs["full_refresh"] = True
        if self._state_path is not None:
            invoke_kwargs["state"] = str(self._state_path)
        if self._defer:
            invoke_kwargs["defer"] = True
        if self._defer_state_path is not None:
            invoke_kwargs["defer_state"] = str(self._defer_state_path)
        if self._favor_state:
            invoke_kwargs["favor_state"] = True
        try:
            # Filled in by the event callback below, keyed by node unique_id
            # ("" for messages with no node association).
            captured_logs: dict[str, list[tuple[str, str]]] = {}
            # Events below the configured level are dropped (2 = INFO fallback).
            min_priority = _EVENT_LEVEL_PRIORITY.get(self._settings.log_level, 2)

            def _capture_event(event: EventMsg) -> None:
                # Runs inside dbt's event system; must never raise.
                try:
                    event_priority = _EVENT_LEVEL_PRIORITY.get(event.info.level, -1)
                    if event_priority < min_priority:
                        return
                    msg = event.info.msg
                    # Skip empty/whitespace-only messages.
                    if not msg or (isinstance(msg, str) and not msg.strip()):
                        return
                    level_str = _EVENT_LEVEL_MAP.get(event.info.level, "info")
                    try:
                        node_id = event.data.node_info.unique_id or ""
                    except Exception:
                        node_id = ""
                    captured_logs.setdefault(node_id, []).append((level_str, str(msg)))
                except Exception:
                    pass

            # Reuse a pinned profiles dir when available, otherwise resolve
            # a temporary one for this invocation.
            profiles_ctx = (
                nullcontext(self._profiles_dir_override)
                if self._profiles_dir_override is not None
                else self._settings.resolve_profiles_yml()
            )
            with profiles_ctx as profiles_dir:
                assert profiles_dir is not None
                invoke_kwargs["profiles_dir"] = profiles_dir
                args = kwargs_to_args(invoke_kwargs, [command])
                if extra_cli_args:
                    args.extend(extra_cli_args)
                if self._pool_adapters:
                    _adapter_pool.activate()
                    runner, _pooled = _adapter_pool.get_runner(
                        callbacks=[_capture_event]
                    )
                else:
                    runner = dbtRunner(callbacks=[_capture_event])
                res = runner.invoke(args)
                if self._pool_adapters:
                    # Advance or unwind the pool state machine by outcome.
                    if res.success:
                        _adapter_pool.on_success()
                    else:
                        _adapter_pool.revert()
                artifacts = self._extract_artifacts(res)
                # Union of requested nodes and actually-executed nodes. The
                # node_ids list is always included so callers can rely on every
                # requested node appearing in the result. Artifacts may add
                # extra entries (e.g. tests attached to selected models).
                if artifacts:
                    result_ids = list(dict.fromkeys(node_ids + list(artifacts)))
                else:
                    result_ids = list(node_ids)
                return ExecutionResult(
                    success=res.success,
                    node_ids=result_ids,
                    error=res.exception if not res.success else None,
                    artifacts=artifacts,
                    log_messages=captured_logs or None,
                )
        except Exception as exc:
            # Errors are returned as data, per this method's contract.
            if self._pool_adapters:
                _adapter_pool.revert()
            return ExecutionResult(
                success=False,
                node_ids=list(node_ids),
                error=exc,
            )

    def _extract_artifacts(self, res: Any) -> dict[str, Any] | None:
        """Extract per-node result data from dbt's RunExecutionResult.

        Returns None when the invocation produced no per-node results.
        """
        if res.result is None or not hasattr(res.result, "results"):
            return None
        if not res.result.results:
            return None
        artifacts: dict[str, Any] = {}
        for node_result in res.result.results:
            # unique_id may live on the result itself or on its .node.
            uid = getattr(node_result, "unique_id", None)
            if uid is None:
                node = getattr(node_result, "node", None)
                uid = getattr(node, "unique_id", None) if node else None
            if uid is None:
                continue
            artifacts[uid] = {
                "status": str(getattr(node_result, "status", "")),
                "message": getattr(node_result, "message", ""),
                "execution_time": getattr(node_result, "execution_time", 0.0),
            }
        return artifacts or None

    def execute_node(
        self,
        node: DbtNode,
        command: str,
        full_refresh: bool = False,
        target: str | None = None,
        extra_cli_args: list[str] | None = None,
    ) -> ExecutionResult:
        """Execute a single dbt node with the specified command.

        Args:
            node: The DbtNode to execute
            command: dbt command ("run", "seed", "snapshot", "test")
            full_refresh: Whether to pass --full-refresh (ignored for
                commands that don't support it, like "test" and "snapshot")
            target: dbt target name (`--target` / `-t`)
            extra_cli_args: Additional CLI arguments to append

        Returns:
            ExecutionResult with success/failure status and artifacts
        """
        return self._invoke(
            command,
            node_ids=[node.unique_id],
            selectors=[node.dbt_selector],
            full_refresh=full_refresh,
            target=target,
            extra_cli_args=extra_cli_args,
        )

    def resolve_manifest_path(self) -> Path:
        """Return the path to manifest.json, running 'dbt parse' if it doesn't exist.

        Resolves to `settings.project_dir / settings.target_path / manifest.json`.
        If the file is not found, runs `dbt parse` to generate it.

        Returns:
            Resolved absolute `Path` to `manifest.json`.

        Raises:
            RuntimeError: If `dbt parse` fails or the manifest is still
                missing after a successful parse.
        """
        path = (
            self._settings.project_dir / self._settings.target_path / "manifest.json"
        ).resolve()
        if not path.exists():
            # Install packages first (if enabled) so parse can succeed.
            if self._run_deps:
                self.run_deps()
            self._run_parse(path)
        return path

    def _run_parse(self, expected_path: Path) -> None:
        """Run `dbt parse` to generate a manifest at *expected_path*.

        Args:
            expected_path: Where the manifest should appear after parsing.
                Used only for validation and error reporting.

        Raises:
            RuntimeError: If the `dbt parse` invocation fails or the
                manifest file is still missing after a successful parse.
        """
        logger.info(
            "Manifest not found at %s; running 'dbt parse' to generate it.",
            expected_path,
        )
        profiles_ctx = (
            nullcontext(self._profiles_dir_override)
            if self._profiles_dir_override is not None
            else self._settings.resolve_profiles_yml()
        )
        with profiles_ctx as profiles_dir:
            assert profiles_dir is not None
            args = [
                "parse",
                "--project-dir",
                str(self._settings.project_dir),
                "--profiles-dir",
                profiles_dir,
                "--target-path",
                str(self._settings.target_path),
                "--log-level",
                "none",
                "--log-level-file",
                str(self._settings.log_level.value),
            ]
            result = dbtRunner().invoke(args)
            if not result.success:
                raise RuntimeError(
                    f"Failed to generate manifest via 'dbt parse': {result.exception}"
                )
        if not expected_path.exists():
            raise RuntimeError(
                f"'dbt parse' succeeded but manifest not found at {expected_path}."
            )

    def run_deps(self) -> None:
        """Run `dbt deps` to install packages declared in *packages.yml*.

        Uses the same profiles-resolution logic as `_run_parse`: if a
        pinned profiles dir is active it is reused, otherwise a temporary
        resolved profiles directory is created.

        Raises:
            RuntimeError: If the `dbt deps` invocation fails.
        """
        logger.info("Running 'dbt deps' to install packages.")
        profiles_ctx = (
            nullcontext(self._profiles_dir_override)
            if self._profiles_dir_override is not None
            else self._settings.resolve_profiles_yml()
        )
        with profiles_ctx as profiles_dir:
            assert profiles_dir is not None
            args = [
                "deps",
                "--project-dir",
                str(self._settings.project_dir),
                "--profiles-dir",
                profiles_dir,
                "--log-level",
                "none",
                "--log-level-file",
                str(self._settings.log_level.value),
            ]
            result = dbtRunner().invoke(args)
            if not result.success:
                raise RuntimeError(
                    f"Failed to install packages via 'dbt deps': {result.exception}"
                )

    def execute_wave(
        self,
        nodes: list[DbtNode],
        full_refresh: bool = False,
        indirect_selection: str | None = None,
        target: str | None = None,
        extra_cli_args: list[str] | None = None,
    ) -> ExecutionResult:
        """Execute a wave of nodes using `dbt build`.

        Uses `dbt build` to handle mixed resource types in a single invocation.

        Args:
            nodes: List of DbtNode objects to execute
            full_refresh: Whether to pass --full-refresh
            indirect_selection: dbt indirect selection mode. Pass
                `"empty"` to prevent dbt from automatically including
                tests attached to selected models.
            target: dbt target name (`--target` / `-t`)
            extra_cli_args: Additional CLI arguments to append

        Returns:
            ExecutionResult with success/failure status and artifacts

        Raises:
            ValueError: If nodes list is empty
        """
        if not nodes:
            raise ValueError("Cannot execute an empty wave")
        node_ids = [node.unique_id for node in nodes]
        selectors = [node.dbt_selector for node in nodes]
        return self._invoke(
            "build",
            node_ids=node_ids,
            selectors=selectors,
            full_refresh=full_refresh,
            indirect_selection=indirect_selection,
            target=target,
            extra_cli_args=extra_cli_args,
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_executor.py",
"license": "Apache License 2.0",
"lines": 619,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_executor.py | """
Tests for ExecutionResult, DbtExecutor protocol, and DbtCoreExecutor.
"""
from contextlib import contextmanager
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from dbt.artifacts.resources.types import NodeType
from dbt_common.events.base_types import EventLevel
from prefect_dbt.core._executor import DbtCoreExecutor, DbtExecutor, ExecutionResult
from prefect_dbt.core._manifest import DbtNode
# =============================================================================
# Helpers & Fixtures
# =============================================================================
@pytest.fixture(autouse=True)
def _reset_adapter_pool():
    """Reset the process-level adapter pool between tests.

    Reverts the pool before and after each test and restores any
    class-level dbt patches (cleanup_connections, get_if_exists) the
    pool — or a test — may have applied.
    """
    from dbt.adapters.base.connections import BaseConnectionManager
    from dbt.adapters.base.impl import BaseAdapter

    from prefect_dbt.core._executor import _adapter_pool

    # Undo anything a previous test left behind, then snapshot state that
    # tests may mutate so it can be restored afterwards.
    _adapter_pool.revert()
    saved_available = _adapter_pool._available
    saved_cleanup = BaseAdapter.cleanup_connections
    saved_get_if_exists = BaseConnectionManager.get_if_exists
    yield
    _adapter_pool.revert()
    _adapter_pool._available = saved_available
    BaseAdapter.cleanup_connections = saved_cleanup
    BaseConnectionManager.get_if_exists = saved_get_if_exists
def _make_node(
    unique_id: str = "model.test.my_model",
    name: str = "my_model",
    resource_type: NodeType = NodeType.Model,
) -> DbtNode:
    """Build a DbtNode with sensible defaults for executor tests."""
    attrs = {
        "unique_id": unique_id,
        "name": name,
        "resource_type": resource_type,
    }
    return DbtNode(**attrs)
def _make_settings(**overrides: object) -> MagicMock:
    """Create a mock PrefectDbtSettings."""
    defaults: dict[str, object] = {
        "project_dir": Path("/proj"),
        "target_path": Path("target"),
        "log_level": EventLevel.DEBUG,
    }
    merged = {**defaults, **overrides}

    settings = MagicMock()
    settings.project_dir = merged["project_dir"]
    settings.target_path = merged["target_path"]
    settings.log_level = merged["log_level"]

    @contextmanager
    def _resolve():
        yield "/tmp/profiles"

    settings.resolve_profiles_yml = _resolve
    return settings
def _mock_dbt_result(success: bool = True, results: list | None = None) -> MagicMock:
"""Create a mock dbt invocation result."""
res = MagicMock()
res.success = success
res.exception = None if success else RuntimeError("dbt failed")
if results is not None:
res.result.results = results
else:
res.result = None
return res
def _mock_node_result(
unique_id: str,
status: str = "success",
message: str = "",
execution_time: float = 1.0,
) -> MagicMock:
nr = MagicMock()
nr.unique_id = unique_id
nr.status = status
nr.message = message
nr.execution_time = execution_time
return nr
@pytest.fixture
def mock_dbt(monkeypatch):
    """Patch dbtRunner and return (mock_runner_cls, mock_runner) pair.

    The runner is pre-wired to return a successful result with no artifacts.
    Tests can override via ``mock_runner.invoke.return_value = ...``.
    """
    runner = MagicMock()
    runner.invoke.return_value = _mock_dbt_result(success=True)
    runner_cls = MagicMock(return_value=runner)
    monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", runner_cls)
    return runner_cls, runner
def _invoked_args(mock_runner: MagicMock) -> list[str]:
"""Extract the CLI args list from the most recent dbtRunner.invoke call."""
return mock_runner.invoke.call_args[0][0]
# =============================================================================
# TestExecutionResult
# =============================================================================
class TestExecutionResult:
    """Behavioral checks for the ExecutionResult container."""
    def test_defaults(self):
        """A bare result defaults all optional fields."""
        result = ExecutionResult(success=True)
        assert result.success is True
        assert result.node_ids == []
        assert result.error is None
        assert result.artifacts is None
        assert result.log_messages is None
    def test_all_fields(self):
        """Every field round-trips through the constructor unchanged."""
        failure = RuntimeError("boom")
        messages = {"model.a": [("info", "OK created view")]}
        result = ExecutionResult(
            success=False,
            node_ids=["model.a", "model.b"],
            error=failure,
            artifacts={"model.a": {"status": "fail"}},
            log_messages=messages,
        )
        assert result.success is False
        assert result.node_ids == ["model.a", "model.b"]
        assert result.error is failure
        assert "model.a" in result.artifacts
        assert result.log_messages is messages
    def test_mutable(self):
        """Fields can be reassigned after construction."""
        result = ExecutionResult(success=True)
        result.success = False
        assert result.success is False
# =============================================================================
# TestDbtExecutorProtocol
# =============================================================================
class TestDbtExecutorProtocol:
    """The DbtExecutor protocol is structural and runtime-checkable."""
    def test_dbt_core_executor_satisfies_protocol(self):
        """The concrete core executor structurally matches the protocol."""
        core_executor = DbtCoreExecutor(_make_settings())
        assert isinstance(core_executor, DbtExecutor)
    def test_protocol_is_runtime_checkable(self):
        """Any object exposing the right method names passes isinstance."""
        class StubExecutor:
            def execute_node(self, node, command, full_refresh=False): ...
            def execute_wave(self, nodes, full_refresh=False): ...
            def resolve_manifest_path(self): ...
        assert isinstance(StubExecutor(), DbtExecutor)
# =============================================================================
# TestDbtCoreExecutorInit
# =============================================================================
class TestDbtCoreExecutorInit:
    """Constructor defaults and full-option wiring."""
    def test_defaults(self):
        """Omitted keyword options fall back to their documented defaults."""
        settings = _make_settings()
        executor = DbtCoreExecutor(settings)
        assert executor._settings is settings
        assert executor._threads is None
        assert executor._state_path is None
        assert executor._defer is False
        assert executor._defer_state_path is None
        assert executor._favor_state is False
        assert executor._run_deps is True
    def test_full_options(self):
        """Every keyword option lands on the matching private attribute."""
        executor = DbtCoreExecutor(
            _make_settings(),
            threads=4,
            state_path=Path("/state"),
            defer=True,
            defer_state_path=Path("/defer-state"),
            favor_state=True,
            run_deps=False,
        )
        assert executor._threads == 4
        assert executor._state_path == Path("/state")
        assert executor._defer is True
        assert executor._defer_state_path == Path("/defer-state")
        assert executor._favor_state is True
        assert executor._run_deps is False
# =============================================================================
# TestExecuteNode
# =============================================================================
class TestExecuteNode:
    """execute_node(): CLI construction, result mapping, and failure paths."""
    @pytest.mark.parametrize(
        "command, unique_id, name, resource_type",
        [
            ("run", "model.test.my_model", "my_model", NodeType.Model),
            ("seed", "seed.test.my_seed", "my_seed", NodeType.Seed),
            ("snapshot", "snapshot.test.my_snap", "my_snap", NodeType.Snapshot),
            ("test", "test.test.my_test", "my_test", NodeType.Test),
        ],
    )
    def test_command_types(self, mock_dbt, command, unique_id, name, resource_type):
        """Each dbt command type succeeds and selects the node by name."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        node = _make_node(unique_id=unique_id, name=name, resource_type=resource_type)
        result = executor.execute_node(node, command)
        assert result.success is True
        assert result.node_ids == [unique_id]
        assert result.error is None
        args = _invoked_args(mock_runner)
        # First CLI token is the dbt subcommand; the node name is the selector.
        assert args[0] == command
        assert name in args
    def test_full_refresh(self, mock_dbt):
        """full_refresh=True adds --full-refresh for the run command."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run", full_refresh=True)
        assert "--full-refresh" in _invoked_args(mock_runner)
    def test_target_forwarded(self, mock_dbt):
        """An explicit target is forwarded as ``--target <value>``."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run", target="prod")
        args = _invoked_args(mock_runner)
        idx = args.index("--target")
        assert args[idx + 1] == "prod"
    def test_target_absent_by_default(self, mock_dbt):
        """No --target flag is emitted unless a target is given."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run")
        assert "--target" not in _invoked_args(mock_runner)
    def test_full_refresh_ignored_for_test_command(self, mock_dbt):
        """--full-refresh is dropped for the dbt test command."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        node = _make_node(
            unique_id="test.test.t", name="t", resource_type=NodeType.Test
        )
        result = executor.execute_node(node, "test", full_refresh=True)
        assert result.success is True
        assert "--full-refresh" not in _invoked_args(mock_runner)
    def test_full_refresh_ignored_for_snapshot_command(self, mock_dbt):
        """--full-refresh is dropped for the dbt snapshot command."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        node = _make_node(
            unique_id="snapshot.test.s", name="s", resource_type=NodeType.Snapshot
        )
        executor.execute_node(node, "snapshot", full_refresh=True)
        assert "--full-refresh" not in _invoked_args(mock_runner)
    def test_failure_result(self, mock_dbt):
        """A failed invocation surfaces success=False plus the exception."""
        _, mock_runner = mock_dbt
        mock_runner.invoke.return_value = _mock_dbt_result(success=False)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.success is False
        assert isinstance(result.error, RuntimeError)
        assert result.node_ids == ["model.test.my_model"]
    def test_artifacts_captured(self, mock_dbt):
        """Node results are mapped into the artifacts dict by unique_id."""
        _, mock_runner = mock_dbt
        nr = _mock_node_result("model.test.my_model", "success", "OK", 2.5)
        mock_runner.invoke.return_value = _mock_dbt_result(success=True, results=[nr])
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.artifacts is not None
        assert "model.test.my_model" in result.artifacts
        art = result.artifacts["model.test.my_model"]
        assert art["status"] == "success"
        assert art["message"] == "OK"
        assert art["execution_time"] == 2.5
    def test_node_ids_union_of_select_and_artifacts(self, mock_dbt):
        """node_ids is the union of the select list and artifact keys."""
        _, mock_runner = mock_dbt
        nr1 = _mock_node_result("model.test.my_model")
        nr2 = _mock_node_result("test.test.attached_test")
        mock_runner.invoke.return_value = _mock_dbt_result(
            success=True, results=[nr1, nr2]
        )
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        # Selected node always present, plus extra from artifacts
        assert result.node_ids[0] == "model.test.my_model"
        assert "test.test.attached_test" in result.node_ids
    def test_node_ids_includes_select_even_if_missing_from_artifacts(self, mock_dbt):
        """A selected node missing from artifacts still appears in node_ids."""
        _, mock_runner = mock_dbt
        # Artifact only for an extra node, not the selected one
        nr = _mock_node_result("test.test.extra")
        mock_runner.invoke.return_value = _mock_dbt_result(success=True, results=[nr])
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert "model.test.my_model" in result.node_ids
        assert "test.test.extra" in result.node_ids
    def test_node_ids_fallback_to_select_without_artifacts(self, mock_dbt):
        """Without artifacts, node_ids falls back to the select list."""
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.node_ids == ["model.test.my_model"]
    def test_extract_artifacts_none_results(self, mock_dbt):
        """res.result.results being None doesn't raise."""
        _, mock_runner = mock_dbt
        res = MagicMock()
        res.success = True
        res.exception = None
        res.result.results = None
        mock_runner.invoke.return_value = res
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.artifacts is None
    def test_exception_during_invoke_captured(self, mock_dbt):
        """An exception raised while invoking dbt is captured, not propagated."""
        mock_runner_cls, _ = mock_dbt
        mock_runner_cls.side_effect = RuntimeError("import failed")
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.success is False
        assert isinstance(result.error, RuntimeError)
        assert "import failed" in str(result.error)
# =============================================================================
# TestExecuteWave
# =============================================================================
class TestExecuteWave:
    """execute_wave(): batched execution of nodes via dbt build."""
    def test_single_node(self, mock_dbt):
        """A one-node wave runs dbt build."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_wave([_make_node()])
        assert result.success is True
        assert _invoked_args(mock_runner)[0] == "build"
    def test_multiple_nodes(self, mock_dbt):
        """All node names are passed as selectors; node_ids covers every node."""
        _, mock_runner = mock_dbt
        nr_a = _mock_node_result("model.test.a")
        nr_b = _mock_node_result("model.test.b")
        nr_c = _mock_node_result("model.test.c")
        mock_runner.invoke.return_value = _mock_dbt_result(
            success=True, results=[nr_a, nr_b, nr_c]
        )
        executor = DbtCoreExecutor(_make_settings())
        nodes = [
            _make_node("model.test.a", "a"),
            _make_node("model.test.b", "b"),
            _make_node("model.test.c", "c"),
        ]
        result = executor.execute_wave(nodes)
        assert result.success is True
        assert set(result.node_ids) == {"model.test.a", "model.test.b", "model.test.c"}
        args = _invoked_args(mock_runner)
        # CLI args contain node names (dbt selectors), not unique_ids
        assert "a" in args
        assert "b" in args
        assert "c" in args
    def test_mixed_resource_types(self, mock_dbt):
        """Models, seeds, and snapshots can share one build wave."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        nodes = [
            _make_node("model.test.m1", "m1", NodeType.Model),
            _make_node("seed.test.s1", "s1", NodeType.Seed),
            _make_node("snapshot.test.snap1", "snap1", NodeType.Snapshot),
        ]
        result = executor.execute_wave(nodes)
        assert result.success is True
        assert _invoked_args(mock_runner)[0] == "build"
    def test_empty_raises_value_error(self):
        """An empty wave is rejected as a caller error."""
        executor = DbtCoreExecutor(_make_settings())
        with pytest.raises(ValueError, match="Cannot execute an empty wave"):
            executor.execute_wave([])
    def test_failure_captured(self, mock_dbt):
        """A failed build surfaces success=False plus the error."""
        _, mock_runner = mock_dbt
        mock_runner.invoke.return_value = _mock_dbt_result(success=False)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_wave([_make_node()])
        assert result.success is False
        assert result.error is not None
    def test_full_refresh_passed_for_build(self, mock_dbt):
        """full_refresh=True adds --full-refresh for build."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_wave([_make_node()], full_refresh=True)
        assert "--full-refresh" in _invoked_args(mock_runner)
    def test_target_forwarded(self, mock_dbt):
        """An explicit target is forwarded as ``--target <value>``."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_wave([_make_node()], target="staging")
        args = _invoked_args(mock_runner)
        idx = args.index("--target")
        assert args[idx + 1] == "staging"
    def test_target_absent_by_default(self, mock_dbt):
        """No --target flag is emitted unless a target is given."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_wave([_make_node()])
        assert "--target" not in _invoked_args(mock_runner)
    def test_indirect_selection_forwarded(self, mock_dbt):
        """indirect_selection is forwarded as ``--indirect-selection <value>``."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_wave([_make_node()], indirect_selection="empty")
        args = _invoked_args(mock_runner)
        idx = args.index("--indirect-selection")
        assert args[idx + 1] == "empty"
    def test_indirect_selection_absent_by_default(self, mock_dbt):
        """No --indirect-selection flag unless explicitly requested."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_wave([_make_node()])
        assert "--indirect-selection" not in _invoked_args(mock_runner)
# =============================================================================
# TestStateFlags
# =============================================================================
class TestStateFlags:
    """CLI flags derived from the state/defer executor options."""
    def test_state_flag(self, mock_dbt):
        """--state carries the configured state path."""
        _, runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings(), state_path=Path("/my/state"))
        executor.execute_node(_make_node(), "run")
        cli = _invoked_args(runner)
        assert cli[cli.index("--state") + 1] == "/my/state"
    def test_defer_flag(self, mock_dbt):
        """--defer is emitted when defer=True."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings(), defer=True).execute_node(
            _make_node(), "run"
        )
        assert "--defer" in _invoked_args(runner)
    def test_defer_state_flag(self, mock_dbt):
        """--defer-state carries the configured defer-state path."""
        _, runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings(), defer_state_path=Path("/defer"))
        executor.execute_node(_make_node(), "run")
        cli = _invoked_args(runner)
        assert cli[cli.index("--defer-state") + 1] == "/defer"
    def test_favor_state_flag(self, mock_dbt):
        """--favor-state is emitted when favor_state=True."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings(), favor_state=True).execute_node(
            _make_node(), "run"
        )
        assert "--favor-state" in _invoked_args(runner)
    def test_combined_state_flags(self, mock_dbt):
        """All four flags coexist when every state option is set."""
        _, runner = mock_dbt
        executor = DbtCoreExecutor(
            _make_settings(),
            state_path=Path("/state"),
            defer=True,
            defer_state_path=Path("/defer-state"),
            favor_state=True,
        )
        executor.execute_node(_make_node(), "run")
        cli = _invoked_args(runner)
        for flag in ("--state", "--defer", "--defer-state", "--favor-state"):
            assert flag in cli
# =============================================================================
# TestThreads
# =============================================================================
class TestThreads:
    """--threads flag emission."""
    def test_threads_present(self, mock_dbt):
        """A configured thread count is forwarded as a string value."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings(), threads=8).execute_node(
            _make_node(), "run"
        )
        cli = _invoked_args(runner)
        assert cli[cli.index("--threads") + 1] == "8"
    def test_threads_absent(self, mock_dbt):
        """Without a thread count the flag is omitted entirely."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings()).execute_node(_make_node(), "run")
        assert "--threads" not in _invoked_args(runner)
# =============================================================================
# TestCommandConstruction
# =============================================================================
class TestCommandConstruction:
    """Base CLI args shared by every dbtRunner invocation."""
    def test_project_dir(self, mock_dbt):
        """--project-dir comes from settings.project_dir."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings(project_dir=Path("/my/project")))
        executor.execute_node(_make_node(), "run")
        args = _invoked_args(mock_runner)
        idx = args.index("--project-dir")
        assert args[idx + 1] == "/my/project"
    def test_target_path(self, mock_dbt):
        """--target-path comes from settings.target_path."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings(target_path=Path("custom_target")))
        executor.execute_node(_make_node(), "run")
        args = _invoked_args(mock_runner)
        idx = args.index("--target-path")
        assert args[idx + 1] == "custom_target"
    def test_log_level_none(self, mock_dbt):
        """Console logging is silenced via --log-level none."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run")
        args = _invoked_args(mock_runner)
        idx = args.index("--log-level")
        assert args[idx + 1] == "none"
    def test_profiles_dir_from_context_manager(self, mock_dbt):
        """--profiles-dir is resolved via settings.resolve_profiles_yml."""
        _, mock_runner = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run")
        args = _invoked_args(mock_runner)
        idx = args.index("--profiles-dir")
        assert args[idx + 1] == "/tmp/profiles"
    def test_profiles_dir_override_context_manager(self, mock_dbt):
        """A pinned profiles dir bypasses resolve_profiles_yml entirely."""
        _, mock_runner = mock_dbt
        settings = _make_settings()
        calls = [0]
        @contextmanager
        def _resolve():
            calls[0] += 1
            yield "/tmp/profiles"
        settings.resolve_profiles_yml = MagicMock(side_effect=_resolve)
        executor = DbtCoreExecutor(settings)
        with executor.use_resolved_profiles_dir("/stable/profiles"):
            executor.execute_node(_make_node(), "run")
        # The settings-based resolver must not run while the override is active.
        assert calls[0] == 0
        args = _invoked_args(mock_runner)
        idx = args.index("--profiles-dir")
        assert args[idx + 1] == "/stable/profiles"
    def test_fresh_runner_per_invoke(self, mock_dbt):
        """Each _invoke call creates a fresh dbtRunner instance."""
        mock_runner_cls, _ = mock_dbt
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run")
        executor.execute_node(_make_node(), "run")
        assert mock_runner_cls.call_count == 2
# =============================================================================
# TestEventCapture
# =============================================================================
class TestEventCapture:
    """dbt event callbacks: registration, capture, filtering, and grouping."""
    def _make_event(self, level, msg, unique_id=None):
        """Build a minimal EventMsg-like object for callback testing."""
        event = MagicMock()
        event.info.level = level
        event.info.msg = msg
        if unique_id is not None:
            event.data.node_info.unique_id = unique_id
        else:
            # Delete the auto-created mock attribute so attribute checks fail.
            del event.data.node_info
        return event
    def test_callback_registered(self, monkeypatch):
        """dbtRunner is instantiated with a callbacks list."""
        mock_runner = MagicMock()
        mock_runner.invoke.return_value = _mock_dbt_result(success=True)
        mock_cls = MagicMock(return_value=mock_runner)
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_cls)
        executor = DbtCoreExecutor(_make_settings())
        executor.execute_node(_make_node(), "run")
        call_kwargs = mock_cls.call_args[1]
        assert "callbacks" in call_kwargs
        assert len(call_kwargs["callbacks"]) == 1
    def test_log_messages_captured(self, monkeypatch):
        """Events fired during invoke are stored in result.log_messages."""
        node = _make_node()
        def _patched_cls(callbacks=None):
            cb = callbacks[0] if callbacks else None
            runner = MagicMock()
            def _invoke(args):
                # Fire one node-scoped and one global event mid-invocation.
                cb(self._make_event(EventLevel.INFO, "1 of 3 OK", node.unique_id))
                cb(self._make_event(EventLevel.WARN, "Deprecation", None))
                return _mock_dbt_result(success=True)
            runner.invoke.side_effect = _invoke
            return runner
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", _patched_cls)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(node, "run")
        assert result.log_messages is not None
        assert node.unique_id in result.log_messages
        assert ("info", "1 of 3 OK") in result.log_messages[node.unique_id]
        # Events without a node land under the empty-string key.
        assert "" in result.log_messages
        assert ("warning", "Deprecation") in result.log_messages[""]
    def test_empty_messages_skipped(self, monkeypatch):
        """Blank or empty messages are not captured."""
        def _patched_cls(callbacks=None):
            cb = callbacks[0] if callbacks else None
            runner = MagicMock()
            def _invoke(args):
                cb(self._make_event(EventLevel.INFO, "", None))
                cb(self._make_event(EventLevel.INFO, " ", None))
                cb(self._make_event(EventLevel.INFO, "real msg", None))
                return _mock_dbt_result(success=True)
            runner.invoke.side_effect = _invoke
            return runner
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", _patched_cls)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.log_messages is not None
        all_msgs = [m for msgs in result.log_messages.values() for _, m in msgs]
        assert "real msg" in all_msgs
        assert "" not in all_msgs
        assert " " not in all_msgs
    def test_below_min_level_filtered(self, monkeypatch):
        """Events below settings.log_level are not captured."""
        def _patched_cls(callbacks=None):
            cb = callbacks[0] if callbacks else None
            runner = MagicMock()
            def _invoke(args):
                cb(self._make_event(EventLevel.DEBUG, "debug noise", None))
                cb(self._make_event(EventLevel.INFO, "useful info", None))
                return _mock_dbt_result(success=True)
            runner.invoke.side_effect = _invoke
            return runner
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", _patched_cls)
        executor = DbtCoreExecutor(_make_settings(log_level=EventLevel.INFO))
        result = executor.execute_node(_make_node(), "run")
        assert result.log_messages is not None
        all_msgs = [m for msgs in result.log_messages.values() for _, m in msgs]
        assert "useful info" in all_msgs
        assert "debug noise" not in all_msgs
    def test_execute_node_keeps_global_info_logs(self, monkeypatch):
        """Per-node execution captures global INFO logs for run-level dedupe."""
        node = _make_node()
        def _patched_cls(callbacks=None):
            cb = callbacks[0] if callbacks else None
            runner = MagicMock()
            def _invoke(args):
                cb(self._make_event(EventLevel.INFO, "Running with dbt=1.x", None))
                cb(self._make_event(EventLevel.WARN, "Deprecation warning", None))
                cb(self._make_event(EventLevel.INFO, "node log", node.unique_id))
                return _mock_dbt_result(success=True)
            runner.invoke.side_effect = _invoke
            return runner
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", _patched_cls)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(node, "run")
        assert result.log_messages is not None
        assert node.unique_id in result.log_messages
        assert ("info", "node log") in result.log_messages[node.unique_id]
        assert "" in result.log_messages
        assert ("warning", "Deprecation warning") in result.log_messages[""]
        assert ("info", "Running with dbt=1.x") in result.log_messages[""]
    def test_execute_wave_keeps_global_info_logs(self, monkeypatch):
        """Per-wave execution still captures global INFO logs."""
        node = _make_node()
        def _patched_cls(callbacks=None):
            cb = callbacks[0] if callbacks else None
            runner = MagicMock()
            def _invoke(args):
                cb(self._make_event(EventLevel.INFO, "Running with dbt=1.x", None))
                cb(self._make_event(EventLevel.INFO, "node log", node.unique_id))
                return _mock_dbt_result(success=True)
            runner.invoke.side_effect = _invoke
            return runner
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", _patched_cls)
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_wave([node])
        assert result.log_messages is not None
        assert "" in result.log_messages
        assert ("info", "Running with dbt=1.x") in result.log_messages[""]
    def test_no_events_yields_none(self, mock_dbt):
        """When no events are captured, log_messages is None."""
        executor = DbtCoreExecutor(_make_settings())
        result = executor.execute_node(_make_node(), "run")
        assert result.log_messages is None
# =============================================================================
# TestExtraCliArgs
# =============================================================================
class TestExtraCliArgs:
    """Pass-through of caller-supplied extra CLI arguments."""
    def test_extra_cli_args_appended_execute_node(self, mock_dbt):
        """execute_node forwards each extra arg verbatim."""
        _, runner = mock_dbt
        extras = ["--store-failures", "--vars", "{'x': 1}"]
        DbtCoreExecutor(_make_settings()).execute_node(
            _make_node(), "run", extra_cli_args=extras
        )
        cli = _invoked_args(runner)
        for extra in extras:
            assert extra in cli
    def test_extra_cli_args_appended_execute_wave(self, mock_dbt):
        """execute_wave forwards each extra arg verbatim."""
        _, runner = mock_dbt
        extras = ["--warn-error", "--no-partial-parse"]
        DbtCoreExecutor(_make_settings()).execute_wave(
            [_make_node()], extra_cli_args=extras
        )
        cli = _invoked_args(runner)
        for extra in extras:
            assert extra in cli
    def test_extra_cli_args_after_base_args(self, mock_dbt):
        """Extras come after the command and the base flags."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings()).execute_node(
            _make_node(), "run", extra_cli_args=["--store-failures"]
        )
        cli = _invoked_args(runner)
        extras_start = cli.index("--store-failures")
        assert cli[0] == "run"
        assert "--project-dir" in cli[:extras_start]
    def test_extra_cli_args_none_no_effect(self, mock_dbt):
        """extra_cli_args=None adds nothing to the command line."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings()).execute_node(
            _make_node(), "run", extra_cli_args=None
        )
        assert "--store-failures" not in _invoked_args(runner)
    def test_extra_cli_args_empty_list_no_effect(self, mock_dbt):
        """An empty extras list leaves the base command intact."""
        _, runner = mock_dbt
        DbtCoreExecutor(_make_settings()).execute_node(
            _make_node(), "run", extra_cli_args=[]
        )
        assert _invoked_args(runner)[0] == "run"
# =============================================================================
# TestDbtCoreExecutorResolveManifestPath
# =============================================================================
class TestDbtCoreExecutorResolveManifestPath:
    """resolve_manifest_path(): reuse, regeneration via parse, and failures."""
    def test_existing_manifest_returned(self, tmp_path):
        """When manifest.json already exists it is returned without running dbt parse."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        manifest = target_dir / "manifest.json"
        manifest.write_text("{}")
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        result = executor.resolve_manifest_path()
        assert result == manifest.resolve()
    def test_missing_manifest_triggers_dbt_parse(self, tmp_path, monkeypatch):
        """When manifest.json is absent, dbt parse is invoked and the path returned."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        manifest_path = (target_dir / "manifest.json").resolve()
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        def _write_manifest(args):
            # Simulate dbt parse producing the manifest on disk.
            manifest_path.write_text("{}")
            res = MagicMock()
            res.success = True
            res.exception = None
            return res
        mock_runner.invoke.side_effect = _write_manifest
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        result = executor.resolve_manifest_path()
        assert result == manifest_path
        mock_runner.invoke.assert_called_once()
        call_args = mock_runner.invoke.call_args[0][0]
        assert call_args[0] == "parse"
    def test_missing_manifest_uses_profiles_override(self, tmp_path, monkeypatch):
        """Pinned profiles dir is reused for dbt parse when manifest is missing."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        manifest_path = (target_dir / "manifest.json").resolve()
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        def _write_manifest(args):
            manifest_path.write_text("{}")
            res = MagicMock()
            res.success = True
            res.exception = None
            return res
        mock_runner.invoke.side_effect = _write_manifest
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        calls = [0]
        @contextmanager
        def _resolve():
            calls[0] += 1
            yield "/tmp/profiles"
        settings.resolve_profiles_yml = MagicMock(side_effect=_resolve)
        executor = DbtCoreExecutor(settings, run_deps=False)
        with executor.use_resolved_profiles_dir("/stable/profiles"):
            result = executor.resolve_manifest_path()
        assert result == manifest_path
        # The settings-based resolver must not run while the override is active.
        assert calls[0] == 0
        call_args = mock_runner.invoke.call_args[0][0]
        idx = call_args.index("--profiles-dir")
        assert call_args[idx + 1] == "/stable/profiles"
    def test_dbt_parse_failure_raises(self, tmp_path, monkeypatch):
        """A failed dbt parse raises RuntimeError."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = False
        res.exception = RuntimeError("compilation error")
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        with pytest.raises(RuntimeError, match="Failed to generate manifest"):
            executor.resolve_manifest_path()
    def test_parse_succeeds_but_manifest_missing_raises(self, tmp_path, monkeypatch):
        """dbt parse succeeds but manifest still absent raises RuntimeError."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = True
        res.exception = None
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        with pytest.raises(RuntimeError, match="succeeded but manifest not found"):
            executor.resolve_manifest_path()
    def test_returns_absolute_path(self, tmp_path):
        """resolve_manifest_path always returns an absolute path."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        (target_dir / "manifest.json").write_text("{}")
        settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        result = executor.resolve_manifest_path()
        assert result.is_absolute()
    def test_run_parse_cli_args(self, tmp_path, monkeypatch):
        """_run_parse() passes the correct CLI args to dbt parse."""
        target_dir = tmp_path / "target"
        target_dir.mkdir()
        manifest_path = (target_dir / "manifest.json").resolve()
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        def _write_manifest(args):
            manifest_path.write_text("{}")
            res = MagicMock()
            res.success = True
            res.exception = None
            return res
        mock_runner.invoke.side_effect = _write_manifest
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(
            project_dir=tmp_path,
            target_path=Path("target"),
            log_level=EventLevel.INFO,
        )
        executor = DbtCoreExecutor(settings, run_deps=False)
        executor.resolve_manifest_path()
        # dbtRunner instantiated without callbacks (unlike _invoke)
        mock_runner_cls.assert_called_once_with()
        args = mock_runner.invoke.call_args[0][0]
        assert args[0] == "parse"
        assert "--project-dir" in args
        assert args[args.index("--project-dir") + 1] == str(tmp_path)
        assert "--profiles-dir" in args
        assert args[args.index("--profiles-dir") + 1] == "/tmp/profiles"
        assert "--target-path" in args
        assert args[args.index("--target-path") + 1] == "target"
        assert "--log-level" in args
        assert args[args.index("--log-level") + 1] == "none"
        assert "--log-level-file" in args
        assert args[args.index("--log-level-file") + 1] == str(EventLevel.INFO.value)
# =============================================================================
# TestDbtCoreExecutorRunDeps
# =============================================================================
class TestDbtCoreExecutorRunDeps:
    def test_run_deps_invokes_dbt_deps(self, monkeypatch):
        """run_deps() invokes dbt deps via dbtRunner."""
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = True
        res.exception = None
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(project_dir=Path("/proj"))
        executor = DbtCoreExecutor(settings, run_deps=False)
        executor.run_deps()
        mock_runner.invoke.assert_called_once()
        args = mock_runner.invoke.call_args[0][0]
        # "deps" must be the dbt subcommand.
        assert args[0] == "deps"
    def test_run_deps_cli_args(self, monkeypatch):
        """run_deps() passes the correct CLI args."""
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = True
        res.exception = None
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings(
            project_dir=Path("/my/project"),
            log_level=EventLevel.INFO,
        )
        executor = DbtCoreExecutor(settings, run_deps=False)
        executor.run_deps()
        args = mock_runner.invoke.call_args[0][0]
        assert args[0] == "deps"
        assert "--project-dir" in args
        assert args[args.index("--project-dir") + 1] == "/my/project"
        assert "--profiles-dir" in args
        assert args[args.index("--profiles-dir") + 1] == "/tmp/profiles"
        assert "--log-level" in args
        # Console logging is silenced; file logging keeps the settings level.
        assert args[args.index("--log-level") + 1] == "none"
        assert "--log-level-file" in args
        assert args[args.index("--log-level-file") + 1] == str(EventLevel.INFO.value)
        # dbt deps does NOT accept --target-path
        assert "--target-path" not in args
    def test_run_deps_uses_profiles_override(self, monkeypatch):
        """Pinned profiles dir is reused for dbt deps."""
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = True
        res.exception = None
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings()
        # Count how many times the settings-based resolver actually runs.
        calls = [0]
        @contextmanager
        def _resolve():
            calls[0] += 1
            yield "/tmp/profiles"
        settings.resolve_profiles_yml = MagicMock(side_effect=_resolve)
        executor = DbtCoreExecutor(settings, run_deps=False)
        with executor.use_resolved_profiles_dir("/stable/profiles"):
            executor.run_deps()
        # resolve_profiles_yml should NOT have been called (override active)
        assert calls[0] == 0
        args = mock_runner.invoke.call_args[0][0]
        idx = args.index("--profiles-dir")
        assert args[idx + 1] == "/stable/profiles"
    def test_run_deps_failure_raises(self, monkeypatch):
        """A failed dbt deps raises RuntimeError."""
        mock_runner = MagicMock()
        mock_runner_cls = MagicMock(return_value=mock_runner)
        res = MagicMock()
        res.success = False
        # Simulated dbt failure surfaced as the invocation exception.
        res.exception = RuntimeError("package download failed")
        mock_runner.invoke.return_value = res
        monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", mock_runner_cls)
        settings = _make_settings()
        executor = DbtCoreExecutor(settings, run_deps=False)
        with pytest.raises(RuntimeError, match="Failed to install packages"):
            executor.run_deps()
def test_run_deps_fresh_runner(self, monkeypatch):
    """run_deps() creates a fresh dbtRunner (no callbacks)."""
    runner = MagicMock()
    runner.invoke.return_value = MagicMock(success=True, exception=None)
    runner_cls = MagicMock(return_value=runner)
    monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", runner_cls)
    executor = DbtCoreExecutor(_make_settings(), run_deps=False)
    executor.run_deps()
    # A bare constructor call mirrors _run_parse: no callbacks registered.
    runner_cls.assert_called_once_with()
def test_resolve_manifest_path_calls_run_deps_when_manifest_missing(
    self, tmp_path, monkeypatch
):
    """run_deps() is called before dbt parse when manifest is missing."""
    target_dir = tmp_path / "target"
    target_dir.mkdir()
    manifest_path = target_dir / "manifest.json"
    invoked_commands: list[str] = []
    runner = MagicMock()

    def _record_invoke(cli_args):
        command = cli_args[0]
        invoked_commands.append(command)
        if command == "parse":
            # Write manifest when parse is called so the method succeeds
            manifest_path.write_text("{}")
        return MagicMock(success=True, exception=None)

    runner.invoke.side_effect = _record_invoke
    monkeypatch.setattr(
        "prefect_dbt.core._executor.dbtRunner", MagicMock(return_value=runner)
    )
    settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
    executor = DbtCoreExecutor(settings)  # run_deps=True by default
    executor.resolve_manifest_path()
    # deps runs before parse
    assert invoked_commands == ["deps", "parse"]
def test_resolve_manifest_path_skips_run_deps_when_manifest_exists(
    self, tmp_path, monkeypatch
):
    """run_deps() is NOT called when manifest already exists on disk."""
    target_dir = tmp_path / "target"
    target_dir.mkdir()
    (target_dir / "manifest.json").write_text("{}")
    runner_cls = MagicMock(return_value=MagicMock())
    monkeypatch.setattr("prefect_dbt.core._executor.dbtRunner", runner_cls)
    settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
    executor = DbtCoreExecutor(settings)  # run_deps=True by default
    executor.resolve_manifest_path()
    # Manifest already on disk, so no dbt invocation of any kind is needed.
    runner_cls.assert_not_called()
def test_resolve_manifest_path_skips_run_deps_when_false(
    self, tmp_path, monkeypatch
):
    """run_deps() is NOT called when run_deps=False even if manifest is missing."""
    target_dir = tmp_path / "target"
    target_dir.mkdir()
    manifest_path = target_dir / "manifest.json"
    runner = MagicMock()

    def _write_manifest(_cli_args):
        # Only parse should be called, not deps; whichever command runs,
        # produce the manifest so resolution succeeds.
        manifest_path.write_text("{}")
        return MagicMock(success=True, exception=None)

    runner.invoke.side_effect = _write_manifest
    monkeypatch.setattr(
        "prefect_dbt.core._executor.dbtRunner", MagicMock(return_value=runner)
    )
    settings = _make_settings(project_dir=tmp_path, target_path=Path("target"))
    executor = DbtCoreExecutor(settings, run_deps=False)
    executor.resolve_manifest_path()
    # Exactly one invocation, and it is parse — deps was skipped.
    runner.invoke.assert_called_once()
    assert runner.invoke.call_args[0][0][0] == "parse"
# =============================================================================
# TestAdapterPool
# =============================================================================
class TestAdapterPool:
    """Tests for _AdapterPool state machine.

    activate() patches module-level dbt machinery (``adapter_management``,
    ``BaseAdapter.cleanup_connections``, ``BaseConnectionManager.get_if_exists``),
    so every test that activates a pool reverts it in a ``finally`` block to
    keep the suite order-independent. revert() directly after activate() is
    itself exercised (and asserted safe) by the ``test_revert_restores_*``
    tests below.
    """

    def test_unavailable_when_import_fails(self):
        """Pool is unavailable when dbt adapter imports fail."""
        with patch.dict("sys.modules", {"dbt.adapters.factory": None}):
            from prefect_dbt.core._executor import _AdapterPool

            pool = _AdapterPool()
            assert not pool.available

    def test_get_runner_returns_fresh_runner_when_inactive(self):
        """In INACTIVE state, get_runner returns a new dbtRunner."""
        from prefect_dbt.core._executor import _AdapterPool

        pool = _AdapterPool()
        runner, pooled = pool.get_runner(callbacks=[])
        assert runner is not None
        assert pooled is False

    def test_transitions_to_pooled_after_success(self):
        """After on_success, state transitions to POOLED."""
        from prefect_dbt.core._executor import _AdapterPool, _PoolState

        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            pool.get_runner(callbacks=[])
            pool.on_success()
            assert pool._state == _PoolState.POOLED
        finally:
            # Undo the module-level patches installed by activate().
            pool.revert()

    def test_pooled_runner_is_reused(self):
        """In POOLED state, get_runner returns the cached runner."""
        from prefect_dbt.core._executor import _AdapterPool

        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            runner1, _ = pool.get_runner(callbacks=[])
            pool.on_success()
            runner2, pooled = pool.get_runner(callbacks=[])
            assert pooled is True
            assert runner2 is runner1
        finally:
            pool.revert()

    def test_revert_goes_to_inactive(self):
        """revert() transitions back to INACTIVE."""
        from prefect_dbt.core._executor import _AdapterPool, _PoolState

        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        pool.get_runner(callbacks=[])
        pool.on_success()
        pool.revert()
        assert pool._state == _PoolState.INACTIVE

    def test_revert_restores_original_adapter_management(self):
        """revert() restores the original adapter_management function."""
        import dbt.adapters.factory as factory_mod

        from prefect_dbt.core._executor import _AdapterPool

        original = factory_mod.adapter_management
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        pool.get_runner(callbacks=[])
        pool.revert()
        assert factory_mod.adapter_management is original

    def test_activate_suppresses_cleanup_connections(self):
        """activate() patches BaseAdapter.cleanup_connections to a no-op."""
        from dbt.adapters.base.impl import BaseAdapter

        from prefect_dbt.core._executor import _AdapterPool

        original_cleanup = BaseAdapter.cleanup_connections
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            assert BaseAdapter.cleanup_connections is not original_cleanup
            # The patched method should be a no-op (callable with an adapter)
            BaseAdapter.cleanup_connections(MagicMock())  # should not raise
        finally:
            # Restore the class-level patch so later tests see the original.
            pool.revert()

    def test_revert_restores_cleanup_connections(self):
        """revert() restores BaseAdapter.cleanup_connections."""
        from dbt.adapters.base.impl import BaseAdapter

        from prefect_dbt.core._executor import _AdapterPool

        original_cleanup = BaseAdapter.cleanup_connections
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        pool.revert()
        assert BaseAdapter.cleanup_connections is original_cleanup

    def test_cleanup_restores_cleanup_connections(self):
        """_cleanup() restores BaseAdapter.cleanup_connections."""
        from dbt.adapters.base.impl import BaseAdapter

        from prefect_dbt.core._executor import _AdapterPool

        original_cleanup = BaseAdapter.cleanup_connections
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        pool._cleanup()
        assert BaseAdapter.cleanup_connections is original_cleanup

    def test_activate_patches_get_if_exists(self):
        """activate() patches get_if_exists for connection transplant."""
        from dbt.adapters.base.connections import BaseConnectionManager

        from prefect_dbt.core._executor import _AdapterPool

        original = BaseConnectionManager.get_if_exists
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            assert BaseConnectionManager.get_if_exists is not original
        finally:
            pool.revert()

    def test_revert_restores_get_if_exists(self):
        """revert() restores original get_if_exists."""
        from dbt.adapters.base.connections import BaseConnectionManager

        from prefect_dbt.core._executor import _AdapterPool

        original = BaseConnectionManager.get_if_exists
        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        pool.revert()
        assert BaseConnectionManager.get_if_exists is original

    def test_get_if_exists_transplants_open_connection(self):
        """Patched get_if_exists moves an open conn to the new thread key."""
        import threading

        from dbt.adapters.base.connections import BaseConnectionManager

        from prefect_dbt.core._executor import _AdapterPool

        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            # Simulate a connection manager with an open connection under old key
            conn_mgr = MagicMock(spec=BaseConnectionManager)
            conn_mgr.get_thread_identifier.return_value = (1, 999)  # new thread
            old_conn = MagicMock()
            old_conn.state = "open"
            conn_mgr.thread_connections = {(1, 100): old_conn}
            conn_mgr.lock = threading.RLock()
            result = BaseConnectionManager.get_if_exists(conn_mgr)
            assert result is old_conn
            # Old key removed, new key set
            assert (1, 100) not in conn_mgr.thread_connections
            assert conn_mgr.thread_connections[(1, 999)] is old_conn
        finally:
            pool.revert()

    def test_get_if_exists_skips_non_open_connections(self):
        """Patched get_if_exists only transplants connections with state='open'."""
        import threading

        from dbt.adapters.base.connections import BaseConnectionManager

        from prefect_dbt.core._executor import _AdapterPool

        pool = _AdapterPool()
        if not pool.available:
            pytest.skip("dbt adapter imports not available")
        pool.activate()
        try:
            conn_mgr = MagicMock(spec=BaseConnectionManager)
            conn_mgr.get_thread_identifier.return_value = (1, 999)
            closed_conn = MagicMock()
            closed_conn.state = "closed"
            conn_mgr.thread_connections = {(1, 100): closed_conn}
            conn_mgr.lock = threading.RLock()
            result = BaseConnectionManager.get_if_exists(conn_mgr)
            assert result is None
            # Closed connection not transplanted
            assert (1, 100) in conn_mgr.thread_connections
        finally:
            pool.revert()
# =============================================================================
# TestAdapterPoolIntegration
# =============================================================================
class TestAdapterPoolIntegration:
    """Tests for _AdapterPool integration with DbtCoreExecutor._invoke.

    ``_adapter_pool`` is shared module-level state, so tests that activate it
    reset it in a ``finally`` block — other tests in this file assert an
    INACTIVE starting state.
    """

    def test_invoke_activates_pool_on_first_call(self):
        """_invoke should activate the adapter pool before calling dbtRunner."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        mock_result = _mock_dbt_result(success=True)
        with patch("prefect_dbt.core._executor.dbtRunner") as MockRunner:
            MockRunner.return_value.invoke.return_value = mock_result
            MockRunner.return_value.callbacks = []
            executor = DbtCoreExecutor(settings=_make_settings(), pool_adapters=True)
            node = _make_node()
            try:
                executor.execute_node(node, "run")
                # Pool should have been activated
                assert _adapter_pool._state in (
                    _PoolState.FIRST_CALL,
                    _PoolState.POOLED,
                )
            finally:
                # Reset the shared pool so later tests are order-independent.
                _adapter_pool.revert()

    def test_invoke_reverts_pool_on_failure(self):
        """On invoke failure in pooled mode, pool reverts to INACTIVE."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        mock_result = _mock_dbt_result(success=False)
        mock_result.exception = RuntimeError("connection lost")
        with patch("prefect_dbt.core._executor.dbtRunner") as MockRunner:
            MockRunner.return_value.invoke.return_value = mock_result
            MockRunner.return_value.callbacks = []
            executor = DbtCoreExecutor(settings=_make_settings(), pool_adapters=True)
            node = _make_node()
            # Force into POOLED state
            _adapter_pool._state = _PoolState.POOLED
            _adapter_pool._runner = MockRunner.return_value
            try:
                result = executor.execute_node(node, "run")
                assert not result.success
                assert _adapter_pool._state == _PoolState.INACTIVE
            finally:
                # Guarantee a clean pool even if an assertion above fails.
                _adapter_pool.revert()

    def test_invoke_works_without_pool(self):
        """When pool_adapters is False (default), pooling is not used."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        mock_result = _mock_dbt_result(success=True)
        with patch("prefect_dbt.core._executor.dbtRunner") as MockRunner:
            MockRunner.return_value.invoke.return_value = mock_result
            executor = DbtCoreExecutor(settings=_make_settings())
            node = _make_node()
            result = executor.execute_node(node, "run")
            assert result.success
            assert _adapter_pool._state == _PoolState.INACTIVE
            MockRunner.assert_called_once()
# =============================================================================
# TestAdapterPoolEdgeCases
# =============================================================================
class TestAdapterPoolEdgeCases:
    """Edge cases for adapter pool behavior."""

    def test_activate_is_idempotent(self):
        """Calling activate() multiple times doesn't break state."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        _adapter_pool.activate()
        try:
            _adapter_pool.activate()  # second call should be a no-op
            assert _adapter_pool._state == _PoolState.FIRST_CALL
        finally:
            # Reset the shared module-level pool: the sibling tests below
            # assert an INACTIVE starting state.
            _adapter_pool.revert()

    def test_revert_when_already_inactive_is_safe(self):
        """revert() on INACTIVE state doesn't raise."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        assert _adapter_pool._state == _PoolState.INACTIVE
        _adapter_pool.revert()  # should not raise
        assert _adapter_pool._state == _PoolState.INACTIVE

    def test_pool_unavailable_skips_all_operations(self):
        """When imports failed, all pool operations are no-ops."""
        from prefect_dbt.core._executor import _AdapterPool, _PoolState

        # Build an instance without running __init__ so the "imports failed"
        # flag can be forced regardless of the local environment.
        pool = _AdapterPool.__new__(_AdapterPool)
        pool._state = _PoolState.INACTIVE
        pool._available = False
        pool._runner = None
        pool._original_adapter_management = None
        pool._original_base_cleanup = None
        pool._original_get_if_exists = None
        pool._cleanup_registered = False
        pool.activate()  # no-op
        assert pool._state == _PoolState.INACTIVE
        runner, pooled = pool.get_runner(callbacks=[])
        assert not pooled
        assert runner is not None

    def test_on_success_only_transitions_from_first_call(self):
        """on_success() is a no-op when not in FIRST_CALL state."""
        from prefect_dbt.core._executor import _adapter_pool, _PoolState

        assert _adapter_pool._state == _PoolState.INACTIVE
        _adapter_pool.on_success()  # should not raise or change state
        assert _adapter_pool._state == _PoolState.INACTIVE

    def test_cleanup_is_safe_after_revert(self):
        """_cleanup() after revert doesn't double-close connections."""
        from prefect_dbt.core._executor import _adapter_pool

        _adapter_pool.activate()
        _adapter_pool.revert()
        _adapter_pool._cleanup()  # should not raise
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_executor.py",
"license": "Apache License 2.0",
"lines": 1210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.