sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
PrefectHQ/fastmcp:src/fastmcp/server/auth/authorization.py | """Authorization checks for FastMCP components.
This module provides callable-based authorization for tools, resources, and prompts.
Auth checks are functions that receive an AuthContext and return True to allow access
or False to deny.
Auth checks can also raise exceptions:
- AuthorizationError: Propagates with the custom message for explicit denial
- Other exceptions: Masked for security (logged, treated as auth failure)
Example:
```python
from fastmcp import FastMCP
from fastmcp.server.auth import require_scopes
mcp = FastMCP()
@mcp.tool(auth=require_scopes("write"))
def protected_tool(): ...
@mcp.resource("data://secret", auth=require_scopes("read"))
def secret_data(): ...
@mcp.prompt(auth=require_scopes("admin"))
def admin_prompt(): ...
```
"""
from __future__ import annotations
import inspect
import logging
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, cast
from fastmcp.exceptions import AuthorizationError
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from fastmcp.server.auth import AccessToken
from fastmcp.tools.tool import Tool
from fastmcp.utilities.components import FastMCPComponent
@dataclass
class AuthContext:
    """Context object handed to every auth check callable.

    Each auth check receives one of these, giving it access to the caller's
    authentication state and the component the check is guarding.

    Attributes:
        token: The current access token, or None if unauthenticated.
        component: The component (tool, resource, or prompt) being accessed.
        tool: Backwards-compatible alias for component when it's a Tool.
    """

    token: AccessToken | None
    component: FastMCPComponent

    @property
    def tool(self) -> Tool | None:
        """Backwards-compatible access to the component as a Tool.

        Returns the component if it's a Tool, None otherwise.
        """
        # Local import avoids a circular dependency at module load time.
        from fastmcp.tools.tool import Tool

        if isinstance(self.component, Tool):
            return self.component
        return None
# Type alias for auth check functions: a check receives an AuthContext and
# returns a bool (sync) or an Awaitable[bool] (async).
AuthCheck = Callable[[AuthContext], bool] | Callable[[AuthContext], Awaitable[bool]]
def require_scopes(*scopes: str) -> AuthCheck:
    """Build an auth check that demands every listed OAuth scope.

    The returned check passes only when a token is present and carries
    ALL of the given scopes (AND logic).

    Args:
        *scopes: One or more scope strings that must all be present.

    Example:
        ```python
        @mcp.tool(auth=require_scopes("admin"))
        def admin_tool(): ...

        @mcp.tool(auth=require_scopes("read", "write"))
        def read_write_tool(): ...
        ```
    """
    needed = frozenset(scopes)

    def check(ctx: AuthContext) -> bool:
        token = ctx.token
        if token is None:
            # Unauthenticated callers never satisfy a scope requirement.
            return False
        return needed <= set(token.scopes)

    return check
def restrict_tag(tag: str, *, scopes: list[str]) -> AuthCheck:
    """Gate components carrying a given tag behind a set of required scopes.

    Components tagged with *tag* are only accessible when the token holds
    ALL of *scopes*; components without the tag pass unconditionally.

    Args:
        tag: The tag that triggers the scope requirement.
        scopes: List of scopes required when the tag is present.

    Example:
        ```python
        # Components tagged "admin" require the "admin" scope
        AuthMiddleware(auth=restrict_tag("admin", scopes=["admin"]))
        ```
    """
    needed = frozenset(scopes)

    def check(ctx: AuthContext) -> bool:
        # Untagged components carry no restriction at all.
        if tag not in ctx.component.tags:
            return True
        token = ctx.token
        return token is not None and needed <= set(token.scopes)

    return check
async def run_auth_checks(
    checks: AuthCheck | list[AuthCheck],
    ctx: AuthContext,
) -> bool:
    """Run auth checks with AND logic.

    All checks must pass for authorization to succeed. Checks can be
    synchronous or asynchronous functions.

    Auth checks can:
    - Return True to allow access
    - Return False to deny access
    - Raise AuthorizationError to deny with a custom message (propagates)
    - Raise other exceptions (masked for security, treated as denial)

    Args:
        checks: A single check function or list of check functions.
            Each check can be sync (returns bool) or async (returns Awaitable[bool]).
        ctx: The auth context to pass to each check.

    Returns:
        True if all checks pass, False if any check fails.

    Raises:
        AuthorizationError: If an auth check explicitly raises it.
    """
    # Normalize to a list; string-form cast avoids evaluating the AuthCheck
    # alias at runtime (it only matters to the type checker).
    check_list = cast(
        "list[AuthCheck]", checks if isinstance(checks, list) else [checks]
    )
    for check in check_list:
        try:
            result = check(ctx)
            if inspect.isawaitable(result):
                result = await result
            if not result:
                return False
        except AuthorizationError:
            # Let AuthorizationError propagate with its custom message
            raise
        except Exception:
            # Mask other exceptions for security - log and treat as auth failure.
            # Lazy %-args: the message is only formatted if the record is emitted.
            logger.warning(
                "Auth check %s raised an unexpected exception",
                getattr(check, "__name__", repr(check)),
                exc_info=True,
            )
            return False
    return True
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/auth/authorization.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/authorization.py | """Authorization middleware for FastMCP.
This module provides middleware-based authorization using callable auth checks.
AuthMiddleware applies auth checks globally to all components on the server.
Example:
```python
from fastmcp import FastMCP
from fastmcp.server.auth import require_scopes, restrict_tag
from fastmcp.server.middleware import AuthMiddleware
# Require specific scope for all components
mcp = FastMCP(middleware=[
AuthMiddleware(auth=require_scopes("api"))
])
# Tag-based: components tagged "admin" require "admin" scope
mcp = FastMCP(middleware=[
AuthMiddleware(auth=restrict_tag("admin", scopes=["admin"]))
])
```
"""
from __future__ import annotations
import logging
from collections.abc import Sequence
import mcp.types as mt
from fastmcp.exceptions import AuthorizationError
from fastmcp.prompts.prompt import Prompt, PromptResult
from fastmcp.resources.resource import Resource, ResourceResult
from fastmcp.resources.template import ResourceTemplate
from fastmcp.server.auth.authorization import (
AuthCheck,
AuthContext,
run_auth_checks,
)
from fastmcp.server.dependencies import get_access_token
from fastmcp.server.middleware.middleware import (
CallNext,
Middleware,
MiddlewareContext,
)
from fastmcp.tools.tool import Tool, ToolResult
logger = logging.getLogger(__name__)
class AuthMiddleware(Middleware):
    """Global authorization middleware using callable checks.

    This middleware applies auth checks to all components (tools, resources,
    prompts) on the server. It uses the same callable API as component-level
    auth checks.

    The middleware:
    - Filters tools/resources/prompts from list responses based on auth checks
    - Checks auth before tool execution, resource read, and prompt render
    - Skips all auth checks for STDIO transport (no OAuth concept)

    Args:
        auth: A single auth check function or list of check functions.
            All checks must pass for authorization to succeed (AND logic).

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.auth import require_scopes

        # Require specific scope for all components
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("api"))])

        # Multiple scopes (AND logic)
        mcp = FastMCP(middleware=[
            AuthMiddleware(auth=require_scopes("read", "api"))
        ])
        ```
    """

    def __init__(self, auth: AuthCheck | list[AuthCheck]) -> None:
        self.auth = auth

    @staticmethod
    def _is_stdio() -> bool:
        """True when the current transport is STDIO, which has no auth concept."""
        # Late import to avoid circular import with context.py
        from fastmcp.server.context import _current_transport

        return _current_transport.get() == "stdio"

    async def _filter_authorized(self, components: Sequence) -> list:
        """Return only the components that pass the configured auth checks.

        Components whose checks raise AuthorizationError are silently dropped
        from list responses rather than surfacing the error to the client.
        """
        token = get_access_token()
        allowed: list = []
        for component in components:
            ctx = AuthContext(token=token, component=component)
            try:
                if await run_auth_checks(self.auth, ctx):
                    allowed.append(component)
            except AuthorizationError:
                continue
        return allowed

    async def _enforce(self, component, kind: str, identifier) -> None:
        """Raise AuthorizationError unless *component* passes the auth checks.

        An AuthorizationError raised by a check itself propagates unchanged.
        """
        token = get_access_token()
        ctx = AuthContext(token=token, component=component)
        if not await run_auth_checks(self.auth, ctx):
            raise AuthorizationError(
                f"Authorization failed for {kind} '{identifier}': insufficient permissions"
            )

    async def on_list_tools(
        self,
        context: MiddlewareContext[mt.ListToolsRequest],
        call_next: CallNext[mt.ListToolsRequest, Sequence[Tool]],
    ) -> Sequence[Tool]:
        """Filter tools/list response based on auth checks."""
        tools = await call_next(context)
        # STDIO has no auth concept, skip filtering
        if self._is_stdio():
            return tools
        return await self._filter_authorized(tools)

    async def on_call_tool(
        self,
        context: MiddlewareContext[mt.CallToolRequestParams],
        call_next: CallNext[mt.CallToolRequestParams, ToolResult],
    ) -> ToolResult:
        """Check auth before tool execution."""
        # STDIO has no auth concept, skip enforcement
        if self._is_stdio():
            return await call_next(context)

        tool_name = context.message.name
        fastmcp = context.fastmcp_context
        if fastmcp is None:
            # Fail closed: deny access when context is missing
            logger.warning(
                "AuthMiddleware: fastmcp_context is None for tool '%s'. "
                "Denying access for security.",
                tool_name,
            )
            raise AuthorizationError(
                f"Authorization failed for tool '{tool_name}': missing context"
            )

        # Get tool (component auth is checked in get_tool, raises if unauthorized)
        tool = await fastmcp.fastmcp.get_tool(tool_name)
        if tool is None:
            raise AuthorizationError(
                f"Authorization failed for tool '{tool_name}': tool not found"
            )

        await self._enforce(tool, "tool", tool_name)
        return await call_next(context)

    async def on_list_resources(
        self,
        context: MiddlewareContext[mt.ListResourcesRequest],
        call_next: CallNext[mt.ListResourcesRequest, Sequence[Resource]],
    ) -> Sequence[Resource]:
        """Filter resources/list response based on auth checks."""
        resources = await call_next(context)
        # STDIO has no auth concept, skip filtering
        if self._is_stdio():
            return resources
        return await self._filter_authorized(resources)

    async def on_read_resource(
        self,
        context: MiddlewareContext[mt.ReadResourceRequestParams],
        call_next: CallNext[mt.ReadResourceRequestParams, ResourceResult],
    ) -> ResourceResult:
        """Check auth before resource read."""
        # STDIO has no auth concept, skip enforcement
        if self._is_stdio():
            return await call_next(context)

        uri = context.message.uri
        fastmcp = context.fastmcp_context
        if fastmcp is None:
            # Fail closed: deny access when context is missing
            logger.warning(
                "AuthMiddleware: fastmcp_context is None for resource '%s'. "
                "Denying access for security.",
                uri,
            )
            raise AuthorizationError(
                f"Authorization failed for resource '{uri}': missing context"
            )

        # Get resource/template (component auth is checked in get_*, raises if unauthorized)
        component = await fastmcp.fastmcp.get_resource(str(uri))
        if component is None:
            component = await fastmcp.fastmcp.get_resource_template(str(uri))
        if component is None:
            raise AuthorizationError(
                f"Authorization failed for resource '{uri}': resource not found"
            )

        await self._enforce(component, "resource", uri)
        return await call_next(context)

    async def on_list_resource_templates(
        self,
        context: MiddlewareContext[mt.ListResourceTemplatesRequest],
        call_next: CallNext[
            mt.ListResourceTemplatesRequest, Sequence[ResourceTemplate]
        ],
    ) -> Sequence[ResourceTemplate]:
        """Filter resource templates/list response based on auth checks."""
        templates = await call_next(context)
        # STDIO has no auth concept, skip filtering
        if self._is_stdio():
            return templates
        return await self._filter_authorized(templates)

    async def on_list_prompts(
        self,
        context: MiddlewareContext[mt.ListPromptsRequest],
        call_next: CallNext[mt.ListPromptsRequest, Sequence[Prompt]],
    ) -> Sequence[Prompt]:
        """Filter prompts/list response based on auth checks."""
        prompts = await call_next(context)
        # STDIO has no auth concept, skip filtering
        if self._is_stdio():
            return prompts
        return await self._filter_authorized(prompts)

    async def on_get_prompt(
        self,
        context: MiddlewareContext[mt.GetPromptRequestParams],
        call_next: CallNext[mt.GetPromptRequestParams, PromptResult],
    ) -> PromptResult:
        """Check auth before prompt render."""
        # STDIO has no auth concept, skip enforcement
        if self._is_stdio():
            return await call_next(context)

        prompt_name = context.message.name
        fastmcp = context.fastmcp_context
        if fastmcp is None:
            # Fail closed: deny access when context is missing
            logger.warning(
                "AuthMiddleware: fastmcp_context is None for prompt '%s'. "
                "Denying access for security.",
                prompt_name,
            )
            raise AuthorizationError(
                f"Authorization failed for prompt '{prompt_name}': missing context"
            )

        # Get prompt (component auth is checked in get_prompt, raises if unauthorized)
        prompt = await fastmcp.fastmcp.get_prompt(prompt_name)
        if prompt is None:
            raise AuthorizationError(
                f"Authorization failed for prompt '{prompt_name}': prompt not found"
            )

        await self._enforce(prompt, "prompt", prompt_name)
        return await call_next(context)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/authorization.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/server/auth/test_authorization.py | """Tests for authorization checks and AuthMiddleware."""
from unittest.mock import Mock
import mcp.types as mcp_types
import pytest
from mcp.server.auth.middleware.auth_context import auth_context_var
from mcp.server.auth.middleware.bearer_auth import AuthenticatedUser
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.exceptions import AuthorizationError
from fastmcp.server.auth import (
AccessToken,
AuthContext,
require_scopes,
restrict_tag,
run_auth_checks,
)
from fastmcp.server.middleware import AuthMiddleware
# =============================================================================
# Test helpers
# =============================================================================
def make_token(scopes: list[str] | None = None) -> AccessToken:
    """Build a minimal AccessToken fixture with the given scopes."""
    if scopes is None:
        scopes = []
    return AccessToken(
        token="test-token",
        client_id="test-client",
        scopes=scopes,
        expires_at=None,
        claims={},
    )
def make_tool() -> Mock:
    """Create a mock component with an empty tag set, standing in for a Tool."""
    mock_tool = Mock()
    mock_tool.tags = set()
    return mock_tool
# =============================================================================
# Tests for require_scopes
# =============================================================================
class TestRequireScopes:
    """Unit tests for the require_scopes auth-check factory."""

    def test_returns_true_with_matching_scope(self):
        ctx = AuthContext(token=make_token(scopes=["admin"]), component=make_tool())
        assert require_scopes("admin")(ctx) is True

    def test_returns_true_with_all_required_scopes(self):
        ctx = AuthContext(
            token=make_token(scopes=["read", "write", "admin"]),
            component=make_tool(),
        )
        assert require_scopes("read", "write")(ctx) is True

    def test_returns_false_with_missing_scope(self):
        ctx = AuthContext(token=make_token(scopes=["read"]), component=make_tool())
        assert require_scopes("admin")(ctx) is False

    def test_returns_false_with_partial_scopes(self):
        # Holding only some of the required scopes is not enough (AND logic).
        ctx = AuthContext(token=make_token(scopes=["read"]), component=make_tool())
        assert require_scopes("read", "write")(ctx) is False

    def test_returns_false_without_token(self):
        ctx = AuthContext(token=None, component=make_tool())
        assert require_scopes("admin")(ctx) is False
# =============================================================================
# Tests for restrict_tag
# =============================================================================
class TestRestrictTag:
    """Unit tests for the restrict_tag auth-check factory."""

    @staticmethod
    def _tagged(*tags: str):
        """Mock component carrying the given tags."""
        component = make_tool()
        component.tags = set(tags)
        return component

    def test_allows_access_when_tag_not_present(self):
        ctx = AuthContext(token=None, component=self._tagged("other"))
        assert restrict_tag("admin", scopes=["admin"])(ctx) is True

    def test_blocks_access_when_tag_present_without_token(self):
        ctx = AuthContext(token=None, component=self._tagged("admin"))
        assert restrict_tag("admin", scopes=["admin"])(ctx) is False

    def test_blocks_access_when_tag_present_without_scope(self):
        ctx = AuthContext(
            token=make_token(scopes=["read"]), component=self._tagged("admin")
        )
        assert restrict_tag("admin", scopes=["admin"])(ctx) is False

    def test_allows_access_when_tag_present_with_scope(self):
        ctx = AuthContext(
            token=make_token(scopes=["admin"]), component=self._tagged("admin")
        )
        assert restrict_tag("admin", scopes=["admin"])(ctx) is True
# =============================================================================
# Tests for run_auth_checks
# =============================================================================
class TestRunAuthChecks:
    """Tests for run_auth_checks: AND semantics, sync/async checks, exceptions."""

    async def test_single_check_passes(self):
        ctx = AuthContext(token=make_token(scopes=["test"]), component=make_tool())
        assert await run_auth_checks(require_scopes("test"), ctx) is True

    async def test_single_check_fails(self):
        ctx = AuthContext(token=None, component=make_tool())
        assert await run_auth_checks(require_scopes("test"), ctx) is False

    async def test_multiple_checks_all_pass(self):
        token = make_token(scopes=["test", "admin"])
        ctx = AuthContext(token=token, component=make_tool())
        checks = [require_scopes("test"), require_scopes("admin")]
        assert await run_auth_checks(checks, ctx) is True

    async def test_multiple_checks_one_fails(self):
        token = make_token(scopes=["read"])
        ctx = AuthContext(token=token, component=make_tool())
        checks = [require_scopes("read"), require_scopes("admin")]
        assert await run_auth_checks(checks, ctx) is False

    async def test_empty_list_passes(self):
        # Vacuous truth: with no checks configured, access is allowed.
        ctx = AuthContext(token=None, component=make_tool())
        assert await run_auth_checks([], ctx) is True

    async def test_custom_lambda_check(self):
        token = make_token()
        token.claims = {"level": 5}
        ctx = AuthContext(token=token, component=make_tool())

        def check(ctx: AuthContext) -> bool:
            return ctx.token is not None and ctx.token.claims.get("level", 0) >= 3

        assert await run_auth_checks(check, ctx) is True

    async def test_authorization_error_propagates(self):
        """AuthorizationError from auth check should propagate with custom message."""

        def custom_auth_check(ctx: AuthContext) -> bool:
            raise AuthorizationError("Custom denial reason")

        ctx = AuthContext(token=make_token(), component=make_tool())
        with pytest.raises(AuthorizationError, match="Custom denial reason"):
            await run_auth_checks(custom_auth_check, ctx)

    async def test_generic_exception_is_masked(self):
        """Generic exceptions from auth checks should be masked (return False)."""

        def buggy_auth_check(ctx: AuthContext) -> bool:
            raise ValueError("Unexpected internal error")

        ctx = AuthContext(token=make_token(), component=make_tool())
        # Should return False, not raise the ValueError
        assert await run_auth_checks(buggy_auth_check, ctx) is False

    async def test_authorization_error_stops_chain(self):
        """AuthorizationError should stop the check chain and propagate."""
        call_order = []

        def check_1(ctx: AuthContext) -> bool:
            call_order.append(1)
            return True

        def check_2(ctx: AuthContext) -> bool:
            call_order.append(2)
            raise AuthorizationError("Explicit denial")

        def check_3(ctx: AuthContext) -> bool:
            call_order.append(3)
            return True

        ctx = AuthContext(token=make_token(), component=make_tool())
        with pytest.raises(AuthorizationError, match="Explicit denial"):
            await run_auth_checks([check_1, check_2, check_3], ctx)
        # Check 3 should not be called
        assert call_order == [1, 2]

    async def test_async_check_passes(self):
        """Async auth check functions should be awaited."""

        async def async_check(ctx: AuthContext) -> bool:
            return ctx.token is not None

        ctx = AuthContext(token=make_token(), component=make_tool())
        assert await run_auth_checks(async_check, ctx) is True

    async def test_async_check_fails(self):
        """Async auth check that returns False should deny access."""

        async def async_check(ctx: AuthContext) -> bool:
            return False

        ctx = AuthContext(token=make_token(), component=make_tool())
        assert await run_auth_checks(async_check, ctx) is False

    async def test_mixed_sync_and_async_checks(self):
        """A mix of sync and async checks should all be evaluated."""

        def sync_check(ctx: AuthContext) -> bool:
            return True

        async def async_check(ctx: AuthContext) -> bool:
            return ctx.token is not None

        ctx = AuthContext(token=make_token(scopes=["test"]), component=make_tool())
        checks = [sync_check, async_check, require_scopes("test")]
        assert await run_auth_checks(checks, ctx) is True

    async def test_async_check_exception_is_masked(self):
        """Async checks that raise non-AuthorizationError should be masked."""

        async def buggy_async_check(ctx: AuthContext) -> bool:
            raise ValueError("async error")

        ctx = AuthContext(token=make_token(), component=make_tool())
        assert await run_auth_checks(buggy_async_check, ctx) is False

    async def test_async_check_authorization_error_propagates(self):
        """Async checks that raise AuthorizationError should propagate."""

        async def async_denial(ctx: AuthContext) -> bool:
            raise AuthorizationError("Async denial")

        ctx = AuthContext(token=make_token(), component=make_tool())
        with pytest.raises(AuthorizationError, match="Async denial"):
            await run_auth_checks(async_denial, ctx)
# =============================================================================
# Tests for tool-level auth with FastMCP
# =============================================================================
def set_token(token: AccessToken | None):
    """Install *token* in the auth context var, returning the reset token."""
    user = None if token is None else AuthenticatedUser(token)
    return auth_context_var.set(user)
class TestToolLevelAuth:
    """Tests for component-level auth on tools registered with FastMCP."""

    async def test_tool_without_auth_is_visible(self):
        mcp = FastMCP()

        @mcp.tool
        def public_tool() -> str:
            return "public"

        tools = await mcp.list_tools()
        assert len(tools) == 1
        assert tools[0].name == "public_tool"

    async def test_tool_with_auth_hidden_without_token(self):
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        # No token set - tool should be hidden
        tools = await mcp.list_tools()
        assert len(tools) == 0

    async def test_tool_with_auth_visible_with_token(self):
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        # Set token in context
        token = make_token(scopes=["test"])
        tok = set_token(token)
        try:
            tools = await mcp.list_tools()
            assert len(tools) == 1
            assert tools[0].name == "protected_tool"
        finally:
            # Always restore the context var so other tests are unaffected.
            auth_context_var.reset(tok)

    async def test_tool_with_scope_auth_hidden_without_scope(self):
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("admin"))
        def admin_tool() -> str:
            return "admin"

        # Token without admin scope
        token = make_token(scopes=["read"])
        tok = set_token(token)
        try:
            tools = await mcp.list_tools()
            assert len(tools) == 0
        finally:
            auth_context_var.reset(tok)

    async def test_tool_with_scope_auth_visible_with_scope(self):
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("admin"))
        def admin_tool() -> str:
            return "admin"

        # Token with admin scope
        token = make_token(scopes=["admin"])
        tok = set_token(token)
        try:
            tools = await mcp.list_tools()
            assert len(tools) == 1
            assert tools[0].name == "admin_tool"
        finally:
            auth_context_var.reset(tok)

    async def test_get_tool_returns_none_without_auth(self):
        """get_tool() returns None for unauthorized tools (consistent with list filtering)."""
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        # get_tool() returns None for unauthorized tools
        tool = await mcp.get_tool("protected_tool")
        assert tool is None

    async def test_get_tool_returns_tool_with_auth(self):
        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        token = make_token(scopes=["test"])
        tok = set_token(token)
        try:
            tool = await mcp.get_tool("protected_tool")
            assert tool is not None
            assert tool.name == "protected_tool"
        finally:
            auth_context_var.reset(tok)
# =============================================================================
# Tests for AuthMiddleware
# =============================================================================
class TestAuthMiddleware:
    """Tests for middleware filtering via MCP handler layer.

    These tests call _list_tools_mcp() which applies middleware during list,
    simulating what happens when a client calls list_tools over MCP.
    """

    async def test_middleware_filters_tools_without_token(self):
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("test"))])

        @mcp.tool
        def public_tool() -> str:
            return "public"

        # No token - all tools filtered by middleware
        result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        assert len(result.tools) == 0

    async def test_middleware_allows_tools_with_token(self):
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("test"))])

        @mcp.tool
        def public_tool() -> str:
            return "public"

        token = make_token(scopes=["test"])
        tok = set_token(token)
        try:
            result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
            assert len(result.tools) == 1
        finally:
            # Restore the context var so later tests see a clean state.
            auth_context_var.reset(tok)

    async def test_middleware_with_scope_check(self):
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("api"))])

        @mcp.tool
        def api_tool() -> str:
            return "api"

        # Token without api scope
        token = make_token(scopes=["read"])
        tok = set_token(token)
        try:
            result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
            assert len(result.tools) == 0
        finally:
            auth_context_var.reset(tok)

        # Token with api scope
        token = make_token(scopes=["api"])
        tok = set_token(token)
        try:
            result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
            assert len(result.tools) == 1
        finally:
            auth_context_var.reset(tok)

    async def test_middleware_with_restrict_tag(self):
        mcp = FastMCP(
            middleware=[AuthMiddleware(auth=restrict_tag("admin", scopes=["admin"]))]
        )

        @mcp.tool
        def public_tool() -> str:
            return "public"

        @mcp.tool(tags={"admin"})
        def admin_tool() -> str:
            return "admin"

        # No token - public tool allowed, admin tool blocked
        result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        assert len(result.tools) == 1
        assert result.tools[0].name == "public_tool"

        # Token with admin scope - both allowed
        token = make_token(scopes=["admin"])
        tok = set_token(token)
        try:
            result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
            assert len(result.tools) == 2
        finally:
            auth_context_var.reset(tok)

    async def test_middleware_skips_tool_on_authorization_error(self):
        # An AuthorizationError raised during list filtering should drop the
        # offending tool from the listing rather than failing the request.
        def deny_blocked_tool(ctx: AuthContext) -> bool:
            if ctx.component.name == "blocked_tool":
                raise AuthorizationError(f"deny {ctx.component.name}")
            return True

        mcp = FastMCP(middleware=[AuthMiddleware(auth=deny_blocked_tool)])

        @mcp.tool
        def blocked_tool() -> str:
            return "blocked"

        @mcp.tool
        def allowed_tool() -> str:
            return "allowed"

        result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        assert [tool.name for tool in result.tools] == ["allowed_tool"]

    async def test_middleware_skips_resource_on_authorization_error(self):
        def deny_blocked_resource(ctx: AuthContext) -> bool:
            if ctx.component.name == "blocked_resource":
                raise AuthorizationError(f"deny {ctx.component.name}")
            return True

        mcp = FastMCP(middleware=[AuthMiddleware(auth=deny_blocked_resource)])

        @mcp.resource("resource://blocked")
        def blocked_resource() -> str:
            return "blocked"

        @mcp.resource("resource://allowed")
        def allowed_resource() -> str:
            return "allowed"

        result = await mcp._list_resources_mcp(mcp_types.ListResourcesRequest())
        assert [str(resource.uri) for resource in result.resources] == [
            "resource://allowed"
        ]

    async def test_middleware_skips_resource_template_on_authorization_error(self):
        def deny_blocked_resource_template(ctx: AuthContext) -> bool:
            if ctx.component.name == "blocked_resource_template":
                raise AuthorizationError(f"deny {ctx.component.name}")
            return True

        mcp = FastMCP(middleware=[AuthMiddleware(auth=deny_blocked_resource_template)])

        @mcp.resource("resource://blocked/{item}")
        def blocked_resource_template(item: str) -> str:
            return item

        @mcp.resource("resource://allowed/{item}")
        def allowed_resource_template(item: str) -> str:
            return item

        result = await mcp._list_resource_templates_mcp(
            mcp_types.ListResourceTemplatesRequest()
        )
        assert [template.uriTemplate for template in result.resourceTemplates] == [
            "resource://allowed/{item}"
        ]

    async def test_middleware_skips_prompt_on_authorization_error(self):
        def deny_blocked_prompt(ctx: AuthContext) -> bool:
            if ctx.component.name == "blocked_prompt":
                raise AuthorizationError(f"deny {ctx.component.name}")
            return True

        mcp = FastMCP(middleware=[AuthMiddleware(auth=deny_blocked_prompt)])

        @mcp.prompt
        def blocked_prompt() -> str:
            return "blocked"

        @mcp.prompt
        def allowed_prompt() -> str:
            return "allowed"

        result = await mcp._list_prompts_mcp(mcp_types.ListPromptsRequest())
        assert [prompt.name for prompt in result.prompts] == ["allowed_prompt"]
# =============================================================================
# Integration tests with Client
# =============================================================================
class TestAuthIntegration:
    """End-to-end tests through the in-memory Client transport."""

    async def test_client_only_sees_authorized_tools(self):
        mcp = FastMCP()

        @mcp.tool
        def public_tool() -> str:
            return "public"

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        async with Client(mcp) as client:
            # No token - only public tool visible
            tools = await client.list_tools()
            assert len(tools) == 1
            assert tools[0].name == "public_tool"

    async def test_client_with_token_sees_all_authorized_tools(self):
        mcp = FastMCP()

        @mcp.tool
        def public_tool() -> str:
            return "public"

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool() -> str:
            return "protected"

        # Set token before creating client
        token = make_token(scopes=["test"])
        tok = set_token(token)
        try:
            async with Client(mcp) as client:
                tools = await client.list_tools()
                tool_names = [t.name for t in tools]
                # With token, both tools should be visible
                assert "public_tool" in tool_names
                assert "protected_tool" in tool_names
        finally:
            # Restore the context var regardless of assertion outcome.
            auth_context_var.reset(tok)
# =============================================================================
# Integration tests with async auth checks
# =============================================================================
class TestAsyncAuthIntegration:
    """Integration tests exercising async auth check callables."""

    async def test_async_auth_check_filters_tool_listing(self):
        """Async auth checks should work for filtering tool lists."""
        mcp = FastMCP()

        async def check_claims(ctx: AuthContext) -> bool:
            return ctx.token is not None and ctx.token.claims.get("role") == "admin"

        @mcp.tool(auth=check_claims)
        def admin_tool() -> str:
            return "admin"

        @mcp.tool
        def public_tool() -> str:
            return "public"

        # Without token, only public tool visible
        tools = await mcp.list_tools()
        assert len(tools) == 1
        assert tools[0].name == "public_tool"

        # With correct claims, both visible
        token = make_token()
        token.claims = {"role": "admin"}
        tok = set_token(token)
        try:
            tools = await mcp.list_tools()
            assert len(tools) == 2
        finally:
            auth_context_var.reset(tok)

    async def test_async_auth_check_on_tool_call(self):
        """Async auth checks should work for tool execution via client."""
        mcp = FastMCP()

        async def check_claims(ctx: AuthContext) -> bool:
            return ctx.token is not None and ctx.token.claims.get("role") == "admin"

        @mcp.tool(auth=check_claims)
        def admin_tool() -> str:
            return "secret"

        token = make_token()
        token.claims = {"role": "admin"}
        tok = set_token(token)
        try:
            async with Client(mcp) as client:
                result = await client.call_tool("admin_tool", {})
                assert result.content[0].text == "secret"
        finally:
            auth_context_var.reset(tok)

    async def test_async_auth_middleware(self):
        """Async auth checks should work with AuthMiddleware."""

        async def async_scope_check(ctx: AuthContext) -> bool:
            return ctx.token is not None and "api" in ctx.token.scopes

        mcp = FastMCP(middleware=[AuthMiddleware(auth=async_scope_check)])

        @mcp.tool
        def api_tool() -> str:
            return "api"

        # Without token, tool is hidden.
        # Use the module-level mcp_types import rather than __import__("mcp"),
        # matching the rest of this file.
        result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        assert len(result.tools) == 0

        # With token containing "api" scope, tool is visible
        token = make_token(scopes=["api"])
        tok = set_token(token)
        try:
            result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
            assert len(result.tools) == 1
        finally:
            auth_context_var.reset(tok)
# =============================================================================
# Tests for transformed tools preserving auth
# =============================================================================
class TestTransformedToolAuth:
    async def test_transformed_tool_preserves_auth(self):
        """Transformed tools should inherit auth from their parent."""
        from fastmcp.tools.tool_transform import TransformedTool

        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool(x: int) -> str:
            return str(x)

        # Fetch the registered tool, then derive a transformed copy.
        registered = await mcp._local_provider.list_tools()
        parent = registered[0]
        assert parent.auth is not None

        derived = TransformedTool.from_tool(
            parent,
            name="transformed_protected",
        )

        # The auth check must carry over unchanged.
        assert derived.auth is not None
        assert derived.auth == parent.auth

    async def test_transformed_tool_filtered_without_token(self):
        """Transformed tools with auth should be filtered without token."""
        from fastmcp.tools.tool_transform import ToolTransformConfig

        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool(x: int) -> str:
            return str(x)

        mcp.add_tool_transformation(
            "protected_tool", ToolTransformConfig(name="renamed_protected")
        )

        # Unauthenticated listing must not expose the transformed tool.
        assert len(await mcp.list_tools()) == 0

    async def test_transformed_tool_visible_with_token(self):
        """Transformed tools with auth should be visible with token."""
        from fastmcp.tools.tool_transform import ToolTransformConfig

        mcp = FastMCP()

        @mcp.tool(auth=require_scopes("test"))
        def protected_tool(x: int) -> str:
            return str(x)

        mcp.add_tool_transformation(
            "protected_tool", ToolTransformConfig(name="renamed_protected")
        )

        reset_handle = set_token(make_token(scopes=["test"]))
        try:
            visible = await mcp.list_tools()
            assert len(visible) == 1
            assert visible[0].name == "renamed_protected"
        finally:
            auth_context_var.reset(reset_handle)
# =============================================================================
# Tests for AuthMiddleware on_call_tool enforcement
# =============================================================================
class TestAuthMiddlewareCallTool:
    async def test_middleware_blocks_call_without_auth(self):
        """AuthMiddleware should raise AuthorizationError on unauthorized call."""
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("test"))])

        @mcp.tool
        def my_tool() -> str:
            return "result"

        async with Client(mcp) as client:
            # Without a token, calling the tool must fail.
            with pytest.raises(Exception) as exc_info:
                await client.call_tool("my_tool", {})
            message = str(exc_info.value).lower()
            # The error message should indicate an authorization failure.
            assert "authorization" in message or "insufficient" in message

    async def test_middleware_allows_call_with_auth(self):
        """AuthMiddleware should allow tool call with valid token."""
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("test"))])

        @mcp.tool
        def my_tool() -> str:
            return "result"

        reset_handle = set_token(make_token(scopes=["test"]))
        try:
            async with Client(mcp) as client:
                result = await client.call_tool("my_tool", {})
                assert result.content[0].text == "result"
        finally:
            auth_context_var.reset(reset_handle)

    async def test_middleware_blocks_call_with_wrong_scope(self):
        """AuthMiddleware should block calls when scope requirements aren't met."""
        mcp = FastMCP(middleware=[AuthMiddleware(auth=require_scopes("admin"))])

        @mcp.tool
        def admin_tool() -> str:
            return "admin result"

        # Token present, but it lacks the required "admin" scope.
        reset_handle = set_token(make_token(scopes=["read"]))
        try:
            async with Client(mcp) as client:
                with pytest.raises(Exception) as exc_info:
                    await client.call_tool("admin_tool", {})
                message = str(exc_info.value).lower()
                assert "authorization" in message or "insufficient" in message
        finally:
            auth_context_var.reset(reset_handle)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/auth/test_authorization.py",
"license": "Apache License 2.0",
"lines": 621,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/integration_tests/test_timeout_fix.py | """Test that verifies the timeout fix for issue #2842 and #2845."""
import asyncio
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.transports import StreamableHttpTransport
from fastmcp.utilities.tests import run_server_async
def create_test_server() -> FastMCP:
    """Build a FastMCP server exposing one deliberately slow tool."""
    mcp_server = FastMCP("TestServer")

    @mcp_server.tool
    async def slow_tool(duration: int = 6) -> str:
        """A tool that takes some time to complete."""
        await asyncio.sleep(duration)
        return f"Completed in {duration} seconds"

    return mcp_server
@pytest.fixture
async def streamable_http_server():
    """Yield the URL of a running test server for the duration of a test."""
    async with run_server_async(create_test_server()) as url:
        yield url
@pytest.mark.integration
@pytest.mark.timeout(15)
async def test_slow_tool_with_http_transport(streamable_http_server: str):
    """Test that tools taking >5 seconds work correctly with HTTP transport.

    This test verifies the fix for:
    - Issue #2842: Client can't get result after upgrading to 2.14.2
    - Issue #2845: Server doesn't return results when tool takes >5 seconds

    The root cause was that the httpx client was created without explicit
    timeout configuration, defaulting to httpx's 5-second timeout.
    """
    transport = StreamableHttpTransport(streamable_http_server)
    async with Client(transport=transport) as client:
        # Must not trip the old 5-second default timeout.
        result = await client.call_tool("slow_tool", {"duration": 6})
        assert result.data == "Completed in 6 seconds"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/integration_tests/test_timeout_fix.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/utilities/version_check.py | """Version checking utilities for FastMCP."""
from __future__ import annotations
import json
import time
from pathlib import Path
import httpx
from packaging.version import Version
from fastmcp.utilities.logging import get_logger
logger = get_logger(__name__)
PYPI_URL = "https://pypi.org/pypi/fastmcp/json"
CACHE_TTL_SECONDS = 60 * 60 * 12 # 12 hours
REQUEST_TIMEOUT_SECONDS = 2.0
def _get_cache_path(include_prereleases: bool = False) -> Path:
    """Return the on-disk location of the version cache file.

    Prerelease lookups use a separate file so the two caches (and their
    TTLs) never interfere with each other.
    """
    import fastmcp

    filename = (
        "version_cache_prerelease.json" if include_prereleases else "version_cache.json"
    )
    return fastmcp.settings.home / filename
def _read_cache(include_prereleases: bool = False) -> tuple[str | None, float]:
    """Read cached version info.

    Returns:
        Tuple of (cached_version, cache_timestamp), or (None, 0) when the
        cache file is missing or unreadable.
    """
    path = _get_cache_path(include_prereleases)
    if not path.exists():
        return None, 0
    try:
        payload = json.loads(path.read_text())
        return payload.get("latest_version"), payload.get("timestamp", 0)
    except (json.JSONDecodeError, OSError):
        return None, 0
def _write_cache(latest_version: str, include_prereleases: bool = False) -> None:
    """Persist the latest version (stamped with the current time) to disk."""
    path = _get_cache_path(include_prereleases)
    payload = {"latest_version": latest_version, "timestamp": time.time()}
    try:
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json.dumps(payload))
    except OSError:
        pass  # cache writes are best-effort; failures are silently ignored
def _fetch_latest_version(include_prereleases: bool = False) -> str | None:
    """Fetch the newest FastMCP version listed on PyPI.

    Args:
        include_prereleases: If True, include pre-release versions (alpha, beta, rc).

    Returns:
        The highest version string, or None when PyPI is unreachable or
        returns no usable releases.
    """
    try:
        response = httpx.get(PYPI_URL, timeout=REQUEST_TIMEOUT_SECONDS)
        response.raise_for_status()
        releases = response.json().get("releases", {})
        if not releases:
            return None

        candidates: list[Version] = []
        for raw in releases:
            try:
                parsed = Version(raw)
            except ValueError:
                # packaging raises InvalidVersion (a ValueError subclass)
                logger.debug(f"Skipping invalid version string: {raw}")
                continue
            if include_prereleases or not parsed.is_prerelease:
                candidates.append(parsed)

        return str(max(candidates)) if candidates else None
    except (httpx.HTTPError, json.JSONDecodeError, KeyError):
        return None
def get_latest_version(include_prereleases: bool = False) -> str | None:
    """Get the latest version of FastMCP from PyPI, using cache when available.

    Args:
        include_prereleases: If True, include pre-release versions.

    Returns:
        The latest version string, or None if unavailable.
    """
    cached, stamp = _read_cache(include_prereleases)

    # A fresh, non-empty cached value short-circuits the network call.
    if cached and (time.time() - stamp) < CACHE_TTL_SECONDS:
        return cached

    fetched = _fetch_latest_version(include_prereleases)
    if fetched:
        _write_cache(fetched, include_prereleases)
        return fetched

    # Network fetch failed: fall back to the stale cached value, if any.
    return cached
def check_for_newer_version() -> str | None:
    """Check if a newer version of FastMCP is available.

    Returns:
        The latest version string if newer than current, None otherwise.
    """
    import fastmcp

    setting = fastmcp.settings.check_for_updates
    if setting == "off":
        return None

    # Positional argument preserved deliberately (callers/tests may
    # inspect the call signature).
    latest_version = get_latest_version(setting == "prerelease")
    if not latest_version:
        return None

    try:
        if Version(latest_version) > Version(fastmcp.__version__):
            return latest_version
    except ValueError:
        logger.debug(
            f"Could not compare versions: current={fastmcp.__version__!r}, "
            f"latest={latest_version!r}"
        )
    return None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/version_check.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/utilities/test_version_check.py | """Tests for version checking utilities."""
import json
import time
from pathlib import Path
from unittest.mock import MagicMock, patch
import httpx
import pytest
from fastmcp.utilities.version_check import (
CACHE_TTL_SECONDS,
_fetch_latest_version,
_get_cache_path,
_read_cache,
_write_cache,
check_for_newer_version,
get_latest_version,
)
class TestCachePath:
    def test_cache_path_in_home_directory(self):
        """Cache file should be in fastmcp home directory."""
        path = _get_cache_path()
        assert path.name == "version_cache.json"
        assert "fastmcp" in str(path).lower()

    def test_cache_path_prerelease_suffix(self):
        """Prerelease cache uses different file."""
        path = _get_cache_path(include_prereleases=True)
        assert path.name == "version_cache_prerelease.json"
class TestReadCache:
    def test_read_cache_no_file(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
        """Reading non-existent cache returns None."""
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: tmp_path / "nonexistent.json",
        )
        assert _read_cache() == (None, 0)

    def test_read_cache_valid(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
        """Reading valid cache returns version and timestamp."""
        cache_file = tmp_path / "version_cache.json"
        cache_file.write_text(
            json.dumps({"latest_version": "2.5.0", "timestamp": 1000})
        )
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )
        assert _read_cache() == ("2.5.0", 1000)

    def test_read_cache_invalid_json(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ):
        """Reading invalid JSON returns None."""
        cache_file = tmp_path / "version_cache.json"
        cache_file.write_text("not valid json")
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )
        assert _read_cache() == (None, 0)
class TestWriteCache:
    def test_write_cache_creates_file(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ):
        """Writing cache creates the file (and parent dirs) with the data."""
        cache_file = tmp_path / "subdir" / "version_cache.json"
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )

        _write_cache("2.6.0")

        assert cache_file.exists()
        stored = json.loads(cache_file.read_text())
        assert stored["latest_version"] == "2.6.0"
        assert "timestamp" in stored
class TestFetchLatestVersion:
    @staticmethod
    def _pypi_response(payload: dict) -> MagicMock:
        """Build a fake httpx response whose .json() returns *payload*."""
        response = MagicMock()
        response.json.return_value = payload
        return response

    def test_fetch_success(self):
        """Successful fetch returns highest stable version."""
        response = self._pypi_response(
            {
                "releases": {
                    "2.5.0": [],
                    "2.4.0": [],
                    "2.6.0b1": [],  # prerelease should be skipped
                }
            }
        )
        with patch("httpx.get", return_value=response) as mock_get:
            assert _fetch_latest_version() == "2.5.0"
            mock_get.assert_called_once()

    def test_fetch_network_error(self):
        """Network error returns None."""
        with patch("httpx.get", side_effect=httpx.HTTPError("Network error")):
            assert _fetch_latest_version() is None

    def test_fetch_invalid_response(self):
        """Invalid response returns None."""
        response = self._pypi_response({"unexpected": "format"})
        with patch("httpx.get", return_value=response):
            assert _fetch_latest_version() is None

    def test_fetch_prereleases(self):
        """Fetching with prereleases returns highest version."""
        response = self._pypi_response(
            {
                "info": {"version": "2.5.0"},
                "releases": {
                    "2.5.0": [],
                    "2.6.0b1": [],
                    "2.6.0b2": [],
                    "2.4.0": [],
                },
            }
        )
        with patch("httpx.get", return_value=response):
            assert _fetch_latest_version(include_prereleases=True) == "2.6.0b2"

    def test_fetch_prereleases_stable_is_highest(self):
        """Prerelease mode still returns stable if it's highest."""
        response = self._pypi_response(
            {
                "info": {"version": "2.5.0"},
                "releases": {
                    "2.5.0": [],
                    "2.5.0b1": [],
                    "2.4.0": [],
                },
            }
        )
        with patch("httpx.get", return_value=response):
            assert _fetch_latest_version(include_prereleases=True) == "2.5.0"
class TestGetLatestVersion:
    def test_returns_cached_version_if_fresh(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ):
        """A fresh cache is returned without contacting PyPI."""
        cache_file = tmp_path / "version_cache.json"
        cache_file.write_text(
            json.dumps({"latest_version": "2.5.0", "timestamp": time.time()})
        )
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )
        with patch(
            "fastmcp.utilities.version_check._fetch_latest_version"
        ) as mock_fetch:
            assert get_latest_version() == "2.5.0"
            mock_fetch.assert_not_called()

    def test_fetches_if_cache_stale(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ):
        """A stale cache triggers a fetch from PyPI."""
        cache_file = tmp_path / "version_cache.json"
        expired = time.time() - CACHE_TTL_SECONDS - 100
        cache_file.write_text(
            json.dumps({"latest_version": "2.4.0", "timestamp": expired})
        )
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )
        with patch(
            "fastmcp.utilities.version_check._fetch_latest_version",
            return_value="2.5.0",
        ) as mock_fetch:
            assert get_latest_version() == "2.5.0"
            mock_fetch.assert_called_once()

    def test_returns_stale_cache_if_fetch_fails(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ):
        """A failed fetch falls back to the stale cached value."""
        cache_file = tmp_path / "version_cache.json"
        expired = time.time() - CACHE_TTL_SECONDS - 100
        cache_file.write_text(
            json.dumps({"latest_version": "2.4.0", "timestamp": expired})
        )
        monkeypatch.setattr(
            "fastmcp.utilities.version_check._get_cache_path",
            lambda include_prereleases=False: cache_file,
        )
        with patch(
            "fastmcp.utilities.version_check._fetch_latest_version", return_value=None
        ):
            assert get_latest_version() == "2.4.0"
class TestCheckForNewerVersion:
    @staticmethod
    def _configure(
        monkeypatch: pytest.MonkeyPatch, mode: str, current: str | None = None
    ) -> None:
        """Set the update-check mode and, optionally, the running version."""
        import fastmcp

        monkeypatch.setattr(fastmcp.settings, "check_for_updates", mode)
        if current is not None:
            monkeypatch.setattr(fastmcp, "__version__", current)

    def test_returns_none_if_disabled(self, monkeypatch: pytest.MonkeyPatch):
        """Returns None if check_for_updates is off."""
        self._configure(monkeypatch, "off")
        assert check_for_newer_version() is None

    def test_returns_none_if_current(self, monkeypatch: pytest.MonkeyPatch):
        """Returns None if current version is latest."""
        self._configure(monkeypatch, "stable", current="2.5.0")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version", return_value="2.5.0"
        ):
            assert check_for_newer_version() is None

    def test_returns_version_if_newer(self, monkeypatch: pytest.MonkeyPatch):
        """Returns new version if available."""
        self._configure(monkeypatch, "stable", current="2.4.0")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version", return_value="2.5.0"
        ):
            assert check_for_newer_version() == "2.5.0"

    def test_returns_none_if_older_available(self, monkeypatch: pytest.MonkeyPatch):
        """Returns None if pypi version is older than current (dev version)."""
        self._configure(monkeypatch, "stable", current="3.0.0.dev1")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version", return_value="2.5.0"
        ):
            assert check_for_newer_version() is None

    def test_handles_invalid_versions(self, monkeypatch: pytest.MonkeyPatch):
        """Handles invalid version strings gracefully."""
        self._configure(monkeypatch, "stable", current="invalid")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version",
            return_value="also-invalid",
        ):
            assert check_for_newer_version() is None

    def test_prerelease_setting(self, monkeypatch: pytest.MonkeyPatch):
        """Prerelease setting passes include_prereleases=True."""
        self._configure(monkeypatch, "prerelease", current="2.5.0")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version", return_value="2.6.0b1"
        ) as mock_get:
            assert check_for_newer_version() == "2.6.0b1"
            mock_get.assert_called_once_with(True)

    def test_stable_setting(self, monkeypatch: pytest.MonkeyPatch):
        """Stable setting passes include_prereleases=False."""
        self._configure(monkeypatch, "stable", current="2.4.0")
        with patch(
            "fastmcp.utilities.version_check.get_latest_version", return_value="2.5.0"
        ) as mock_get:
            assert check_for_newer_version() == "2.5.0"
            mock_get.assert_called_once_with(False)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/test_version_check.py",
"license": "Apache License 2.0",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/middleware/ping.py | """Ping middleware for keeping client connections alive."""
from typing import Any
import anyio
from .middleware import CallNext, Middleware, MiddlewareContext
class PingMiddleware(Middleware):
    """Middleware that keeps client connections alive with periodic pings.

    On the first message seen from each session, a background task is
    started that sends server-to-client pings at the configured interval
    until the session ends.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.middleware import PingMiddleware

        mcp = FastMCP("MyServer")
        mcp.add_middleware(PingMiddleware(interval_ms=5000))
        ```
    """

    def __init__(self, interval_ms: int = 30000):
        """Initialize ping middleware.

        Args:
            interval_ms: Interval between pings in milliseconds (default: 30000)

        Raises:
            ValueError: If interval_ms is not positive
        """
        if interval_ms <= 0:
            raise ValueError("interval_ms must be positive")
        self.interval_ms = interval_ms
        # Sessions (keyed by id()) that already have a ping task running.
        self._active_sessions: set[int] = set()
        self._lock = anyio.Lock()

    async def on_message(self, context: MiddlewareContext, call_next: CallNext) -> Any:
        """Start the ping task the first time a session sends a message."""
        fastmcp_ctx = context.fastmcp_context
        if fastmcp_ctx is None or fastmcp_ctx.request_context is None:
            return await call_next(context)

        session = fastmcp_ctx.session
        session_key = id(session)
        async with self._lock:
            if session_key not in self._active_sessions:
                # _subscription_task_group is added by MiddlewareServerSession
                task_group = session._subscription_task_group  # type: ignore[attr-defined]
                if task_group is not None:
                    self._active_sessions.add(session_key)
                    task_group.start_soon(self._ping_loop, session, session_key)
        return await call_next(context)

    async def _ping_loop(self, session: Any, session_id: int) -> None:
        """Ping the session at the configured cadence until cancelled."""
        try:
            while True:
                await anyio.sleep(self.interval_ms / 1000)
                await session.send_ping()
        finally:
            # Runs on cancellation (or send failure): forget the session so
            # a later message can start a fresh loop.
            self._active_sessions.discard(session_id)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/middleware/ping.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/server/middleware/test_ping.py | """Tests for ping middleware."""
from unittest.mock import AsyncMock, MagicMock
import anyio
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.middleware.ping import PingMiddleware
class TestPingMiddlewareInit:
    """Test PingMiddleware initialization."""

    def test_init_default(self):
        """Default: 30s interval and no tracked sessions."""
        middleware = PingMiddleware()
        assert middleware.interval_ms == 30000
        assert middleware._active_sessions == set()

    def test_init_custom(self):
        """A custom interval is stored as given."""
        assert PingMiddleware(interval_ms=5000).interval_ms == 5000

    def test_init_invalid_interval_zero(self):
        """Test that zero interval raises ValueError."""
        with pytest.raises(ValueError, match="interval_ms must be positive"):
            PingMiddleware(interval_ms=0)

    def test_init_invalid_interval_negative(self):
        """Test that negative interval raises ValueError."""
        with pytest.raises(ValueError, match="interval_ms must be positive"):
            PingMiddleware(interval_ms=-1000)
class TestPingMiddlewareOnMessage:
    """Test on_message hook behavior."""

    @staticmethod
    def _make_session() -> MagicMock:
        """Fake session whose task group records start_soon calls."""
        session = MagicMock()
        session._subscription_task_group = MagicMock()
        session._subscription_task_group.start_soon = MagicMock()
        return session

    @staticmethod
    def _make_context(session: MagicMock) -> MagicMock:
        """Fake middleware context bound to *session*."""
        context = MagicMock()
        context.fastmcp_context.session = session
        return context

    async def test_starts_ping_task_on_first_message(self):
        """The first message from a session spawns its ping task."""
        middleware = PingMiddleware(interval_ms=1000)
        session = self._make_session()
        context = self._make_context(session)
        call_next = AsyncMock(return_value="result")

        outcome = await middleware.on_message(context, call_next)

        assert outcome == "result"
        assert id(session) in middleware._active_sessions
        session._subscription_task_group.start_soon.assert_called_once()

    async def test_does_not_start_duplicate_task(self):
        """Repeat messages from one session never spawn extra tasks."""
        middleware = PingMiddleware(interval_ms=1000)
        session = self._make_session()
        context = self._make_context(session)
        call_next = AsyncMock(return_value="result")

        # Three messages from the same session.
        for _ in range(3):
            await middleware.on_message(context, call_next)

        # Should only start the task once.
        assert session._subscription_task_group.start_soon.call_count == 1

    async def test_starts_separate_task_per_session(self):
        """Each distinct session gets its own ping task."""
        middleware = PingMiddleware(interval_ms=1000)
        first = self._make_session()
        second = self._make_session()
        call_next = AsyncMock(return_value="result")

        await middleware.on_message(self._make_context(first), call_next)
        await middleware.on_message(self._make_context(second), call_next)

        first._subscription_task_group.start_soon.assert_called_once()
        second._subscription_task_group.start_soon.assert_called_once()
        assert len(middleware._active_sessions) == 2

    async def test_skips_task_when_no_task_group(self):
        """A session without a task group is passed through untouched."""
        middleware = PingMiddleware(interval_ms=1000)
        session = self._make_session()
        session._subscription_task_group = None
        context = self._make_context(session)
        call_next = AsyncMock(return_value="result")

        outcome = await middleware.on_message(context, call_next)

        assert outcome == "result"
        # Session should NOT be tracked if the task group is None.
        assert id(session) not in middleware._active_sessions

    async def test_skips_when_fastmcp_context_is_none(self):
        """No fastmcp_context: the middleware is a pure pass-through."""
        middleware = PingMiddleware(interval_ms=1000)
        context = MagicMock()
        context.fastmcp_context = None
        call_next = AsyncMock(return_value="result")

        outcome = await middleware.on_message(context, call_next)

        assert outcome == "result"
        assert len(middleware._active_sessions) == 0

    async def test_skips_when_request_context_is_none(self):
        """No request_context: the middleware is a pure pass-through."""
        middleware = PingMiddleware(interval_ms=1000)
        context = MagicMock()
        context.fastmcp_context = MagicMock()
        context.fastmcp_context.request_context = None
        call_next = AsyncMock(return_value="result")

        outcome = await middleware.on_message(context, call_next)

        assert outcome == "result"
        assert len(middleware._active_sessions) == 0
class TestPingLoop:
    """Test the ping loop behavior."""

    async def test_ping_loop_sends_pings_at_interval(self):
        """Pings are sent repeatedly at the configured interval."""
        middleware = PingMiddleware(interval_ms=50)
        session = MagicMock()
        session.send_ping = AsyncMock()
        session_id = id(session)
        middleware._active_sessions.add(session_id)

        # Let the loop run briefly, then cancel it.
        with anyio.move_on_after(0.35):
            await middleware._ping_loop(session, session_id)

        # 350ms at a 50ms cadence should produce at least two pings.
        assert session.send_ping.call_count >= 2

    async def test_ping_loop_cleans_up_on_cancellation(self):
        """Cancellation removes the session from the active set."""
        middleware = PingMiddleware(interval_ms=50)
        session = MagicMock()
        session.send_ping = AsyncMock()
        session_id = 12345
        middleware._active_sessions.add(session_id)

        with anyio.move_on_after(0.1):
            await middleware._ping_loop(session, session_id)

        assert session_id not in middleware._active_sessions
class TestPingMiddlewareIntegration:
    """Integration tests for PingMiddleware with real FastMCP server."""

    async def test_ping_middleware_registers_session(self):
        """The first request registers the session; repeats do not duplicate it."""
        mcp = FastMCP("PingTestServer")
        middleware = PingMiddleware(interval_ms=50)
        mcp.add_middleware(middleware)

        @mcp.tool
        def hello() -> str:
            return "Hello!"

        assert len(middleware._active_sessions) == 0

        async with Client(mcp) as client:
            result = await client.call_tool("hello")
            assert result.content[0].text == "Hello!"
            # The session is now tracked.
            assert len(middleware._active_sessions) == 1

            # A second request must not add a duplicate entry.
            await client.call_tool("hello")
            assert len(middleware._active_sessions) == 1

    async def test_ping_task_cancelled_on_disconnect(self):
        """Client disconnect cancels the ping task and clears the session."""
        mcp = FastMCP("PingTestServer")
        middleware = PingMiddleware(interval_ms=50)
        mcp.add_middleware(middleware)

        @mcp.tool
        def hello() -> str:
            return "Hello!"

        async with Client(mcp) as client:
            await client.call_tool("hello")
            assert len(middleware._active_sessions) == 1

        # Give the cancelled task a moment to run its cleanup.
        await anyio.sleep(0.01)
        assert len(middleware._active_sessions) == 0
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/middleware/test_ping.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/prompts/test_standalone_decorator.py | """Tests for the standalone @prompt decorator.
The @prompt decorator attaches metadata to functions without registering them
to a server. Functions can be added explicitly via server.add_prompt() or
discovered by FileSystemProvider.
"""
from typing import cast
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.prompts import prompt
from fastmcp.prompts.function_prompt import DecoratedPrompt, PromptMeta
class TestPromptDecorator:
"""Tests for the @prompt decorator."""
def test_prompt_without_parens(self):
"""@prompt without parentheses should attach metadata."""
@prompt
def analyze(topic: str) -> str:
return f"Analyze: {topic}"
decorated = cast(DecoratedPrompt, analyze)
assert callable(analyze)
assert hasattr(analyze, "__fastmcp__")
assert isinstance(decorated.__fastmcp__, PromptMeta)
assert decorated.__fastmcp__.name is None # Uses function name by default
def test_prompt_with_empty_parens(self):
"""@prompt() with empty parentheses should attach metadata."""
@prompt()
def analyze(topic: str) -> str:
return f"Analyze: {topic}"
decorated = cast(DecoratedPrompt, analyze)
assert callable(analyze)
assert hasattr(analyze, "__fastmcp__")
assert isinstance(decorated.__fastmcp__, PromptMeta)
def test_prompt_with_name_arg(self):
"""@prompt("name") with name as first arg should work."""
@prompt("custom-analyze")
def analyze(topic: str) -> str:
return f"Analyze: {topic}"
decorated = cast(DecoratedPrompt, analyze)
assert callable(analyze)
assert hasattr(analyze, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-analyze"
def test_prompt_with_name_kwarg(self):
"""@prompt(name="name") with keyword arg should work."""
@prompt(name="custom-analyze")
def analyze(topic: str) -> str:
return f"Analyze: {topic}"
decorated = cast(DecoratedPrompt, analyze)
assert callable(analyze)
assert hasattr(analyze, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-analyze"
def test_prompt_with_all_metadata(self):
"""@prompt with all metadata should store it all."""
@prompt(
name="custom-analyze",
title="Analysis Prompt",
description="Analyzes topics",
tags={"analysis", "demo"},
meta={"custom": "value"},
)
def analyze(topic: str) -> str:
return f"Analyze: {topic}"
decorated = cast(DecoratedPrompt, analyze)
assert callable(analyze)
assert hasattr(analyze, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-analyze"
assert decorated.__fastmcp__.title == "Analysis Prompt"
assert decorated.__fastmcp__.description == "Analyzes topics"
assert decorated.__fastmcp__.tags == {"analysis", "demo"}
assert decorated.__fastmcp__.meta == {"custom": "value"}
async def test_prompt_function_still_callable(self):
"""Decorated function should still be directly callable."""
@prompt
def analyze(topic: str) -> str:
"""Analyze a topic."""
return f"Please analyze: {topic}"
# The function is still callable even though it has metadata
result = cast(DecoratedPrompt, analyze)("Python")
assert result == "Please analyze: Python"
def test_prompt_rejects_classmethod_decorator(self):
"""@prompt should reject classmethod-decorated functions."""
with pytest.raises(TypeError, match="classmethod"):
class MyClass:
@prompt
@classmethod
def my_prompt(cls, topic: str) -> str:
return f"Analyze: {topic}"
def test_prompt_with_both_name_args_raises(self):
"""@prompt should raise if both positional and keyword name are given."""
with pytest.raises(TypeError, match="Cannot specify.*both.*argument.*keyword"):
@prompt("name1", name="name2") # type: ignore[call-overload]
def my_prompt() -> str:
return "hello"
async def test_prompt_added_to_server(self):
"""Prompt created by @prompt should work when added to a server."""
@prompt
def analyze(topic: str) -> str:
"""Analyze a topic."""
return f"Please analyze: {topic}"
mcp = FastMCP("Test")
mcp.add_prompt(analyze)
async with Client(mcp) as client:
prompts = await client.list_prompts()
assert any(p.name == "analyze" for p in prompts)
result = await client.get_prompt("analyze", {"topic": "FastMCP"})
assert "FastMCP" in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/prompts/test_standalone_decorator.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/resources/test_standalone_decorator.py | """Tests for the standalone @resource decorator.
The @resource decorator attaches metadata to functions without registering them
to a server. Functions can be added explicitly via server.add_resource() /
server.add_template() or discovered by FileSystemProvider.
"""
from typing import cast
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.resources import resource
from fastmcp.resources.function_resource import DecoratedResource, ResourceMeta
class TestResourceDecorator:
"""Tests for the @resource decorator."""
def test_resource_requires_uri(self):
"""@resource should require a URI argument."""
with pytest.raises(TypeError, match="requires a URI|was used incorrectly"):
@resource # type: ignore[arg-type]
def get_config() -> str:
return "{}"
def test_resource_with_uri(self):
"""@resource("uri") should attach metadata."""
@resource("config://app")
def get_config() -> dict:
return {"setting": "value"}
decorated = cast(DecoratedResource, get_config)
assert callable(get_config)
assert hasattr(get_config, "__fastmcp__")
assert isinstance(decorated.__fastmcp__, ResourceMeta)
assert decorated.__fastmcp__.uri == "config://app"
def test_resource_with_template_uri(self):
"""@resource with template URI should attach metadata."""
@resource("users://{user_id}/profile")
def get_profile(user_id: str) -> dict:
return {"id": user_id}
decorated = cast(DecoratedResource, get_profile)
assert callable(get_profile)
assert hasattr(get_profile, "__fastmcp__")
assert decorated.__fastmcp__.uri == "users://{user_id}/profile"
def test_resource_with_function_params_becomes_template(self):
"""@resource with function params should attach metadata."""
@resource("data://items/{category}")
def get_items(category: str, limit: int = 10) -> list:
return list(range(limit))
decorated = cast(DecoratedResource, get_items)
assert callable(get_items)
assert hasattr(get_items, "__fastmcp__")
assert decorated.__fastmcp__.uri == "data://items/{category}"
def test_resource_with_all_metadata(self):
"""@resource with all metadata should store it all."""
@resource(
"config://app",
name="app-config",
title="Application Config",
description="Gets app configuration",
mime_type="application/json",
tags={"config"},
meta={"custom": "value"},
)
def get_config() -> dict:
return {"setting": "value"}
decorated = cast(DecoratedResource, get_config)
assert callable(get_config)
assert hasattr(get_config, "__fastmcp__")
assert decorated.__fastmcp__.uri == "config://app"
assert decorated.__fastmcp__.name == "app-config"
assert decorated.__fastmcp__.title == "Application Config"
assert decorated.__fastmcp__.description == "Gets app configuration"
assert decorated.__fastmcp__.mime_type == "application/json"
assert decorated.__fastmcp__.tags == {"config"}
assert decorated.__fastmcp__.meta == {"custom": "value"}
async def test_resource_function_still_callable(self):
"""Decorated function should still be directly callable."""
@resource("config://app")
def get_config() -> dict:
"""Get config."""
return {"setting": "value"}
# The function is still callable even though it has metadata
result = cast(DecoratedResource, get_config)()
assert result == {"setting": "value"}
def test_resource_rejects_classmethod_decorator(self):
"""@resource should reject classmethod-decorated functions."""
# Note: This now happens when added to server, not at decoration time
@resource("config://app")
def standalone() -> str:
return "{}"
# Should not raise at decoration
assert callable(standalone)
async def test_resource_added_to_server(self):
"""Resource created by @resource should work when added to a server."""
@resource("config://app")
def get_config() -> str:
"""Get config."""
return '{"version": "1.0"}'
assert callable(get_config)
mcp = FastMCP("Test")
mcp.add_resource(get_config)
async with Client(mcp) as client:
resources = await client.list_resources()
assert any(str(r.uri) == "config://app" for r in resources)
result = await client.read_resource("config://app")
assert "1.0" in str(result)
async def test_template_added_to_server(self):
"""Template created by @resource should work when added to a server."""
@resource("users://{user_id}/profile")
def get_profile(user_id: str) -> str:
"""Get user profile."""
return f'{{"id": "{user_id}"}}'
assert callable(get_profile)
mcp = FastMCP("Test")
# add_resource handles both resources and templates based on metadata
mcp.add_resource(get_profile)
async with Client(mcp) as client:
templates = await client.list_resource_templates()
assert any(t.uriTemplate == "users://{user_id}/profile" for t in templates)
result = await client.read_resource("users://123/profile")
assert "123" in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/resources/test_standalone_decorator.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/tools/test_standalone_decorator.py | """Tests for the standalone @tool decorator.
The @tool decorator attaches metadata to functions without registering them
to a server. Functions can be added explicitly via server.add_tool() or
discovered by FileSystemProvider.
"""
from typing import cast
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.tools import tool
from fastmcp.tools.function_tool import DecoratedTool, ToolMeta
class TestToolDecorator:
"""Tests for the @tool decorator."""
def test_tool_without_parens(self):
"""@tool without parentheses should attach metadata."""
@tool
def greet(name: str) -> str:
return f"Hello, {name}!"
decorated = cast(DecoratedTool, greet)
assert callable(greet)
assert hasattr(greet, "__fastmcp__")
assert isinstance(decorated.__fastmcp__, ToolMeta)
assert decorated.__fastmcp__.name is None # Uses function name by default
def test_tool_with_empty_parens(self):
"""@tool() with empty parentheses should attach metadata."""
@tool()
def greet(name: str) -> str:
return f"Hello, {name}!"
decorated = cast(DecoratedTool, greet)
assert callable(greet)
assert hasattr(greet, "__fastmcp__")
assert isinstance(decorated.__fastmcp__, ToolMeta)
def test_tool_with_name_arg(self):
"""@tool("name") with name as first arg should work."""
@tool("custom-greet")
def greet(name: str) -> str:
return f"Hello, {name}!"
decorated = cast(DecoratedTool, greet)
assert callable(greet)
assert hasattr(greet, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-greet"
def test_tool_with_name_kwarg(self):
"""@tool(name="name") with keyword arg should work."""
@tool(name="custom-greet")
def greet(name: str) -> str:
return f"Hello, {name}!"
decorated = cast(DecoratedTool, greet)
assert callable(greet)
assert hasattr(greet, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-greet"
def test_tool_with_all_metadata(self):
"""@tool with all metadata should store it all."""
@tool(
name="custom-greet",
title="Greeting Tool",
description="Greets people",
tags={"greeting", "demo"},
meta={"custom": "value"},
)
def greet(name: str) -> str:
return f"Hello, {name}!"
decorated = cast(DecoratedTool, greet)
assert callable(greet)
assert hasattr(greet, "__fastmcp__")
assert decorated.__fastmcp__.name == "custom-greet"
assert decorated.__fastmcp__.title == "Greeting Tool"
assert decorated.__fastmcp__.description == "Greets people"
assert decorated.__fastmcp__.tags == {"greeting", "demo"}
assert decorated.__fastmcp__.meta == {"custom": "value"}
async def test_tool_function_still_callable(self):
"""Decorated function should still be directly callable."""
@tool
def greet(name: str) -> str:
"""Greet someone."""
return f"Hello, {name}!"
# The function is still callable even though it has metadata
result = cast(DecoratedTool, greet)("World")
assert result == "Hello, World!"
def test_tool_rejects_classmethod_decorator(self):
"""@tool should reject classmethod-decorated functions."""
with pytest.raises(TypeError, match="classmethod"):
class MyClass:
@tool
@classmethod
def my_method(cls) -> str:
return "hello"
def test_tool_with_both_name_args_raises(self):
"""@tool should raise if both positional and keyword name are given."""
with pytest.raises(TypeError, match="Cannot specify.*both.*argument.*keyword"):
@tool("name1", name="name2") # type: ignore[call-overload]
def my_tool() -> str:
return "hello"
async def test_tool_added_to_server(self):
"""Tool created by @tool should work when added to a server."""
@tool
def greet(name: str) -> str:
"""Greet someone."""
return f"Hello, {name}!"
mcp = FastMCP("Test")
mcp.add_tool(greet)
async with Client(mcp) as client:
tools = await client.list_tools()
assert any(t.name == "greet" for t in tools)
result = await client.call_tool("greet", {"name": "World"})
assert result.data == "Hello, World!"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/tools/test_standalone_decorator.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/lifespan.py | """Composable lifespans for FastMCP servers.
This module provides a `@lifespan` decorator for creating composable server lifespans
that can be combined using the `|` operator.
Example:
```python
from fastmcp import FastMCP
from fastmcp.server.lifespan import lifespan
@lifespan
async def db_lifespan(server):
conn = await connect_db()
yield {"db": conn}
await conn.close()
@lifespan
async def cache_lifespan(server):
cache = await connect_cache()
yield {"cache": cache}
await cache.close()
mcp = FastMCP("server", lifespan=db_lifespan | cache_lifespan)
```
To compose with existing `@asynccontextmanager` lifespans, wrap them explicitly:
```python
from contextlib import asynccontextmanager
from fastmcp.server.lifespan import lifespan, ContextManagerLifespan
@asynccontextmanager
async def legacy_lifespan(server):
yield {"legacy": True}
@lifespan
async def new_lifespan(server):
yield {"new": True}
# Wrap the legacy lifespan explicitly
combined = ContextManagerLifespan(legacy_lifespan) | new_lifespan
```
"""
from __future__ import annotations
from collections.abc import AsyncIterator, Callable
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from fastmcp.server.server import FastMCP
LifespanFn = Callable[["FastMCP[Any]"], AsyncIterator[dict[str, Any] | None]]
LifespanContextManagerFn = Callable[
["FastMCP[Any]"], AbstractAsyncContextManager[dict[str, Any] | None]
]
class Lifespan:
"""Composable lifespan wrapper.
Wraps an async generator function and enables composition via the `|` operator.
The wrapped function should yield a dict that becomes part of the lifespan context.
"""
def __init__(self, fn: LifespanFn) -> None:
"""Initialize a Lifespan wrapper.
Args:
fn: An async generator function that takes a FastMCP server and yields
a dict for the lifespan context.
"""
self._fn = fn
@asynccontextmanager
async def __call__(self, server: FastMCP[Any]) -> AsyncIterator[dict[str, Any]]:
"""Execute the lifespan as an async context manager.
Args:
server: The FastMCP server instance.
Yields:
The lifespan context dict.
"""
async with asynccontextmanager(self._fn)(server) as result:
yield result if result is not None else {}
def __or__(self, other: Lifespan) -> ComposedLifespan:
"""Compose with another lifespan using the | operator.
Args:
other: Another Lifespan instance.
Returns:
A ComposedLifespan that runs both lifespans.
Raises:
TypeError: If other is not a Lifespan instance.
"""
if not isinstance(other, Lifespan):
raise TypeError(
f"Cannot compose Lifespan with {type(other).__name__}. "
f"Use @lifespan decorator or wrap with ContextManagerLifespan()."
)
return ComposedLifespan(self, other)
class ContextManagerLifespan(Lifespan):
"""Lifespan wrapper for already-wrapped context manager functions.
Use this for functions already decorated with @asynccontextmanager.
"""
_fn: LifespanContextManagerFn # Override type for this subclass
def __init__(self, fn: LifespanContextManagerFn) -> None:
"""Initialize with a context manager factory function."""
self._fn = fn
@asynccontextmanager
async def __call__(self, server: FastMCP[Any]) -> AsyncIterator[dict[str, Any]]:
"""Execute the lifespan as an async context manager.
Args:
server: The FastMCP server instance.
Yields:
The lifespan context dict.
"""
# self._fn is already a context manager factory, just call it
async with self._fn(server) as result:
yield result if result is not None else {}
class ComposedLifespan(Lifespan):
"""Two lifespans composed together.
Enters the left lifespan first, then the right. Exits in reverse order.
Results are shallow-merged into a single dict.
"""
def __init__(self, left: Lifespan, right: Lifespan) -> None:
"""Initialize a composed lifespan.
Args:
left: The first lifespan to enter.
right: The second lifespan to enter.
"""
# Don't call super().__init__ since we override __call__
self._left = left
self._right = right
@asynccontextmanager
async def __call__(self, server: FastMCP[Any]) -> AsyncIterator[dict[str, Any]]:
"""Execute both lifespans, merging their results.
Args:
server: The FastMCP server instance.
Yields:
The merged lifespan context dict from both lifespans.
"""
async with (
self._left(server) as left_result,
self._right(server) as right_result,
):
yield {**left_result, **right_result}
def lifespan(fn: LifespanFn) -> Lifespan:
"""Decorator to create a composable lifespan.
Use this decorator on an async generator function to make it composable
with other lifespans using the `|` operator.
Example:
```python
@lifespan
async def my_lifespan(server):
# Setup
resource = await create_resource()
yield {"resource": resource}
# Teardown
await resource.close()
mcp = FastMCP("server", lifespan=my_lifespan | other_lifespan)
```
Args:
fn: An async generator function that takes a FastMCP server and yields
a dict for the lifespan context.
Returns:
A composable Lifespan wrapper.
"""
return Lifespan(fn)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/lifespan.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:src/fastmcp/utilities/lifespan.py | """Lifespan utilities for combining async context manager lifespans."""
from __future__ import annotations
from collections.abc import AsyncIterator, Callable, Mapping
from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager
from typing import Any, TypeVar
AppT = TypeVar("AppT")
def combine_lifespans(
*lifespans: Callable[[AppT], AbstractAsyncContextManager[Mapping[str, Any] | None]],
) -> Callable[[AppT], AbstractAsyncContextManager[dict[str, Any]]]:
"""Combine multiple lifespans into a single lifespan.
Useful when mounting FastMCP into FastAPI and you need to run
both your app's lifespan and the MCP server's lifespan.
Works with both FastAPI-style lifespans (yield None) and FastMCP-style
lifespans (yield dict). Results are merged; later lifespans override
earlier ones on key conflicts.
Lifespans are entered in order and exited in reverse order (LIFO).
Example:
```python
from fastmcp import FastMCP
from fastmcp.utilities.lifespan import combine_lifespans
from fastapi import FastAPI
mcp = FastMCP("Tools")
mcp_app = mcp.http_app()
app = FastAPI(lifespan=combine_lifespans(app_lifespan, mcp_app.lifespan))
app.mount("/mcp", mcp_app) # MCP endpoint at /mcp
```
Args:
*lifespans: Lifespan context manager factories to combine.
Returns:
A combined lifespan context manager factory.
"""
@asynccontextmanager
async def combined(app: AppT) -> AsyncIterator[dict[str, Any]]:
merged: dict[str, Any] = {}
async with AsyncExitStack() as stack:
for ls in lifespans:
result = await stack.enter_async_context(ls(app))
if result is not None:
merged.update(result)
yield merged
return combined
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/lifespan.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:examples/filesystem-provider/mcp/prompts/assistant.py | """Assistant prompts."""
from fastmcp.prompts import prompt
@prompt
def code_review(code: str, language: str = "python") -> str:
"""Generate a code review prompt.
Args:
code: The code to review.
language: Programming language (default: python).
"""
return f"""Please review this {language} code:
```{language}
{code}
```
Focus on:
- Code quality and readability
- Potential bugs or issues
- Performance considerations
- Best practices"""
@prompt(
name="explain-concept",
description="Generate a prompt to explain a technical concept.",
tags={"education", "explanation"},
)
def explain(topic: str, audience: str = "developer") -> str:
"""Generate an explanation prompt.
Args:
topic: The concept to explain.
audience: Target audience level.
"""
return f"Explain {topic} to a {audience}. Use clear examples and analogies."
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/filesystem-provider/mcp/prompts/assistant.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:examples/filesystem-provider/mcp/resources/config.py | """Configuration resources - static and templated."""
import json
from fastmcp.resources import resource
# Static resource - no parameters in URI
@resource("config://app")
def get_app_config() -> str:
"""Get application configuration."""
return json.dumps(
{
"name": "FilesystemDemo",
"version": "1.0.0",
"features": ["tools", "resources", "prompts"],
},
indent=2,
)
# Resource template - {env} is a parameter
@resource("config://env/{env}")
def get_env_config(env: str) -> str:
"""Get environment-specific configuration.
Args:
env: Environment name (dev, staging, prod).
"""
configs = {
"dev": {"debug": True, "log_level": "DEBUG", "database": "localhost"},
"staging": {"debug": True, "log_level": "INFO", "database": "staging-db"},
"prod": {"debug": False, "log_level": "WARNING", "database": "prod-db"},
}
config = configs.get(env, {"error": f"Unknown environment: {env}"})
return json.dumps(config, indent=2)
# Resource with custom metadata
@resource(
"config://features",
name="feature-flags",
mime_type="application/json",
tags={"config", "features"},
)
def get_feature_flags() -> str:
"""Get feature flags configuration."""
return json.dumps(
{
"dark_mode": True,
"beta_features": False,
"max_upload_size_mb": 100,
},
indent=2,
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/filesystem-provider/mcp/resources/config.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/filesystem-provider/mcp/tools/calculator.py | """Math tools with custom metadata."""
from fastmcp.tools import tool
@tool(
name="add-numbers", # Custom name (default would be "add")
description="Add two numbers together.",
tags={"math", "arithmetic"},
)
def add(a: float, b: float) -> float:
"""Add two numbers."""
return a + b
@tool(tags={"math", "arithmetic"})
def multiply(a: float, b: float) -> float:
"""Multiply two numbers.
Args:
a: First number.
b: Second number.
"""
return a * b
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/filesystem-provider/mcp/tools/calculator.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/filesystem-provider/mcp/tools/greeting.py | """Greeting tools - multiple tools in one file."""
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
"""Greet someone by name.
Args:
name: The person's name.
"""
return f"Hello, {name}!"
@tool
def farewell(name: str) -> str:
"""Say goodbye to someone.
Args:
name: The person's name.
"""
return f"Goodbye, {name}!"
# Helper functions without decorators are ignored
def _format_message(msg: str) -> str:
return msg.strip().capitalize()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/filesystem-provider/mcp/tools/greeting.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:examples/filesystem-provider/server.py | """Filesystem-based MCP server using FileSystemProvider.
This example demonstrates how to use FileSystemProvider to automatically
discover and register tools, resources, and prompts from the filesystem.
Run:
fastmcp run examples/filesystem-provider/server.py
Inspect:
fastmcp inspect examples/filesystem-provider/server.py
Dev mode (re-scan files on every request):
Change reload=True below, then modify files while the server runs.
"""
from pathlib import Path
from fastmcp import FastMCP
from fastmcp.server.providers import FileSystemProvider
# The provider scans all .py files in the directory recursively.
# Functions decorated with @tool, @resource, or @prompt are registered.
# Directory structure is purely organizational - decorators determine type.
provider = FileSystemProvider(
root=Path(__file__).parent / "mcp",
reload=True, # Set True for dev mode (re-scan on every request)
)
mcp = FastMCP("FilesystemDemo", providers=[provider])
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/filesystem-provider/server.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:tests/fs/test_discovery.py | """Tests for filesystem discovery module."""
from pathlib import Path
from fastmcp.resources.template import FunctionResourceTemplate
from fastmcp.server.providers.filesystem_discovery import (
discover_and_import,
discover_files,
extract_components,
import_module_from_file,
)
from fastmcp.tools import FunctionTool
class TestDiscoverFiles:
"""Tests for discover_files function."""
def test_discover_files_empty_dir(self, tmp_path: Path):
"""Should return empty list for empty directory."""
files = discover_files(tmp_path)
assert files == []
def test_discover_files_nonexistent_dir(self, tmp_path: Path):
"""Should return empty list for nonexistent directory."""
nonexistent = tmp_path / "does_not_exist"
files = discover_files(nonexistent)
assert files == []
def test_discover_files_single_file(self, tmp_path: Path):
"""Should find a single Python file."""
py_file = tmp_path / "test.py"
py_file.write_text("# test")
files = discover_files(tmp_path)
assert files == [py_file]
def test_discover_files_skips_init(self, tmp_path: Path):
"""Should skip __init__.py files."""
init_file = tmp_path / "__init__.py"
init_file.write_text("# init")
py_file = tmp_path / "test.py"
py_file.write_text("# test")
files = discover_files(tmp_path)
assert files == [py_file]
def test_discover_files_recursive(self, tmp_path: Path):
"""Should find files in subdirectories."""
subdir = tmp_path / "subdir"
subdir.mkdir()
file1 = tmp_path / "a.py"
file2 = subdir / "b.py"
file1.write_text("# a")
file2.write_text("# b")
files = discover_files(tmp_path)
assert sorted(files) == sorted([file1, file2])
def test_discover_files_skips_pycache(self, tmp_path: Path):
"""Should skip __pycache__ directories."""
pycache = tmp_path / "__pycache__"
pycache.mkdir()
cache_file = pycache / "test.py"
cache_file.write_text("# cache")
py_file = tmp_path / "test.py"
py_file.write_text("# test")
files = discover_files(tmp_path)
assert files == [py_file]
def test_discover_files_sorted(self, tmp_path: Path):
"""Files should be returned in sorted order."""
(tmp_path / "z.py").write_text("# z")
(tmp_path / "a.py").write_text("# a")
(tmp_path / "m.py").write_text("# m")
files = discover_files(tmp_path)
names = [f.name for f in files]
assert names == ["a.py", "m.py", "z.py"]
class TestImportModuleFromFile:
"""Tests for import_module_from_file function."""
def test_import_simple_module(self, tmp_path: Path):
"""Should import a simple module."""
py_file = tmp_path / "simple.py"
py_file.write_text("VALUE = 42")
module = import_module_from_file(py_file)
assert module.VALUE == 42
def test_import_module_with_function(self, tmp_path: Path):
"""Should import a module with functions."""
py_file = tmp_path / "funcs.py"
py_file.write_text(
"""\
def greet(name):
return f"Hello, {name}!"
"""
)
module = import_module_from_file(py_file)
assert module.greet("World") == "Hello, World!"
def test_import_module_with_imports(self, tmp_path: Path):
"""Should handle modules with standard library imports."""
py_file = tmp_path / "with_imports.py"
py_file.write_text(
"""\
import os
import sys
def get_cwd():
return os.getcwd()
"""
)
module = import_module_from_file(py_file)
assert callable(module.get_cwd)
def test_import_as_package_with_init(self, tmp_path: Path):
"""Should import as package when __init__.py exists."""
# Create package structure (use unique name to avoid module caching)
pkg = tmp_path / "testpkg_init"
pkg.mkdir()
(pkg / "__init__.py").write_text("PKG_VAR = 'package'")
module_file = pkg / "module.py"
module_file.write_text("MODULE_VAR = 'module'")
module = import_module_from_file(module_file)
assert module.MODULE_VAR == "module"
def test_import_with_relative_import(self, tmp_path: Path):
"""Should support relative imports when in a package."""
# Create package with relative import (use unique name to avoid module caching)
pkg = tmp_path / "testpkg_relative"
pkg.mkdir()
(pkg / "__init__.py").write_text("")
(pkg / "helper.py").write_text("HELPER_VALUE = 123")
(pkg / "main.py").write_text(
"""\
from .helper import HELPER_VALUE
MAIN_VALUE = HELPER_VALUE * 2
"""
)
module = import_module_from_file(pkg / "main.py")
assert module.MAIN_VALUE == 246
def test_import_package_module_reload(self, tmp_path: Path):
"""Re-importing a package module should return updated content."""
# Create package (use unique name to avoid conflicts)
pkg = tmp_path / "testpkg_reload"
pkg.mkdir()
(pkg / "__init__.py").write_text("")
module_file = pkg / "reloadable.py"
module_file.write_text("VALUE = 'original'")
# First import
module = import_module_from_file(module_file)
assert module.VALUE == "original"
# Modify the file
module_file.write_text("VALUE = 'updated'")
# Re-import should see the updated value
module = import_module_from_file(module_file)
assert module.VALUE == "updated"
class TestExtractComponents:
    """Tests for extract_components function.

    Each test writes a throwaway module to tmp_path, imports it with
    import_module_from_file, and asserts on what extract_components finds.
    """

    def test_extract_no_components(self, tmp_path: Path):
        """Should return empty list for module with no components."""
        py_file = tmp_path / "plain.py"
        py_file.write_text(
            """\
def plain_function():
    pass
SOME_VAR = 42
"""
        )
        module = import_module_from_file(py_file)
        components = extract_components(module)
        # Plain functions/constants are not FastMCP components
        assert components == []

    def test_extract_tool_component(self, tmp_path: Path):
        """Should extract Tool objects."""
        py_file = tmp_path / "tools.py"
        py_file.write_text(
            """\
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
    return f"Hello, {name}!"
"""
        )
        module = import_module_from_file(py_file)
        components = extract_components(module)
        assert len(components) == 1
        component = components[0]
        # @tool replaces the function with a FunctionTool instance
        assert isinstance(component, FunctionTool)
        assert component.name == "greet"

    def test_extract_multiple_components(self, tmp_path: Path):
        """Should extract multiple component types."""
        py_file = tmp_path / "multi.py"
        py_file.write_text(
            """\
from fastmcp.tools import tool
from fastmcp.resources import resource
from fastmcp.prompts import prompt
@tool
def greet(name: str) -> str:
    return f"Hello, {name}!"
@resource("config://app")
def get_config() -> dict:
    return {}
@prompt
def analyze(topic: str) -> str:
    return f"Analyze: {topic}"
"""
        )
        module = import_module_from_file(py_file)
        components = extract_components(module)
        assert len(components) == 3
        # One component per decorator kind should be discovered
        types = {type(c).__name__ for c in components}
        assert types == {"FunctionTool", "FunctionResource", "FunctionPrompt"}

    def test_extract_skips_private_components(self, tmp_path: Path):
        """Should skip private components (those starting with _)."""
        py_file = tmp_path / "private.py"
        py_file.write_text(
            """\
from fastmcp.tools import tool
@tool
def public_tool() -> str:
    return "public"
# The module attribute starts with _, so it's skipped during discovery
@tool("private_tool_name")
def _private_tool() -> str:
    return "private"
"""
        )
        module = import_module_from_file(py_file)
        components = extract_components(module)
        # Only public_tool should be found (_private_tool starts with _, so skipped
        # even though its registered tool name has no underscore)
        assert len(components) == 1
        component = components[0]
        assert component.name == "public_tool"

    def test_extract_resource_template(self, tmp_path: Path):
        """Should extract ResourceTemplate objects."""
        py_file = tmp_path / "templates.py"
        py_file.write_text(
            """\
from fastmcp.resources import resource
@resource("users://{user_id}/profile")
def get_profile(user_id: str) -> dict:
    return {"id": user_id}
"""
        )
        module = import_module_from_file(py_file)
        components = extract_components(module)
        assert len(components) == 1
        component = components[0]
        # A parameterized URI yields a template, not a plain resource
        assert isinstance(component, FunctionResourceTemplate)
        assert component.uri_template == "users://{user_id}/profile"
class TestDiscoverAndImport:
    """Tests for discover_and_import function.

    discover_and_import walks a directory tree, imports each .py file, and
    returns a result with (path, component) pairs plus a map of files that
    failed to import.
    """

    def test_discover_and_import_empty(self, tmp_path: Path):
        """Should return empty result for empty directory."""
        result = discover_and_import(tmp_path)
        assert result.components == []
        assert result.failed_files == {}

    def test_discover_and_import_with_tools(self, tmp_path: Path):
        """Should discover and import tools."""
        tools_dir = tmp_path / "tools"
        tools_dir.mkdir()
        (tools_dir / "greet.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
    return f"Hello, {name}!"
"""
        )
        result = discover_and_import(tmp_path)
        assert len(result.components) == 1
        # Each entry pairs the originating file path with the component
        file_path, component = result.components[0]
        assert file_path.name == "greet.py"
        assert isinstance(component, FunctionTool)
        assert component.name == "greet"

    def test_discover_and_import_skips_bad_imports(self, tmp_path: Path):
        """Should skip files that fail to import and track them."""
        (tmp_path / "good.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def good_tool() -> str:
    return "good"
"""
        )
        (tmp_path / "bad.py").write_text(
            """\
import nonexistent_module_xyz123
def bad_function():
    pass
"""
        )
        result = discover_and_import(tmp_path)
        # Only good.py should be imported
        assert len(result.components) == 1
        _, component = result.components[0]
        assert component.name == "good_tool"
        # bad.py should be in failed_files, mapped to its import error text
        assert len(result.failed_files) == 1
        failed_path = tmp_path / "bad.py"
        assert failed_path in result.failed_files
        assert "nonexistent_module_xyz123" in result.failed_files[failed_path]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/fs/test_discovery.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/fs/test_provider.py | """Tests for FileSystemProvider."""
import time
from pathlib import Path
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.providers import FileSystemProvider
class TestFileSystemProvider:
    """Tests for FileSystemProvider.

    These tests assert on the provider's private ``_components`` cache since
    there is no public count API; they verify which decorated functions in a
    directory tree get registered at construction time.
    """

    def test_provider_empty_directory(self, tmp_path: Path):
        """Provider should work with empty directory."""
        provider = FileSystemProvider(tmp_path)
        assert repr(provider).startswith("FileSystemProvider")

    def test_provider_discovers_tools(self, tmp_path: Path):
        """Provider should discover @tool decorated functions."""
        tools_dir = tmp_path / "tools"
        tools_dir.mkdir()
        (tools_dir / "greet.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
    '''Greet someone by name.'''
    return f"Hello, {name}!"
"""
        )
        provider = FileSystemProvider(tmp_path)
        # Check tool was registered (inspects private cache)
        assert len(provider._components) == 1

    def test_provider_discovers_resources(self, tmp_path: Path):
        """Provider should discover @resource decorated functions."""
        (tmp_path / "config.py").write_text(
            """\
from fastmcp.resources import resource
@resource("config://app")
def get_config() -> dict:
    '''Get app config.'''
    return {"setting": "value"}
"""
        )
        provider = FileSystemProvider(tmp_path)
        assert len(provider._components) == 1

    def test_provider_discovers_resource_templates(self, tmp_path: Path):
        """Provider should discover resource templates."""
        (tmp_path / "users.py").write_text(
            """\
from fastmcp.resources import resource
@resource("users://{user_id}/profile")
def get_profile(user_id: str) -> dict:
    '''Get user profile.'''
    return {"id": user_id}
"""
        )
        provider = FileSystemProvider(tmp_path)
        assert len(provider._components) == 1

    def test_provider_discovers_prompts(self, tmp_path: Path):
        """Provider should discover @prompt decorated functions."""
        (tmp_path / "analyze.py").write_text(
            """\
from fastmcp.prompts import prompt
@prompt
def analyze(topic: str) -> list:
    '''Analyze a topic.'''
    return [{"role": "user", "content": f"Analyze: {topic}"}]
"""
        )
        provider = FileSystemProvider(tmp_path)
        assert len(provider._components) == 1

    def test_provider_discovers_multiple_in_one_file(self, tmp_path: Path):
        """Provider should discover multiple components in one file."""
        (tmp_path / "multi.py").write_text(
            """\
from fastmcp.tools import tool
from fastmcp.resources import resource
@tool
def tool1() -> str:
    return "tool1"
@tool
def tool2() -> str:
    return "tool2"
@resource("config://app")
def get_config() -> dict:
    return {}
"""
        )
        provider = FileSystemProvider(tmp_path)
        # Two tools + one resource
        assert len(provider._components) == 3

    def test_provider_skips_undecorated_files(self, tmp_path: Path):
        """Provider should skip files with no decorated functions."""
        (tmp_path / "utils.py").write_text(
            """\
def helper_function():
    return "helper"
SOME_CONSTANT = 42
"""
        )
        (tmp_path / "tool.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def my_tool() -> str:
    return "tool"
"""
        )
        provider = FileSystemProvider(tmp_path)
        # Only the tool should be registered; utils.py contributes nothing
        assert len(provider._components) == 1
class TestFileSystemProviderReloadMode:
    """Tests for FileSystemProvider reload mode.

    reload=False caches components once at construction; reload=True rescans
    the directory on each ``_ensure_loaded`` call. The warning tests use
    capsys to observe stderr output and deliberate mtime bumps (time.sleep)
    so the provider's change detection sees the file as modified.
    """

    def test_reload_false_caches_at_init(self, tmp_path: Path):
        """With reload=False, components are cached at init."""
        (tmp_path / "tool.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def original() -> str:
    return "original"
"""
        )
        provider = FileSystemProvider(tmp_path, reload=False)
        assert len(provider._components) == 1
        # Add another file - should NOT be picked up
        (tmp_path / "tool2.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def added() -> str:
    return "added"
"""
        )
        # Still only one component
        assert len(provider._components) == 1

    async def test_reload_true_rescans(self, tmp_path: Path):
        """With reload=True, components are rescanned on each request."""
        (tmp_path / "tool.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def original() -> str:
    return "original"
"""
        )
        provider = FileSystemProvider(tmp_path, reload=True)
        # Always loaded once at init (to catch errors early)
        assert provider._loaded
        assert len(provider._components) == 1
        # Add another file - should be picked up on next _ensure_loaded
        (tmp_path / "tool2.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def added() -> str:
    return "added"
"""
        )
        # With reload=True, _ensure_loaded re-scans
        await provider._ensure_loaded()
        assert len(provider._components) == 2

    async def test_warning_deduplication_same_file(self, tmp_path: Path, capsys):
        """Warnings for the same broken file should not repeat."""
        bad_file = tmp_path / "bad.py"
        bad_file.write_text("1/0 # division by zero")
        provider = FileSystemProvider(tmp_path, reload=True)
        # First load - should warn
        captured = capsys.readouterr()
        # Check for warning indicator (rich may truncate long paths)
        assert "WARNING" in captured.err and "Failed to import" in captured.err
        # Second load (same file, unchanged) - should NOT warn again
        await provider._ensure_loaded()
        captured = capsys.readouterr()
        assert "Failed to import" not in captured.err

    async def test_warning_on_file_change(self, tmp_path: Path, capsys):
        """Warnings should reappear when a broken file changes."""
        bad_file = tmp_path / "bad.py"
        bad_file.write_text("1/0 # division by zero")
        provider = FileSystemProvider(tmp_path, reload=True)
        # First load - should warn
        captured = capsys.readouterr()
        # Check for warning indicator (rich may truncate long paths)
        assert "WARNING" in captured.err and "Failed to import" in captured.err
        # Modify the file (different error) - need to ensure mtime changes
        time.sleep(0.01)  # Ensure mtime differs
        bad_file.write_text("syntax error here !!!")
        # Next load - should warn again (file changed)
        await provider._ensure_loaded()
        captured = capsys.readouterr()
        # Check for warning indicator (rich may truncate long paths)
        assert "WARNING" in captured.err and "Failed to import" in captured.err

    async def test_warning_cleared_when_fixed(self, tmp_path: Path, capsys):
        """Warnings should clear when a file is fixed, and reappear if broken again."""
        bad_file = tmp_path / "tool.py"
        bad_file.write_text("1/0 # broken")
        provider = FileSystemProvider(tmp_path, reload=True)
        # First load - should warn
        captured = capsys.readouterr()
        # Check for warning indicator (rich may truncate long paths)
        assert "WARNING" in captured.err and "Failed to import" in captured.err
        # Fix the file
        time.sleep(0.01)
        bad_file.write_text(
            """\
from fastmcp.tools import tool
@tool
def my_tool() -> str:
    return "fixed"
"""
        )
        # Load again - should NOT warn, file is fixed
        await provider._ensure_loaded()
        captured = capsys.readouterr()
        assert "Failed to import" not in captured.err
        assert len(provider._components) == 1
        # Break it again
        time.sleep(0.01)
        bad_file.write_text("1/0 # broken again")
        # Should warn again
        await provider._ensure_loaded()
        captured = capsys.readouterr()
        # Check for warning indicator (rich may truncate long paths)
        assert "WARNING" in captured.err and "Failed to import" in captured.err
class TestFileSystemProviderIntegration:
    """Integration tests with FastMCP server.

    These tests mount a FileSystemProvider on a FastMCP server and exercise
    the full round trip through an in-process Client: list/call tools,
    read resources and templates, and get prompts.
    """

    async def test_provider_with_fastmcp_server(self, tmp_path: Path):
        """FileSystemProvider should work with FastMCP server."""
        (tmp_path / "greet.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
    '''Greet someone.'''
    return f"Hello, {name}!"
"""
        )
        provider = FileSystemProvider(tmp_path)
        mcp = FastMCP("TestServer", providers=[provider])
        async with Client(mcp) as client:
            # List tools
            tools = await client.list_tools()
            assert len(tools) == 1
            assert tools[0].name == "greet"
            # Call tool
            result = await client.call_tool("greet", {"name": "World"})
            assert "Hello, World!" in str(result)

    async def test_provider_with_resources(self, tmp_path: Path):
        """FileSystemProvider should work with resources."""
        (tmp_path / "config.py").write_text(
            """\
from fastmcp.resources import resource
@resource("config://app")
def get_config() -> str:
    '''Get app config.'''
    return '{"version": "1.0"}'
"""
        )
        provider = FileSystemProvider(tmp_path)
        mcp = FastMCP("TestServer", providers=[provider])
        async with Client(mcp) as client:
            # List resources
            resources = await client.list_resources()
            assert len(resources) == 1
            assert str(resources[0].uri) == "config://app"
            # Read resource
            result = await client.read_resource("config://app")
            assert "1.0" in str(result)

    async def test_provider_with_resource_templates(self, tmp_path: Path):
        """FileSystemProvider should work with resource templates."""
        (tmp_path / "users.py").write_text(
            """\
from fastmcp.resources import resource
@resource("users://{user_id}/profile")
def get_profile(user_id: str) -> str:
    '''Get user profile.'''
    return f'{{"id": "{user_id}", "name": "User {user_id}"}}'
"""
        )
        provider = FileSystemProvider(tmp_path)
        mcp = FastMCP("TestServer", providers=[provider])
        async with Client(mcp) as client:
            # List templates
            templates = await client.list_resource_templates()
            assert len(templates) == 1
            # Read with parameter substituted into the URI
            result = await client.read_resource("users://123/profile")
            assert "123" in str(result)

    async def test_provider_with_prompts(self, tmp_path: Path):
        """FileSystemProvider should work with prompts."""
        (tmp_path / "analyze.py").write_text(
            """\
from fastmcp.prompts import prompt
@prompt
def analyze(topic: str) -> str:
    '''Analyze a topic.'''
    return f"Please analyze: {topic}"
"""
        )
        provider = FileSystemProvider(tmp_path)
        mcp = FastMCP("TestServer", providers=[provider])
        async with Client(mcp) as client:
            # List prompts
            prompts = await client.list_prompts()
            assert len(prompts) == 1
            assert prompts[0].name == "analyze"
            # Get prompt with arguments
            result = await client.get_prompt("analyze", {"topic": "Python"})
            assert "Python" in str(result)

    async def test_nested_directory_structure(self, tmp_path: Path):
        """FileSystemProvider should work with nested directories."""
        # Create nested structure: tools/ and tools/payments/
        tools = tmp_path / "tools"
        tools.mkdir()
        (tools / "greet.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def greet(name: str) -> str:
    return f"Hello, {name}!"
"""
        )
        payments = tools / "payments"
        payments.mkdir()
        (payments / "charge.py").write_text(
            """\
from fastmcp.tools import tool
@tool
def charge(amount: float) -> str:
    return f"Charged ${amount}"
"""
        )
        provider = FileSystemProvider(tmp_path)
        mcp = FastMCP("TestServer", providers=[provider])
        async with Client(mcp) as client:
            # Both tools found, regardless of nesting depth
            tools_list = await client.list_tools()
            assert len(tools_list) == 2
            names = {t.name for t in tools_list}
            assert names == {"greet", "charge"}
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/fs/test_provider.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/custom_tool_serializer_decorator.py | """Example of custom tool serialization using ToolResult and a wrapper decorator.
This pattern provides explicit control over how tool outputs are serialized,
making the serialization visible in each tool's code.
"""
import asyncio
import inspect
from collections.abc import Callable
from functools import wraps
from typing import Any
import yaml
from fastmcp import FastMCP
from fastmcp.tools.tool import ToolResult
def with_serializer(serializer: Callable[[Any], str]):
    """Decorator factory that serializes a tool's return value.

    The wrapped function's raw return value is passed through *serializer*
    to produce the unstructured text content, while the raw value itself is
    kept as the structured content of the returned ToolResult. Works for
    both sync and async tool functions.
    """

    def decorator(fn):
        # Branch once at decoration time on whether fn is a coroutine
        # function, so the returned wrapper matches fn's sync/async nature.
        if inspect.iscoroutinefunction(fn):

            @wraps(fn)
            async def async_wrapped(*args, **kwargs):
                raw = await fn(*args, **kwargs)
                return ToolResult(content=serializer(raw), structured_content=raw)

            return async_wrapped

        @wraps(fn)
        def sync_wrapped(*args, **kwargs):
            raw = fn(*args, **kwargs)
            return ToolResult(content=serializer(raw), structured_content=raw)

        return sync_wrapped

    return decorator
# Create reusable serializer decorators
with_yaml = with_serializer(lambda d: yaml.dump(d, width=100, sort_keys=False))

server = FastMCP(name="CustomSerializerExample")


# Note: @server.tool must be outermost so it registers the already-wrapped
# function (which returns a ToolResult).
@server.tool
@with_yaml
def get_example_data() -> dict:
    """Returns some example data serialized as YAML."""
    return {"name": "Test", "value": 123, "status": True}


@server.tool
def get_json_data() -> dict:
    """Returns data with default JSON serialization."""
    return {"format": "json", "data": [1, 2, 3]}


async def example_usage():
    """Call both tools directly and print their serialized results."""
    # NOTE(review): _call_tool_mcp is a private FastMCP API used here for
    # demonstration only — confirm it is still available in this form.
    # YAML serialized tool
    yaml_result = await server._call_tool_mcp("get_example_data", {})
    print("YAML Tool Result:")
    print(yaml_result)
    print()
    # Default JSON serialized tool
    json_result = await server._call_tool_mcp("get_json_data", {})
    print("JSON Tool Result:")
    print(json_result)


if __name__ == "__main__":
    # Print the demo output first, then start the server (blocks until exit).
    asyncio.run(example_usage())
    server.run()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/custom_tool_serializer_decorator.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/deprecated/test_tool_serializer.py | """Tests for deprecated tool serializer functionality.
These tests verify that serializer parameters still work but are deprecated.
All serializer-related tests should be moved here.
"""
import warnings
import pytest
from inline_snapshot import snapshot
from mcp.types import TextContent
from fastmcp import FastMCP
from fastmcp.contrib.mcp_mixin import mcp_tool
from fastmcp.server.providers import LocalProvider
from fastmcp.tools.tool import Tool, _convert_to_content
from fastmcp.tools.tool_transform import TransformedTool
from fastmcp.utilities.tests import temporary_settings
# Reset deprecation warnings for this module
pytestmark = pytest.mark.filterwarnings("default::DeprecationWarning")
class TestToolSerializerDeprecated:
    """Tests for deprecated serializer functionality.

    The serializer hook only affects the unstructured (text) content of a
    tool result; structured content always carries the raw return value.
    """

    async def test_tool_serializer(self):
        """Test that a tool's serializer is used to serialize the result."""

        def custom_serializer(data) -> str:
            return f"Custom serializer: {data}"

        def process_list(items: list[int]) -> int:
            return sum(items)

        tool = Tool.from_function(process_list, serializer=custom_serializer)
        result = await tool.run(arguments={"items": [1, 2, 3, 4, 5]})
        # Custom serializer affects unstructured content
        assert isinstance(result.content[0], TextContent)
        assert result.content[0].text == "Custom serializer: 15"
        # Structured output should have the raw value
        assert result.structured_content == {"result": 15}

    def test_custom_serializer(self):
        """Test that a custom serializer is used for non-MCP types."""

        def custom_serializer(data):
            return f"Serialized: {data}"

        result = _convert_to_content({"a": 1}, serializer=custom_serializer)
        assert result == snapshot(
            [TextContent(type="text", text="Serialized: {'a': 1}")]
        )

    def test_custom_serializer_error_fallback(self, caplog):
        """Test that if a custom serializer fails, it falls back to the default."""

        def custom_serializer_that_fails(data):
            raise ValueError("Serialization failed")

        result = _convert_to_content({"a": 1}, serializer=custom_serializer_that_fails)
        assert isinstance(result, list)
        # Default JSON serialization is used when the custom serializer raises
        assert result == snapshot([TextContent(type="text", text='{"a":1}')])
        # The failure is logged rather than propagated
        assert "Error serializing tool result" in caplog.text
class TestSerializerDeprecationWarnings:
    """Tests that deprecation warnings are raised when serializer is used.

    temporary_settings(deprecation_warnings=...) gates whether the library
    emits DeprecationWarning at all; each entry point that still accepts a
    ``serializer`` argument is checked here.
    """

    def test_tool_from_function_serializer_warning(self):
        """Test that Tool.from_function warns when serializer is provided."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        def my_tool(x: int) -> int:
            return x * 2

        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):
                Tool.from_function(my_tool, serializer=custom_serializer)

    def test_tool_from_function_serializer_no_warning_when_disabled(self):
        """Test that no warning is raised when deprecation_warnings is False."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        def my_tool(x: int) -> int:
            return x * 2

        with temporary_settings(deprecation_warnings=False):
            with warnings.catch_warnings():
                # Turn any warning into an error: the call below must emit none
                warnings.simplefilter("error")
                # Should not raise
                Tool.from_function(my_tool, serializer=custom_serializer)

    def test_local_provider_tool_serializer_warning(self):
        """Test that LocalProvider.tool warns when serializer is provided."""
        provider = LocalProvider()

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        def my_tool(x: int) -> int:
            return x * 2

        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):
                provider.tool(my_tool, serializer=custom_serializer)

    def test_local_provider_tool_decorator_serializer_warning(self):
        """Test that LocalProvider.tool decorator warns when serializer is provided."""
        provider = LocalProvider()

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):
                # Decorator form: warning fires at decoration time
                @provider.tool(serializer=custom_serializer)
                def my_tool(x: int) -> int:
                    return x * 2

    def test_fastmcp_tool_serializer_warning(self):
        """Test that FastMCP.tool warns when serializer is provided via LocalProvider."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        def my_tool(x: int) -> int:
            return x * 2

        # FastMCP.tool doesn't accept serializer directly, it goes through LocalProvider
        # So we test LocalProvider.tool which is what FastMCP uses internally
        provider = LocalProvider()
        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):
                provider.tool(my_tool, serializer=custom_serializer)

    def test_fastmcp_tool_serializer_parameter_raises_type_error(self):
        """Test that FastMCP tool_serializer parameter raises TypeError."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        # tool_serializer was removed from the FastMCP constructor entirely
        with pytest.raises(TypeError, match="no longer accepts `tool_serializer`"):
            FastMCP("TestServer", tool_serializer=custom_serializer)

    def test_transformed_tool_from_tool_serializer_warning(self):
        """Test that TransformedTool.from_tool warns when serializer is provided."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        def my_tool(x: int) -> int:
            return x * 2

        parent_tool = Tool.from_function(my_tool)
        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):
                TransformedTool.from_tool(parent_tool, serializer=custom_serializer)

    def test_mcp_mixin_tool_serializer_warning(self):
        """Test that mcp_tool decorator warns when serializer is provided."""

        def custom_serializer(data) -> str:
            return f"Custom: {data}"

        with temporary_settings(deprecation_warnings=True):
            with pytest.warns(DeprecationWarning, match="serializer.*deprecated"):

                @mcp_tool(serializer=custom_serializer)
                def my_tool(x: int) -> int:
                    return x * 2
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/deprecated/test_tool_serializer.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_resource_task_meta_parameter.py | """
Tests for the explicit task_meta parameter on FastMCP.read_resource().
These tests verify that the task_meta parameter provides explicit control
over sync vs task execution for resources and resource templates.
"""
import pytest
from mcp.shared.exceptions import McpError
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.resources.resource import Resource
from fastmcp.resources.template import ResourceTemplate
from fastmcp.server.tasks.config import TaskMeta
class TestResourceTaskMetaParameter:
    """Tests for task_meta parameter on FastMCP.read_resource()."""

    async def test_task_meta_none_returns_resource_result(self):
        """With task_meta=None (default), read_resource returns ResourceResult."""
        server = FastMCP("test")

        @server.resource("data://test")
        async def simple_resource() -> str:
            return "hello world"

        result = await server.read_resource("data://test")
        assert result.contents[0].content == "hello world"

    async def test_task_meta_none_on_task_enabled_resource_still_returns_result(self):
        """Even for task=True resources, task_meta=None returns ResourceResult."""
        server = FastMCP("test")

        @server.resource("data://test", task=True)
        async def task_enabled_resource() -> str:
            return "hello world"

        # Without task_meta, should execute synchronously
        result = await server.read_resource("data://test")
        assert result.contents[0].content == "hello world"

    async def test_task_meta_on_forbidden_resource_raises_error(self):
        """Providing task_meta to a task=False resource raises McpError."""
        server = FastMCP("test")

        @server.resource("data://test", task=False)
        async def sync_only_resource() -> str:
            return "hello"

        with pytest.raises(McpError) as exc_info:
            await server.read_resource("data://test", task_meta=TaskMeta())
        assert "does not support task-augmented execution" in str(exc_info.value)

    async def test_task_meta_fn_key_enrichment_for_resource(self):
        """Verify that fn_key enrichment uses Resource.make_key()."""
        resource_uri = "data://my-resource"
        expected_key = Resource.make_key(resource_uri)
        # Resource keys are namespaced with a "resource:" prefix
        assert expected_key == "resource:data://my-resource"

    async def test_task_meta_fn_key_enrichment_for_template(self):
        """Verify that fn_key enrichment uses ResourceTemplate.make_key()."""
        template_pattern = "data://{id}"
        expected_key = ResourceTemplate.make_key(template_pattern)
        # Template keys are namespaced with a "template:" prefix
        assert expected_key == "template:data://{id}"
class TestResourceTemplateTaslMeta:
"""Tests for task_meta with resource templates."""
async def test_template_task_meta_none_returns_resource_result(self):
"""With task_meta=None, template read returns ResourceResult."""
server = FastMCP("test")
@server.resource("item://{id}")
async def get_item(id: str) -> str:
return f"Item {id}"
result = await server.read_resource("item://42")
assert result.contents[0].content == "Item 42"
async def test_template_task_meta_on_task_enabled_template_returns_result(self):
"""Even for task=True templates, task_meta=None returns ResourceResult."""
server = FastMCP("test")
@server.resource("item://{id}", task=True)
async def get_item(id: str) -> str:
return f"Item {id}"
# Without task_meta, should execute synchronously
result = await server.read_resource("item://42")
assert result.contents[0].content == "Item 42"
async def test_template_task_meta_on_forbidden_template_raises_error(self):
"""Providing task_meta to a task=False template raises McpError."""
server = FastMCP("test")
@server.resource("item://{id}", task=False)
async def sync_only_template(id: str) -> str:
return f"Item {id}"
with pytest.raises(McpError) as exc_info:
await server.read_resource("item://42", task_meta=TaskMeta())
assert "does not support task-augmented execution" in str(exc_info.value)
class TestResourceTaskMetaClientIntegration:
    """Tests that task_meta works correctly with the Client for resources."""

    async def test_client_read_resource_without_task_gets_immediate_result(self):
        """Client without task=True gets immediate result."""
        server = FastMCP("test")

        @server.resource("data://test", task=True)
        async def immediate_resource() -> str:
            return "hello"

        async with Client(server) as client:
            result = await client.read_resource("data://test")
            # Should get ReadResourceResult directly
            assert "hello" in str(result)

    async def test_client_read_resource_with_task_creates_task(self):
        """Client with task=True creates a background task."""
        server = FastMCP("test")

        @server.resource("data://test", task=True)
        async def task_resource() -> str:
            return "hello"

        async with Client(server) as client:
            from fastmcp.client.tasks import ResourceTask

            task = await client.read_resource("data://test", task=True)
            assert isinstance(task, ResourceTask)
            # Wait for the background task to finish and yield its result
            result = await task.result()
            assert "hello" in str(result)

    async def test_client_read_template_with_task_creates_task(self):
        """Client with task=True on template creates a background task."""
        server = FastMCP("test")

        @server.resource("item://{id}", task=True)
        async def get_item(id: str) -> str:
            return f"Item {id}"

        async with Client(server) as client:
            from fastmcp.client.tasks import ResourceTask

            task = await client.read_resource("item://42", task=True)
            assert isinstance(task, ResourceTask)
            # Wait for result
            result = await task.result()
            assert "Item 42" in str(result)
class TestResourceTaskMetaDirectServerCall:
    """Tests for direct server read_resource calls with task_meta.

    These exercise server.read_resource() from inside a running tool, where
    passing a TaskMeta switches the call from a synchronous ResourceResult
    to a CreateTaskResult for a background task.
    """

    async def test_resource_can_read_another_resource_with_task(self):
        """A resource can read another resource as a background task."""
        server = FastMCP("test")

        @server.resource("data://inner", task=True)
        async def inner_resource() -> str:
            return "inner data"

        @server.tool
        async def outer_tool() -> str:
            # Read inner resource as background task
            result = await server.read_resource("data://inner", task_meta=TaskMeta())
            # Should get CreateTaskResult since we provided task_meta
            return f"Created task: {result.task.taskId}"

        async with Client(server) as client:
            result = await client.call_tool("outer_tool", {})
            assert "Created task:" in str(result)

    async def test_resource_can_read_another_resource_synchronously(self):
        """A resource can read another resource synchronously (no task_meta)."""
        server = FastMCP("test")

        @server.resource("data://inner", task=True)
        async def inner_resource() -> str:
            return "inner data"

        @server.tool
        async def outer_tool() -> str:
            # Read inner resource synchronously (no task_meta)
            result = await server.read_resource("data://inner")
            # Should get ResourceResult directly
            return f"Got result: {result.contents[0].content}"

        async with Client(server) as client:
            result = await client.call_tool("outer_tool", {})
            assert "Got result: inner data" in str(result)

    async def test_resource_can_read_template_with_task(self):
        """A tool can read a resource template as a background task."""
        server = FastMCP("test")

        @server.resource("item://{id}", task=True)
        async def get_item(id: str) -> str:
            return f"Item {id}"

        @server.tool
        async def outer_tool() -> str:
            result = await server.read_resource("item://99", task_meta=TaskMeta())
            return f"Created task: {result.task.taskId}"

        async with Client(server) as client:
            result = await client.call_tool("outer_tool", {})
            assert "Created task:" in str(result)

    async def test_resource_can_read_with_custom_ttl(self):
        """A tool can read a resource as a background task with custom TTL."""
        server = FastMCP("test")

        @server.resource("data://inner", task=True)
        async def inner_resource() -> str:
            return "inner data"

        @server.tool
        async def outer_tool() -> str:
            # TTL is expressed in milliseconds
            custom_ttl = 45000  # 45 seconds
            result = await server.read_resource(
                "data://inner", task_meta=TaskMeta(ttl=custom_ttl)
            )
            return f"Task TTL: {result.task.ttl}"

        async with Client(server) as client:
            result = await client.call_tool("outer_tool", {})
            assert "Task TTL: 45000" in str(result)
class TestResourceTaskMetaTypeNarrowing:
    """Tests for type narrowing based on task_meta parameter.

    read_resource() is overloaded so that omitting task_meta statically
    narrows the return type to ResourceResult, and providing it narrows to
    CreateTaskResult — no isinstance checks needed at call sites.
    """

    async def test_read_resource_without_task_meta_type_is_resource_result(self):
        """Calling read_resource without task_meta returns ResourceResult type."""
        server = FastMCP("test")

        @server.resource("data://test")
        async def simple_resource() -> str:
            return "hello"

        # This should type-check as ResourceResult, not the union type
        result = await server.read_resource("data://test")
        # No isinstance check needed - type is narrowed by overload
        content = result.contents[0].content
        assert content == "hello"

    async def test_read_resource_with_task_meta_type_is_create_task_result(self):
        """Calling read_resource with task_meta returns CreateTaskResult type."""
        server = FastMCP("test")

        @server.resource("data://test", task=True)
        async def task_resource() -> str:
            return "hello"

        async with Client(server) as client:
            # Need to use client to get full task infrastructure
            from fastmcp.client.tasks import ResourceTask

            task = await client.read_resource("data://test", task=True)
            assert isinstance(task, ResourceTask)
            # For direct server call, we need the Client context for Docket
            # This test verifies the overload works via client integration
            result = await task.result()
            assert "hello" in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_resource_task_meta_parameter.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_meta_parameter.py | """
Tests for the explicit task_meta parameter on FastMCP.call_tool().
These tests verify that the task_meta parameter provides explicit control
over sync vs task execution, replacing implicit contextvar-based behavior.
"""
import mcp.types
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.exceptions import ToolError
from fastmcp.server.middleware import CallNext, Middleware, MiddlewareContext
from fastmcp.server.tasks.config import TaskMeta
from fastmcp.tools.tool import Tool, ToolResult
class TestTaskMetaParameter:
    """Tests for the task_meta parameter on FastMCP.call_tool()."""

    async def test_task_meta_none_returns_tool_result(self):
        """With task_meta=None (default), call_tool returns ToolResult."""
        srv = FastMCP("test")

        @srv.tool
        async def simple_tool(x: int) -> int:
            return x * 2

        outcome = await srv.call_tool("simple_tool", {"x": 5})
        block = outcome.content[0]
        assert isinstance(block, mcp.types.TextContent)
        assert block.text == "10"

    async def test_task_meta_none_on_task_enabled_tool_still_returns_tool_result(self):
        """Even for task=True tools, task_meta=None returns ToolResult synchronously."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def task_enabled_tool(x: int) -> int:
            return x * 2

        # Omitting task_meta forces the synchronous execution path.
        outcome = await srv.call_tool("task_enabled_tool", {"x": 5})
        block = outcome.content[0]
        assert isinstance(block, mcp.types.TextContent)
        assert block.text == "10"

    async def test_task_meta_on_forbidden_tool_raises_error(self):
        """Providing task_meta to a task=False tool raises ToolError."""
        srv = FastMCP("test")

        @srv.tool(task=False)
        async def sync_only_tool(x: int) -> int:
            return x * 2

        # The error fires before any docket is needed (the McpError is
        # surfaced as a ToolError).
        with pytest.raises(ToolError) as exc_info:
            await srv.call_tool("sync_only_tool", {"x": 5}, task_meta=TaskMeta())
        assert "does not support task-augmented execution" in str(exc_info.value)

    async def test_task_meta_fn_key_auto_populated_in_call_tool(self):
        """fn_key is auto-populated from tool name in call_tool()."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def auto_key_tool() -> str:
            return "done"

        # fn_key starts out unset; call_tool enriches it before _run.
        meta = TaskMeta()
        assert meta.fn_key is None

        # Exercise the enrichment via the client integration path.
        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            created = await client.call_tool("auto_key_tool", {}, task=True)
            # Success implies fn_key was auto-populated.
            assert isinstance(created, ToolTask)

    async def test_task_meta_fn_key_enrichment_logic(self):
        """Verify that fn_key enrichment uses Tool.make_key()."""
        # Direct test of the enrichment logic.
        assert Tool.make_key("my_tool") == "tool:my_tool"
class TestTaskMetaTTL:
    """Tests for task_meta.ttl behavior."""

    async def test_task_with_custom_ttl_creates_task(self):
        """task_meta.ttl is passed through when creating tasks."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def ttl_tool() -> str:
            return "done"

        ttl_ms = 30000  # 30 seconds
        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            # call_tool with task=True and an explicit ttl.
            handle = await client.call_tool("ttl_tool", {}, task=True, ttl=ttl_ms)
            assert isinstance(handle, ToolTask)

            # The task should still run to completion.
            assert "done" in str(await handle.result())

    async def test_task_without_ttl_uses_default(self):
        """task_meta.ttl=None uses docket.execution_ttl default."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def default_ttl_tool() -> str:
            return "done"

        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            # call_tool with task=True and the default ttl.
            handle = await client.call_tool("default_ttl_tool", {}, task=True)
            assert isinstance(handle, ToolTask)

            assert "done" in str(await handle.result())
class TrackingMiddleware(Middleware):
    """Middleware that records the method name of every tool call it sees."""

    def __init__(self, calls: list[str]):
        super().__init__()
        # The caller-owned list is appended to in place so tests can inspect it.
        self._calls = calls

    async def on_call_tool(
        self,
        context: MiddlewareContext[mcp.types.CallToolRequestParams],
        call_next: CallNext[mcp.types.CallToolRequestParams, ToolResult],
    ) -> ToolResult:
        method = context.method
        if method:
            self._calls.append(method)
        return await call_next(context)
class TestTaskMetaMiddleware:
    """Tests that task_meta is properly propagated through middleware."""

    async def test_task_meta_propagated_through_middleware(self):
        """task_meta is passed through middleware chain."""
        srv = FastMCP("test")
        seen_methods: list[str] = []

        @srv.tool(task=True)
        async def middleware_test_tool() -> str:
            return "done"

        srv.add_middleware(TrackingMiddleware(seen_methods))

        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            # Going through the client triggers the middleware chain.
            handle = await client.call_tool("middleware_test_tool", {}, task=True)

        # The middleware observed the call, and a task was created.
        assert "tools/call" in seen_methods
        assert isinstance(handle, ToolTask)
class TestTaskMetaClientIntegration:
    """Tests that task_meta works correctly with the Client."""

    async def test_client_task_true_maps_to_task_meta(self):
        """Client's task=True creates proper task_meta on server."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def client_test_tool(x: int) -> int:
            return x * 2

        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            # task=True on the client side arrives as task_meta on the server,
            # and the client hands back a ToolTask wrapper.
            handle = await client.call_tool("client_test_tool", {"x": 5}, task=True)
            assert isinstance(handle, ToolTask)

            # Await the background result.
            assert "10" in str(await handle.result())

    async def test_client_without_task_gets_immediate_result(self):
        """Client without task=True gets immediate result."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def immediate_tool(x: int) -> int:
            return x * 2

        async with Client(srv) as client:
            # Without the task flag the call executes synchronously and the
            # CallToolResult comes back directly.
            outcome = await client.call_tool("immediate_tool", {"x": 5})
            assert "10" in str(outcome)

    async def test_client_task_with_custom_ttl(self):
        """Client can pass custom TTL for task execution."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def custom_ttl_tool() -> str:
            return "done"

        ttl_ms = 60000  # 60 seconds
        async with Client(srv) as client:
            from fastmcp.client.tasks import ToolTask

            handle = await client.call_tool(
                "custom_ttl_tool", {}, task=True, ttl=ttl_ms
            )
            assert isinstance(handle, ToolTask)

            # The task should still complete successfully.
            assert "done" in str(await handle.result())
class TestTaskMetaDirectServerCall:
    """Tests for direct server calls (tool calling another tool)."""

    async def test_tool_can_call_another_tool_with_task(self):
        """A tool can call another tool as a background task."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def inner_tool(x: int) -> int:
            return x * 2

        @srv.tool
        async def outer_tool(x: int) -> str:
            # Kick off the inner tool as a background task; in server
            # context this yields a CreateTaskResult.
            created = await srv.call_tool(
                "inner_tool", {"x": x}, task_meta=TaskMeta()
            )
            return f"Created task: {created.task.taskId}"

        async with Client(srv) as client:
            # outer_tool internally calls inner_tool with task_meta.
            outcome = await client.call_tool("outer_tool", {"x": 5})
            assert "Created task:" in str(outcome)

    async def test_tool_can_call_another_tool_synchronously(self):
        """A tool can call another tool synchronously (no task_meta)."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def inner_tool(x: int) -> int:
            return x * 2

        @srv.tool
        async def outer_tool(x: int) -> str:
            # Without task_meta the inner call returns a ToolResult directly.
            inner = await srv.call_tool("inner_tool", {"x": x})
            block = inner.content[0]
            assert isinstance(block, mcp.types.TextContent)
            return f"Got result: {block.text}"

        async with Client(srv) as client:
            outcome = await client.call_tool("outer_tool", {"x": 5})
            assert "Got result: 10" in str(outcome)

    async def test_tool_can_call_another_tool_with_custom_ttl(self):
        """A tool can call another tool as a background task with custom TTL."""
        srv = FastMCP("test")

        @srv.tool(task=True)
        async def inner_tool(x: int) -> int:
            return x * 2

        @srv.tool
        async def outer_tool(x: int) -> str:
            ttl_ms = 45000  # 45 seconds
            created = await srv.call_tool(
                "inner_tool", {"x": x}, task_meta=TaskMeta(ttl=ttl_ms)
            )
            return f"Task TTL: {created.task.ttl}"

        async with Client(srv) as client:
            outcome = await client.call_tool("outer_tool", {"x": 5})
            # The inner task carries the custom TTL.
            assert "Task TTL: 45000" in str(outcome)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_meta_parameter.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/test_base_provider.py | """Tests for base Provider class behavior."""
from typing import Any
from fastmcp.server.providers.base import Provider
from fastmcp.server.tasks.config import TaskConfig
from fastmcp.server.transforms import Namespace
from fastmcp.tools.tool import Tool, ToolResult
class CustomTool(Tool):
    """A custom Tool subclass (not FunctionTool) with task support."""

    # Model fields: opt in to task execution and declare an empty object schema.
    # Declaration order and annotations are part of the model definition.
    task_config: TaskConfig = TaskConfig(mode="optional")
    parameters: dict[str, Any] = {"type": "object", "properties": {}}

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        # Constant payload; the tests in this module only inspect the tool
        # object itself, not its output.
        return ToolResult(content="custom result")
class SimpleProvider(Provider):
    """Minimal provider that returns custom components from list methods."""

    def __init__(self, tools: list[Tool] | None = None):
        super().__init__()
        # Keep the caller's list when given one; otherwise start empty.
        self._tools = tools or []

    async def _list_tools(self) -> list[Tool]:
        # Hand back the stored list as-is (no copy).
        return self._tools
class TestBaseProviderGetTasks:
    """Tests for Provider.get_tasks() base implementation."""

    async def test_get_tasks_includes_custom_tool_subclasses(self):
        """Base Provider.get_tasks() should include custom Tool subclasses."""
        tool = CustomTool(name="custom", description="A custom tool")
        tasks = await SimpleProvider(tools=[tool]).get_tasks()
        assert len(tasks) == 1
        assert tasks[0].name == "custom"
        assert tasks[0] is tool

    async def test_get_tasks_filters_forbidden_custom_tools(self):
        """Base Provider.get_tasks() should exclude tools with forbidden task mode."""

        class ForbiddenTool(Tool):
            task_config: TaskConfig = TaskConfig(mode="forbidden")
            parameters: dict[str, Any] = {"type": "object", "properties": {}}

            async def run(self, arguments: dict[str, Any]) -> ToolResult:
                return ToolResult(content="forbidden")

        blocked = ForbiddenTool(name="forbidden", description="Forbidden tool")
        tasks = await SimpleProvider(tools=[blocked]).get_tasks()
        assert len(tasks) == 0

    async def test_get_tasks_mixed_custom_and_forbidden(self):
        """Base Provider.get_tasks() filters correctly with mixed task modes."""

        class ForbiddenTool(Tool):
            task_config: TaskConfig = TaskConfig(mode="forbidden")
            parameters: dict[str, Any] = {"type": "object", "properties": {}}

            async def run(self, arguments: dict[str, Any]) -> ToolResult:
                return ToolResult(content="forbidden")

        allowed = CustomTool(name="enabled", description="Task enabled")
        blocked = ForbiddenTool(name="forbidden", description="Task forbidden")
        tasks = await SimpleProvider(tools=[allowed, blocked]).get_tasks()
        # Only the task-enabled tool survives the filter.
        assert [t.name for t in tasks] == ["enabled"]

    async def test_get_tasks_applies_transforms(self):
        """get_tasks should apply provider transforms to component names."""
        provider = SimpleProvider(
            tools=[CustomTool(name="my_tool", description="A tool")]
        )
        provider.add_transform(Namespace("api"))
        tasks = await provider.get_tasks()
        assert len(tasks) == 1
        assert tasks[0].name == "api_my_tool"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_base_provider.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/utilities/async_utils.py | """Async utilities for FastMCP."""
import asyncio
import functools
import inspect
from collections.abc import Awaitable, Callable
from typing import Any, Literal, TypeVar, overload
import anyio
from anyio.to_thread import run_sync as run_sync_in_threadpool
T = TypeVar("T")
def is_coroutine_function(fn: Any) -> bool:
    """Check if a callable is a coroutine function, unwrapping functools.partial.

    ``inspect.iscoroutinefunction`` returns ``False`` for
    ``functools.partial`` objects wrapping an async function on Python < 3.12.
    This helper unwraps any layers of ``partial`` before checking.
    """
    target = fn
    # Peel off every layer of functools.partial to reach the real callable.
    while isinstance(target, functools.partial):
        target = target.func
    if inspect.iscoroutinefunction(target):
        return True
    # Fall back to asyncio's variant, which recognizes some additional markers.
    return asyncio.iscoroutinefunction(target)
async def call_sync_fn_in_threadpool(
    fn: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
    """Call a sync function in a threadpool to avoid blocking the event loop.

    Uses anyio.to_thread.run_sync which properly propagates contextvars,
    making this safe for functions that depend on context (like dependency injection).
    """
    # Bind args/kwargs into a single callable before handing off to the pool.
    bound = functools.partial(fn, *args, **kwargs)
    return await run_sync_in_threadpool(bound)
@overload
async def gather(
    *awaitables: Awaitable[T],
    return_exceptions: Literal[True],
) -> list[T | BaseException]: ...


@overload
async def gather(
    *awaitables: Awaitable[T],
    return_exceptions: Literal[False] = ...,
) -> list[T]: ...


async def gather(
    *awaitables: Awaitable[T],
    return_exceptions: bool = False,
) -> list[T] | list[T | BaseException]:
    """Run awaitables concurrently and return results in order.

    Uses anyio TaskGroup for structured concurrency.

    Args:
        *awaitables: Awaitables to run concurrently
        return_exceptions: If True, exceptions are returned in results.
            If False, first exception cancels all and raises.

    Returns:
        List of results in the same order as input awaitables.
    """
    # Pre-size the list so each task can write into its own slot; every
    # placeholder is overwritten with a result (or an exception) before return.
    results: list[T | BaseException] = [None] * len(awaitables)  # type: ignore[assignment]

    async def run_at(i: int, aw: Awaitable[T]) -> None:
        try:
            results[i] = await aw
        except BaseException as e:
            if return_exceptions:
                results[i] = e
            else:
                # Re-raising out of the task group cancels sibling tasks and
                # propagates once the group exits.
                raise

    async with anyio.create_task_group() as tg:
        for i, aw in enumerate(awaitables):
            tg.start_soon(run_at, i, aw)

    return results
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/utilities/async_utils.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/deprecated/server/test_include_exclude_tags.py | """Tests for removed include_tags/exclude_tags parameters."""
import pytest
from fastmcp import FastMCP
class TestIncludeExcludeTagsRemoved:
    """Test that include_tags/exclude_tags raise TypeError with migration hints."""

    def test_exclude_tags_raises_type_error(self):
        pattern = "no longer accepts `exclude_tags`"
        with pytest.raises(TypeError, match=pattern):
            FastMCP(exclude_tags={"internal"})

    def test_include_tags_raises_type_error(self):
        pattern = "no longer accepts `include_tags`"
        with pytest.raises(TypeError, match=pattern):
            FastMCP(include_tags={"public"})

    def test_exclude_tags_error_mentions_disable(self):
        # The migration hint should point at server.disable.
        with pytest.raises(TypeError, match="server.disable"):
            FastMCP(exclude_tags={"internal"})

    def test_include_tags_error_mentions_enable(self):
        # The migration hint should point at server.enable.
        with pytest.raises(TypeError, match="server.enable"):
            FastMCP(include_tags={"public"})
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/deprecated/server/test_include_exclude_tags.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/test_local_provider.py | """Comprehensive tests for LocalProvider.
Tests cover:
- Storage operations (add/remove tools, resources, templates, prompts)
- Provider interface (list/get operations)
- Decorator patterns (all calling styles)
- Tool transformations
- Standalone usage (provider attached to multiple servers)
- Task registration
"""
from typing import Any
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.prompts.prompt import Prompt
from fastmcp.server.providers.local_provider import LocalProvider
from fastmcp.server.tasks import TaskConfig
from fastmcp.tools.tool import Tool, ToolResult
class TestLocalProviderStorage:
    """Tests for LocalProvider storage operations."""

    @staticmethod
    def _make_tool(name: str, description: str) -> Tool:
        """Build a minimal Tool with an empty object schema."""
        return Tool(
            name=name,
            description=description,
            parameters={"type": "object", "properties": {}},
        )

    def test_add_tool(self):
        """Test adding a tool to LocalProvider."""
        provider = LocalProvider()
        tool = self._make_tool("test_tool", "A test tool")
        provider.add_tool(tool)
        assert "tool:test_tool@" in provider._components
        assert provider._components["tool:test_tool@"] is tool

    def test_add_multiple_tools(self):
        """Test adding multiple tools."""
        provider = LocalProvider()
        provider.add_tool(self._make_tool("tool1", "First tool"))
        provider.add_tool(self._make_tool("tool2", "Second tool"))
        assert "tool:tool1@" in provider._components
        assert "tool:tool2@" in provider._components

    def test_remove_tool(self):
        """Test removing a tool from LocalProvider."""
        provider = LocalProvider()
        provider.add_tool(self._make_tool("test_tool", "A test tool"))
        provider.remove_tool("test_tool")
        assert "tool:test_tool@" not in provider._components

    def test_remove_nonexistent_tool_raises(self):
        """Test that removing a nonexistent tool raises KeyError."""
        with pytest.raises(KeyError):
            LocalProvider().remove_tool("nonexistent")

    def test_add_resource(self):
        """Test adding a resource to LocalProvider."""
        provider = LocalProvider()

        @provider.resource("resource://test")
        def test_resource() -> str:
            return "content"

        assert "resource:resource://test@" in provider._components

    def test_remove_resource(self):
        """Test removing a resource from LocalProvider."""
        provider = LocalProvider()

        @provider.resource("resource://test")
        def test_resource() -> str:
            return "content"

        provider.remove_resource("resource://test")
        assert "resource:resource://test@" not in provider._components

    def test_add_template(self):
        """Test adding a resource template to LocalProvider."""
        provider = LocalProvider()

        @provider.resource("resource://{id}")
        def template_fn(id: str) -> str:
            return f"Resource {id}"

        assert "template:resource://{id}@" in provider._components

    def test_remove_template(self):
        """Test removing a resource template from LocalProvider."""
        provider = LocalProvider()

        @provider.resource("resource://{id}")
        def template_fn(id: str) -> str:
            return f"Resource {id}"

        provider.remove_template("resource://{id}")
        assert "template:resource://{id}@" not in provider._components

    def test_add_prompt(self):
        """Test adding a prompt to LocalProvider."""
        provider = LocalProvider()
        provider.add_prompt(Prompt(name="test_prompt", description="A test prompt"))
        assert "prompt:test_prompt@" in provider._components

    def test_remove_prompt(self):
        """Test removing a prompt from LocalProvider."""
        provider = LocalProvider()
        provider.add_prompt(Prompt(name="test_prompt", description="A test prompt"))
        provider.remove_prompt("test_prompt")
        assert "prompt:test_prompt@" not in provider._components
class TestLocalProviderInterface:
    """Tests for LocalProvider's Provider interface."""

    async def test_list_tools_empty(self):
        """Test listing tools when empty."""
        assert await LocalProvider().list_tools() == []

    async def test_list_tools(self):
        """Test listing tools returns all stored tools."""
        provider = LocalProvider()
        provider.add_tool(
            Tool(name="tool1", description="First", parameters={"type": "object"})
        )
        provider.add_tool(
            Tool(name="tool2", description="Second", parameters={"type": "object"})
        )
        tools = await provider.list_tools()
        assert len(tools) == 2
        assert {t.name for t in tools} == {"tool1", "tool2"}

    async def test_get_tool_found(self):
        """Test getting a tool that exists."""
        provider = LocalProvider()
        provider.add_tool(
            Tool(
                name="test_tool",
                description="A test tool",
                parameters={"type": "object"},
            )
        )
        found = await provider.get_tool("test_tool")
        assert found is not None
        assert found.name == "test_tool"

    async def test_get_tool_not_found(self):
        """Test getting a tool that doesn't exist returns None."""
        assert await LocalProvider().get_tool("nonexistent") is None

    async def test_list_resources(self):
        """Test listing resources."""
        provider = LocalProvider()

        @provider.resource("resource://test")
        def test_resource() -> str:
            return "content"

        resources = await provider.list_resources()
        assert len(resources) == 1
        assert str(resources[0].uri) == "resource://test"

    async def test_get_resource_found(self):
        """Test getting a resource that exists."""
        provider = LocalProvider()

        @provider.resource("resource://test")
        def test_resource() -> str:
            return "content"

        found = await provider.get_resource("resource://test")
        assert found is not None
        assert str(found.uri) == "resource://test"

    async def test_get_resource_not_found(self):
        """Test getting a resource that doesn't exist returns None."""
        assert await LocalProvider().get_resource("resource://nonexistent") is None

    async def test_list_resource_templates(self):
        """Test listing resource templates."""
        provider = LocalProvider()

        @provider.resource("resource://{id}")
        def template_fn(id: str) -> str:
            return f"Resource {id}"

        templates = await provider.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].uri_template == "resource://{id}"

    async def test_get_resource_template_match(self):
        """Test getting a template that matches a URI."""
        provider = LocalProvider()

        @provider.resource("resource://{id}")
        def template_fn(id: str) -> str:
            return f"Resource {id}"

        match = await provider.get_resource_template("resource://123")
        assert match is not None
        assert match.uri_template == "resource://{id}"

    async def test_get_resource_template_no_match(self):
        """Test getting a template with no match returns None."""
        provider = LocalProvider()

        @provider.resource("resource://{id}")
        def template_fn(id: str) -> str:
            return f"Resource {id}"

        assert await provider.get_resource_template("other://123") is None

    async def test_list_prompts(self):
        """Test listing prompts."""
        provider = LocalProvider()
        provider.add_prompt(Prompt(name="test_prompt", description="A test prompt"))
        prompts = await provider.list_prompts()
        assert len(prompts) == 1
        assert prompts[0].name == "test_prompt"

    async def test_get_prompt_found(self):
        """Test getting a prompt that exists."""
        provider = LocalProvider()
        provider.add_prompt(Prompt(name="test_prompt", description="A test prompt"))
        found = await provider.get_prompt("test_prompt")
        assert found is not None
        assert found.name == "test_prompt"

    async def test_get_prompt_not_found(self):
        """Test getting a prompt that doesn't exist returns None."""
        assert await LocalProvider().get_prompt("nonexistent") is None
class TestLocalProviderDecorators:
"""Tests for LocalProvider decorator registration.
Note: Decorator calling patterns and metadata are tested in the standalone
decorator tests (tests/tools/test_standalone_decorator.py, etc.). These tests
focus on LocalProvider-specific behavior: registration into _components,
the enabled flag, and round-trip execution via Client.
"""
def test_tool_decorator_registers(self):
"""Tool decorator should register in _components."""
provider = LocalProvider()
@provider.tool
def my_tool(x: int) -> int:
return x * 2
assert "tool:my_tool@" in provider._components
assert provider._components["tool:my_tool@"].name == "my_tool"
def test_tool_decorator_with_custom_name_registers(self):
"""Tool with custom name should register under that name."""
provider = LocalProvider()
@provider.tool(name="custom_name")
def my_tool(x: int) -> int:
return x * 2
assert "tool:custom_name@" in provider._components
assert "tool:my_tool@" not in provider._components
def test_tool_direct_call(self):
"""provider.tool(fn) should register the function."""
provider = LocalProvider()
def my_tool(x: int) -> int:
return x * 2
provider.tool(my_tool, name="direct_tool")
assert "tool:direct_tool@" in provider._components
def test_tool_enabled_false(self):
"""Tool with enabled=False should add a Visibility transform."""
provider = LocalProvider()
@provider.tool(enabled=False)
def disabled_tool() -> str:
return "should be disabled"
assert "tool:disabled_tool@" in provider._components
# enabled=False adds a Visibility transform to disable the tool
from fastmcp.server.transforms.visibility import Visibility
enabled_transforms = [
t for t in provider.transforms if isinstance(t, Visibility)
]
assert len(enabled_transforms) == 1
assert enabled_transforms[0]._enabled is False
assert enabled_transforms[0].keys == {"tool:disabled_tool@"}
async def test_tool_enabled_false_not_listed(self):
"""Disabled tool should not appear in get_tools (filtering happens at server level)."""
provider = LocalProvider()
@provider.tool(enabled=False)
def disabled_tool() -> str:
return "should be disabled"
@provider.tool
def enabled_tool() -> str:
return "should be enabled"
# Filtering happens at the server level, not provider level
server = FastMCP("Test", providers=[provider])
tools = await server.list_tools()
names = {t.name for t in tools}
assert "enabled_tool" in names
assert "disabled_tool" not in names
async def test_server_enable_overrides_provider_disable(self):
"""Server-level enable should override provider-level disable."""
provider = LocalProvider()
@provider.tool(enabled=False)
def my_tool() -> str:
return "result"
server = FastMCP("Test", providers=[provider])
# Tool is disabled at provider level
assert await server.get_tool("my_tool") is None
# Server-level enable overrides it
server.enable(names={"my_tool"})
tool = await server.get_tool("my_tool")
assert tool is not None
assert tool.name == "my_tool"
async def test_tool_roundtrip(self):
"""Tool should execute correctly via Client."""
provider = LocalProvider()
@provider.tool
def add(a: int, b: int) -> int:
return a + b
server = FastMCP("Test", providers=[provider])
async with Client(server) as client:
result = await client.call_tool("add", {"a": 2, "b": 3})
assert result.data == 5
def test_resource_decorator_registers(self):
"""Resource decorator should register in _components."""
provider = LocalProvider()
@provider.resource("resource://test")
def my_resource() -> str:
return "test content"
assert "resource:resource://test@" in provider._components
def test_resource_with_custom_name_registers(self):
"""Resource with custom name should register with that name."""
provider = LocalProvider()
@provider.resource("resource://test", name="custom_name")
def my_resource() -> str:
return "test content"
assert provider._components["resource:resource://test@"].name == "custom_name"
def test_resource_enabled_false(self):
"""Resource with enabled=False should add a Visibility transform."""
provider = LocalProvider()
@provider.resource("resource://test", enabled=False)
def disabled_resource() -> str:
return "should be disabled"
assert "resource:resource://test@" in provider._components
# enabled=False adds a Visibility transform to disable the resource
from fastmcp.server.transforms.visibility import Visibility
enabled_transforms = [
t for t in provider.transforms if isinstance(t, Visibility)
]
assert len(enabled_transforms) == 1
assert enabled_transforms[0]._enabled is False
assert enabled_transforms[0].keys == {"resource:resource://test@"}
async def test_resource_enabled_false_not_listed(self):
"""Disabled resource should not appear in get_resources (filtering at server level)."""
provider = LocalProvider()
@provider.resource("resource://disabled", enabled=False)
def disabled_resource() -> str:
return "should be disabled"
@provider.resource("resource://enabled")
def enabled_resource() -> str:
return "should be enabled"
# Filtering happens at the server level, not provider level
server = FastMCP("Test", providers=[provider])
resources = await server.list_resources()
uris = {str(r.uri) for r in resources}
assert "resource://enabled" in uris
assert "resource://disabled" not in uris
def test_template_enabled_false(self):
"""Template with enabled=False should add a Visibility transform."""
provider = LocalProvider()
@provider.resource("data://{id}", enabled=False)
def disabled_template(id: str) -> str:
return f"Data {id}"
assert "template:data://{id}@" in provider._components
# enabled=False adds a Visibility transform to disable the template
from fastmcp.server.transforms.visibility import Visibility
enabled_transforms = [
t for t in provider.transforms if isinstance(t, Visibility)
]
assert len(enabled_transforms) == 1
assert enabled_transforms[0]._enabled is False
assert enabled_transforms[0].keys == {"template:data://{id}@"}
async def test_template_enabled_false_not_listed(self):
"""Disabled template should not appear in get_resource_templates (filtering at server level)."""
provider = LocalProvider()
@provider.resource("data://{id}", enabled=False)
def disabled_template(id: str) -> str:
return f"Data {id}"
@provider.resource("items://{id}")
def enabled_template(id: str) -> str:
return f"Item {id}"
# Filtering happens at the server level, not provider level
server = FastMCP("Test", providers=[provider])
templates = await server.list_resource_templates()
uris = {t.uri_template for t in templates}
assert "items://{id}" in uris
assert "data://{id}" not in uris
async def test_resource_roundtrip(self):
"""Resource should execute correctly via Client."""
provider = LocalProvider()
@provider.resource("resource://greeting")
def greeting() -> str:
return "Hello, World!"
server = FastMCP("Test", providers=[provider])
async with Client(server) as client:
result = await client.read_resource("resource://greeting")
assert "Hello, World!" in str(result)
def test_prompt_decorator_registers(self):
"""Prompt decorator should register in _components."""
provider = LocalProvider()
@provider.prompt
def my_prompt() -> str:
return "A prompt"
assert "prompt:my_prompt@" in provider._components
def test_prompt_with_custom_name_registers(self):
"""Prompt with custom name should register under that name."""
provider = LocalProvider()
@provider.prompt(name="custom_prompt")
def my_prompt() -> str:
return "A prompt"
assert "prompt:custom_prompt@" in provider._components
assert "prompt:my_prompt@" not in provider._components
def test_prompt_enabled_false(self):
"""Prompt with enabled=False should add a Visibility transform."""
provider = LocalProvider()
@provider.prompt(enabled=False)
def disabled_prompt() -> str:
return "should be disabled"
assert "prompt:disabled_prompt@" in provider._components
# enabled=False adds a Visibility transform to disable the prompt
from fastmcp.server.transforms.visibility import Visibility
enabled_transforms = [
t for t in provider.transforms if isinstance(t, Visibility)
]
assert len(enabled_transforms) == 1
assert enabled_transforms[0]._enabled is False
assert enabled_transforms[0].keys == {"prompt:disabled_prompt@"}
async def test_prompt_enabled_false_not_listed(self):
"""Disabled prompt should not appear in get_prompts (filtering at server level)."""
provider = LocalProvider()
@provider.prompt(enabled=False)
def disabled_prompt() -> str:
return "should be disabled"
@provider.prompt
def enabled_prompt() -> str:
return "should be enabled"
# Filtering happens at the server level, not provider level
server = FastMCP("Test", providers=[provider])
prompts = await server.list_prompts()
names = {p.name for p in prompts}
assert "enabled_prompt" in names
assert "disabled_prompt" not in names
async def test_prompt_roundtrip(self):
    """Prompt should execute correctly via Client."""
    provider = LocalProvider()

    @provider.prompt
    def greeting(name: str) -> str:
        return f"Hello, {name}!"

    server = FastMCP("Test", providers=[provider])
    async with Client(server) as client:
        rendered = await client.get_prompt("greeting", {"name": "World"})
        assert "Hello, World!" in str(rendered)
class TestProviderToolTransformations:
    """Tests for tool transformations via add_transform()."""

    async def test_add_transform_applies_tool_transforms(self):
        """Test that add_transform with ToolTransform applies tool transformations."""
        from fastmcp.server.transforms import ToolTransform
        from fastmcp.tools.tool_transform import ToolTransformConfig

        provider = LocalProvider()

        @provider.tool
        def my_tool(x: int) -> int:
            return x

        # Add transform layer
        layer = ToolTransform({"my_tool": ToolTransformConfig(name="renamed_tool")})
        provider.add_transform(layer)
        # Get tools and pass directly to transform
        tools = await provider.list_tools()
        transformed_tools = await layer.list_tools(tools)
        assert len(transformed_tools) == 1
        assert transformed_tools[0].name == "renamed_tool"

    async def test_transform_layer_get_tool(self):
        """Test that ToolTransform.get_tool works correctly."""
        from fastmcp.server.transforms import ToolTransform
        from fastmcp.tools.tool_transform import ToolTransformConfig

        provider = LocalProvider()

        @provider.tool
        def original_tool(x: int) -> int:
            return x

        layer = ToolTransform(
            {"original_tool": ToolTransformConfig(name="transformed_tool")}
        )

        # Get tool through layer with call_next
        async def get_tool(name: str, version=None):
            return await provider._get_tool(name, version)

        tool = await layer.get_tool("transformed_tool", get_tool)
        assert tool is not None
        assert tool.name == "transformed_tool"
        # Original name should not work
        tool = await layer.get_tool("original_tool", get_tool)
        assert tool is None

    async def test_transform_layer_description_change(self):
        """Test that ToolTransform can change description."""
        from fastmcp.server.transforms import ToolTransform
        from fastmcp.tools.tool_transform import ToolTransformConfig

        provider = LocalProvider()

        @provider.tool
        def my_tool(x: int) -> int:
            return x

        layer = ToolTransform(
            {"my_tool": ToolTransformConfig(description="New description")}
        )

        async def get_tool(name: str, version=None):
            return await provider._get_tool(name, version)

        tool = await layer.get_tool("my_tool", get_tool)
        assert tool is not None
        assert tool.description == "New description"

    async def test_provider_unaffected_by_transforms(self):
        """Test that provider's own tools are unchanged by layers stored on it."""
        from fastmcp.server.transforms import ToolTransform
        from fastmcp.tools.tool_transform import ToolTransformConfig

        provider = LocalProvider()

        @provider.tool
        def my_tool(x: int) -> int:
            return x

        # Add layer to provider (layers are applied by server, not _list_tools)
        layer = ToolTransform({"my_tool": ToolTransformConfig(name="renamed")})
        provider.add_transform(layer)
        # Provider's _list_tools returns raw tools (transforms applied when queried via list_tools)
        original_tools = await provider._list_tools()
        assert original_tools[0].name == "my_tool"
        # Transform modifies them when applied directly
        transformed_tools = await layer.list_tools(original_tools)
        assert transformed_tools[0].name == "renamed"

    def test_transform_layer_duplicate_target_name_raises_error(self):
        """Test that ToolTransform with duplicate target names raises ValueError."""
        from fastmcp.server.transforms import ToolTransform
        from fastmcp.tools.tool_transform import ToolTransformConfig

        # Two source tools renamed to the same target must be rejected.
        with pytest.raises(ValueError, match="duplicate target name"):
            ToolTransform(
                {
                    "tool_a": ToolTransformConfig(name="same_name"),
                    "tool_b": ToolTransformConfig(name="same_name"),
                }
            )
class TestLocalProviderTaskRegistration:
    """Tests for task registration in LocalProvider."""

    async def test_get_tasks_returns_task_eligible_tools(self):
        """Test that get_tasks returns tools with task support."""
        provider = LocalProvider()

        @provider.tool(task=True)
        async def background_tool(x: int) -> int:
            return x

        tasks = await provider.get_tasks()
        assert len(tasks) == 1
        assert tasks[0].name == "background_tool"

    async def test_get_tasks_filters_forbidden_tools(self):
        """Test that get_tasks excludes tools with forbidden task mode."""
        provider = LocalProvider()

        @provider.tool(task=False)
        def sync_only_tool(x: int) -> int:
            return x

        tasks = await provider.get_tasks()
        assert len(tasks) == 0

    async def test_get_tasks_includes_custom_tool_subclasses(self):
        """Test that custom Tool subclasses are included in get_tasks."""

        class CustomTool(Tool):
            # mode="optional" presumably marks the tool as task-eligible —
            # confirm against TaskConfig semantics.
            task_config: TaskConfig = TaskConfig(mode="optional")
            parameters: dict[str, Any] = {"type": "object", "properties": {}}

            async def run(self, arguments: dict[str, Any]) -> ToolResult:
                return ToolResult(content="custom")

        provider = LocalProvider()
        provider.add_tool(CustomTool(name="custom", description="Custom tool"))
        tasks = await provider.get_tasks()
        assert len(tasks) == 1
        assert tasks[0].name == "custom"
class TestLocalProviderStandaloneUsage:
    """Tests for standalone LocalProvider usage patterns."""

    async def test_attach_provider_to_server(self):
        """Test that LocalProvider can be attached to a server."""
        provider = LocalProvider()

        @provider.tool
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        server = FastMCP("Test", providers=[provider])
        async with Client(server) as client:
            tools = await client.list_tools()
            assert any(t.name == "greet" for t in tools)

    async def test_attach_provider_to_multiple_servers(self):
        """Test that same provider can be attached to multiple servers."""
        provider = LocalProvider()

        @provider.tool
        def shared_tool() -> str:
            return "shared"

        # The same provider instance is shared by both servers.
        server1 = FastMCP("Server1", providers=[provider])
        server2 = FastMCP("Server2", providers=[provider])
        async with Client(server1) as client1:
            tools1 = await client1.list_tools()
            assert any(t.name == "shared_tool" for t in tools1)
        async with Client(server2) as client2:
            tools2 = await client2.list_tools()
            assert any(t.name == "shared_tool" for t in tools2)

    async def test_tools_visible_via_server_get_tools(self):
        """Test that provider tools are visible via server.list_tools()."""
        provider = LocalProvider()

        @provider.tool
        def provider_tool() -> str:
            return "from provider"

        server = FastMCP("Test", providers=[provider])
        tools = await server.list_tools()
        assert any(t.name == "provider_tool" for t in tools)

    async def test_server_decorator_and_provider_tools_coexist(self):
        """Test that server decorators and provider tools coexist."""
        provider = LocalProvider()

        @provider.tool
        def provider_tool() -> str:
            return "from provider"

        server = FastMCP("Test", providers=[provider])

        @server.tool
        def server_tool() -> str:
            return "from server"

        tools = await server.list_tools()
        assert any(t.name == "provider_tool" for t in tools)
        assert any(t.name == "server_tool" for t in tools)

    async def test_local_provider_first_wins_duplicates(self):
        """Test that LocalProvider tools take precedence over added providers."""
        # NOTE(review): the docstring above reads as if the *attached*
        # provider should win, but the assertions below expect the server's
        # own tool ("from server"). The inline comment says the server's
        # internal LocalProvider is ordered first — confirm the docstring
        # matches the intended precedence.
        provider = LocalProvider()

        @provider.tool
        def duplicate_tool() -> str:
            return "from added provider"

        server = FastMCP("Test", providers=[provider])

        @server.tool
        def duplicate_tool() -> str:  # noqa: F811
            return "from server"

        # Server's LocalProvider is first, so its tool wins
        tools = await server.list_tools()
        assert any(t.name == "duplicate_tool" for t in tools)
        async with Client(server) as client:
            result = await client.call_tool("duplicate_tool", {})
            assert result.data == "from server"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_local_provider.py",
"license": "Apache License 2.0",
"lines": 623,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/test_local_provider_prompts.py | """Tests for prompt behavior in LocalProvider.
Tests cover:
- Prompt context injection
- Prompt decorator patterns
"""
import pytest
from mcp.types import TextContent
from fastmcp import Client, Context, FastMCP
from fastmcp.prompts.prompt import Prompt, PromptResult
class TestPromptContext:
    """Context injection into prompt functions."""

    async def test_prompt_context(self):
        """A ctx: Context parameter is injected when rendering via a client."""
        mcp = FastMCP()

        @mcp.prompt
        def prompt_fn(name: str, ctx: Context) -> str:
            assert isinstance(ctx, Context)
            return f"Hello, {name}! {ctx.request_id}"

        async with Client(mcp) as client:
            result = await client.get_prompt("prompt_fn", {"name": "World"})
            assert len(result.messages) == 1
            message = result.messages[0]
            assert message.role == "user"

    async def test_prompt_context_with_callable_object(self):
        """Context is also injected into callable-object prompts."""
        mcp = FastMCP()

        class MyPrompt:
            def __call__(self, name: str, ctx: Context) -> str:
                return f"Hello, {name}! {ctx.request_id}"

        mcp.add_prompt(Prompt.from_function(MyPrompt(), name="my_prompt"))
        async with Client(mcp) as client:
            result = await client.get_prompt("my_prompt", {"name": "World"})
            assert len(result.messages) == 1
            message = result.messages[0]
            assert message.role == "user"
            assert isinstance(message.content, TextContent)
            # "1" is presumably the first request id in a fresh session —
            # confirm against Context.request_id numbering.
            assert message.content.text == "Hello, World! 1"
class TestPromptDecorator:
    """Registration and rendering behavior of the @mcp.prompt decorator."""

    async def test_prompt_decorator(self):
        """A bare-decorated function registers under its own name and renders."""
        mcp = FastMCP()

        @mcp.prompt
        def fn() -> str:
            return "Hello, world!"

        prompts = await mcp.list_prompts()
        assert len(prompts) == 1
        prompt = next(p for p in prompts if p.name == "fn")
        assert prompt.name == "fn"
        content = await prompt.render()
        assert isinstance(content, PromptResult)
        assert isinstance(content.messages[0].content, TextContent)
        assert content.messages[0].content.text == "Hello, world!"

    async def test_prompt_decorator_without_parentheses(self):
        """Bare @mcp.prompt (no parentheses) works end-to-end.

        NOTE(review): overlaps heavily with test_prompt_decorator above —
        consider consolidating.
        """
        mcp = FastMCP()

        @mcp.prompt
        def fn() -> str:
            return "Hello, world!"

        prompts = await mcp.list_prompts()
        assert any(p.name == "fn" for p in prompts)
        result = await mcp.render_prompt("fn")
        assert len(result.messages) == 1
        assert isinstance(result.messages[0].content, TextContent)
        assert result.messages[0].content.text == "Hello, world!"

    async def test_prompt_decorator_with_name(self):
        """name= overrides the function name for registration."""
        mcp = FastMCP()

        @mcp.prompt(name="custom_name")
        def fn() -> str:
            return "Hello, world!"

        prompts_list = await mcp.list_prompts()
        assert len(prompts_list) == 1
        prompt = next(p for p in prompts_list if p.name == "custom_name")
        assert prompt.name == "custom_name"
        content = await prompt.render()
        assert isinstance(content, PromptResult)
        assert isinstance(content.messages[0].content, TextContent)
        assert content.messages[0].content.text == "Hello, world!"

    async def test_prompt_decorator_with_description(self):
        """description= is carried through to the registered prompt."""
        mcp = FastMCP()

        @mcp.prompt(description="A custom description")
        def fn() -> str:
            return "Hello, world!"

        prompts_list = await mcp.list_prompts()
        assert len(prompts_list) == 1
        prompt = next(p for p in prompts_list if p.name == "fn")
        assert prompt.description == "A custom description"
        content = await prompt.render()
        assert isinstance(content, PromptResult)
        assert isinstance(content.messages[0].content, TextContent)
        assert content.messages[0].content.text == "Hello, world!"

    async def test_prompt_decorator_with_parameters(self):
        """Function parameters become prompt arguments; defaults are optional."""
        mcp = FastMCP()

        @mcp.prompt
        def test_prompt(name: str, greeting: str = "Hello") -> str:
            return f"{greeting}, {name}!"

        prompts = await mcp.list_prompts()
        assert len(prompts) == 1
        prompt = next(p for p in prompts if p.name == "test_prompt")
        assert prompt.arguments is not None
        assert len(prompt.arguments) == 2
        assert prompt.arguments[0].name == "name"
        assert prompt.arguments[0].required is True
        assert prompt.arguments[1].name == "greeting"
        assert prompt.arguments[1].required is False
        # Default value used when the optional argument is omitted.
        result = await mcp.render_prompt("test_prompt", {"name": "World"})
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Hello, World!"
        # Explicit value overrides the default.
        result = await mcp.render_prompt(
            "test_prompt", {"name": "World", "greeting": "Hi"}
        )
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Hi, World!"

    async def test_prompt_decorator_instance_method(self):
        """Bound instance methods can be registered via Prompt.from_function."""
        mcp = FastMCP()

        class MyClass:
            def __init__(self, prefix: str):
                self.prefix = prefix

            def test_prompt(self) -> str:
                return f"{self.prefix} Hello, world!"

        obj = MyClass("My prefix:")
        mcp.add_prompt(Prompt.from_function(obj.test_prompt, name="test_prompt"))
        result = await mcp.render_prompt("test_prompt")
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "My prefix: Hello, world!"

    async def test_prompt_decorator_classmethod(self):
        """Bound classmethods can be registered via Prompt.from_function."""
        mcp = FastMCP()

        class MyClass:
            prefix = "Class prefix:"

            @classmethod
            def test_prompt(cls) -> str:
                return f"{cls.prefix} Hello, world!"

        mcp.add_prompt(Prompt.from_function(MyClass.test_prompt, name="test_prompt"))
        result = await mcp.render_prompt("test_prompt")
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Class prefix: Hello, world!"

    async def test_prompt_decorator_classmethod_error(self):
        """Decorating a raw classmethod object at class-body time is rejected."""
        mcp = FastMCP()
        with pytest.raises(TypeError, match="classmethod"):

            class MyClass:
                @mcp.prompt
                @classmethod
                def test_prompt(cls) -> None:
                    pass

    async def test_prompt_decorator_staticmethod(self):
        """@mcp.prompt stacked over @staticmethod registers and renders."""
        mcp = FastMCP()

        class MyClass:
            @mcp.prompt
            @staticmethod
            def test_prompt() -> str:
                return "Static Hello, world!"

        result = await mcp.render_prompt("test_prompt")
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Static Hello, world!"

    async def test_prompt_decorator_async_function(self):
        """Async prompt functions are awaited during rendering."""
        mcp = FastMCP()

        @mcp.prompt
        async def test_prompt() -> str:
            return "Async Hello, world!"

        result = await mcp.render_prompt("test_prompt")
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Async Hello, world!"

    async def test_prompt_decorator_with_tags(self):
        """Test that the prompt decorator properly sets tags."""
        mcp = FastMCP()

        @mcp.prompt(tags={"example", "test-tag"})
        def sample_prompt() -> str:
            return "Hello, world!"

        prompts = await mcp.list_prompts()
        assert len(prompts) == 1
        prompt = next(p for p in prompts if p.name == "sample_prompt")
        assert prompt.tags == {"example", "test-tag"}

    async def test_prompt_decorator_with_string_name(self):
        """Test that @prompt(\"custom_name\") syntax works correctly."""
        mcp = FastMCP()

        @mcp.prompt("string_named_prompt")
        def my_function() -> str:
            """A function with a string name."""
            return "Hello from string named prompt!"

        prompts = await mcp.list_prompts()
        assert any(p.name == "string_named_prompt" for p in prompts)
        assert not any(p.name == "my_function" for p in prompts)
        result = await mcp.render_prompt("string_named_prompt")
        assert len(result.messages) == 1
        assert isinstance(result.messages[0].content, TextContent)
        assert result.messages[0].content.text == "Hello from string named prompt!"

    async def test_prompt_direct_function_call(self):
        """Test that prompts can be registered via direct function call."""
        from typing import cast

        from fastmcp.prompts.function_prompt import DecoratedPrompt

        mcp = FastMCP()

        def standalone_function() -> str:
            """A standalone function to be registered."""
            return "Hello from direct call!"

        result_fn = mcp.prompt(standalone_function, name="direct_call_prompt")
        # In new decorator mode, returns the function with metadata
        decorated = cast(DecoratedPrompt, result_fn)
        assert hasattr(result_fn, "__fastmcp__")
        assert decorated.__fastmcp__.name == "direct_call_prompt"
        assert result_fn is standalone_function
        prompts = await mcp.list_prompts()
        prompt = next(p for p in prompts if p.name == "direct_call_prompt")
        # Prompt is registered separately, not same object as decorated function
        assert prompt.name == "direct_call_prompt"
        result = await mcp.render_prompt("direct_call_prompt")
        assert len(result.messages) == 1
        assert isinstance(result.messages[0].content, TextContent)
        assert result.messages[0].content.text == "Hello from direct call!"

    async def test_prompt_decorator_conflicting_names_error(self):
        """Test that providing both positional and keyword names raises an error."""
        mcp = FastMCP()
        with pytest.raises(
            TypeError,
            match="Cannot specify both a name as first argument and as keyword argument",
        ):

            @mcp.prompt("positional_name", name="keyword_name")
            def my_function() -> str:
                return "Hello, world!"

    async def test_prompt_decorator_staticmethod_order(self):
        """Test that both decorator orders work for static methods"""
        # NOTE(review): despite the docstring, only one order
        # (@mcp.prompt above @staticmethod) is exercised here — identical to
        # test_prompt_decorator_staticmethod. Confirm whether the reversed
        # order is meant to be covered too.
        mcp = FastMCP()

        class MyClass:
            @mcp.prompt
            @staticmethod
            def test_prompt() -> str:
                return "Static Hello, world!"

        result = await mcp.render_prompt("test_prompt")
        assert len(result.messages) == 1
        message = result.messages[0]
        assert isinstance(message.content, TextContent)
        assert message.content.text == "Static Hello, world!"

    async def test_prompt_decorator_with_meta(self):
        """Test that meta parameter is passed through the prompt decorator."""
        mcp = FastMCP()
        meta_data = {"version": "3.0", "type": "prompt"}

        @mcp.prompt(meta=meta_data)
        def test_prompt(message: str) -> str:
            return f"Response: {message}"

        prompts = await mcp.list_prompts()
        prompt = next(p for p in prompts if p.name == "test_prompt")
        assert prompt.meta == meta_data
class TestPromptEnabled:
    """Enable/disable toggling of prompts via mcp.enable()/mcp.disable()."""

    async def test_toggle_enabled(self):
        """A prompt leaves listings when disabled and returns when re-enabled."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        prompts = await mcp.list_prompts()
        assert any(p.name == "sample_prompt" for p in prompts)
        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert not any(p.name == "sample_prompt" for p in prompts)
        mcp.enable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert any(p.name == "sample_prompt" for p in prompts)

    async def test_prompt_disabled(self):
        """Disabling removes the prompt from list_prompts entirely."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert len(prompts) == 0

    async def test_prompt_toggle_enabled(self):
        """Re-enabling after disable restores the prompt in listings."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert not any(p.name == "sample_prompt" for p in prompts)
        mcp.enable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert len(prompts) == 1

    async def test_prompt_toggle_disabled(self):
        """A disabled prompt is hidden from both listing and lookup."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert len(prompts) == 0
        # get_prompt() applies enabled transform, returns None for disabled
        prompt = await mcp.get_prompt("sample_prompt")
        assert prompt is None

    async def test_get_prompt_and_disable(self):
        """get_prompt works before disable and returns None afterwards."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        prompt = await mcp.get_prompt("sample_prompt")
        assert prompt is not None
        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        prompts = await mcp.list_prompts()
        assert len(prompts) == 0
        # get_prompt() applies enabled transform, returns None for disabled
        prompt = await mcp.get_prompt("sample_prompt")
        assert prompt is None

    async def test_cant_get_disabled_prompt(self):
        """Direct lookup of a disabled prompt yields None."""
        mcp = FastMCP()

        @mcp.prompt
        def sample_prompt() -> str:
            return "Hello, world!"

        mcp.disable(names={"sample_prompt"}, components={"prompt"})
        # get_prompt() applies enabled transform, returns None for disabled
        prompt = await mcp.get_prompt("sample_prompt")
        assert prompt is None
class TestPromptTags:
    """Tag-based include/exclude filtering of prompts."""

    def create_server(self, include_tags=None, exclude_tags=None):
        """Build a server with two tagged prompts and apply tag filters.

        prompt_1 carries tags {"a", "b"}; prompt_2 carries {"b", "c"}.
        include_tags uses enable(..., only=True); exclude_tags uses disable().
        """
        mcp = FastMCP()

        @mcp.prompt(tags={"a", "b"})
        def prompt_1() -> str:
            return "1"

        @mcp.prompt(tags={"b", "c"})
        def prompt_2() -> str:
            return "2"

        if include_tags:
            mcp.enable(tags=include_tags, only=True)
        if exclude_tags:
            mcp.disable(tags=exclude_tags)
        return mcp

    async def test_include_tags_all_prompts(self):
        """Including a tag present on every prompt keeps them all."""
        mcp = self.create_server(include_tags={"a", "b"})
        prompts = await mcp.list_prompts()
        assert {p.name for p in prompts} == {"prompt_1", "prompt_2"}

    async def test_include_tags_some_prompts(self):
        """Including a tag held by one prompt filters out the other."""
        mcp = self.create_server(include_tags={"a"})
        prompts = await mcp.list_prompts()
        assert {p.name for p in prompts} == {"prompt_1"}

    async def test_exclude_tags_all_prompts(self):
        """Excluding tags covering every prompt empties the listing."""
        mcp = self.create_server(exclude_tags={"a", "b"})
        prompts = await mcp.list_prompts()
        assert {p.name for p in prompts} == set()

    async def test_exclude_tags_some_prompts(self):
        """Excluding a tag held by one prompt leaves the other visible."""
        mcp = self.create_server(exclude_tags={"a"})
        prompts = await mcp.list_prompts()
        assert {p.name for p in prompts} == {"prompt_2"}

    async def test_exclude_takes_precedence_over_include(self):
        """A prompt matching both filters is excluded."""
        mcp = self.create_server(exclude_tags={"a"}, include_tags={"b"})
        prompts = await mcp.list_prompts()
        assert {p.name for p in prompts} == {"prompt_2"}

    async def test_read_prompt_includes_tags(self):
        """Lookup honors include filtering: kept prompts render, others are None."""
        mcp = self.create_server(include_tags={"a"})
        # _get_prompt applies enabled transform (tag filtering)
        prompt = await mcp._get_prompt("prompt_1")
        result = await prompt.render({})
        assert result.messages[0].content.text == "1"
        prompt = await mcp.get_prompt("prompt_2")
        assert prompt is None

    async def test_read_prompt_excludes_tags(self):
        """Lookup honors exclude filtering: excluded prompts are None."""
        mcp = self.create_server(exclude_tags={"a"})
        # get_prompt applies enabled transform (tag filtering)
        prompt = await mcp.get_prompt("prompt_1")
        assert prompt is None
        prompt = await mcp.get_prompt("prompt_2")
        result = await prompt.render({})
        assert result.messages[0].content.text == "2"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_local_provider_prompts.py",
"license": "Apache License 2.0",
"lines": 363,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/test_local_provider_resources.py | """Tests for resource and template behavior in LocalProvider.
Tests cover:
- Resource context injection
- Resource templates and URI parsing
- Resource template context injection
- Resource decorator patterns
- Template decorator patterns
"""
import pytest
from mcp.types import TextResourceContents
from pydantic import AnyUrl
from fastmcp import Client, Context, FastMCP
from fastmcp.exceptions import NotFoundError
from fastmcp.resources import (
Resource,
ResourceContent,
ResourceResult,
ResourceTemplate,
)
class TestResourceContext:
    """Context injection into resource functions."""

    async def test_resource_with_context_annotation_gets_context(self):
        """A ctx: Context parameter is injected when reading via a client."""
        mcp = FastMCP()

        @mcp.resource("resource://test")
        def resource_with_context(ctx: Context) -> str:
            assert isinstance(ctx, Context)
            return ctx.request_id

        async with Client(mcp) as client:
            contents = await client.read_resource(AnyUrl("resource://test"))
            first = contents[0]
            assert isinstance(first, TextResourceContents)
            assert first.text == "1"
class TestResourceTemplates:
    """URI-template registration, parameter matching, and rendering."""

    async def test_resource_with_params_not_in_uri(self):
        """Test that a resource with function parameters raises an error if the URI
        parameters don't match"""
        mcp = FastMCP()
        with pytest.raises(
            ValueError,
            match="URI template must contain at least one parameter",
        ):

            @mcp.resource("resource://data")
            def get_data_fn(param: str) -> str:
                return f"Data: {param}"

    async def test_resource_with_uri_params_without_args(self):
        """Test that a resource with URI parameters is automatically a template"""
        mcp = FastMCP()
        with pytest.raises(
            ValueError,
            match="URI parameters .* must be a subset of the function arguments",
        ):

            @mcp.resource("resource://{param}")
            def get_data() -> str:
                return "Data"

    async def test_resource_with_untyped_params(self):
        """Test that a resource with untyped parameters raises an error"""
        # NOTE(review): nothing here asserts that an error is raised —
        # registration simply succeeds. Either the docstring is stale or a
        # pytest.raises block was lost; confirm the intended behavior.
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def get_data(param) -> str:
            return "Data"

    async def test_resource_matching_params(self):
        """Test that a resource with matching URI and function parameters works"""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data")
        def get_data(name: str) -> str:
            return f"Data for {name}"

        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Data for test"

    async def test_resource_mismatched_params(self):
        """Test that mismatched parameters raise an error"""
        mcp = FastMCP()
        with pytest.raises(
            ValueError,
            match="Required function arguments .* must be a subset of the URI path parameters",
        ):

            @mcp.resource("resource://{name}/data")
            def get_data(user: str) -> str:
                return f"Data for {user}"

    async def test_resource_multiple_params(self):
        """Test that multiple parameters work correctly"""
        mcp = FastMCP()

        @mcp.resource("resource://{org}/{repo}/data")
        def get_data(org: str, repo: str) -> str:
            return f"Data for {org}/{repo}"

        result = await mcp.read_resource("resource://cursor/fastmcp/data")
        assert result.contents[0].content == "Data for cursor/fastmcp"

    async def test_resource_multiple_mismatched_params(self):
        """Test that mismatched parameters raise an error"""
        mcp = FastMCP()
        with pytest.raises(
            ValueError,
            match="Required function arguments .* must be a subset of the URI path parameters",
        ):

            @mcp.resource("resource://{org}/{repo}/data")
            def get_data_mismatched(org: str, repo_2: str) -> str:
                return f"Data for {org}"

    async def test_template_with_varkwargs(self):
        """Test that a template can have **kwargs."""
        mcp = FastMCP()

        @mcp.resource("test://{x}/{y}/{z}")
        def func(**kwargs: int) -> str:
            return str(sum(int(v) for v in kwargs.values()))

        result = await mcp.read_resource("test://1/2/3")
        assert result.contents[0].content == "6"

    async def test_template_with_default_params(self):
        """Test that a template can have default parameters."""
        mcp = FastMCP()

        @mcp.resource("math://add/{x}")
        def add(x: int, y: int = 10) -> str:
            return str(int(x) + y)

        # Only URI-bound parameters appear in the template itself.
        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].uri_template == "math://add/{x}"
        result = await mcp.read_resource("math://add/5")
        assert result.contents[0].content == "15"
        result2 = await mcp.read_resource("math://add/7")
        assert result2.contents[0].content == "17"

    async def test_template_to_resource_conversion(self):
        """Test that a template can be converted to a resource."""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data")
        def get_data(name: str) -> str:
            return f"Data for {name}"

        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].uri_template == "resource://{name}/data"
        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Data for test"

    async def test_template_decorator_with_tags(self):
        """tags= on a templated resource is carried to the template."""
        mcp = FastMCP()

        @mcp.resource("resource://{param}", tags={"template", "test-tag"})
        def template_resource(param: str) -> str:
            return f"Template resource: {param}"

        templates = await mcp.list_resource_templates()
        template = next(t for t in templates if t.uri_template == "resource://{param}")
        assert template.tags == {"template", "test-tag"}

    async def test_template_decorator_wildcard_param(self):
        """A {param*} wildcard captures the remainder of the path, slashes included."""
        mcp = FastMCP()

        @mcp.resource("resource://{param*}")
        def template_resource(param: str) -> str:
            return f"Template resource: {param}"

        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Template resource: test/data"

    async def test_template_with_query_params(self):
        """Test RFC 6570 query parameters in resource templates."""
        mcp = FastMCP()

        @mcp.resource("data://{id}{?format,limit}")
        def get_data(id: str, format: str = "json", limit: int = 10) -> str:
            return f"id={id}, format={format}, limit={limit}"

        # Omitted query params fall back to the function defaults.
        result = await mcp.read_resource("data://123")
        assert result.contents[0].content == "id=123, format=json, limit=10"
        result = await mcp.read_resource("data://123?format=xml")
        assert result.contents[0].content == "id=123, format=xml, limit=10"
        result = await mcp.read_resource("data://123?format=csv&limit=50")
        assert result.contents[0].content == "id=123, format=csv, limit=50"

    async def test_templates_match_in_order_of_definition(self):
        """If a wildcard template is defined first, it will take priority."""
        mcp = FastMCP()

        @mcp.resource("resource://{param*}")
        def template_resource(param: str) -> str:
            return f"Template resource 1: {param}"

        @mcp.resource("resource://{x}/{y}")
        def template_resource_with_params(x: str, y: str) -> str:
            return f"Template resource 2: {x}/{y}"

        result = await mcp.read_resource("resource://a/b/c")
        assert result.contents[0].content == "Template resource 1: a/b/c"
        result = await mcp.read_resource("resource://a/b")
        assert result.contents[0].content == "Template resource 1: a/b"

    async def test_templates_shadow_each_other_reorder(self):
        """If a wildcard template is defined second, it will *not* take priority."""
        mcp = FastMCP()

        @mcp.resource("resource://{x}/{y}")
        def template_resource_with_params(x: str, y: str) -> str:
            return f"Template resource 1: {x}/{y}"

        @mcp.resource("resource://{param*}")
        def template_resource(param: str) -> str:
            return f"Template resource 2: {param}"

        result = await mcp.read_resource("resource://a/b/c")
        assert result.contents[0].content == "Template resource 2: a/b/c"
        result = await mcp.read_resource("resource://a/b")
        assert result.contents[0].content == "Template resource 1: a/b"

    async def test_resource_template_with_annotations(self):
        """Test that resource template annotations are visible."""
        mcp = FastMCP()

        @mcp.resource(
            "api://users/{user_id}",
            annotations={"httpMethod": "GET", "Cache-Control": "no-cache"},
        )
        def get_user(user_id: str) -> str:
            return f"User {user_id} data"

        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        template = templates[0]
        assert template.uri_template == "api://users/{user_id}"
        assert template.annotations is not None
        # "Cache-Control" is not a valid attribute name, so getattr is used.
        assert hasattr(template.annotations, "httpMethod")
        assert getattr(template.annotations, "httpMethod") == "GET"
        assert hasattr(template.annotations, "Cache-Control")
        assert getattr(template.annotations, "Cache-Control") == "no-cache"
class TestResourceTemplateContext:
    """Context injection into templated resource functions."""

    async def test_resource_template_context(self):
        """A ctx: Context parameter is injected alongside URI parameters."""
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def resource_template(param: str, ctx: Context) -> str:
            assert isinstance(ctx, Context)
            return f"Resource template: {param} {ctx.request_id}"

        async with Client(mcp) as client:
            result = await client.read_resource(AnyUrl("resource://test"))
            assert isinstance(result[0], TextResourceContents)
            assert result[0].text.startswith("Resource template: test 1")

    async def test_resource_template_context_with_callable_object(self):
        """Context is also injected into callable-object templates."""
        mcp = FastMCP()

        class MyResource:
            def __call__(self, param: str, ctx: Context) -> str:
                return f"Resource template: {param} {ctx.request_id}"

        template = ResourceTemplate.from_function(
            MyResource(), uri_template="resource://{param}"
        )
        mcp.add_template(template)
        async with Client(mcp) as client:
            result = await client.read_resource(AnyUrl("resource://test"))
            assert isinstance(result[0], TextResourceContents)
            assert result[0].text.startswith("Resource template: test 1")
class TestResourceDecorator:
async def test_no_resources_before_decorator(self):
    """Reading an unregistered URI raises NotFoundError."""
    mcp = FastMCP()
    with pytest.raises(NotFoundError, match="Unknown resource"):
        await mcp.read_resource("resource://data")
async def test_resource_decorator(self):
    """A decorated function becomes readable at its registered URI."""
    mcp = FastMCP()

    @mcp.resource("resource://data")
    def get_data() -> str:
        return "Hello, world!"

    read_result = await mcp.read_resource("resource://data")
    first_content = read_result.contents[0]
    assert first_content.content == "Hello, world!"
async def test_resource_decorator_incorrect_usage(self):
    """Using @mcp.resource without parentheses is rejected with a TypeError."""
    mcp = FastMCP()
    with pytest.raises(
        TypeError, match="The @resource decorator was used incorrectly"
    ):

        @mcp.resource  # Missing parentheses #type: ignore
        def get_data() -> str:
            return "Hello, world!"
async def test_resource_decorator_with_name(self):
    """name= overrides the function name; the URI still resolves."""
    mcp = FastMCP()

    @mcp.resource("resource://data", name="custom-data")
    def get_data() -> str:
        return "Hello, world!"

    resources = await mcp.list_resources()
    assert len(resources) == 1
    assert resources[0].name == "custom-data"
    result = await mcp.read_resource("resource://data")
    assert result.contents[0].content == "Hello, world!"
async def test_resource_decorator_with_description(self):
    """description= is carried through to the registered resource."""
    mcp = FastMCP()

    @mcp.resource("resource://data", description="Data resource")
    def get_data() -> str:
        return "Hello, world!"

    resources = await mcp.list_resources()
    assert len(resources) == 1
    assert resources[0].description == "Data resource"
async def test_resource_decorator_with_tags(self):
"""Test that the resource decorator properly sets tags."""
mcp = FastMCP()
@mcp.resource("resource://data", tags={"example", "test-tag"})
def get_data() -> str:
return "Hello, world!"
resources = await mcp.list_resources()
assert len(resources) == 1
assert resources[0].tags == {"example", "test-tag"}
async def test_resource_decorator_instance_method(self):
mcp = FastMCP()
class MyClass:
def __init__(self, prefix: str):
self.prefix = prefix
def get_data(self) -> str:
return f"{self.prefix} Hello, world!"
obj = MyClass("My prefix:")
mcp.add_resource(
Resource.from_function(
obj.get_data, uri="resource://data", name="instance-resource"
)
)
result = await mcp.read_resource("resource://data")
assert result.contents[0].content == "My prefix: Hello, world!"
async def test_resource_decorator_classmethod(self):
mcp = FastMCP()
class MyClass:
prefix = "Class prefix:"
@classmethod
def get_data(cls) -> str:
return f"{cls.prefix} Hello, world!"
mcp.add_resource(
Resource.from_function(
MyClass.get_data, uri="resource://data", name="class-resource"
)
)
result = await mcp.read_resource("resource://data")
assert result.contents[0].content == "Class prefix: Hello, world!"
async def test_resource_decorator_classmethod_error(self):
mcp = FastMCP()
with pytest.raises(TypeError, match="classmethod"):
class MyClass:
@mcp.resource("resource://data")
@classmethod
def get_data(cls) -> None:
pass
async def test_resource_decorator_staticmethod(self):
mcp = FastMCP()
class MyClass:
@mcp.resource("resource://data")
@staticmethod
def get_data() -> str:
return "Static Hello, world!"
result = await mcp.read_resource("resource://data")
assert result.contents[0].content == "Static Hello, world!"
async def test_resource_decorator_async_function(self):
mcp = FastMCP()
@mcp.resource("resource://data")
async def get_data() -> str:
return "Async Hello, world!"
result = await mcp.read_resource("resource://data")
assert result.contents[0].content == "Async Hello, world!"
async def test_resource_decorator_staticmethod_order(self):
"""Test that both decorator orders work for static methods"""
mcp = FastMCP()
class MyClass:
@mcp.resource("resource://data")
@staticmethod
def get_data() -> str:
return "Static Hello, world!"
result = await mcp.read_resource("resource://data")
assert result.contents[0].content == "Static Hello, world!"
async def test_resource_decorator_with_meta(self):
"""Test that meta parameter is passed through the resource decorator."""
mcp = FastMCP()
meta_data = {"version": "1.0", "author": "test"}
@mcp.resource("resource://data", meta=meta_data)
def get_data() -> str:
return "Hello, world!"
resources = await mcp.list_resources()
resource = next(r for r in resources if str(r.uri) == "resource://data")
assert resource.meta == meta_data
async def test_resource_content_with_meta_in_response(self):
"""Test that ResourceContent meta is passed through."""
mcp = FastMCP()
@mcp.resource("resource://widget")
def get_widget() -> ResourceResult:
return ResourceResult(
[
ResourceContent(
content="<widget>content</widget>",
mime_type="text/html",
meta={"csp": "script-src 'self'", "version": "1.0"},
)
]
)
result = await mcp.read_resource("resource://widget")
assert len(result.contents) == 1
assert result.contents[0].content == "<widget>content</widget>"
assert result.contents[0].mime_type == "text/html"
assert result.contents[0].meta == {"csp": "script-src 'self'", "version": "1.0"}
async def test_resource_content_binary_with_meta(self):
"""Test that ResourceContent with binary content and meta works."""
mcp = FastMCP()
@mcp.resource("resource://binary")
def get_binary() -> ResourceResult:
return ResourceResult(
[
ResourceContent(
content=b"\x00\x01\x02",
meta={"encoding": "raw"},
)
]
)
result = await mcp.read_resource("resource://binary")
assert len(result.contents) == 1
assert result.contents[0].content == b"\x00\x01\x02"
assert result.contents[0].meta == {"encoding": "raw"}
async def test_resource_content_without_meta(self):
"""Test that ResourceContent without meta works (meta is None)."""
mcp = FastMCP()
@mcp.resource("resource://plain")
def get_plain() -> ResourceResult:
return ResourceResult([ResourceContent(content="plain content")])
result = await mcp.read_resource("resource://plain")
assert len(result.contents) == 1
assert result.contents[0].content == "plain content"
assert result.contents[0].meta is None
class TestTemplateDecorator:
    """Tests for @mcp.resource on templated URIs (resource templates)."""

    async def test_template_decorator(self):
        """A URI with placeholders registers a template, not a static resource."""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data")
        def get_data(name: str) -> str:
            return f"Data for {name}"

        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].name == "get_data"
        assert templates[0].uri_template == "resource://{name}/data"
        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Data for test"

    async def test_template_decorator_incorrect_usage(self):
        """Using @mcp.resource without parentheses raises a helpful TypeError."""
        mcp = FastMCP()
        with pytest.raises(
            TypeError, match="The @resource decorator was used incorrectly"
        ):

            @mcp.resource  # Missing parentheses #type: ignore
            def get_data(name: str) -> str:
                return f"Data for {name}"

    async def test_template_decorator_with_name(self):
        """The name parameter overrides the function name."""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data", name="custom-template")
        def get_data(name: str) -> str:
            return f"Data for {name}"

        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].name == "custom-template"
        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Data for test"

    async def test_template_decorator_with_description(self):
        """The description parameter is surfaced in template listings."""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data", description="Template description")
        def get_data(name: str) -> str:
            return f"Data for {name}"

        templates = await mcp.list_resource_templates()
        assert len(templates) == 1
        assert templates[0].description == "Template description"

    async def test_template_decorator_instance_method(self):
        """Bound instance methods work via ResourceTemplate.from_function."""
        mcp = FastMCP()

        class MyClass:
            def __init__(self, prefix: str):
                self.prefix = prefix

            def get_data(self, name: str) -> str:
                return f"{self.prefix} Data for {name}"

        obj = MyClass("My prefix:")
        template = ResourceTemplate.from_function(
            obj.get_data,
            uri_template="resource://{name}/data",
            name="instance-template",
        )
        mcp.add_template(template)
        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "My prefix: Data for test"

    async def test_template_decorator_classmethod(self):
        """Bound classmethods work via ResourceTemplate.from_function."""
        mcp = FastMCP()

        class MyClass:
            prefix = "Class prefix:"

            @classmethod
            def get_data(cls, name: str) -> str:
                return f"{cls.prefix} Data for {name}"

        template = ResourceTemplate.from_function(
            MyClass.get_data,
            uri_template="resource://{name}/data",
            name="class-template",
        )
        mcp.add_template(template)
        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Class prefix: Data for test"

    async def test_template_decorator_staticmethod(self):
        """@mcp.resource stacked above @staticmethod works for templates."""
        mcp = FastMCP()

        class MyClass:
            @mcp.resource("resource://{name}/data")
            @staticmethod
            def get_data(name: str) -> str:
                return f"Static Data for {name}"

        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Static Data for test"

    async def test_template_decorator_async_function(self):
        """Async template functions are awaited when read."""
        mcp = FastMCP()

        @mcp.resource("resource://{name}/data")
        async def get_data(name: str) -> str:
            return f"Async Data for {name}"

        result = await mcp.read_resource("resource://test/data")
        assert result.contents[0].content == "Async Data for test"

    async def test_template_decorator_with_tags(self):
        """Test that the template decorator properly sets tags."""
        mcp = FastMCP()

        @mcp.resource("resource://{param}", tags={"template", "test-tag"})
        def template_resource(param: str) -> str:
            return f"Template resource: {param}"

        templates = await mcp.list_resource_templates()
        template = next(t for t in templates if t.uri_template == "resource://{param}")
        assert template.tags == {"template", "test-tag"}

    async def test_template_decorator_wildcard_param(self):
        """Wildcard ({param*}) placeholders are accepted in template URIs."""
        mcp = FastMCP()

        @mcp.resource("resource://{param*}")
        def template_resource(param: str) -> str:
            return f"Template resource: {param}"

        templates = await mcp.list_resource_templates()
        template = next(t for t in templates if t.uri_template == "resource://{param*}")
        assert template.uri_template == "resource://{param*}"
        assert template.name == "template_resource"

    async def test_template_decorator_with_meta(self):
        """Test that meta parameter is passed through the template decorator."""
        mcp = FastMCP()
        meta_data = {"version": "2.0", "template": "test"}

        @mcp.resource("resource://{param}/data", meta=meta_data)
        def get_template_data(param: str) -> str:
            return f"Data for {param}"

        templates = await mcp.list_resource_templates()
        template = next(
            t for t in templates if t.uri_template == "resource://{param}/data"
        )
        assert template.meta == meta_data
class TestResourceTags:
    """Tag-based include/exclude filtering for static resources."""

    def create_server(self, include_tags=None, exclude_tags=None):
        """Build a server with two tagged resources and apply tag filters."""
        mcp = FastMCP()

        @mcp.resource("resource://1", tags={"a", "b"})
        def resource_1() -> str:
            return "1"

        @mcp.resource("resource://2", tags={"b", "c"})
        def resource_2() -> str:
            return "2"

        if include_tags:
            mcp.enable(tags=include_tags, only=True)
        if exclude_tags:
            mcp.disable(tags=exclude_tags)
        return mcp

    async def test_include_tags_all_resources(self):
        server = self.create_server(include_tags={"a", "b"})
        names = {r.name for r in await server.list_resources()}
        assert names == {"resource_1", "resource_2"}

    async def test_include_tags_some_resources(self):
        server = self.create_server(include_tags={"a", "z"})
        names = {r.name for r in await server.list_resources()}
        assert names == {"resource_1"}

    async def test_exclude_tags_all_resources(self):
        server = self.create_server(exclude_tags={"a", "b"})
        names = {r.name for r in await server.list_resources()}
        assert names == set()

    async def test_exclude_tags_some_resources(self):
        server = self.create_server(exclude_tags={"a"})
        names = {r.name for r in await server.list_resources()}
        assert names == {"resource_2"}

    async def test_exclude_precedence(self):
        # Exclusion wins when a resource matches both filters.
        server = self.create_server(exclude_tags={"a"}, include_tags={"b"})
        names = {r.name for r in await server.list_resources()}
        assert names == {"resource_2"}

    async def test_read_included_resource(self):
        server = self.create_server(include_tags={"a"})
        result = await server.read_resource("resource://1")
        assert result.contents[0].content == "1"
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await server.read_resource("resource://2")

    async def test_read_excluded_resource(self):
        server = self.create_server(exclude_tags={"a"})
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await server.read_resource("resource://1")
class TestResourceEnabled:
    """Enabling and disabling individual resources by URI."""

    async def test_toggle_enabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        uris = {str(r.uri) for r in await mcp.list_resources()}
        assert "resource://data" in uris

        mcp.disable(names={"resource://data"}, components={"resource"})
        uris = {str(r.uri) for r in await mcp.list_resources()}
        assert "resource://data" not in uris

        mcp.enable(names={"resource://data"}, components={"resource"})
        uris = {str(r.uri) for r in await mcp.list_resources()}
        assert "resource://data" in uris

    async def test_resource_disabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        mcp.disable(names={"resource://data"}, components={"resource"})
        assert not await mcp.list_resources()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://data")

    async def test_resource_toggle_enabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        mcp.disable(names={"resource://data"}, components={"resource"})
        uris = {str(r.uri) for r in await mcp.list_resources()}
        assert "resource://data" not in uris

        mcp.enable(names={"resource://data"}, components={"resource"})
        assert len(await mcp.list_resources()) == 1

    async def test_resource_toggle_disabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        mcp.disable(names={"resource://data"}, components={"resource"})
        assert not await mcp.list_resources()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://data")

    async def test_get_resource_and_disable(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        resource = await mcp.get_resource("resource://data")
        assert resource is not None

        mcp.disable(names={"resource://data"}, components={"resource"})
        assert not await mcp.list_resources()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://data")

    async def test_cant_read_disabled_resource(self):
        mcp = FastMCP()

        @mcp.resource("resource://data")
        def sample_resource() -> str:
            return "Hello, world!"

        mcp.disable(names={"resource://data"}, components={"resource"})
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://data")
class TestResourceTemplatesTags:
    """Tag-based include/exclude filtering for resource templates."""

    def create_server(self, include_tags=None, exclude_tags=None):
        """Build a server with two tagged templates and apply tag filters."""
        mcp = FastMCP()

        @mcp.resource("resource://1/{param}", tags={"a", "b"})
        def template_resource_1(param: str) -> str:
            return f"Template resource 1: {param}"

        @mcp.resource("resource://2/{param}", tags={"b", "c"})
        def template_resource_2(param: str) -> str:
            return f"Template resource 2: {param}"

        if include_tags:
            mcp.enable(tags=include_tags, only=True)
        if exclude_tags:
            mcp.disable(tags=exclude_tags)
        return mcp

    async def test_include_tags_all_resources(self):
        server = self.create_server(include_tags={"a", "b"})
        names = {t.name for t in await server.list_resource_templates()}
        assert names == {"template_resource_1", "template_resource_2"}

    async def test_include_tags_some_resources(self):
        server = self.create_server(include_tags={"a"})
        names = {t.name for t in await server.list_resource_templates()}
        assert names == {"template_resource_1"}

    async def test_exclude_tags_all_resources(self):
        server = self.create_server(exclude_tags={"a", "b"})
        names = {t.name for t in await server.list_resource_templates()}
        assert names == set()

    async def test_exclude_tags_some_resources(self):
        server = self.create_server(exclude_tags={"a"})
        names = {t.name for t in await server.list_resource_templates()}
        assert names == {"template_resource_2"}

    async def test_exclude_takes_precedence_over_include(self):
        # Exclusion wins when a template matches both filters.
        server = self.create_server(exclude_tags={"a"}, include_tags={"b"})
        names = {t.name for t in await server.list_resource_templates()}
        assert names == {"template_resource_2"}

    async def test_read_resource_template_includes_tags(self):
        server = self.create_server(include_tags={"a"})
        result = await server.read_resource("resource://1/x")
        assert result.contents[0].content == "Template resource 1: x"
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await server.read_resource("resource://2/x")

    async def test_read_resource_template_excludes_tags(self):
        server = self.create_server(exclude_tags={"a"})
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await server.read_resource("resource://1/x")
        result = await server.read_resource("resource://2/x")
        assert result.contents[0].content == "Template resource 2: x"
class TestResourceTemplateEnabled:
    """Enabling and disabling individual resource templates by URI template."""

    async def test_toggle_enabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        listed = {t.uri_template for t in await mcp.list_resource_templates()}
        assert "resource://{param}" in listed

        mcp.disable(names={"resource://{param}"}, components={"template"})
        listed = {t.uri_template for t in await mcp.list_resource_templates()}
        assert "resource://{param}" not in listed

        mcp.enable(names={"resource://{param}"}, components={"template"})
        listed = {t.uri_template for t in await mcp.list_resource_templates()}
        assert "resource://{param}" in listed

    async def test_template_disabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        mcp.disable(names={"resource://{param}"}, components={"template"})
        assert not await mcp.list_resource_templates()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://test")

    async def test_template_toggle_enabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        mcp.disable(names={"resource://{param}"}, components={"template"})
        listed = {t.uri_template for t in await mcp.list_resource_templates()}
        assert "resource://{param}" not in listed

        mcp.enable(names={"resource://{param}"}, components={"template"})
        assert len(await mcp.list_resource_templates()) == 1

    async def test_template_toggle_disabled(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        mcp.disable(names={"resource://{param}"}, components={"template"})
        assert not await mcp.list_resource_templates()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://test")

    async def test_get_template_and_disable(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        template = await mcp.get_resource_template("resource://{param}")
        assert template is not None

        mcp.disable(names={"resource://{param}"}, components={"template"})
        assert not await mcp.list_resource_templates()
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://test")

    async def test_cant_read_disabled_template(self):
        mcp = FastMCP()

        @mcp.resource("resource://{param}")
        def sample_template(param: str) -> str:
            return f"Template: {param}"

        mcp.disable(names={"resource://{param}"}, components={"template"})
        with pytest.raises(NotFoundError, match="Unknown resource"):
            await mcp.read_resource("resource://test")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_local_provider_resources.py",
"license": "Apache License 2.0",
"lines": 725,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/providers/openapi/components.py | """OpenAPI component classes: Tool, Resource, and ResourceTemplate."""
from __future__ import annotations
import json
import re
import warnings
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import httpx
from mcp.types import ToolAnnotations
from pydantic.networks import AnyUrl
import fastmcp
from fastmcp.resources import (
Resource,
ResourceContent,
ResourceResult,
ResourceTemplate,
)
from fastmcp.server.dependencies import get_http_headers
from fastmcp.server.tasks.config import TaskConfig
from fastmcp.tools.tool import Tool, ToolResult
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.openapi import HTTPRoute
from fastmcp.utilities.openapi.director import RequestDirector
if TYPE_CHECKING:
from fastmcp.server import Context
_SAFE_HEADERS = frozenset(
{
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"connection",
"content-length",
"content-type",
"host",
"user-agent",
}
)
def _redact_headers(headers: httpx.Headers) -> dict[str, str]:
return {k: v if k.lower() in _SAFE_HEADERS else "***" for k, v in headers.items()}
__all__ = [
"OpenAPIResource",
"OpenAPIResourceTemplate",
"OpenAPITool",
"_extract_mime_type_from_route",
]
logger = get_logger(__name__)
# Default MIME type when no response content type can be inferred
_DEFAULT_MIME_TYPE = "application/json"
def _extract_mime_type_from_route(route: HTTPRoute) -> str:
"""Extract the primary MIME type from an HTTPRoute's response definitions.
Looks for the first successful response (2xx) and returns its content type.
Prefers JSON-compatible types when multiple are available.
Falls back to "application/json" when no response content type is declared.
"""
if not route.responses:
return _DEFAULT_MIME_TYPE
# Priority order for success status codes
success_codes = ["200", "201", "202", "204"]
response_info = None
for status_code in success_codes:
if status_code in route.responses:
response_info = route.responses[status_code]
break
# If no explicit success codes, try any 2xx response
if response_info is None:
for status_code, resp_info in route.responses.items():
if status_code.startswith("2"):
response_info = resp_info
break
if response_info is None or not response_info.content_schema:
return _DEFAULT_MIME_TYPE
# If there's only one content type, use it directly
content_types = list(response_info.content_schema.keys())
if len(content_types) == 1:
return content_types[0]
# When multiple types exist, prefer JSON-compatible types
json_compatible_types = [
"application/json",
"application/vnd.api+json",
"application/hal+json",
"application/ld+json",
"text/json",
]
for ct in json_compatible_types:
if ct in response_info.content_schema:
return ct
# Fall back to the first available content type
return content_types[0]
def _slugify(text: str) -> str:
"""Convert text to a URL-friendly slug format.
Only contains lowercase letters, uppercase letters, numbers, and underscores.
"""
if not text:
return ""
# Replace spaces and common separators with underscores
slug = re.sub(r"[\s\-\.]+", "_", text)
# Remove non-alphanumeric characters except underscores
slug = re.sub(r"[^a-zA-Z0-9_]", "", slug)
# Remove multiple consecutive underscores
slug = re.sub(r"_+", "_", slug)
# Remove leading/trailing underscores
slug = slug.strip("_")
return slug
class OpenAPITool(Tool):
    """Tool implementation for OpenAPI endpoints."""

    # OpenAPI-backed tools always execute inline; task mode is not supported.
    task_config: TaskConfig = TaskConfig(mode="forbidden")

    def __init__(
        self,
        client: httpx.AsyncClient,
        route: HTTPRoute,
        director: RequestDirector,
        name: str,
        description: str,
        parameters: dict[str, Any],
        output_schema: dict[str, Any] | None = None,
        tags: set[str] | None = None,
        annotations: ToolAnnotations | None = None,
        serializer: Callable[[Any], str] | None = None,  # Deprecated
    ):
        # Warn when the deprecated serializer is supplied (suppressible via
        # fastmcp.settings.deprecation_warnings).
        if serializer is not None and fastmcp.settings.deprecation_warnings:
            warnings.warn(
                "The `serializer` parameter is deprecated. "
                "Return ToolResult from your tools for full control over serialization. "
                "See https://gofastmcp.com/servers/tools#custom-serialization for migration examples.",
                DeprecationWarning,
                stacklevel=2,
            )
        super().__init__(
            name=name,
            description=description,
            parameters=parameters,
            output_schema=output_schema,
            tags=tags or set(),
            annotations=annotations,
            serializer=serializer,
        )
        # Shared HTTP transport plus the parsed route/director used to build requests.
        self._client = client
        self._route = route
        self._director = director

    def __repr__(self) -> str:
        return f"OpenAPITool(name={self.name!r}, method={self._route.method}, path={self._route.path})"

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        """Execute the HTTP request using RequestDirector."""
        # Build the request — errors here are programming/schema issues,
        # not HTTP failures, so we catch them separately.
        try:
            base_url = str(self._client.base_url) or "http://localhost"
            request = self._director.build(self._route, arguments, base_url)
            # Client default headers apply only where the director did not
            # already set a value.
            if self._client.headers:
                for key, value in self._client.headers.items():
                    if key not in request.headers:
                        request.headers[key] = value
            # Likewise for headers forwarded from the incoming MCP request.
            mcp_headers = get_http_headers()
            if mcp_headers:
                for key, value in mcp_headers.items():
                    if key not in request.headers:
                        request.headers[key] = value
        except Exception as e:
            raise ValueError(
                f"Error building request for {self._route.method.upper()} "
                f"{self._route.path}: {type(e).__name__}: {e}"
            ) from e
        # Send the request and process the response.
        try:
            logger.debug(
                f"run - sending request; headers: {_redact_headers(request.headers)}"
            )
            response = await self._client.send(request)
            response.raise_for_status()
            # Try to parse as JSON first
            try:
                result = response.json()
                # Handle structured content based on output schema
                if self.output_schema is not None:
                    if self.output_schema.get("x-fastmcp-wrap-result"):
                        # Schema says wrap the result under a "result" key.
                        structured_output = {"result": result}
                    else:
                        structured_output = result
                elif not isinstance(result, dict):
                    # No schema: non-dict JSON values still need wrapping.
                    structured_output = {"result": result}
                else:
                    structured_output = result
                # Structured content must be a dict for the MCP protocol.
                # Wrap non-dict values that slipped through (e.g. a backend
                # returning an array when the schema declared an object).
                if not isinstance(structured_output, dict):
                    structured_output = {"result": structured_output}
                return ToolResult(structured_content=structured_output)
            except json.JSONDecodeError:
                # Non-JSON payloads are returned as plain text content.
                return ToolResult(content=response.text)
        except httpx.HTTPStatusError as e:
            # Surface HTTP failures as ValueError with as much response
            # detail as can be extracted (JSON body preferred, then text).
            error_message = (
                f"HTTP error {e.response.status_code}: {e.response.reason_phrase}"
            )
            try:
                error_data = e.response.json()
                error_message += f" - {error_data}"
            except (json.JSONDecodeError, ValueError):
                if e.response.text:
                    error_message += f" - {e.response.text}"
            raise ValueError(error_message) from e
        except httpx.TimeoutException as e:
            raise ValueError(f"HTTP request timed out ({type(e).__name__})") from e
        except httpx.RequestError as e:
            raise ValueError(f"Request error ({type(e).__name__}): {e!s}") from e
class OpenAPIResource(Resource):
    """Resource implementation for OpenAPI endpoints."""

    # OpenAPI-backed resources always execute inline; task mode is not supported.
    task_config: TaskConfig = TaskConfig(mode="forbidden")

    def __init__(
        self,
        client: httpx.AsyncClient,
        route: HTTPRoute,
        director: RequestDirector,
        uri: str,
        name: str,
        description: str,
        mime_type: str = "application/json",
        tags: set[str] | None = None,
    ):
        super().__init__(
            uri=AnyUrl(uri),
            name=name,
            description=description,
            mime_type=mime_type,
            tags=tags or set(),
        )
        # Shared HTTP transport plus the parsed route/director for this endpoint.
        self._client = client
        self._route = route
        self._director = director

    def __repr__(self) -> str:
        return f"OpenAPIResource(name={self.name!r}, uri={self.uri!r}, path={self._route.path})"

    async def read(self) -> ResourceResult:
        """Fetch the resource data by making an HTTP request."""
        try:
            path = self._route.path
            resource_uri = str(self.uri)
            # If this is a templated resource, extract path parameters from the URI
            if "{" in path and "}" in path:
                parts = resource_uri.split("/")
                if len(parts) > 1:
                    path_params = {}
                    param_matches = re.findall(r"\{([^}]+)\}", path)
                    if param_matches:
                        # NOTE(review): parameter names are matched to trailing
                        # URI segments in reverse-sorted name order — this
                        # assumes the URI ends with one segment per template
                        # parameter; confirm against how URIs are generated.
                        param_matches.sort(reverse=True)
                        expected_param_count = len(parts) - 1
                        for i, param_name in enumerate(param_matches):
                            if i < expected_param_count:
                                param_value = parts[-1 - i]
                                path_params[param_name] = param_value
                    # Substitute each extracted value into the route path.
                    for param_name, param_value in path_params.items():
                        path = path.replace(f"{{{param_name}}}", str(param_value))
            # Build headers with correct precedence
            headers: dict[str, str] = {}
            if self._client.headers:
                headers.update(self._client.headers)
            mcp_headers = get_http_headers()
            if mcp_headers:
                # Headers forwarded from the incoming MCP request override
                # the client defaults.
                headers.update(mcp_headers)
            response = await self._client.request(
                method=self._route.method,
                url=path,
                headers=headers,
            )
            response.raise_for_status()
            # Dispatch on the response content type: JSON is re-serialized,
            # text-like bodies are returned as text, anything else as bytes.
            content_type = response.headers.get("content-type", "").lower()
            if "application/json" in content_type:
                result = response.json()
                return ResourceResult(
                    contents=[
                        ResourceContent(
                            content=json.dumps(result), mime_type="application/json"
                        )
                    ]
                )
            elif any(ct in content_type for ct in ["text/", "application/xml"]):
                return ResourceResult(
                    contents=[
                        ResourceContent(content=response.text, mime_type=self.mime_type)
                    ]
                )
            else:
                return ResourceResult(
                    contents=[
                        ResourceContent(
                            content=response.content, mime_type=self.mime_type
                        )
                    ]
                )
        except httpx.HTTPStatusError as e:
            # Surface HTTP failures as ValueError with as much response
            # detail as can be extracted (JSON body preferred, then text).
            error_message = (
                f"HTTP error {e.response.status_code}: {e.response.reason_phrase}"
            )
            try:
                error_data = e.response.json()
                error_message += f" - {error_data}"
            except (json.JSONDecodeError, ValueError):
                if e.response.text:
                    error_message += f" - {e.response.text}"
            raise ValueError(error_message) from e
        except httpx.TimeoutException as e:
            raise ValueError(f"HTTP request timed out ({type(e).__name__})") from e
        except httpx.RequestError as e:
            raise ValueError(f"Request error ({type(e).__name__}): {e!s}") from e
class OpenAPIResourceTemplate(ResourceTemplate):
    """Resource template implementation for OpenAPI endpoints."""

    # OpenAPI-backed templates always execute inline; task mode is not supported.
    task_config: TaskConfig = TaskConfig(mode="forbidden")

    def __init__(
        self,
        client: httpx.AsyncClient,
        route: HTTPRoute,
        director: RequestDirector,
        uri_template: str,
        name: str,
        description: str,
        parameters: dict[str, Any],
        tags: set[str] | None = None,
        mime_type: str = _DEFAULT_MIME_TYPE,
    ):
        super().__init__(
            uri_template=uri_template,
            name=name,
            description=description,
            parameters=parameters,
            tags=tags or set(),
            mime_type=mime_type,
        )
        # Shared HTTP transport plus the parsed route/director handed down to
        # each concrete resource this template creates.
        self._client = client
        self._route = route
        self._director = director

    def __repr__(self) -> str:
        return f"OpenAPIResourceTemplate(name={self.name!r}, uri_template={self.uri_template!r}, path={self._route.path})"

    async def create_resource(
        self,
        uri: str,
        params: dict[str, Any],
        context: Context | None = None,
    ) -> Resource:
        """Create a resource with the given parameters."""
        # The parameter values are baked into the concrete resource's name
        # so each instantiation is distinguishable.
        uri_parts = [f"{key}={value}" for key, value in params.items()]
        resource_name = f"{self.name}-{'-'.join(uri_parts)}"
        return OpenAPIResource(
            client=self._client,
            route=self._route,
            director=self._director,
            uri=uri,
            name=resource_name,
            description=self.description or f"Resource for {self._route.path}",
            mime_type=self.mime_type,
            tags=set(self._route.tags or []),
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/providers/openapi/components.py",
"license": "Apache License 2.0",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/providers/openapi/provider.py | """OpenAPIProvider for creating MCP components from OpenAPI specifications."""
from __future__ import annotations
from collections import Counter
from collections.abc import AsyncIterator, Sequence
from contextlib import asynccontextmanager
from typing import Any, Literal, cast
import httpx
from jsonschema_path import SchemaPath
from fastmcp.prompts import Prompt
from fastmcp.resources import Resource, ResourceTemplate
from fastmcp.server.providers.base import Provider
from fastmcp.server.providers.openapi.components import (
OpenAPIResource,
OpenAPIResourceTemplate,
OpenAPITool,
_extract_mime_type_from_route,
_slugify,
)
from fastmcp.server.providers.openapi.routing import (
DEFAULT_ROUTE_MAPPINGS,
ComponentFn,
MCPType,
RouteMap,
RouteMapFn,
_determine_route_type,
)
from fastmcp.tools.tool import Tool
from fastmcp.utilities.components import FastMCPComponent
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.openapi import (
HTTPRoute,
extract_output_schema_from_responses,
parse_openapi_to_http_routes,
)
from fastmcp.utilities.openapi.director import RequestDirector
from fastmcp.utilities.versions import VersionSpec, version_sort_key
__all__ = [
"OpenAPIProvider",
]
logger = get_logger(__name__)
DEFAULT_TIMEOUT: float = 30.0
class OpenAPIProvider(Provider):
    """Provider that creates MCP components from an OpenAPI specification.

    Components are created eagerly during initialization by parsing the OpenAPI
    spec. Each component makes HTTP calls to the described API endpoints.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.providers.openapi import OpenAPIProvider
        import httpx

        client = httpx.AsyncClient(base_url="https://api.example.com")
        provider = OpenAPIProvider(openapi_spec=spec, client=client)
        mcp = FastMCP("API Server")
        mcp.add_provider(provider)
        ```
    """

    def __init__(
        self,
        openapi_spec: dict[str, Any],
        client: httpx.AsyncClient | None = None,
        *,
        route_maps: list[RouteMap] | None = None,
        route_map_fn: RouteMapFn | None = None,
        mcp_component_fn: ComponentFn | None = None,
        mcp_names: dict[str, str] | None = None,
        tags: set[str] | None = None,
        validate_output: bool = True,
    ):
        """Initialize provider by parsing OpenAPI spec and creating components.

        Args:
            openapi_spec: OpenAPI schema as a dictionary
            client: Optional httpx AsyncClient for making HTTP requests.
                If not provided, a default client is created using the first
                server URL from the OpenAPI spec with a 30-second timeout.
                To customize timeout or other settings, pass your own client.
            route_maps: Optional list of RouteMap objects defining route mappings
            route_map_fn: Optional callable for advanced route type mapping
            mcp_component_fn: Optional callable for component customization
            mcp_names: Optional dictionary mapping operationId to component names
            tags: Optional set of tags to add to all components
            validate_output: If True (default), tools use the output schema
                extracted from the OpenAPI spec for response validation. If
                False, a permissive schema is used instead, allowing any
                response structure while still returning structured JSON.

        Raises:
            ValueError: If the spec cannot be loaded by openapi-core, or if
                no client was given and the spec has no usable server URL.
        """
        super().__init__()
        # Only close the client in lifespan() if we created it ourselves.
        self._owns_client = client is None
        if client is None:
            client = self._create_default_client(openapi_spec)
        self._client = client
        self._mcp_component_fn = mcp_component_fn
        self._validate_output = validate_output
        # Keep track of names to detect collisions
        self._used_names: dict[str, Counter[str]] = {
            "tool": Counter(),
            "resource": Counter(),
            "resource_template": Counter(),
            "prompt": Counter(),
        }
        # Pre-created component storage
        self._tools: dict[str, OpenAPITool] = {}
        self._resources: dict[str, OpenAPIResource] = {}
        self._templates: dict[str, OpenAPIResourceTemplate] = {}
        # Create openapi-core Spec and RequestDirector
        try:
            self._spec = SchemaPath.from_dict(cast(Any, openapi_spec))
            self._director = RequestDirector(self._spec)
        except Exception as e:
            logger.exception("Failed to initialize RequestDirector")
            raise ValueError(f"Invalid OpenAPI specification: {e}") from e
        http_routes = parse_openapi_to_http_routes(openapi_spec)
        # Process routes. User-supplied maps take precedence; the default
        # mappings are appended as the catch-all fallback.
        route_maps = (route_maps or []) + DEFAULT_ROUTE_MAPPINGS
        for route in http_routes:
            route_map = _determine_route_type(route, route_maps)
            route_type = route_map.mcp_type
            if route_map_fn is not None:
                # The callback may override the mapped type; a None result
                # keeps the mapping, and a raised error falls back to it.
                try:
                    result = route_map_fn(route, route_type)
                    if result is not None:
                        route_type = result
                        logger.debug(
                            f"Route {route.method} {route.path} mapping customized: "
                            f"type={route_type.name}"
                        )
                except Exception as e:
                    logger.warning(
                        f"Error in route_map_fn for {route.method} {route.path}: {e}. "
                        f"Using default values."
                    )
            component_name = self._generate_default_name(route, mcp_names)
            # Merge tags from the route itself, the matched map, and the
            # provider-wide set.
            route_tags = set(route.tags) | route_map.mcp_tags | (tags or set())
            if route_type == MCPType.TOOL:
                self._create_openapi_tool(route, component_name, tags=route_tags)
            elif route_type == MCPType.RESOURCE:
                self._create_openapi_resource(route, component_name, tags=route_tags)
            elif route_type == MCPType.RESOURCE_TEMPLATE:
                self._create_openapi_template(route, component_name, tags=route_tags)
            elif route_type == MCPType.EXCLUDE:
                logger.debug(f"Excluding route: {route.method} {route.path}")
        logger.debug(f"Created OpenAPIProvider with {len(http_routes)} routes")

    @classmethod
    def _create_default_client(cls, openapi_spec: dict[str, Any]) -> httpx.AsyncClient:
        """Create a default httpx client from the OpenAPI spec's server URL.

        Uses the first entry of the spec's ``servers`` list as the base URL.

        Raises:
            ValueError: If the spec has no ``servers`` entry with a URL.
        """
        servers = openapi_spec.get("servers", [])
        if not servers or not servers[0].get("url"):
            raise ValueError(
                "No server URL found in OpenAPI spec. Either add a 'servers' "
                "entry to the spec or provide an httpx.AsyncClient explicitly."
            )
        base_url = servers[0]["url"]
        return httpx.AsyncClient(base_url=base_url, timeout=DEFAULT_TIMEOUT)

    @asynccontextmanager
    async def lifespan(self) -> AsyncIterator[None]:
        """Manage the lifecycle of the auto-created httpx client.

        Closes the client on exit only when this provider created it;
        caller-supplied clients remain the caller's responsibility.
        """
        if self._owns_client:
            async with self._client:
                yield
        else:
            yield

    def _generate_default_name(
        self, route: HTTPRoute, mcp_names_map: dict[str, str] | None = None
    ) -> str:
        """Generate a default name from the route.

        Preference order: explicit mcp_names mapping, operationId (truncated
        at the first "__"), summary, then "<method>_<path>". The result is
        slugified and capped at 56 characters.
        """
        mcp_names_map = mcp_names_map or {}
        if route.operation_id:
            if route.operation_id in mcp_names_map:
                name = mcp_names_map[route.operation_id]
            else:
                name = route.operation_id.split("__")[0]
        else:
            name = route.summary or f"{route.method}_{route.path}"
        name = _slugify(name)
        if len(name) > 56:
            name = name[:56]
        return name

    def _get_unique_name(
        self,
        name: str,
        component_type: Literal["tool", "resource", "resource_template", "prompt"],
    ) -> str:
        """Ensure the name is unique by appending numbers if needed.

        The first use of a name is returned unchanged; subsequent uses get a
        "_2", "_3", ... suffix based on the per-type usage counter.
        """
        self._used_names[component_type][name] += 1
        if self._used_names[component_type][name] == 1:
            return name
        new_name = f"{name}_{self._used_names[component_type][name]}"
        logger.debug(
            f"Name collision: '{name}' exists as {component_type}. Using '{new_name}'."
        )
        return new_name

    def _create_openapi_tool(
        self,
        route: HTTPRoute,
        name: str,
        tags: set[str],
    ) -> None:
        """Create and register an OpenAPITool.

        Builds input/output schemas from the route, applies output-schema
        relaxation when validate_output is False, and gives the optional
        component callback a chance to customize the tool before storing it.
        """
        combined_schema = route.flat_param_schema
        output_schema = extract_output_schema_from_responses(
            route.responses,
            route.response_schemas,
            route.openapi_version,
        )
        if not self._validate_output and output_schema is not None:
            # Use a permissive schema that accepts any object, preserving
            # the wrap-result flag so non-object responses still get wrapped
            permissive: dict[str, Any] = {
                "type": "object",
                "additionalProperties": True,
            }
            if output_schema.get("x-fastmcp-wrap-result"):
                permissive["x-fastmcp-wrap-result"] = True
            output_schema = permissive
        tool_name = self._get_unique_name(name, "tool")
        base_description = (
            route.description
            or route.summary
            or f"Executes {route.method} {route.path}"
        )
        tool = OpenAPITool(
            client=self._client,
            route=route,
            director=self._director,
            name=tool_name,
            description=base_description,
            parameters=combined_schema,
            output_schema=output_schema,
            tags=set(route.tags or []) | tags,
        )
        if self._mcp_component_fn is not None:
            # Customization errors are logged but never fatal.
            try:
                self._mcp_component_fn(route, tool)
                logger.debug(f"Tool {tool_name} customized by component_fn")
            except Exception as e:
                logger.warning(f"Error in component_fn for tool {tool_name}: {e}")
        # Keyed by tool.name (not tool_name) so a rename in component_fn
        # is respected.
        self._tools[tool.name] = tool

    def _create_openapi_resource(
        self,
        route: HTTPRoute,
        name: str,
        tags: set[str],
    ) -> None:
        """Create and register an OpenAPIResource under resource://<name>."""
        resource_name = self._get_unique_name(name, "resource")
        resource_uri = f"resource://{resource_name}"
        base_description = (
            route.description or route.summary or f"Represents {route.path}"
        )
        resource = OpenAPIResource(
            client=self._client,
            route=route,
            director=self._director,
            uri=resource_uri,
            name=resource_name,
            description=base_description,
            mime_type=_extract_mime_type_from_route(route),
            tags=set(route.tags or []) | tags,
        )
        if self._mcp_component_fn is not None:
            # Customization errors are logged but never fatal.
            try:
                self._mcp_component_fn(route, resource)
                logger.debug(f"Resource {resource_uri} customized by component_fn")
            except Exception as e:
                logger.warning(
                    f"Error in component_fn for resource {resource_uri}: {e}"
                )
        # Keyed by the (possibly customized) resource URI.
        self._resources[str(resource.uri)] = resource

    def _create_openapi_template(
        self,
        route: HTTPRoute,
        name: str,
        tags: set[str],
    ) -> None:
        """Create and register an OpenAPIResourceTemplate.

        Path parameters (sorted by name) become URI-template placeholders:
        resource://<name>/{p1}/{p2}/... and are also described in the
        template's JSON-schema parameters.
        """
        template_name = self._get_unique_name(name, "resource_template")
        path_params = sorted(p.name for p in route.parameters if p.location == "path")
        uri_template_str = f"resource://{template_name}"
        if path_params:
            uri_template_str += "/" + "/".join(f"{{{p}}}" for p in path_params)
        base_description = (
            route.description or route.summary or f"Template for {route.path}"
        )
        # Copy each path parameter's schema, adding its description only
        # when the schema itself does not already carry one.
        template_params_schema = {
            "type": "object",
            "properties": {
                p.name: {
                    **(p.schema_.copy() if isinstance(p.schema_, dict) else {}),
                    **(
                        {"description": p.description}
                        if p.description
                        and not (
                            isinstance(p.schema_, dict) and "description" in p.schema_
                        )
                        else {}
                    ),
                }
                for p in route.parameters
                if p.location == "path"
            },
            "required": [
                p.name for p in route.parameters if p.location == "path" and p.required
            ],
        }
        template = OpenAPIResourceTemplate(
            client=self._client,
            route=route,
            director=self._director,
            uri_template=uri_template_str,
            name=template_name,
            description=base_description,
            parameters=template_params_schema,
            tags=set(route.tags or []) | tags,
            mime_type=_extract_mime_type_from_route(route),
        )
        if self._mcp_component_fn is not None:
            # Customization errors are logged but never fatal.
            try:
                self._mcp_component_fn(route, template)
                logger.debug(f"Template {uri_template_str} customized by component_fn")
            except Exception as e:
                logger.warning(
                    f"Error in component_fn for template {uri_template_str}: {e}"
                )
        self._templates[template.uri_template] = template

    # -------------------------------------------------------------------------
    # Provider interface
    # -------------------------------------------------------------------------

    async def _list_tools(self) -> Sequence[Tool]:
        """Return all tools created from the OpenAPI spec."""
        return list(self._tools.values())

    async def _get_tool(
        self, name: str, version: VersionSpec | None = None
    ) -> Tool | None:
        """Get a tool by name, optionally filtered by version."""
        tool = self._tools.get(name)
        if tool is None:
            return None
        if version is not None and not version.matches(tool.version):
            return None
        return tool

    async def _list_resources(self) -> Sequence[Resource]:
        """Return all resources created from the OpenAPI spec."""
        return list(self._resources.values())

    async def _get_resource(
        self, uri: str, version: VersionSpec | None = None
    ) -> Resource | None:
        """Get a resource by URI, optionally filtered by version."""
        resource = self._resources.get(uri)
        if resource is None:
            return None
        if version is not None and not version.matches(resource.version):
            return None
        return resource

    async def _list_resource_templates(self) -> Sequence[ResourceTemplate]:
        """Return all resource templates created from the OpenAPI spec."""
        return list(self._templates.values())

    async def _get_resource_template(
        self, uri: str, version: VersionSpec | None = None
    ) -> ResourceTemplate | None:
        """Get a resource template that matches the given URI.

        When several templates match (and survive the optional version
        filter), the highest-versioned one wins.
        """
        matching = [t for t in self._templates.values() if t.matches(uri) is not None]
        if not matching:
            return None
        if version is not None:
            matching = [t for t in matching if version.matches(t.version)]
            if not matching:
                return None
        return max(matching, key=version_sort_key)  # type: ignore[type-var]

    async def _list_prompts(self) -> Sequence[Prompt]:
        """Return empty list - OpenAPI doesn't create prompts."""
        return []

    async def get_tasks(self) -> Sequence[FastMCPComponent]:
        """Return empty list - OpenAPI components don't support tasks."""
        return []
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/providers/openapi/provider.py",
"license": "Apache License 2.0",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/providers/openapi/routing.py | """Route mapping logic for OpenAPI operations."""
from __future__ import annotations
import enum
import re
from collections.abc import Callable
from dataclasses import dataclass, field
from re import Pattern
from typing import TYPE_CHECKING, Literal
if TYPE_CHECKING:
from fastmcp.server.providers.openapi.components import (
OpenAPIResource,
OpenAPIResourceTemplate,
OpenAPITool,
)
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.openapi import HttpMethod, HTTPRoute
# Public API of this module.
__all__ = [
    "ComponentFn",
    "MCPType",
    "RouteMap",
    "RouteMapFn",
]

logger = get_logger(__name__)

# Type definitions for the mapping functions.
# RouteMapFn may override the component type chosen for a route; returning
# None keeps the already-determined type.
RouteMapFn = Callable[[HTTPRoute, "MCPType"], "MCPType | None"]
# ComponentFn customizes a freshly created component in place (no return).
ComponentFn = Callable[
    [
        HTTPRoute,
        "OpenAPITool | OpenAPIResource | OpenAPIResourceTemplate",
    ],
    None,
]
class MCPType(enum.Enum):
    """Kind of FastMCP component an HTTP route is converted into."""

    # Callable Tool that executes the underlying HTTP request.
    TOOL = "TOOL"
    # Static Resource (typically GET endpoints).
    RESOURCE = "RESOURCE"
    # Parameterized ResourceTemplate (typically GET with path params).
    RESOURCE_TEMPLATE = "RESOURCE_TEMPLATE"
    # The route is dropped and never exposed as an MCP component.
    EXCLUDE = "EXCLUDE"
@dataclass(kw_only=True)
class RouteMap:
"""Mapping configuration for HTTP routes to FastMCP component types."""
methods: list[HttpMethod] | Literal["*"] = field(default="*")
pattern: Pattern[str] | str = field(default=r".*")
tags: set[str] = field(
default_factory=set,
metadata={"description": "A set of tags to match. All tags must match."},
)
mcp_type: MCPType = field(
metadata={"description": "The type of FastMCP component to create."},
)
mcp_tags: set[str] = field(
default_factory=set,
metadata={
"description": "A set of tags to apply to the generated FastMCP component."
},
)
# Default route mapping: all routes become tools.
# The provider appends this list after any user-supplied route maps, so it
# acts as the catch-all fallback for routes nothing else matched.
DEFAULT_ROUTE_MAPPINGS = [
    RouteMap(mcp_type=MCPType.TOOL),
]
def _determine_route_type(
    route: HTTPRoute,
    mappings: list[RouteMap],
) -> RouteMap:
    """Return the first RouteMap in *mappings* that matches *route*.

    A mapping matches when its method filter, path pattern, and required
    tags all agree with the route. Falls back to a TOOL mapping when no
    rule matches.
    """
    for candidate in mappings:
        # Method filter: "*" accepts any HTTP method.
        if candidate.methods != "*" and route.method not in candidate.methods:
            continue
        # Path filter: accept a precompiled Pattern or a raw regex string.
        if isinstance(candidate.pattern, Pattern):
            matched = candidate.pattern.search(route.path)
        else:
            matched = re.search(candidate.pattern, route.path)
        if not matched:
            continue
        # Tag filter: every tag required by the mapping must be on the route.
        if candidate.tags and not candidate.tags.issubset(set(route.tags or [])):
            continue
        logger.debug(
            f"Route {route.method} {route.path} mapped to {candidate.mcp_type.name}"
        )
        return candidate
    return RouteMap(mcp_type=MCPType.TOOL)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/providers/openapi/routing.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/deprecated/openapi/test_openapi.py | """Tests for deprecated OpenAPI imports.
These tests verify that the old import paths still work and emit
deprecation warnings, ensuring backwards compatibility.
"""
import warnings
import httpx
class TestDeprecatedServerOpenAPIImports:
    """Test deprecated imports from fastmcp.server.openapi."""

    def test_import_fastmcp_openapi_emits_warning(self):
        """Importing from fastmcp.server.openapi should emit deprecation warning."""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # Force reimport: the module may already be cached, in which case
            # a plain import would not re-run its module-level warning.
            import importlib

            import fastmcp.server.openapi

            importlib.reload(fastmcp.server.openapi)

        deprecation_warnings = [
            x for x in w if issubclass(x.category, DeprecationWarning)
        ]
        assert len(deprecation_warnings) >= 1
        assert "providers.openapi" in str(deprecation_warnings[0].message)

    def test_import_routing_emits_warning(self):
        """Importing from fastmcp.server.openapi.routing should emit deprecation warning."""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # Reload to force the module-level deprecation warning to fire.
            import importlib

            import fastmcp.server.openapi.routing

            importlib.reload(fastmcp.server.openapi.routing)

        deprecation_warnings = [
            x for x in w if issubclass(x.category, DeprecationWarning)
        ]
        assert len(deprecation_warnings) >= 1
        assert "providers.openapi" in str(deprecation_warnings[0].message)

    def test_fastmcp_openapi_class_emits_warning(self):
        """Using FastMCPOpenAPI should emit deprecation warning."""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from fastmcp.server.openapi.server import FastMCPOpenAPI

            # Minimal valid spec; no paths needed to trigger the warning.
            spec = {
                "openapi": "3.0.0",
                "info": {"title": "Test", "version": "1.0.0"},
                "paths": {},
            }
            client = httpx.AsyncClient(base_url="https://example.com")
            FastMCPOpenAPI(openapi_spec=spec, client=client)

        deprecation_warnings = [
            x for x in w if issubclass(x.category, DeprecationWarning)
        ]
        assert len(deprecation_warnings) >= 1
        # The constructor warning is emitted last (after any import warning).
        assert "FastMCPOpenAPI" in str(deprecation_warnings[-1].message)

    def test_deprecated_imports_still_work(self):
        """All expected symbols should be importable from deprecated locations."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            from fastmcp.server.openapi import (
                FastMCPOpenAPI,
                MCPType,
                OpenAPIProvider,
                RouteMap,
            )

        # Verify they're the right types
        assert FastMCPOpenAPI is not None
        assert OpenAPIProvider is not None
        assert MCPType.TOOL.value == "TOOL"
        assert RouteMap is not None

    def test_deprecated_routing_imports_still_work(self):
        """Routing symbols should be importable from deprecated location."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            from fastmcp.server.openapi.routing import (
                DEFAULT_ROUTE_MAPPINGS,
                MCPType,
                _determine_route_type,
            )

        assert DEFAULT_ROUTE_MAPPINGS is not None
        assert len(DEFAULT_ROUTE_MAPPINGS) > 0
        assert MCPType.TOOL.value == "TOOL"
        assert _determine_route_type is not None
class TestDeprecatedExperimentalOpenAPIImports:
    """Test deprecated imports from fastmcp.experimental.server.openapi."""

    def test_experimental_import_emits_warning(self):
        """Importing from experimental should emit deprecation warning."""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # Reload to force the module-level deprecation warning to fire.
            import importlib

            import fastmcp.experimental.server.openapi

            importlib.reload(fastmcp.experimental.server.openapi)

        deprecation_warnings = [
            x for x in w if issubclass(x.category, DeprecationWarning)
        ]
        assert len(deprecation_warnings) >= 1
        assert "providers.openapi" in str(deprecation_warnings[0].message)

    def test_experimental_imports_still_work(self):
        """All expected symbols should be importable from experimental."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            from fastmcp.experimental.server.openapi import (
                DEFAULT_ROUTE_MAPPINGS,
                FastMCPOpenAPI,
                MCPType,
            )

        assert FastMCPOpenAPI is not None
        assert DEFAULT_ROUTE_MAPPINGS is not None
        assert MCPType.TOOL.value == "TOOL"
class TestDeprecatedComponentsImports:
    """Test deprecated imports from fastmcp.server.openapi.components."""

    def test_components_import_emits_warning(self):
        """Importing from components should emit deprecation warning."""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # Reload to force the module-level deprecation warning to fire.
            import importlib

            import fastmcp.server.openapi.components

            importlib.reload(fastmcp.server.openapi.components)

        deprecation_warnings = [
            x for x in w if issubclass(x.category, DeprecationWarning)
        ]
        assert len(deprecation_warnings) >= 1
        assert "providers.openapi" in str(deprecation_warnings[0].message)

    def test_components_imports_still_work(self):
        """Component classes should be importable from deprecated location."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            from fastmcp.server.openapi.components import (
                OpenAPIResource,
                OpenAPIResourceTemplate,
                OpenAPITool,
            )

        assert OpenAPITool is not None
        assert OpenAPIResource is not None
        assert OpenAPIResourceTemplate is not None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/deprecated/openapi/test_openapi.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/openapi/test_end_to_end_compatibility.py | """End-to-end tests for OpenAPIProvider implementation."""
import httpx
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.providers.openapi import OpenAPIProvider
def create_openapi_server(
    openapi_spec: dict,
    client,
    name: str = "OpenAPI Server",
) -> FastMCP:
    """Build a FastMCP server whose components come from *openapi_spec*.

    Wraps the spec in an OpenAPIProvider backed by *client* and registers
    it on a freshly created server called *name*.
    """
    server = FastMCP(name)
    server.add_provider(OpenAPIProvider(openapi_spec=openapi_spec, client=client))
    return server
class TestEndToEndFunctionality:
    """Test end-to-end functionality of OpenAPIProvider."""

    @pytest.fixture
    def simple_spec(self):
        """Simple OpenAPI spec for testing.

        One GET route with a required path parameter and an optional
        query parameter.
        """
        return {
            "openapi": "3.0.0",
            "info": {"title": "Test API", "version": "1.0.0"},
            "paths": {
                "/users/{id}": {
                    "get": {
                        "operationId": "get_user",
                        "summary": "Get user by ID",
                        "parameters": [
                            {
                                "name": "id",
                                "in": "path",
                                "required": True,
                                "schema": {"type": "integer"},
                            },
                            {
                                "name": "include_details",
                                "in": "query",
                                "required": False,
                                "schema": {"type": "boolean"},
                            },
                        ],
                        "responses": {"200": {"description": "User found"}},
                    }
                }
            },
        }

    @pytest.fixture
    def collision_spec(self):
        """OpenAPI spec with parameter collisions.

        The path parameter "id" collides with a body property "id", so the
        provider must disambiguate them in the generated tool schema.
        """
        return {
            "openapi": "3.0.0",
            "info": {"title": "Collision API", "version": "1.0.0"},
            "paths": {
                "/users/{id}": {
                    "put": {
                        "operationId": "update_user",
                        "summary": "Update user",
                        "parameters": [
                            {
                                "name": "id",
                                "in": "path",
                                "required": True,
                                "schema": {"type": "integer"},
                            }
                        ],
                        "requestBody": {
                            "required": True,
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "type": "object",
                                        "properties": {
                                            "id": {"type": "integer"},
                                            "name": {"type": "string"},
                                        },
                                        "required": ["name"],
                                    }
                                }
                            },
                        },
                        "responses": {"200": {"description": "User updated"}},
                    }
                }
            },
        }

    async def test_tool_schema_generation(self, simple_spec):
        """Test that tools have correct input schemas."""
        async with httpx.AsyncClient(base_url="https://api.example.com") as client:
            server = create_openapi_server(
                openapi_spec=simple_spec,
                client=client,
                name="Test Server",
            )
            async with Client(server) as mcp_client:
                tools = await mcp_client.list_tools()
                # Should have one tool
                assert len(tools) == 1
                tool = tools[0]
                assert tool.name == "get_user"
                assert tool.description
                # Check schema structure
                schema = tool.inputSchema
                assert schema["type"] == "object"
                properties = schema.get("properties", {})
                assert "id" in properties
                assert "include_details" in properties
                # Required fields should include path parameter
                required = schema.get("required", [])
                assert "id" in required

    async def test_collision_handling(self, collision_spec):
        """Test that parameter collision handling works correctly."""
        async with httpx.AsyncClient(base_url="https://api.example.com") as client:
            server = create_openapi_server(
                openapi_spec=collision_spec,
                client=client,
                name="Collision Test Server",
            )
            async with Client(server) as mcp_client:
                tools = await mcp_client.list_tools()
                # Should have one tool
                assert len(tools) == 1
                tool = tools[0]
                schema = tool.inputSchema
                # Both should have collision-resolved parameters
                properties = schema.get("properties", {})
                # Should have: id__path (path param), id (body param), name (body param)
                expected_props = {"id__path", "id", "name"}
                assert set(properties.keys()) == expected_props
                # Required should include path param and required body params
                required = set(schema.get("required", []))
                assert "id__path" in required
                assert "name" in required
                # Path parameter should have integer type
                assert properties["id__path"]["type"] == "integer"
                # Body parameters should match
                assert properties["id"]["type"] == "integer"
                assert properties["name"]["type"] == "string"

    async def test_tool_execution_parameter_mapping(self, collision_spec):
        """Test that tool execution with collisions works correctly."""
        async with httpx.AsyncClient(base_url="https://api.example.com") as client:
            server = create_openapi_server(
                openapi_spec=collision_spec,
                client=client,
                name="Test Server",
            )
            # Test arguments that should work with collision resolution
            test_args = {
                "id__path": 123,  # Path parameter (suffixed)
                "id": 456,  # Body parameter (not suffixed)
                "name": "John Doe",  # Body parameter
            }
            async with Client(server) as mcp_client:
                tools = await mcp_client.list_tools()
                tool_name = tools[0].name
                # Should fail at HTTP level (not argument validation)
                # since we don't have an actual server
                with pytest.raises(Exception) as exc_info:
                    await mcp_client.call_tool(tool_name, test_args)
                # Should fail at HTTP level, not schema validation
                error_msg = str(exc_info.value).lower()
                assert "schema" not in error_msg
                assert "validation" not in error_msg

    async def test_optional_parameter_handling(self, simple_spec):
        """Test that optional parameters are handled correctly."""
        async with httpx.AsyncClient(base_url="https://api.example.com") as client:
            server = create_openapi_server(
                openapi_spec=simple_spec,
                client=client,
                name="Test Server",
            )
            # Test with optional parameter omitted
            test_args_minimal = {"id": 123}
            # Test with optional parameter included
            test_args_full = {"id": 123, "include_details": True}
            async with Client(server) as mcp_client:
                tools = await mcp_client.list_tools()
                tool_name = tools[0].name
                # Both should fail at HTTP level (not argument validation)
                for test_args in [test_args_minimal, test_args_full]:
                    with pytest.raises(Exception) as exc_info:
                        await mcp_client.call_tool(tool_name, test_args)
                    error_msg = str(exc_info.value).lower()
                    assert "schema" not in error_msg
                    assert "validation" not in error_msg
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/openapi/test_end_to_end_compatibility.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/providers/proxy.py | """ProxyProvider for proxying to remote MCP servers.
This module provides the `ProxyProvider` class that proxies components from
a remote MCP server via a client factory. It also provides proxy component
classes that forward execution to remote servers.
"""
from __future__ import annotations
import base64
import inspect
from collections.abc import Awaitable, Callable, Sequence
from typing import TYPE_CHECKING, Any, cast
from urllib.parse import quote
import mcp.types
from mcp import ServerSession
from mcp.client.session import ClientSession
from mcp.server.lowlevel.server import request_ctx
from mcp.shared.context import LifespanContextT, RequestContext
from mcp.shared.exceptions import McpError
from mcp.types import (
METHOD_NOT_FOUND,
BlobResourceContents,
ElicitRequestFormParams,
TextResourceContents,
)
from pydantic.networks import AnyUrl
from fastmcp.client.client import Client, FastMCP1Server
from fastmcp.client.elicitation import ElicitResult
from fastmcp.client.logging import LogMessage
from fastmcp.client.roots import RootsList
from fastmcp.client.telemetry import client_span
from fastmcp.client.transports import ClientTransportT
from fastmcp.exceptions import ResourceError, ToolError
from fastmcp.mcp_config import MCPConfig
from fastmcp.prompts import Message, Prompt, PromptResult
from fastmcp.prompts.prompt import PromptArgument
from fastmcp.resources import Resource, ResourceTemplate
from fastmcp.resources.resource import ResourceContent, ResourceResult
from fastmcp.server.context import Context
from fastmcp.server.dependencies import get_context
from fastmcp.server.providers.base import Provider
from fastmcp.server.server import FastMCP
from fastmcp.server.tasks.config import TaskConfig
from fastmcp.tools.tool import Tool, ToolResult
from fastmcp.utilities.components import FastMCPComponent, get_fastmcp_metadata
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from pathlib import Path
from fastmcp.client.transports import ClientTransport
logger = get_logger(__name__)

# Type alias for client factory functions: a zero-argument callable that
# returns a Client either directly (sync factory) or as an awaitable
# (async factory). Consumers must handle both shapes.
ClientFactoryT = Callable[[], Client] | Callable[[], Awaitable[Client]]
# -----------------------------------------------------------------------------
# Proxy Component Classes
# -----------------------------------------------------------------------------
class ProxyTool(Tool):
    """A Tool that represents and executes a tool on a remote server.

    Each call obtains a fresh client from the factory and forwards the
    invocation over MCP, propagating request meta (including task metadata)
    to the backend.
    """

    # Proxied tools cannot run as background tasks locally; execution is
    # delegated to the remote server.
    task_config: TaskConfig = TaskConfig(mode="forbidden")
    # Original backend-side tool name, preserved when the local name is
    # changed via model_copy so remote calls still use the backend's name.
    _backend_name: str | None = None

    def __init__(self, client_factory: ClientFactoryT, **kwargs: Any):
        super().__init__(**kwargs)
        self._client_factory = client_factory

    async def _get_client(self) -> Client:
        """Gets a client instance by calling the sync or async factory."""
        client = self._client_factory()
        if inspect.isawaitable(client):
            client = cast(Client, await client)
        return client

    def model_copy(self, **kwargs: Any) -> ProxyTool:
        """Override to preserve _backend_name when name changes."""
        update = kwargs.get("update", {})
        if "name" in update and self._backend_name is None:
            # First time name is being changed, preserve original for backend calls
            update = {**update, "_backend_name": self.name}
            kwargs["update"] = update
        return super().model_copy(**kwargs)

    @classmethod
    def from_mcp_tool(
        cls, client_factory: ClientFactoryT, mcp_tool: mcp.types.Tool
    ) -> ProxyTool:
        """Factory method to create a ProxyTool from a raw MCP tool schema."""
        return cls(
            client_factory=client_factory,
            name=mcp_tool.name,
            title=mcp_tool.title,
            description=mcp_tool.description,
            parameters=mcp_tool.inputSchema,
            annotations=mcp_tool.annotations,
            output_schema=mcp_tool.outputSchema,
            icons=mcp_tool.icons,
            meta=mcp_tool.meta,
            # Tags round-trip through the FastMCP metadata embedded in meta.
            tags=get_fastmcp_metadata(mcp_tool.meta).get("tags", []),
        )

    async def run(
        self,
        arguments: dict[str, Any],
        context: Context | None = None,
    ) -> ToolResult:
        """Executes the tool by making a call through the client.

        Raises:
            ToolError: If the backend reports the call as an error.
        """
        # Always call the backend by its original name, even if this proxy
        # component was renamed locally.
        backend_name = self._backend_name or self.name
        with client_span(
            f"tools/call {backend_name}", "tools/call", backend_name
        ) as span:
            span.set_attribute("fastmcp.provider.type", "ProxyProvider")
            client = await self._get_client()
            async with client:
                ctx = context or get_context()
                # StatefulProxyClient reuses sessions across requests, so
                # its receive-loop task has stale ContextVars from the first
                # request. Stash the current RequestContext in the shared
                # ref so handlers can restore it before forwarding.
                if isinstance(client, StatefulProxyClient):
                    cast(list[Any], client._proxy_rc_ref)[0] = (
                        ctx.request_context,
                        ctx._fastmcp,  # weakref to FastMCP, not the Context
                    )
                # Build meta dict from request context
                meta: dict[str, Any] | None = None
                if hasattr(ctx, "request_context"):
                    req_ctx = ctx.request_context
                    # Start with existing meta if present
                    if hasattr(req_ctx, "meta") and req_ctx.meta:
                        meta = dict(req_ctx.meta)
                    # Add task metadata if this is a task request
                    if (
                        hasattr(req_ctx, "experimental")
                        and hasattr(req_ctx.experimental, "is_task")
                        and req_ctx.experimental.is_task
                    ):
                        task_metadata = req_ctx.experimental.task_metadata
                        if task_metadata:
                            meta = meta or {}
                            meta["modelcontextprotocol.io/task"] = (
                                task_metadata.model_dump(exclude_none=True)
                            )
                result = await client.call_tool_mcp(
                    name=backend_name, arguments=arguments, meta=meta
                )
                if result.isError:
                    # NOTE(review): assumes error results always carry at least
                    # one TextContent item; result.content[0] would raise
                    # IndexError on an empty error payload — confirm backend
                    # guarantees.
                    raise ToolError(cast(mcp.types.TextContent, result.content[0]).text)
                # Preserve backend's meta (includes task metadata for background tasks)
                return ToolResult(
                    content=result.content,
                    structured_content=result.structuredContent,
                    meta=result.meta,
                )

    def get_span_attributes(self) -> dict[str, Any]:
        """Extend base span attributes with proxy-specific details."""
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "ProxyProvider",
            "fastmcp.proxy.backend_name": self._backend_name,
        }
class ProxyResource(Resource):
    """A Resource that represents and reads a resource from a remote server.

    Reads are forwarded to the backend unless pre-fetched content was cached
    at construction time.
    """

    # Proxied resources cannot run as background tasks locally.
    task_config: TaskConfig = TaskConfig(mode="forbidden")
    # Optional pre-fetched content; when set, read() returns it directly
    # without contacting the backend.
    _cached_content: ResourceResult | None = None
    # Original backend-side URI, preserved when the local URI is changed via
    # model_copy so remote reads still use the backend's URI.
    _backend_uri: str | None = None

    def __init__(
        self,
        client_factory: ClientFactoryT,
        *,
        _cached_content: ResourceResult | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self._client_factory = client_factory
        self._cached_content = _cached_content

    async def _get_client(self) -> Client:
        """Gets a client instance by calling the sync or async factory."""
        client = self._client_factory()
        if inspect.isawaitable(client):
            client = cast(Client, await client)
        return client

    def model_copy(self, **kwargs: Any) -> ProxyResource:
        """Override to preserve _backend_uri when uri changes."""
        update = kwargs.get("update", {})
        if "uri" in update and self._backend_uri is None:
            # First time uri is being changed, preserve original for backend calls
            update = {**update, "_backend_uri": str(self.uri)}
            kwargs["update"] = update
        return super().model_copy(**kwargs)

    @classmethod
    def from_mcp_resource(
        cls,
        client_factory: ClientFactoryT,
        mcp_resource: mcp.types.Resource,
    ) -> ProxyResource:
        """Factory method to create a ProxyResource from a raw MCP resource schema."""
        return cls(
            client_factory=client_factory,
            uri=mcp_resource.uri,
            name=mcp_resource.name,
            title=mcp_resource.title,
            description=mcp_resource.description,
            mime_type=mcp_resource.mimeType or "text/plain",
            icons=mcp_resource.icons,
            meta=mcp_resource.meta,
            # Tags round-trip through the FastMCP metadata embedded in meta.
            tags=get_fastmcp_metadata(mcp_resource.meta).get("tags", []),
            task_config=TaskConfig(mode="forbidden"),
        )

    async def read(self) -> ResourceResult:
        """Read the resource content from the remote server.

        Returns cached content when available; otherwise forwards the read
        to the backend using the original backend URI.

        Raises:
            ResourceError: If the backend returns no content or an
                unsupported content type.
        """
        if self._cached_content is not None:
            return self._cached_content
        backend_uri = self._backend_uri or str(self.uri)
        with client_span(
            f"resources/read {backend_uri}",
            "resources/read",
            backend_uri,
            resource_uri=backend_uri,
        ) as span:
            span.set_attribute("fastmcp.provider.type", "ProxyProvider")
            client = await self._get_client()
            async with client:
                result = await client.read_resource(backend_uri)
                if not result:
                    raise ResourceError(
                        f"Remote server returned empty content for {backend_uri}"
                    )
                # Process all items in the result list, not just the first one
                contents: list[ResourceContent] = []
                for item in result:
                    if isinstance(item, TextResourceContents):
                        contents.append(
                            ResourceContent(
                                content=item.text,
                                mime_type=item.mimeType,
                                meta=item.meta,
                            )
                        )
                    elif isinstance(item, BlobResourceContents):
                        # Blob payloads arrive base64-encoded; decode to bytes.
                        contents.append(
                            ResourceContent(
                                content=base64.b64decode(item.blob),
                                mime_type=item.mimeType,
                                meta=item.meta,
                            )
                        )
                    else:
                        raise ResourceError(f"Unsupported content type: {type(item)}")
                return ResourceResult(contents=contents)

    def get_span_attributes(self) -> dict[str, Any]:
        """Extend base span attributes with proxy-specific details."""
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "ProxyProvider",
            "fastmcp.proxy.backend_uri": self._backend_uri,
        }
class ProxyTemplate(ResourceTemplate):
    """A ResourceTemplate that represents and creates resources from a remote server template."""

    # Proxied components cannot run as background tasks.
    task_config: TaskConfig = TaskConfig(mode="forbidden")
    # Original remote URI template, captured the first time the local
    # uri_template is rewritten (e.g. by namespacing) so backend calls still
    # target the remote's template.
    _backend_uri_template: str | None = None

    def __init__(self, client_factory: ClientFactoryT, **kwargs: Any):
        super().__init__(**kwargs)
        self._client_factory = client_factory

    async def _get_client(self) -> Client:
        """Gets a client instance by calling the sync or async factory."""
        client = self._client_factory()
        if inspect.isawaitable(client):
            client = cast(Client, await client)
        return client

    def model_copy(self, **kwargs: Any) -> ProxyTemplate:
        """Override to preserve _backend_uri_template when uri_template changes."""
        update = kwargs.get("update", {})
        if "uri_template" in update and self._backend_uri_template is None:
            # First time uri_template is being changed, preserve original for backend
            update = {**update, "_backend_uri_template": self.uri_template}
            kwargs["update"] = update
        return super().model_copy(**kwargs)

    @classmethod
    def from_mcp_template(  # type: ignore[override]
        cls, client_factory: ClientFactoryT, mcp_template: mcp.types.ResourceTemplate
    ) -> ProxyTemplate:
        """Factory method to create a ProxyTemplate from a raw MCP template schema."""
        return cls(
            client_factory=client_factory,
            uri_template=mcp_template.uriTemplate,
            name=mcp_template.name,
            title=mcp_template.title,
            description=mcp_template.description,
            mime_type=mcp_template.mimeType or "text/plain",
            icons=mcp_template.icons,
            parameters={},  # Remote templates don't have local parameters
            meta=mcp_template.meta,
            tags=get_fastmcp_metadata(mcp_template.meta).get("tags", []),
            task_config=TaskConfig(mode="forbidden"),
        )

    async def create_resource(
        self,
        uri: str,
        params: dict[str, Any],
        context: Context | None = None,
    ) -> ProxyResource:
        """Create a resource from the template by calling the remote server.

        Returns a ProxyResource whose content is pre-cached from this read,
        so a subsequent read() does not hit the backend again.

        Raises:
            ResourceError: If the remote returns no content or an
                unsupported content type.
        """
        # don't use the provided uri, because it may not be the same as the
        # uri_template on the remote server.
        # quote params to ensure they are valid for the uri_template
        # NOTE(review): assumes every param value is a str — quote() raises
        # on non-str input; confirm against template-matching callers.
        backend_template = self._backend_uri_template or self.uri_template
        parameterized_uri = backend_template.format(
            **{k: quote(v, safe="") for k, v in params.items()}
        )
        client = await self._get_client()
        async with client:
            result = await client.read_resource(parameterized_uri)
            if not result:
                raise ResourceError(
                    f"Remote server returned empty content for {parameterized_uri}"
                )
            # Process all items in the result list, not just the first one
            contents: list[ResourceContent] = []
            for item in result:
                if isinstance(item, TextResourceContents):
                    contents.append(
                        ResourceContent(
                            content=item.text,
                            mime_type=item.mimeType,
                            meta=item.meta,
                        )
                    )
                elif isinstance(item, BlobResourceContents):
                    # Blob payloads arrive base64-encoded over MCP.
                    contents.append(
                        ResourceContent(
                            content=base64.b64decode(item.blob),
                            mime_type=item.mimeType,
                            meta=item.meta,
                        )
                    )
                else:
                    raise ResourceError(f"Unsupported content type: {type(item)}")
            cached_content = ResourceResult(contents=contents)
            return ProxyResource(
                client_factory=self._client_factory,
                uri=parameterized_uri,
                name=self.name,
                title=self.title,
                description=self.description,
                mime_type=result[
                    0
                ].mimeType,  # Use first item's mimeType for backward compatibility
                icons=self.icons,
                meta=self.meta,
                tags=get_fastmcp_metadata(self.meta).get("tags", []),
                _cached_content=cached_content,
            )

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the remote template.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "ProxyProvider",
            "fastmcp.proxy.backend_uri_template": self._backend_uri_template,
        }
class ProxyPrompt(Prompt):
    """A Prompt that represents and renders a prompt from a remote server."""

    # Proxied components cannot run as background tasks.
    task_config: TaskConfig = TaskConfig(mode="forbidden")
    # Original remote prompt name, captured the first time the local name is
    # rewritten (e.g. by namespacing) so backend calls still use the remote name.
    _backend_name: str | None = None

    def __init__(self, client_factory: ClientFactoryT, **kwargs):
        super().__init__(**kwargs)
        self._client_factory = client_factory

    async def _get_client(self) -> Client:
        """Gets a client instance by calling the sync or async factory."""
        client = self._client_factory()
        if inspect.isawaitable(client):
            client = cast(Client, await client)
        return client

    def model_copy(self, **kwargs: Any) -> ProxyPrompt:
        """Override to preserve _backend_name when name changes."""
        update = kwargs.get("update", {})
        if "name" in update and self._backend_name is None:
            # First time name is being changed, preserve original for backend calls
            update = {**update, "_backend_name": self.name}
            kwargs["update"] = update
        return super().model_copy(**kwargs)

    @classmethod
    def from_mcp_prompt(
        cls, client_factory: ClientFactoryT, mcp_prompt: mcp.types.Prompt
    ) -> ProxyPrompt:
        """Factory method to create a ProxyPrompt from a raw MCP prompt schema."""
        # Normalize remote argument specs; a missing `required` means optional.
        arguments = [
            PromptArgument(
                name=arg.name,
                description=arg.description,
                required=arg.required or False,
            )
            for arg in mcp_prompt.arguments or []
        ]
        return cls(
            client_factory=client_factory,
            name=mcp_prompt.name,
            title=mcp_prompt.title,
            description=mcp_prompt.description,
            arguments=arguments,
            icons=mcp_prompt.icons,
            meta=mcp_prompt.meta,
            tags=get_fastmcp_metadata(mcp_prompt.meta).get("tags", []),
            task_config=TaskConfig(mode="forbidden"),
        )

    async def render(self, arguments: dict[str, Any]) -> PromptResult:  # type: ignore[override]
        """Render the prompt by making a call through the client."""
        backend_name = self._backend_name or self.name
        with client_span(
            f"prompts/get {backend_name}", "prompts/get", backend_name
        ) as span:
            span.set_attribute("fastmcp.provider.type", "ProxyProvider")
            client = await self._get_client()
            async with client:
                result = await client.get_prompt(backend_name, arguments)
                # Convert GetPromptResult to PromptResult, preserving meta from result
                # (not the static prompt meta which includes fastmcp tags)
                # Convert PromptMessages to Messages
                messages = [
                    Message(content=m.content, role=m.role) for m in result.messages
                ]
                return PromptResult(
                    messages=messages,
                    description=result.description,
                    meta=result.meta,
                )

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the remote prompt name.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "ProxyProvider",
            "fastmcp.proxy.backend_name": self._backend_name,
        }
# -----------------------------------------------------------------------------
# ProxyProvider
# -----------------------------------------------------------------------------
class ProxyProvider(Provider):
    """Provider that proxies to a remote MCP server via a client factory.

    Components are fetched from the remote server and wrapped in Proxy*
    instances that forward execution back to the remote server. Every
    component returned here carries task_config.mode="forbidden", since
    tasks cannot be executed through a proxy.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.providers.proxy import ProxyProvider, ProxyClient

        # Create a proxy provider for a remote server
        proxy = ProxyProvider(lambda: ProxyClient("http://localhost:8000/mcp"))

        mcp = FastMCP("Proxy Server")
        mcp.add_provider(proxy)

        # Can also add with namespace
        mcp.add_provider(proxy.with_namespace("remote"))
        ```
    """

    def __init__(
        self,
        client_factory: ClientFactoryT,
    ):
        """Initialize a ProxyProvider.

        Args:
            client_factory: A callable that returns a Client instance when
                called, giving full control over session creation and reuse.
                May be synchronous or asynchronous.
        """
        super().__init__()
        self.client_factory = client_factory

    async def _get_client(self) -> Client:
        """Gets a client instance by calling the sync or async factory."""
        candidate = self.client_factory()
        if inspect.isawaitable(candidate):
            candidate = cast(Client, await candidate)
        return candidate

    # -------------------------------------------------------------------------
    # Tool methods
    # -------------------------------------------------------------------------

    async def _list_tools(self) -> Sequence[Tool]:
        """List all tools from the remote server."""
        try:
            client = await self._get_client()
            async with client:
                remote_tools = await client.list_tools()
                proxies: list[Tool] = []
                for remote_tool in remote_tools:
                    proxies.append(
                        ProxyTool.from_mcp_tool(self.client_factory, remote_tool)
                    )
                return proxies
        except McpError as exc:
            # A backend without tools/list support simply has no tools.
            if exc.error.code == METHOD_NOT_FOUND:
                return []
            raise

    # -------------------------------------------------------------------------
    # Resource methods
    # -------------------------------------------------------------------------

    async def _list_resources(self) -> Sequence[Resource]:
        """List all resources from the remote server."""
        try:
            client = await self._get_client()
            async with client:
                remote_resources = await client.list_resources()
                proxies: list[Resource] = []
                for remote_resource in remote_resources:
                    proxies.append(
                        ProxyResource.from_mcp_resource(
                            self.client_factory, remote_resource
                        )
                    )
                return proxies
        except McpError as exc:
            # A backend without resources/list support simply has no resources.
            if exc.error.code == METHOD_NOT_FOUND:
                return []
            raise

    # -------------------------------------------------------------------------
    # Resource template methods
    # -------------------------------------------------------------------------

    async def _list_resource_templates(self) -> Sequence[ResourceTemplate]:
        """List all resource templates from the remote server."""
        try:
            client = await self._get_client()
            async with client:
                remote_templates = await client.list_resource_templates()
                proxies: list[ResourceTemplate] = []
                for remote_template in remote_templates:
                    proxies.append(
                        ProxyTemplate.from_mcp_template(
                            self.client_factory, remote_template
                        )
                    )
                return proxies
        except McpError as exc:
            # A backend without templates/list support simply has no templates.
            if exc.error.code == METHOD_NOT_FOUND:
                return []
            raise

    # -------------------------------------------------------------------------
    # Prompt methods
    # -------------------------------------------------------------------------

    async def _list_prompts(self) -> Sequence[Prompt]:
        """List all prompts from the remote server."""
        try:
            client = await self._get_client()
            async with client:
                remote_prompts = await client.list_prompts()
                proxies: list[Prompt] = []
                for remote_prompt in remote_prompts:
                    proxies.append(
                        ProxyPrompt.from_mcp_prompt(self.client_factory, remote_prompt)
                    )
                return proxies
        except McpError as exc:
            # A backend without prompts/list support simply has no prompts.
            if exc.error.code == METHOD_NOT_FOUND:
                return []
            raise

    # -------------------------------------------------------------------------
    # Task methods
    # -------------------------------------------------------------------------

    async def get_tasks(self) -> Sequence[FastMCPComponent]:
        """Return empty list since proxy components don't support tasks.

        Override the base implementation to avoid calling list_tools() during
        server lifespan initialization, which would open the client before any
        context is set. All Proxy* components have task_config.mode="forbidden".
        """
        return []

    # lifespan() uses default implementation (empty context manager)
    # because client cleanup is handled per-request
# -----------------------------------------------------------------------------
# Factory Functions
# -----------------------------------------------------------------------------
def _create_client_factory(
    target: (
        Client[ClientTransportT]
        | ClientTransport
        | FastMCP[Any]
        | FastMCP1Server
        | AnyUrl
        | Path
        | MCPConfig
        | dict[str, Any]
        | str
    ),
) -> ClientFactoryT:
    """Create a client factory from the given target.

    Internal helper that picks a session strategy from the target type:

    - Connected ProxyClient: fresh sessions per request (avoids context leaks)
    - Other connected Client: reuses the existing session (with a warning)
    - Disconnected Client: fresh sessions per request
    - Anything else: wrapped in a ProxyClient, fresh sessions per request
    """
    if not isinstance(target, Client):
        # Anything that isn't already a Client is a valid ProxyClient target.
        base_client = ProxyClient(cast(Any, target))

        def proxy_client_factory() -> Client:
            return base_client.new()

        return proxy_client_factory

    client = target
    if client.is_connected() and type(client) is ProxyClient:
        logger.info(
            "Proxy detected connected ProxyClient - creating fresh sessions for each "
            "request to avoid request context leakage."
        )

        def fresh_client_factory() -> Client:
            return client.new()

        return fresh_client_factory

    if client.is_connected():
        logger.info(
            "Proxy detected connected client - reusing existing session for all requests. "
            "This may cause context mixing in concurrent scenarios."
        )

        def reuse_client_factory() -> Client:
            return client

        return reuse_client_factory

    def fresh_client_factory() -> Client:
        return client.new()

    return fresh_client_factory
# -----------------------------------------------------------------------------
# FastMCPProxy - Convenience Wrapper
# -----------------------------------------------------------------------------
class FastMCPProxy(FastMCP):
    """A FastMCP server that acts as a proxy to a remote MCP-compliant server.

    Convenience wrapper that wires a ProxyProvider into a FastMCP server.
    For more control, use FastMCP with add_provider(ProxyProvider(...)).

    Example:
        ```python
        from fastmcp.server import create_proxy
        from fastmcp.server.providers.proxy import FastMCPProxy, ProxyClient

        # Create a proxy server using create_proxy (recommended)
        proxy = create_proxy("http://localhost:8000/mcp")

        # Or use FastMCPProxy directly with explicit client factory
        proxy = FastMCPProxy(client_factory=lambda: ProxyClient("http://localhost:8000/mcp"))
        ```
    """

    def __init__(
        self,
        *,
        client_factory: ClientFactoryT,
        **kwargs,
    ):
        """Initialize the proxy server.

        FastMCPProxy requires explicit session management via client_factory.
        Use create_proxy() for convenience with automatic session strategy.

        Args:
            client_factory: A callable (sync or async) that returns a Client
                instance, giving full control over session creation and reuse.
            **kwargs: Additional settings for the FastMCP server.
        """
        super().__init__(**kwargs)
        self.client_factory = client_factory
        self.add_provider(ProxyProvider(client_factory))
# -----------------------------------------------------------------------------
# ProxyClient and Related
# -----------------------------------------------------------------------------
async def default_proxy_roots_handler(
    context: RequestContext[ClientSession, LifespanContextT],
) -> RootsList:
    """Forward list roots request from remote server to proxy's connected clients."""
    return await get_context().list_roots()
async def default_proxy_sampling_handler(
    messages: list[mcp.types.SamplingMessage],
    params: mcp.types.CreateMessageRequestParams,
    context: RequestContext[ClientSession, LifespanContextT],
) -> mcp.types.CreateMessageResult:
    """Forward sampling request from remote server to proxy's connected clients."""
    ctx = get_context()
    sampled = await ctx.sample(
        list(messages),
        system_prompt=params.systemPrompt,
        temperature=params.temperature,
        max_tokens=params.maxTokens,
        model_preferences=params.modelPreferences,
    )
    # TODO(ty): remove when ty supports isinstance exclusion narrowing
    text_content = mcp.types.TextContent(type="text", text=sampled.text or "")
    return mcp.types.CreateMessageResult(
        role="assistant",
        model="fastmcp-client",
        content=text_content,
    )
async def default_proxy_elicitation_handler(
    message: str,
    response_type: type,
    params: mcp.types.ElicitRequestParams,
    context: RequestContext[ClientSession, LifespanContextT],
) -> ElicitResult:
    """Forward elicitation request from remote server to proxy's connected clients."""
    ctx = get_context()
    # requestedSchema only exists on ElicitRequestFormParams, not ElicitRequestURLParams
    if isinstance(params, ElicitRequestFormParams):
        requested_schema = params.requestedSchema
    else:
        requested_schema = {"type": "object", "properties": {}}
    result = await ctx.session.elicit(
        message=message,
        requestedSchema=requested_schema,
        related_request_id=ctx.request_id,
    )
    return ElicitResult(action=result.action, content=result.content)
async def default_proxy_log_handler(message: LogMessage) -> None:
    """Forward log notification from remote server to proxy's connected clients.

    FastMCP servers pack the log payload as ``{"msg": ..., "extra": ...}``,
    but the MCP spec allows ``data`` to be arbitrary JSON, so a non-FastMCP
    backend may send a bare string (or any other value). Calling ``.get`` on
    such a payload would raise AttributeError; fall back to forwarding the
    raw payload as the message instead.
    """
    ctx = get_context()
    if isinstance(message.data, dict):
        msg = message.data.get("msg")
        extra = message.data.get("extra")
    else:
        # Non-dict payload from a non-FastMCP backend: forward as-is.
        msg = message.data
        extra = None
    await ctx.log(msg, level=message.level, logger_name=message.logger, extra=extra)
async def default_proxy_progress_handler(
    progress: float,
    total: float | None,
    message: str | None,
) -> None:
    """Forward progress notification from remote server to proxy's connected clients."""
    await get_context().report_progress(progress, total, message)
def _restore_request_context(
    rc_ref: list[Any],
) -> None:
    """Set the ``request_ctx`` and ``_current_context`` ContextVars from stashed values.

    Called at the start of proxy handler invocations in
    ``StatefulProxyClient`` to fix stale ContextVars in the receive-loop
    task. Only overrides when the ContextVar is genuinely stale (same
    session, different request_id) to avoid corrupting the concurrent
    case where multiple sessions share the same ref via ``copy.copy``.

    We stash a ``(RequestContext, weakref[FastMCP])`` tuple — never a
    ``Context`` instance — because ``Context`` properties are themselves
    ContextVar-dependent and would resolve stale values in the receive
    loop. Instead we construct a fresh ``Context`` here after restoring
    ``request_ctx``, so its property accesses read the correct values.
    """
    # Deferred import — presumably avoids a circular import at module load;
    # confirm before hoisting to the top of the file.
    from fastmcp.server.context import Context, _current_context

    stashed = rc_ref[0]
    if stashed is None:
        # No backend call has stashed a context yet; nothing to restore.
        return
    rc, fastmcp_ref = stashed
    try:
        current_rc = request_ctx.get()
    except LookupError:
        # ContextVar unset in this task: install the stashed context outright.
        request_ctx.set(rc)
        fastmcp = fastmcp_ref()
        # weakref may be dead if the server has been garbage collected.
        if fastmcp is not None:
            _current_context.set(Context(fastmcp))
        return
    # Only override when stale: same session but a different request_id.
    if current_rc.session is rc.session and current_rc.request_id != rc.request_id:
        request_ctx.set(rc)
        fastmcp = fastmcp_ref()
        if fastmcp is not None:
            _current_context.set(Context(fastmcp))
def _make_restoring_handler(handler: Callable, rc_ref: list[Any]) -> Callable:
    """Wrap a proxy handler to restore request_ctx before delegating.

    The wrapper is a plain ``async def`` so it passes
    ``inspect.isfunction()`` checks in handler registration paths
    (e.g., ``create_roots_callback``).
    """

    async def restoring_handler(*args: Any, **kwargs: Any) -> Any:
        # Re-sync the ContextVars first, then forward to the real handler.
        _restore_request_context(rc_ref)
        return await handler(*args, **kwargs)

    return restoring_handler
class ProxyClient(Client[ClientTransportT]):
    """A proxy client that forwards advanced interactions between a remote MCP server and the proxy's connected clients.

    Supports forwarding roots, sampling, elicitation, logging, and progress.
    """

    def __init__(
        self,
        transport: ClientTransportT
        | FastMCP[Any]
        | FastMCP1Server
        | AnyUrl
        | Path
        | MCPConfig
        | dict[str, Any]
        | str,
        **kwargs,
    ):
        # Supply forwarding defaults for anything the caller didn't set.
        if "name" not in kwargs:
            kwargs["name"] = self.generate_name()
        for key, default_handler in (
            ("roots", default_proxy_roots_handler),
            ("sampling_handler", default_proxy_sampling_handler),
            ("elicitation_handler", default_proxy_elicitation_handler),
            ("log_handler", default_proxy_log_handler),
            ("progress_handler", default_proxy_progress_handler),
        ):
            if key not in kwargs:
                kwargs[key] = default_handler
        super().__init__(**kwargs | {"transport": transport})
class StatefulProxyClient(ProxyClient[ClientTransportT]):
    """A proxy client that provides a stateful client factory for the proxy server.

    Each copy of this client is bound to a server session and is force
    disconnected when that session exits. This is useful to proxy a stateful
    MCP server such as the Playwright MCP server. Note that it is essential
    to ensure that the proxy server itself is also stateful.

    Because session reuse means the receive-loop task inherits a stale
    ``request_ctx`` ContextVar snapshot, the default proxy handlers are
    replaced with versions that restore the ContextVar before forwarding.
    ``ProxyTool.run`` stashes the current ``RequestContext`` in
    ``_proxy_rc_ref`` before each backend call, and the handlers consult
    it to detect (and correct) staleness.
    """

    # Mutable list shared across copies (Client.new() uses copy.copy,
    # which preserves references to mutable containers). ProxyTool.run
    # writes [0] before each backend call; handlers read it to detect
    # stale ContextVars and restore the correct request_ctx.
    #
    # Stores a (RequestContext, weakref[FastMCP]) tuple — never a Context
    # instance — because Context properties are ContextVar-dependent and
    # would resolve stale values in the receive loop. The restore helper
    # constructs a fresh Context from the weakref after setting request_ctx.
    _proxy_rc_ref: list[Any]

    def __init__(self, *args: Any, **kwargs: Any):
        # Install context-restoring handler wrappers BEFORE super().__init__
        # registers them with the Client's session kwargs.
        self._proxy_rc_ref = [None]
        for key, default_fn in (
            ("roots", default_proxy_roots_handler),
            ("sampling_handler", default_proxy_sampling_handler),
            ("elicitation_handler", default_proxy_elicitation_handler),
            ("log_handler", default_proxy_log_handler),
            ("progress_handler", default_proxy_progress_handler),
        ):
            if key not in kwargs:
                kwargs[key] = _make_restoring_handler(default_fn, self._proxy_rc_ref)
        super().__init__(*args, **kwargs)
        # Per-session cache of bound client copies; see new_stateful().
        self._caches: dict[ServerSession, Client[ClientTransportT]] = {}

    async def __aexit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore[override]
        """The stateful proxy client will be forced disconnected when the session is exited.

        So we do nothing here.
        """

    async def clear(self):
        """Clear all cached clients and force disconnect them."""
        while self._caches:
            _, cache = self._caches.popitem()
            await cache._disconnect(force=True)

    def new_stateful(self) -> Client[ClientTransportT]:
        """Create a new stateful proxy client instance with the same configuration.

        Use this method as the client factory for stateful proxy server.
        """
        session = get_context().session
        proxy_client = self._caches.get(session, None)
        if proxy_client is None:
            proxy_client = self.new()
            logger.debug(f"{proxy_client} created for {session}")
            self._caches[session] = proxy_client

            async def _on_session_exit():
                # Evict from the cache and hard-disconnect when the session ends.
                self._caches.pop(session)
                logger.debug(f"{proxy_client} will be disconnect")
                await proxy_client._disconnect(force=True)

            # Disconnect is tied to the session's own exit stack.
            session._exit_stack.push_async_callback(_on_session_exit)
        return proxy_client
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/providers/proxy.py",
"license": "Apache License 2.0",
"lines": 817,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/routing.py | """Task routing helper for MCP components.
Provides unified task mode enforcement and docket routing logic.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
import mcp.types
from mcp.shared.exceptions import McpError
from mcp.types import METHOD_NOT_FOUND, ErrorData
from fastmcp.server.tasks.config import TaskMeta
from fastmcp.server.tasks.handlers import submit_to_docket
if TYPE_CHECKING:
from fastmcp.prompts.prompt import Prompt
from fastmcp.resources.resource import Resource
from fastmcp.resources.template import ResourceTemplate
from fastmcp.tools.tool import Tool
# Component categories that can be routed to docket for background execution.
TaskType = Literal["tool", "resource", "template", "prompt"]
async def check_background_task(
    component: Tool | Resource | ResourceTemplate | Prompt,
    task_type: TaskType,
    arguments: dict[str, Any] | None = None,
    task_meta: TaskMeta | None = None,
) -> mcp.types.CreateTaskResult | None:
    """Check task mode and submit to background if requested.

    Args:
        component: The MCP component
        task_type: Type of task ("tool", "resource", "template", "prompt")
        arguments: Arguments for tool/prompt/template execution
        task_meta: Task execution metadata. If provided, execute as background task.

    Returns:
        CreateTaskResult if submitted to docket, None for sync execution

    Raises:
        McpError: If mode="required" but no task metadata, or mode="forbidden"
            but task metadata is present
    """
    config = component.task_config
    # Human-readable label for error messages, e.g. "Tool 'my_tool'".
    label = f"{type(component).__name__} '{component.title or component.key}'"

    def _unsupported(detail: str) -> McpError:
        # Both mode violations surface as METHOD_NOT_FOUND protocol errors.
        return McpError(ErrorData(code=METHOD_NOT_FOUND, message=detail))

    # mode="required" demands task metadata on every call.
    if config.mode == "required" and not task_meta:
        raise _unsupported(f"{label} requires task-augmented execution")

    # mode="forbidden" (or otherwise unsupported) rejects task metadata.
    if task_meta and not config.supports_tasks():
        raise _unsupported(f"{label} does not support task-augmented execution")

    # No task metadata: caller should execute synchronously.
    if not task_meta:
        return None

    # fn_key is expected to be set; fall back to component.key for direct calls.
    fn_key = task_meta.fn_key or component.key
    return await submit_to_docket(task_type, fn_key, component, arguments, task_meta)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/routing.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/server/tasks/test_custom_subclass_tasks.py | """Tests for custom component subclasses with task support.
Verifies that custom Tool, Resource, and Prompt subclasses can use
background task execution by setting task_config.
"""
import asyncio
from typing import Any
from unittest.mock import MagicMock
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.tasks import TaskConfig
from fastmcp.tools.tool import Tool, ToolResult
from fastmcp.utilities.components import FastMCPComponent
class CustomTool(Tool):
    """A custom tool subclass with task support."""

    task_config: TaskConfig = TaskConfig(mode="optional")
    parameters: dict[str, Any] = {"type": "object", "properties": {}}

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        reply = f"Custom tool executed with {arguments}"
        return ToolResult(content=reply)
class CustomToolWithLogic(Tool):
    """A custom tool with actual async work."""

    task_config: TaskConfig = TaskConfig(mode="optional")
    parameters: dict[str, Any] = {
        "type": "object",
        "properties": {"duration": {"type": "integer"}},
    }

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        duration = arguments.get("duration", 0)
        # Scale down so the suite stays fast while still yielding control.
        await asyncio.sleep(duration * 0.01)
        return ToolResult(content=f"Completed after {duration} units")
class CustomToolForbidden(Tool):
    """A custom tool with task_config forbidden (default)."""

    # No task_config override: inherits the default mode="forbidden".
    parameters: dict[str, Any] = {"type": "object", "properties": {}}

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        return ToolResult(content="Sync only")
@pytest.fixture
def custom_tool_server():
    """Create a server with custom tool subclasses."""
    mcp = FastMCP("custom-tool-server")
    for tool in (
        CustomTool(name="custom_tool", description="A custom tool"),
        CustomToolWithLogic(name="custom_logic", description="Custom tool with logic"),
        CustomToolForbidden(name="custom_forbidden", description="No task support"),
    ):
        mcp.add_tool(tool)
    return mcp
async def test_custom_tool_sync_execution(custom_tool_server):
    """Custom tool executes synchronously when no task metadata."""
    async with Client(custom_tool_server) as client:
        outcome = await client.call_tool("custom_tool", {})
        assert "Custom tool executed" in str(outcome)
async def test_custom_tool_background_execution(custom_tool_server):
    """Custom tool executes as background task when task=True."""
    async with Client(custom_tool_server) as client:
        submitted = await client.call_tool("custom_tool", {}, task=True)
        assert submitted is not None
        assert not submitted.returned_immediately
        assert submitted.task_id is not None
        # Block until the background task finishes and check its output.
        outcome = await submitted.result()
        assert "Custom tool executed" in str(outcome)
async def test_custom_tool_with_arguments(custom_tool_server):
    """Custom tool receives arguments correctly in background execution."""
    async with Client(custom_tool_server) as client:
        submitted = await client.call_tool("custom_logic", {"duration": 1}, task=True)
        assert submitted is not None
        outcome = await submitted.result()
        assert "Completed after 1 units" in str(outcome)
async def test_custom_tool_forbidden_sync_only(custom_tool_server):
    """Custom tool with forbidden mode executes sync only."""
    async with Client(custom_tool_server) as client:
        # Plain synchronous invocation still works.
        outcome = await client.call_tool("custom_forbidden", {})
        assert "Sync only" in str(outcome)
async def test_custom_tool_forbidden_rejects_task(custom_tool_server):
    """Custom tool with forbidden mode returns error for task request."""
    async with Client(custom_tool_server) as client:
        submitted = await client.call_tool("custom_forbidden", {}, task=True)
        # Forbidden mode means no background submission: immediate response.
        assert submitted.returned_immediately
async def test_custom_tool_registers_with_docket():
    """Verify custom tool's register_with_docket is called during server startup.

    Note: the redundant function-local ``from unittest.mock import MagicMock``
    was removed — MagicMock is already imported at module level.
    """
    tool = CustomTool(name="test", description="test")
    mock_docket = MagicMock()
    tool.register_with_docket(mock_docket)
    # Should register self.run with docket using prefixed key
    mock_docket.register.assert_called_once()
    call_args = mock_docket.register.call_args
    assert call_args[1]["names"] == ["tool:test@"]
async def test_custom_tool_forbidden_does_not_register():
    """Verify custom tool with forbidden mode doesn't register with docket."""
    docket = MagicMock()
    forbidden_tool = CustomToolForbidden(name="test", description="test")
    forbidden_tool.register_with_docket(docket)
    # Forbidden mode: nothing should have been registered.
    docket.register.assert_not_called()
# ==============================================================================
# Base FastMCPComponent Tests
# ==============================================================================
class TestFastMCPComponentDocketMethods:
    """Tests for base FastMCPComponent docket integration."""

    def test_default_task_config_is_forbidden(self):
        """Base component defaults to task_config mode='forbidden'."""
        assert FastMCPComponent(name="test").task_config.mode == "forbidden"

    def test_register_with_docket_is_noop(self):
        """Base register_with_docket does nothing (subclasses override)."""
        docket = MagicMock()
        FastMCPComponent(name="test").register_with_docket(docket)
        # The base implementation must not touch the docket at all.
        docket.register.assert_not_called()

    async def test_add_to_docket_raises_when_forbidden(self):
        """Base add_to_docket raises RuntimeError when mode is 'forbidden'."""
        component = FastMCPComponent(name="test")
        with pytest.raises(RuntimeError, match="task execution not supported"):
            await component.add_to_docket(MagicMock())

    async def test_add_to_docket_raises_not_implemented_when_allowed(self):
        """Base add_to_docket raises NotImplementedError when not forbidden."""
        component = FastMCPComponent(
            name="test", task_config=TaskConfig(mode="optional")
        )
        with pytest.raises(
            NotImplementedError, match="does not implement add_to_docket"
        ):
            await component.add_to_docket(MagicMock())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_custom_subclass_tasks.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/providers/fastmcp_provider.py | """FastMCPProvider for wrapping FastMCP servers as providers.
This module provides the `FastMCPProvider` class that wraps a FastMCP server
and exposes its components through the Provider interface.
It also provides FastMCPProvider* component classes that delegate execution to
the wrapped server's middleware, ensuring middleware runs when components are
executed.
"""
from __future__ import annotations
import re
from collections.abc import AsyncIterator, Sequence
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, Any, overload
from urllib.parse import quote
import mcp.types
from mcp.types import AnyUrl
from fastmcp.prompts.prompt import Prompt, PromptResult
from fastmcp.resources.resource import Resource, ResourceResult
from fastmcp.resources.template import ResourceTemplate
from fastmcp.server.providers.base import Provider
from fastmcp.server.tasks.config import TaskMeta
from fastmcp.server.telemetry import delegate_span
from fastmcp.tools.tool import Tool, ToolResult
from fastmcp.utilities.components import FastMCPComponent
from fastmcp.utilities.versions import VersionSpec
if TYPE_CHECKING:
from docket import Docket
from docket.execution import Execution
from fastmcp.server.server import FastMCP
def _expand_uri_template(template: str, params: dict[str, Any]) -> str:
"""Expand a URI template with parameters.
Handles both {name} path placeholders and RFC 6570 {?param1,param2}
query parameter syntax.
"""
result = template
# Replace {name} path placeholders
for key, value in params.items():
result = re.sub(rf"\{{{key}\}}", str(value), result)
# Expand {?param1,param2,...} query parameter blocks
def _expand_query_block(match: re.Match[str]) -> str:
names = [n.strip() for n in match.group(1).split(",")]
parts = []
for name in names:
if name in params:
parts.append(f"{quote(name)}={quote(str(params[name]))}")
if parts:
return "?" + "&".join(parts)
return ""
result = re.sub(r"\{\?([^}]+)\}", _expand_query_block, result)
return result
# -----------------------------------------------------------------------------
# FastMCPProvider component classes
# -----------------------------------------------------------------------------
class FastMCPProviderTool(Tool):
    """Tool that delegates execution to a wrapped server's middleware.

    When `run()` is called, this tool invokes the wrapped server's
    `_call_tool_middleware()` method, ensuring the server's middleware
    chain is executed.
    """

    # Private delegation state; typed Any / None defaults so this module
    # need not import FastMCP at load time (circular import).
    _server: Any = None  # FastMCP, but Any to avoid circular import
    _original_name: str | None = None

    def __init__(
        self,
        server: Any,
        original_name: str,
        **kwargs: Any,
    ):
        """Create a delegating tool.

        Args:
            server: The wrapped FastMCP server that executes the tool.
            original_name: The tool's name on the wrapped server.
            **kwargs: Forwarded to the Tool base constructor.
        """
        super().__init__(**kwargs)
        self._server = server
        self._original_name = original_name

    @classmethod
    def wrap(cls, server: Any, tool: Tool) -> FastMCPProviderTool:
        """Wrap a Tool to delegate execution to the server's middleware."""
        # Copy all externally visible metadata so the wrapper looks the
        # same as the source tool in listings.
        return cls(
            server=server,
            original_name=tool.name,
            name=tool.name,
            version=tool.version,
            description=tool.description,
            parameters=tool.parameters,
            output_schema=tool.output_schema,
            tags=tool.tags,
            annotations=tool.annotations,
            task_config=tool.task_config,
            meta=tool.get_meta(),
            title=tool.title,
            icons=tool.icons,
        )

    @overload
    async def _run(
        self,
        arguments: dict[str, Any],
        task_meta: None = None,
    ) -> ToolResult: ...

    @overload
    async def _run(
        self,
        arguments: dict[str, Any],
        task_meta: TaskMeta,
    ) -> mcp.types.CreateTaskResult: ...

    async def _run(
        self,
        arguments: dict[str, Any],
        task_meta: TaskMeta | None = None,
    ) -> ToolResult | mcp.types.CreateTaskResult:
        """Delegate to child server's call_tool() with task_meta.

        Passes task_meta through to the child server so it can handle
        backgrounding appropriately. fn_key is already set by the parent
        server before calling this method.
        """
        # Pass exact version so child executes the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        with delegate_span(
            self._original_name or "", "FastMCPProvider", self._original_name or ""
        ):
            return await self._server.call_tool(
                self._original_name, arguments, version=version, task_meta=task_meta
            )

    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        """Delegate to child server's call_tool() without task_meta.

        This is called when the tool is used within a TransformedTool
        forwarding function or other contexts where task_meta is not available.
        """
        # Pass exact version so child executes the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        result = await self._server.call_tool(
            self._original_name, arguments, version=version
        )
        # Result from call_tool should always be ToolResult when no task_meta;
        # anything else indicates a protocol violation, so fail loudly.
        if isinstance(result, mcp.types.CreateTaskResult):
            raise RuntimeError(
                "Unexpected CreateTaskResult from call_tool without task_meta"
            )
        return result

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the delegated name.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "FastMCPProvider",
            "fastmcp.delegate.original_name": self._original_name,
        }
class FastMCPProviderResource(Resource):
    """Resource that delegates reading to a wrapped server's read_resource().

    When `read()` is called, this resource invokes the wrapped server's
    `read_resource()` method, ensuring the server's middleware chain is executed.
    """

    # Private delegation state; typed Any / None defaults so this module
    # need not import FastMCP at load time (circular import).
    _server: Any = None  # FastMCP, but Any to avoid circular import
    _original_uri: str | None = None

    def __init__(
        self,
        server: Any,
        original_uri: str,
        **kwargs: Any,
    ):
        """Create a delegating resource.

        Args:
            server: The wrapped FastMCP server that performs the read.
            original_uri: The resource's URI on the wrapped server.
            **kwargs: Forwarded to the Resource base constructor.
        """
        super().__init__(**kwargs)
        self._server = server
        self._original_uri = original_uri

    @classmethod
    def wrap(cls, server: Any, resource: Resource) -> FastMCPProviderResource:
        """Wrap a Resource to delegate reading to the server's middleware."""
        # Copy all externally visible metadata so the wrapper looks the
        # same as the source resource in listings.
        return cls(
            server=server,
            original_uri=str(resource.uri),
            uri=resource.uri,
            version=resource.version,
            name=resource.name,
            description=resource.description,
            mime_type=resource.mime_type,
            tags=resource.tags,
            annotations=resource.annotations,
            task_config=resource.task_config,
            meta=resource.get_meta(),
            title=resource.title,
            icons=resource.icons,
        )

    @overload
    async def _read(self, task_meta: None = None) -> ResourceResult: ...

    @overload
    async def _read(self, task_meta: TaskMeta) -> mcp.types.CreateTaskResult: ...

    async def _read(
        self, task_meta: TaskMeta | None = None
    ) -> ResourceResult | mcp.types.CreateTaskResult:
        """Delegate to child server's read_resource() with task_meta.

        Passes task_meta through to the child server so it can handle
        backgrounding appropriately. fn_key is already set by the parent
        server before calling this method.
        """
        # Pass exact version so child reads the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        with delegate_span(
            self._original_uri or "", "FastMCPProvider", self._original_uri or ""
        ):
            return await self._server.read_resource(
                self._original_uri, version=version, task_meta=task_meta
            )

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the delegated URI.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "FastMCPProvider",
            "fastmcp.delegate.original_uri": self._original_uri,
        }
class FastMCPProviderPrompt(Prompt):
    """Prompt that delegates rendering to a wrapped server's render_prompt().

    When `render()` is called, this prompt invokes the wrapped server's
    `render_prompt()` method, ensuring the server's middleware chain is executed.
    """

    # Private delegation state; typed Any / None defaults so this module
    # need not import FastMCP at load time (circular import).
    _server: Any = None  # FastMCP, but Any to avoid circular import
    _original_name: str | None = None

    def __init__(
        self,
        server: Any,
        original_name: str,
        **kwargs: Any,
    ):
        """Create a delegating prompt.

        Args:
            server: The wrapped FastMCP server that renders the prompt.
            original_name: The prompt's name on the wrapped server.
            **kwargs: Forwarded to the Prompt base constructor.
        """
        super().__init__(**kwargs)
        self._server = server
        self._original_name = original_name

    @classmethod
    def wrap(cls, server: Any, prompt: Prompt) -> FastMCPProviderPrompt:
        """Wrap a Prompt to delegate rendering to the server's middleware."""
        # Copy all externally visible metadata so the wrapper looks the
        # same as the source prompt in listings.
        return cls(
            server=server,
            original_name=prompt.name,
            name=prompt.name,
            version=prompt.version,
            description=prompt.description,
            arguments=prompt.arguments,
            tags=prompt.tags,
            task_config=prompt.task_config,
            meta=prompt.get_meta(),
            title=prompt.title,
            icons=prompt.icons,
        )

    @overload
    async def _render(
        self,
        arguments: dict[str, Any] | None = None,
        task_meta: None = None,
    ) -> PromptResult: ...

    @overload
    async def _render(
        self,
        arguments: dict[str, Any] | None,
        task_meta: TaskMeta,
    ) -> mcp.types.CreateTaskResult: ...

    async def _render(
        self,
        arguments: dict[str, Any] | None = None,
        task_meta: TaskMeta | None = None,
    ) -> PromptResult | mcp.types.CreateTaskResult:
        """Delegate to child server's render_prompt() with task_meta.

        Passes task_meta through to the child server so it can handle
        backgrounding appropriately. fn_key is already set by the parent
        server before calling this method.
        """
        # Pass exact version so child renders the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        with delegate_span(
            self._original_name or "", "FastMCPProvider", self._original_name or ""
        ):
            return await self._server.render_prompt(
                self._original_name, arguments, version=version, task_meta=task_meta
            )

    async def render(self, arguments: dict[str, Any] | None = None) -> PromptResult:
        """Delegate to child server's render_prompt() without task_meta.

        This is called when the prompt is used within a transformed context
        or other contexts where task_meta is not available.
        """
        # Pass exact version so child renders the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        result = await self._server.render_prompt(
            self._original_name, arguments, version=version
        )
        # Result from render_prompt should always be PromptResult when no
        # task_meta; anything else indicates a protocol violation.
        if isinstance(result, mcp.types.CreateTaskResult):
            raise RuntimeError(
                "Unexpected CreateTaskResult from render_prompt without task_meta"
            )
        return result

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the delegated name.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "FastMCPProvider",
            "fastmcp.delegate.original_name": self._original_name,
        }
class FastMCPProviderResourceTemplate(ResourceTemplate):
    """Resource template that creates FastMCPProviderResources.

    When `create_resource()` is called, this template creates a
    FastMCPProviderResource that will invoke the wrapped server's middleware
    when read.
    """

    # Private delegation state; typed Any / None defaults so this module
    # need not import FastMCP at load time (circular import).
    _server: Any = None  # FastMCP, but Any to avoid circular import
    _original_uri_template: str | None = None

    def __init__(
        self,
        server: Any,
        original_uri_template: str,
        **kwargs: Any,
    ):
        """Create a delegating resource template.

        Args:
            server: The wrapped FastMCP server that performs reads.
            original_uri_template: The template's URI pattern on the
                wrapped server.
            **kwargs: Forwarded to the ResourceTemplate base constructor.
        """
        super().__init__(**kwargs)
        self._server = server
        self._original_uri_template = original_uri_template

    @classmethod
    def wrap(
        cls, server: Any, template: ResourceTemplate
    ) -> FastMCPProviderResourceTemplate:
        """Wrap a ResourceTemplate to create FastMCPProviderResources."""
        # Copy all externally visible metadata so the wrapper looks the
        # same as the source template in listings.
        return cls(
            server=server,
            original_uri_template=template.uri_template,
            uri_template=template.uri_template,
            version=template.version,
            name=template.name,
            description=template.description,
            mime_type=template.mime_type,
            parameters=template.parameters,
            tags=template.tags,
            annotations=template.annotations,
            task_config=template.task_config,
            meta=template.get_meta(),
            title=template.title,
            icons=template.icons,
        )

    async def create_resource(self, uri: str, params: dict[str, Any]) -> Resource:
        """Create a FastMCPProviderResource for the given URI.

        The `uri` is the external/transformed URI (e.g., with namespace prefix).
        We use `_original_uri_template` with `params` to construct the internal
        URI that the nested server understands.
        """
        # Expand the original template with params to get internal URI
        original_uri = _expand_uri_template(self._original_uri_template or "", params)
        # NOTE(review): unlike wrap(), only name/description/mime_type are
        # propagated here (no version/tags/annotations) — confirm intentional.
        return FastMCPProviderResource(
            server=self._server,
            original_uri=original_uri,
            uri=AnyUrl(uri),
            name=self.name,
            description=self.description,
            mime_type=self.mime_type,
        )

    @overload
    async def _read(
        self, uri: str, params: dict[str, Any], task_meta: None = None
    ) -> ResourceResult: ...

    @overload
    async def _read(
        self, uri: str, params: dict[str, Any], task_meta: TaskMeta
    ) -> mcp.types.CreateTaskResult: ...

    async def _read(
        self, uri: str, params: dict[str, Any], task_meta: TaskMeta | None = None
    ) -> ResourceResult | mcp.types.CreateTaskResult:
        """Delegate to child server's read_resource() with task_meta.

        Passes task_meta through to the child server so it can handle
        backgrounding appropriately. fn_key is already set by the parent
        server before calling this method.
        """
        # Expand the original template with params to get internal URI
        original_uri = _expand_uri_template(self._original_uri_template or "", params)
        # Pass exact version so child reads the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        with delegate_span(
            original_uri, "FastMCPProvider", self._original_uri_template or ""
        ):
            return await self._server.read_resource(
                original_uri, version=version, task_meta=task_meta
            )

    async def read(self, arguments: dict[str, Any]) -> str | bytes | ResourceResult:
        """Read the resource content for background task execution.

        Reads the resource via the wrapped server and returns the ResourceResult.
        This method is called by Docket during background task execution.
        """
        # Expand the original template with arguments to get internal URI
        original_uri = _expand_uri_template(
            self._original_uri_template or "", arguments
        )
        # Pass exact version so child reads the correct version
        version = VersionSpec(eq=self.version) if self.version else None
        # Read from the wrapped server
        result = await self._server.read_resource(original_uri, version=version)
        if isinstance(result, mcp.types.CreateTaskResult):
            raise RuntimeError("Unexpected CreateTaskResult during Docket execution")
        return result

    def register_with_docket(self, docket: Docket) -> None:
        """No-op: the child's actual template is registered via get_tasks()."""

    async def add_to_docket(
        self,
        docket: Docket,
        params: dict[str, Any],
        *,
        fn_key: str | None = None,
        task_key: str | None = None,
        **kwargs: Any,
    ) -> Execution:
        """Schedule this template for background execution via docket.

        The child's FunctionResourceTemplate.fn is registered (via get_tasks),
        and it expects splatted **kwargs, so we splat params here.

        Args:
            docket: The Docket instance to schedule on.
            params: Template parameters splatted into the registered function.
            fn_key: Registration key to look up; defaults to this
                component's own key.
            task_key: Optional explicit task key, forwarded as ``key``.
            **kwargs: Additional options forwarded to ``docket.add``.
        """
        lookup_key = fn_key or self.key
        if task_key:
            kwargs["key"] = task_key
        return await docket.add(lookup_key, **kwargs)(**params)

    def get_span_attributes(self) -> dict[str, Any]:
        # Tag telemetry spans with the provider type and the delegated template.
        return super().get_span_attributes() | {
            "fastmcp.provider.type": "FastMCPProvider",
            "fastmcp.delegate.original_uri_template": self._original_uri_template,
        }
# -----------------------------------------------------------------------------
# FastMCPProvider
# -----------------------------------------------------------------------------
class FastMCPProvider(Provider):
    """Provider that wraps a FastMCP server.

    This provider enables mounting one FastMCP server onto another, exposing
    the mounted server's tools, resources, and prompts through the parent
    server.

    Components returned by this provider are wrapped in FastMCPProvider*
    classes that delegate execution to the wrapped server's middleware chain.
    This ensures middleware runs when components are executed.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.providers import FastMCPProvider

        main = FastMCP("Main")
        sub = FastMCP("Sub")

        @sub.tool
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        # Mount directly - tools accessible by original names
        main.add_provider(FastMCPProvider(sub))

        # Or with namespace
        from fastmcp.server.transforms import Namespace
        provider = FastMCPProvider(sub)
        provider.add_transform(Namespace("sub"))
        main.add_provider(provider)
        ```

    Note:
        Normally you would use `FastMCP.mount()` which handles proxy conversion
        and creates the provider with namespace automatically.
    """

    def __init__(self, server: FastMCP[Any]):
        """Initialize a FastMCPProvider.

        Args:
            server: The FastMCP server to wrap.
        """
        super().__init__()
        self.server = server

    # -------------------------------------------------------------------------
    # Tool methods
    # -------------------------------------------------------------------------
    async def _list_tools(self) -> Sequence[Tool]:
        """List all tools from the mounted server as FastMCPProviderTools.

        Runs the mounted server's middleware so filtering/transformation applies.
        Wraps each tool as a FastMCPProviderTool that delegates execution to
        the nested server's middleware.
        """
        raw_tools = await self.server.list_tools()
        return [FastMCPProviderTool.wrap(self.server, t) for t in raw_tools]

    async def _get_tool(
        self, name: str, version: VersionSpec | None = None
    ) -> Tool | None:
        """Get a tool by name as a FastMCPProviderTool.

        Passes the full VersionSpec to the nested server, which handles both
        exact version matching and range filtering. Uses get_tool to ensure
        the nested server's transforms are applied.

        Returns None when the nested server has no matching tool.
        """
        raw_tool = await self.server.get_tool(name, version)
        if raw_tool is None:
            return None
        return FastMCPProviderTool.wrap(self.server, raw_tool)

    # -------------------------------------------------------------------------
    # Resource methods
    # -------------------------------------------------------------------------
    async def _list_resources(self) -> Sequence[Resource]:
        """List all resources from the mounted server as FastMCPProviderResources.

        Runs the mounted server's middleware so filtering/transformation applies.
        Wraps each resource as a FastMCPProviderResource that delegates reading
        to the nested server's middleware.
        """
        raw_resources = await self.server.list_resources()
        return [FastMCPProviderResource.wrap(self.server, r) for r in raw_resources]

    async def _get_resource(
        self, uri: str, version: VersionSpec | None = None
    ) -> Resource | None:
        """Get a concrete resource by URI as a FastMCPProviderResource.

        Passes the full VersionSpec to the nested server, which handles both
        exact version matching and range filtering. Uses get_resource to ensure
        the nested server's transforms are applied.

        Returns None when the nested server has no matching resource.
        """
        raw_resource = await self.server.get_resource(uri, version)
        if raw_resource is None:
            return None
        return FastMCPProviderResource.wrap(self.server, raw_resource)

    # -------------------------------------------------------------------------
    # Resource template methods
    # -------------------------------------------------------------------------
    async def _list_resource_templates(self) -> Sequence[ResourceTemplate]:
        """List all resource templates from the mounted server.

        Runs the mounted server's middleware so filtering/transformation applies.
        Returns FastMCPProviderResourceTemplate instances that create
        FastMCPProviderResources when materialized.
        """
        raw_templates = await self.server.list_resource_templates()
        return [
            FastMCPProviderResourceTemplate.wrap(self.server, t) for t in raw_templates
        ]

    async def _get_resource_template(
        self, uri: str, version: VersionSpec | None = None
    ) -> ResourceTemplate | None:
        """Get a resource template that matches the given URI.

        Passes the full VersionSpec to the nested server, which handles both
        exact version matching and range filtering. Uses get_resource_template
        to ensure the nested server's transforms are applied.

        Returns None when the nested server has no matching template.
        """
        raw_template = await self.server.get_resource_template(uri, version)
        if raw_template is None:
            return None
        return FastMCPProviderResourceTemplate.wrap(self.server, raw_template)

    # -------------------------------------------------------------------------
    # Prompt methods
    # -------------------------------------------------------------------------
    async def _list_prompts(self) -> Sequence[Prompt]:
        """List all prompts from the mounted server as FastMCPProviderPrompts.

        Runs the mounted server's middleware so filtering/transformation applies.
        Returns FastMCPProviderPrompt instances that delegate rendering to the
        wrapped server's middleware.
        """
        raw_prompts = await self.server.list_prompts()
        return [FastMCPProviderPrompt.wrap(self.server, p) for p in raw_prompts]

    async def _get_prompt(
        self, name: str, version: VersionSpec | None = None
    ) -> Prompt | None:
        """Get a prompt by name as a FastMCPProviderPrompt.

        Passes the full VersionSpec to the nested server, which handles both
        exact version matching and range filtering. Uses get_prompt to ensure
        the nested server's transforms are applied.

        Returns None when the nested server has no matching prompt.
        """
        raw_prompt = await self.server.get_prompt(name, version)
        if raw_prompt is None:
            return None
        return FastMCPProviderPrompt.wrap(self.server, raw_prompt)

    # -------------------------------------------------------------------------
    # Task registration
    # -------------------------------------------------------------------------
    async def get_tasks(self) -> Sequence[FastMCPComponent]:
        """Return task-eligible components from the mounted server.

        Returns the child's ACTUAL components (not wrapped) so their actual
        functions get registered with Docket. Gets components with child
        server's transforms applied, then applies this provider's transforms
        for correct registration keys.
        """
        # Get tasks with child server's transforms already applied
        components = list(await self.server.get_tasks())
        # Separate by type for this provider's transform application
        tools = [c for c in components if isinstance(c, Tool)]
        resources = [c for c in components if isinstance(c, Resource)]
        templates = [c for c in components if isinstance(c, ResourceTemplate)]
        prompts = [c for c in components if isinstance(c, Prompt)]
        # Apply this provider's transforms sequentially
        for transform in self.transforms:
            tools = await transform.list_tools(tools)
            resources = await transform.list_resources(resources)
            templates = await transform.list_resource_templates(templates)
            prompts = await transform.list_prompts(prompts)
        # Filter to only task-eligible components (same as base Provider)
        return [
            c
            for c in [
                *tools,
                *resources,
                *templates,
                *prompts,
            ]
            if c.task_config.supports_tasks()
        ]

    # -------------------------------------------------------------------------
    # Lifecycle methods
    # -------------------------------------------------------------------------
    @asynccontextmanager
    async def lifespan(self) -> AsyncIterator[None]:
        """Start the mounted server's user lifespan.

        This starts only the wrapped server's user-defined lifespan, NOT its
        full _lifespan_manager() (which includes Docket). The parent server's
        Docket handles all background tasks.
        """
        async with self.server._lifespan(self.server):
            yield
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/providers/fastmcp_provider.py",
"license": "Apache License 2.0",
"lines": 579,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/server/providers/test_fastmcp_provider.py | """Tests for FastMCPProvider."""
import mcp.types as mt
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.prompts.prompt import PromptResult
from fastmcp.resources.resource import ResourceResult
from fastmcp.server.middleware import CallNext, Middleware, MiddlewareContext
from fastmcp.server.providers import FastMCPProvider
from fastmcp.tools.tool import ToolResult
class ToolTracingMiddleware(Middleware):
    """Records before/after markers around tool calls in a shared list."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        self._label = name
        self._trace = calls

    async def on_call_tool(
        self,
        context: MiddlewareContext[mt.CallToolRequestParams],
        call_next: CallNext[mt.CallToolRequestParams, ToolResult],
    ) -> ToolResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class ResourceTracingMiddleware(Middleware):
    """Records before/after markers around resource reads in a shared list."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        self._label = name
        self._trace = calls

    async def on_read_resource(
        self,
        context: MiddlewareContext[mt.ReadResourceRequestParams],
        call_next: CallNext[mt.ReadResourceRequestParams, ResourceResult],
    ) -> ResourceResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class PromptTracingMiddleware(Middleware):
    """Records before/after markers around prompt gets in a shared list."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        self._label = name
        self._trace = calls

    async def on_get_prompt(
        self,
        context: MiddlewareContext[mt.GetPromptRequestParams],
        call_next: CallNext[mt.GetPromptRequestParams, PromptResult],
    ) -> PromptResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class TestToolOperations:
    """Test tool operations through FastMCPProvider."""

    async def test_list_tools(self):
        """Listing exposes every tool registered on the wrapped server."""
        srv = FastMCP("Test")

        @srv.tool
        def tool_one() -> str:
            return "one"

        @srv.tool
        def tool_two() -> str:
            return "two"

        listed = await FastMCPProvider(srv).list_tools()
        assert len(listed) == 2
        assert {t.name for t in listed} == {"tool_one", "tool_two"}

    async def test_get_tool(self):
        """A tool can be fetched by name through the provider."""
        srv = FastMCP("Test")

        @srv.tool
        def my_tool() -> str:
            return "result"

        fetched = await FastMCPProvider(srv).get_tool("my_tool")
        assert fetched is not None
        assert fetched.name == "my_tool"

    async def test_get_nonexistent_tool_returns_none(self):
        """Fetching an unknown tool name yields None."""
        provider = FastMCPProvider(FastMCP("Test"))
        assert await provider.get_tool("nonexistent") is None

    async def test_call_tool_via_client(self):
        """A client can invoke a tool exposed through the provider."""
        inner = FastMCP("Sub")

        @inner.tool
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        outer = FastMCP("Main")
        outer.add_provider(FastMCPProvider(inner))

        async with Client(outer) as client:
            outcome = await client.call_tool("greet", {"name": "World"})
            assert outcome.data == "Hello, World!"
class TestResourceOperations:
    """Test resource operations through FastMCPProvider."""

    async def test_list_resources(self):
        """Listing exposes every resource registered on the wrapped server."""
        srv = FastMCP("Test")

        @srv.resource("resource://one")
        def resource_one() -> str:
            return "one"

        @srv.resource("resource://two")
        def resource_two() -> str:
            return "two"

        listed = await FastMCPProvider(srv).list_resources()
        assert len(listed) == 2
        assert {str(r.uri) for r in listed} == {"resource://one", "resource://two"}

    async def test_get_resource(self):
        """A resource can be fetched by URI through the provider."""
        srv = FastMCP("Test")

        @srv.resource("resource://data")
        def my_resource() -> str:
            return "content"

        fetched = await FastMCPProvider(srv).get_resource("resource://data")
        assert fetched is not None
        assert str(fetched.uri) == "resource://data"

    async def test_read_resource_via_client(self):
        """A client can read a resource exposed through the provider."""
        inner = FastMCP("Sub")

        @inner.resource("resource://data")
        def my_resource() -> str:
            return "content"

        outer = FastMCP("Main")
        outer.add_provider(FastMCPProvider(inner))

        async with Client(outer) as client:
            contents = await client.read_resource("resource://data")
            assert isinstance(contents[0], mt.TextResourceContents)
            assert contents[0].text == "content"
class TestResourceTemplateOperations:
    """Test resource template operations through FastMCPProvider."""

    async def test_list_resource_templates(self):
        """Listing exposes templates registered on the wrapped server."""
        srv = FastMCP("Test")

        @srv.resource("resource://{id}/data")
        def my_template(id: str) -> str:
            return f"data for {id}"

        listed = await FastMCPProvider(srv).list_resource_templates()
        assert len(listed) == 1
        assert listed[0].uri_template == "resource://{id}/data"

    async def test_get_resource_template(self):
        """A template matching a concrete URI can be fetched."""
        srv = FastMCP("Test")

        @srv.resource("resource://{id}/data")
        def my_template(id: str) -> str:
            return f"data for {id}"

        matched = await FastMCPProvider(srv).get_resource_template(
            "resource://123/data"
        )
        assert matched is not None

    async def test_read_resource_template_via_client(self):
        """A client can read a templated resource through the provider."""
        inner = FastMCP("Sub")

        @inner.resource("resource://{id}/data")
        def my_template(id: str) -> str:
            return f"data for {id}"

        outer = FastMCP("Main")
        outer.add_provider(FastMCPProvider(inner))

        async with Client(outer) as client:
            contents = await client.read_resource("resource://123/data")
            assert isinstance(contents[0], mt.TextResourceContents)
            assert contents[0].text == "data for 123"
class TestPromptOperations:
    """Test prompt operations through FastMCPProvider."""

    async def test_list_prompts(self):
        """Listing exposes every prompt registered on the wrapped server."""
        srv = FastMCP("Test")

        @srv.prompt
        def prompt_one() -> str:
            return "one"

        @srv.prompt
        def prompt_two() -> str:
            return "two"

        listed = await FastMCPProvider(srv).list_prompts()
        assert len(listed) == 2
        assert {p.name for p in listed} == {"prompt_one", "prompt_two"}

    async def test_get_prompt(self):
        """A prompt can be fetched by name through the provider."""
        srv = FastMCP("Test")

        @srv.prompt
        def my_prompt() -> str:
            return "content"

        fetched = await FastMCPProvider(srv).get_prompt("my_prompt")
        assert fetched is not None
        assert fetched.name == "my_prompt"

    async def test_render_prompt_via_client(self):
        """A client can render a prompt exposed through the provider."""
        inner = FastMCP("Sub")

        @inner.prompt
        def greet(name: str) -> str:
            return f"Hello, {name}!"

        outer = FastMCP("Main")
        outer.add_provider(FastMCPProvider(inner))

        async with Client(outer) as client:
            rendered = await client.get_prompt("greet", {"name": "World"})
            assert isinstance(rendered.messages[0].content, mt.TextContent)
            assert rendered.messages[0].content.text == "Hello, World!"
class TestServerReference:
    """Test that provider maintains reference to wrapped server."""

    def test_server_attribute(self):
        """The provider exposes the exact server instance it wraps."""
        wrapped = FastMCP("Test")
        assert FastMCPProvider(wrapped).server is wrapped

    def test_server_name_accessible(self):
        """The wrapped server's name remains reachable via the provider."""
        wrapped = FastMCP("MyServer")
        assert FastMCPProvider(wrapped).server.name == "MyServer"
class TestMiddlewareChain:
    """Test that middleware runs at each level of mounted servers."""
    # Expected trace shape: middleware wraps outermost-first, so "before" hooks
    # fire parent -> child -> grandchild and "after" hooks unwind in reverse
    # (onion model), with the actual handler executing in the middle.
    async def test_tool_middleware_three_levels(self):
        """Middleware runs at parent, child, and grandchild levels for tools."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.tool
        async def compute(x: int) -> int:
            calls.append("grandchild:tool")
            return x * 2
        grandchild.add_middleware(ToolTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ToolTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ToolTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            # Name is double-prefixed: "c" (parent mount) + "gc" (child mount).
            result = await client.call_tool("c_gc_compute", {"x": 5})
        assert result.data == 10
        assert calls == [
            "parent:before",
            "child:before",
            "grandchild:before",
            "grandchild:tool",
            "grandchild:after",
            "child:after",
            "parent:after",
        ]
    async def test_resource_middleware_three_levels(self):
        """Middleware runs at parent, child, and grandchild levels for resources."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.resource("data://value")
        async def get_data() -> str:
            calls.append("grandchild:resource")
            return "result"
        grandchild.add_middleware(ResourceTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ResourceTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ResourceTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            # Resource namespaces are inserted into the URI path, not the name.
            result = await client.read_resource("data://c/gc/value")
        assert isinstance(result[0], mt.TextResourceContents)
        assert result[0].text == "result"
        assert calls == [
            "parent:before",
            "child:before",
            "grandchild:before",
            "grandchild:resource",
            "grandchild:after",
            "child:after",
            "parent:after",
        ]
    async def test_prompt_middleware_three_levels(self):
        """Middleware runs at parent, child, and grandchild levels for prompts."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.prompt
        async def greet(name: str) -> str:
            calls.append("grandchild:prompt")
            return f"Hello, {name}!"
        grandchild.add_middleware(PromptTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(PromptTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(PromptTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            result = await client.get_prompt("c_gc_greet", {"name": "World"})
        assert isinstance(result.messages[0].content, mt.TextContent)
        assert result.messages[0].content.text == "Hello, World!"
        assert calls == [
            "parent:before",
            "child:before",
            "grandchild:before",
            "grandchild:prompt",
            "grandchild:after",
            "child:after",
            "parent:after",
        ]
    async def test_resource_template_middleware_three_levels(self):
        """Middleware runs at all levels for resource templates."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.resource("item://{id}")
        async def get_item(id: str) -> str:
            calls.append("grandchild:template")
            return f"item-{id}"
        grandchild.add_middleware(ResourceTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ResourceTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ResourceTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            # The template parameter ("42") stays in the final URI segment.
            result = await client.read_resource("item://c/gc/42")
        assert isinstance(result[0], mt.TextResourceContents)
        assert result[0].text == "item-42"
        assert calls == [
            "parent:before",
            "child:before",
            "grandchild:before",
            "grandchild:template",
            "grandchild:after",
            "child:after",
            "parent:after",
        ]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_fastmcp_provider.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/providers/test_transforming_provider.py | """Tests for Namespace and ToolTransform."""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.providers import FastMCPProvider
from fastmcp.server.transforms import Namespace, ToolTransform
from fastmcp.tools.tool_transform import ToolTransformConfig
class TestNamespaceTransform:
    """Test Namespace transform transformations."""
    async def test_namespace_prefixes_tool_names(self):
        """Test that namespace is applied as prefix to tool names."""
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        ns = Namespace("ns")
        # Run the provider's raw listing through the transform by hand.
        raw = await FastMCPProvider(server).list_tools()
        transformed = await ns.list_tools(raw)
        assert [t.name for t in transformed] == ["ns_my_tool"]
    async def test_namespace_prefixes_prompt_names(self):
        """Test that namespace is applied as prefix to prompt names."""
        server = FastMCP("Test")
        @server.prompt
        def my_prompt() -> str:
            return "prompt content"
        ns = Namespace("ns")
        raw = await FastMCPProvider(server).list_prompts()
        transformed = await ns.list_prompts(raw)
        assert [p.name for p in transformed] == ["ns_my_prompt"]
    async def test_namespace_prefixes_resource_uris(self):
        """Test that namespace is inserted into resource URIs."""
        server = FastMCP("Test")
        @server.resource("resource://data")
        def my_resource() -> str:
            return "content"
        ns = Namespace("ns")
        raw = await FastMCPProvider(server).list_resources()
        transformed = await ns.list_resources(raw)
        # The namespace becomes the first path segment of the URI.
        assert [str(r.uri) for r in transformed] == ["resource://ns/data"]
    async def test_namespace_prefixes_template_uris(self):
        """Test that namespace is inserted into resource template URIs."""
        server = FastMCP("Test")
        @server.resource("resource://{name}/data")
        def my_template(name: str) -> str:
            return f"content for {name}"
        ns = Namespace("ns")
        raw = await FastMCPProvider(server).list_resource_templates()
        transformed = await ns.list_resource_templates(raw)
        assert [t.uri_template for t in transformed] == [
            "resource://ns/{name}/data"
        ]
class TestToolTransformRenames:
    """Test ToolTransform renaming functionality."""
    async def test_tool_rename(self):
        """Test tool renaming with ToolTransform."""
        server = FastMCP("Test")
        @server.tool
        def verbose_tool_name() -> str:
            return "result"
        rename = ToolTransform(
            {"verbose_tool_name": ToolTransformConfig(name="short")}
        )
        listed = await FastMCPProvider(server).list_tools()
        renamed = await rename.list_tools(listed)
        assert [t.name for t in renamed] == ["short"]
    async def test_renamed_tool_is_callable_via_mount(self):
        """Test that renamed tools can be called by new name via mount."""
        sub = FastMCP("Sub")
        @sub.tool
        def original() -> str:
            return "success"
        # Wrap the sub-server in a provider that renames its only tool.
        provider = FastMCPProvider(sub)
        provider.add_transform(
            ToolTransform({"original": ToolTransformConfig(name="renamed")})
        )
        main = FastMCP("Main")
        main.add_provider(provider)
        async with Client(main) as client:
            outcome = await client.call_tool("renamed", {})
            assert outcome.data == "success"
    def test_duplicate_rename_targets_raises_error(self):
        """Test that duplicate target names in ToolTransform raises ValueError."""
        colliding = {
            "tool_a": ToolTransformConfig(name="same"),
            "tool_b": ToolTransformConfig(name="same"),
        }
        with pytest.raises(ValueError, match="duplicate target name"):
            ToolTransform(colliding)
class TestTransformReverseLookup:
    """Test reverse lookups for routing."""
    # Each test hands the layer a call_next coroutine that resolves against the
    # un-transformed provider; the layer is expected to undo its own renaming
    # (strip the namespace prefix / map the new name back) before delegating.
    async def test_namespace_get_tool(self):
        """Test that tools can be looked up by transformed name."""
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        provider = FastMCPProvider(server)
        layer = Namespace("ns")
        # Create call_next that delegates to provider
        async def get_tool(name: str, version=None):
            return await provider._get_tool(name, version)
        tool = await layer.get_tool("ns_my_tool", get_tool)
        assert tool is not None
        assert tool.name == "ns_my_tool"
    async def test_transform_layer_get_tool(self):
        """Test that renamed tools can be looked up by new name."""
        server = FastMCP("Test")
        @server.tool
        def original() -> str:
            return "result"
        provider = FastMCPProvider(server)
        layer = ToolTransform({"original": ToolTransformConfig(name="renamed")})
        async def get_tool(name: str, version=None):
            return await provider._get_tool(name, version)
        tool = await layer.get_tool("renamed", get_tool)
        assert tool is not None
        assert tool.name == "renamed"
    async def test_namespace_get_resource(self):
        """Test that resources can be looked up by transformed URI."""
        server = FastMCP("Test")
        @server.resource("resource://data")
        def my_resource() -> str:
            return "content"
        provider = FastMCPProvider(server)
        layer = Namespace("ns")
        async def get_resource(uri: str, version=None):
            return await provider._get_resource(uri, version)
        resource = await layer.get_resource("resource://ns/data", get_resource)
        assert resource is not None
        assert str(resource.uri) == "resource://ns/data"
    async def test_nonmatching_namespace_returns_none(self):
        """Test that lookups with wrong namespace return None."""
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        provider = FastMCPProvider(server)
        layer = Namespace("ns")
        async def get_tool(name: str, version=None):
            return await provider._get_tool(name, version)
        # Wrong namespace prefix
        assert await layer.get_tool("wrong_my_tool", get_tool) is None
        # No prefix at all
        assert await layer.get_tool("my_tool", get_tool) is None
class TestTransformStacking:
    """Test stacking multiple transforms via provider add_transform."""
    async def test_stacked_namespaces_compose(self):
        """Test that stacked namespaces are applied in order."""
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        provider = FastMCPProvider(server)
        tools = await provider.list_tools()
        # Apply transforms sequentially: base -> inner -> outer
        for layer in (Namespace("inner"), Namespace("outer")):
            tools = await layer.list_tools(tools)
        assert [t.name for t in tools] == ["outer_inner_my_tool"]
    async def test_stacked_transforms_are_callable(self):
        """Test that stacked transforms still allow tool calls."""
        sub = FastMCP("Sub")
        @sub.tool
        def my_tool() -> str:
            return "success"
        provider = FastMCPProvider(sub)
        # Namespace first, then rename the now-namespaced tool.
        provider.add_transform(Namespace("ns"))
        provider.add_transform(
            ToolTransform({"ns_my_tool": ToolTransformConfig(name="short")})
        )
        main = FastMCP("Main")
        main.add_provider(provider)
        async with Client(main) as client:
            outcome = await client.call_tool("short", {})
            assert outcome.data == "success"
class TestNoTransformation:
    """Test behavior when no transformations are applied."""
    async def test_transform_passthrough(self):
        """Test that base Transform passes through unchanged."""
        from fastmcp.server.transforms import Transform
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        identity = Transform()
        before = await FastMCPProvider(server).list_tools()
        after = await identity.list_tools(before)
        assert [t.name for t in after] == ["my_tool"]
    async def test_empty_transform_layer_passthrough(self):
        """Test that empty ToolTransform has no effect."""
        server = FastMCP("Test")
        @server.tool
        def my_tool() -> str:
            return "result"
        no_renames = ToolTransform({})
        before = await FastMCPProvider(server).list_tools()
        after = await no_renames.list_tools(before)
        assert [t.name for t in after] == ["my_tool"]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/providers/test_transforming_provider.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/providers/sqlite/server.py | # /// script
# dependencies = ["aiosqlite", "fastmcp"]
# ///
"""
MCP server with database-configured tools.
Tools are loaded from tools.db on each request, so you can add/modify/disable
tools in the database without restarting the server.
Run with: uv run fastmcp run examples/providers/sqlite/server.py
"""
from __future__ import annotations
import asyncio
import json
from collections.abc import Sequence
from pathlib import Path
from typing import Any
import aiosqlite
from rich import print
from fastmcp import Client, FastMCP
from fastmcp.server.providers import Provider
from fastmcp.tools.tool import Tool, ToolResult
DB_PATH = Path(__file__).parent / "tools.db"
class ConfigurableTool(Tool):
    """A tool that performs a configured arithmetic operation.
    This demonstrates the pattern: Tool subclass = schema + execution in one place.
    """
    # Arithmetic operation to perform: "add", "multiply", "subtract", "divide".
    operation: str
    # Fallback operand used when "a" or "b" is missing from the arguments.
    default_value: float = 0
    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        """Apply the configured operation to arguments "a" and "b".

        Returns a structured result dict, or a structured error dict for
        division by zero and for operations not in the supported set.
        """
        a = arguments.get("a", self.default_value)
        b = arguments.get("b", self.default_value)
        if self.operation == "add":
            result = a + b
        elif self.operation == "multiply":
            result = a * b
        elif self.operation == "subtract":
            result = a - b
        elif self.operation == "divide":
            if b == 0:
                return ToolResult(
                    structured_content={
                        "error": "Division by zero",
                        "operation": self.operation,
                    }
                )
            result = a / b
        else:
            # Previously an unknown operation silently fell back to addition,
            # masking misconfigured database rows. Report it explicitly,
            # mirroring the division-by-zero error shape above.
            return ToolResult(
                structured_content={
                    "error": f"Unknown operation: {self.operation}",
                    "operation": self.operation,
                }
            )
        return ToolResult(
            structured_content={"result": result, "operation": self.operation}
        )
class SQLiteToolProvider(Provider):
    """Queries SQLite for tool configurations.
    Called on every list_tools/get_tool request, so database changes
    are reflected immediately without server restart.
    """
    def __init__(self, db_path: str):
        super().__init__()
        self.db_path = db_path
    async def list_tools(self) -> Sequence[Tool]:
        # Fetch every enabled row and materialize one tool per row.
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute("SELECT * FROM tools WHERE enabled = 1") as cursor:
                return [self._make_tool(row) for row in await cursor.fetchall()]
    async def get_tool(self, name: str) -> Tool | None:
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(
                "SELECT * FROM tools WHERE name = ? AND enabled = 1", (name,)
            ) as cursor:
                row = await cursor.fetchone()
                if row is None:
                    return None
                return self._make_tool(row)
    def _make_tool(self, row: aiosqlite.Row) -> ConfigurableTool:
        """Translate one database row into a runnable ConfigurableTool."""
        return ConfigurableTool(
            name=row["name"],
            description=row["description"],
            parameters=json.loads(row["parameters_schema"]),
            operation=row["operation"],
            default_value=row["default_value"] or 0,
        )
# The provider is consulted live on every request, so editing tools.db takes
# effect without restarting the server.
provider = SQLiteToolProvider(db_path=str(DB_PATH))
mcp = FastMCP("DynamicToolsServer", providers=[provider])
@mcp.tool
def server_info() -> dict[str, str]:
    """Get information about this server (static tool)."""
    info: dict[str, str] = {"name": "DynamicToolsServer"}
    info["description"] = "A server with database-configured tools"
    info["database"] = str(DB_PATH)
    return info
async def main():
    """Connect an in-memory client, list the tools, and run two demo calls."""
    demo_calls = [
        ("add_numbers", {"a": 10, "b": 5}),
        ("multiply_numbers", {"a": 7, "b": 6}),
    ]
    async with Client(mcp) as client:
        tools = await client.list_tools()
        print(f"[bold]Available tools ({len(tools)}):[/bold]")
        for tool in tools:
            print(f" β’ {tool.name}: {tool.description}")
        for tool_name, args in demo_calls:
            print()
            rendered_args = ", ".join(str(v) for v in args.values())
            print(f"[bold]Calling {tool_name}({rendered_args}):[/bold]")
            result = await client.call_tool(tool_name, args)
            print(f" Result: {result.structured_content}")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/providers/sqlite/server.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/providers/sqlite/setup_db.py | # /// script
# dependencies = ["aiosqlite"]
# ///
"""
Creates and seeds the tools database.
Run with: uv run examples/providers/sqlite/setup_db.py
"""
import asyncio
import json
from pathlib import Path
import aiosqlite
DB_PATH = Path(__file__).parent / "tools.db"
async def setup_database() -> None:
"""Create the tools table and seed with example tools."""
async with aiosqlite.connect(DB_PATH) as db:
await db.execute("""
CREATE TABLE IF NOT EXISTS tools (
name TEXT PRIMARY KEY,
description TEXT NOT NULL,
parameters_schema TEXT NOT NULL,
operation TEXT NOT NULL,
default_value REAL,
enabled INTEGER DEFAULT 1
)
""")
tools_data = [
(
"add_numbers",
"Add two numbers together",
json.dumps(
{
"type": "object",
"properties": {
"a": {"type": "number", "description": "First number"},
"b": {"type": "number", "description": "Second number"},
},
"required": ["a", "b"],
}
),
"add",
0,
1,
),
(
"multiply_numbers",
"Multiply two numbers",
json.dumps(
{
"type": "object",
"properties": {
"a": {"type": "number", "description": "First number"},
"b": {"type": "number", "description": "Second number"},
},
"required": ["a", "b"],
}
),
"multiply",
1,
1,
),
(
"divide_numbers",
"Divide two numbers",
json.dumps(
{
"type": "object",
"properties": {
"a": {"type": "number", "description": "Dividend"},
"b": {"type": "number", "description": "Divisor"},
},
"required": ["a", "b"],
}
),
"divide",
0,
1,
),
]
await db.executemany(
"""
INSERT OR REPLACE INTO tools
(name, description, parameters_schema, operation, default_value, enabled)
VALUES (?, ?, ?, ?, ?, ?)
""",
tools_data,
)
await db.commit()
print(f"Database created at: {DB_PATH}")
print("Seeded 3 tools: add_numbers, multiply_numbers, divide_numbers")
if __name__ == "__main__":
asyncio.run(setup_database())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/providers/sqlite/setup_db.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/server/test_providers.py | """Tests for providers."""
from collections.abc import Sequence
from typing import Any
import pytest
from mcp.types import AnyUrl, TextContent
from fastmcp import FastMCP
from fastmcp.prompts.function_prompt import FunctionPrompt
from fastmcp.prompts.prompt import Prompt
from fastmcp.resources.function_resource import FunctionResource
from fastmcp.resources.resource import Resource
from fastmcp.resources.template import FunctionResourceTemplate, ResourceTemplate
from fastmcp.server.providers import Provider
from fastmcp.tools.tool import Tool, ToolResult
from fastmcp.utilities.versions import VersionSpec
class SimpleTool(Tool):
    """A simple tool for testing that performs a configured operation."""
    operation: str
    value: int = 0
    async def run(self, arguments: dict[str, Any]) -> ToolResult:
        lhs = arguments.get("a", 0)
        rhs = arguments.get("b", 0)
        if self.operation == "multiply":
            computed = lhs * rhs + self.value
        elif self.operation == "add":
            computed = lhs + rhs + self.value
        else:
            # Any unrecognized operation degrades to plain addition (no offset).
            computed = lhs + rhs
        return ToolResult(
            structured_content={"result": computed, "operation": self.operation}
        )
class SimpleToolProvider(Provider):
    """A simple provider that returns a configurable list of tools."""
    def __init__(self, tools: list[Tool] | None = None):
        super().__init__()
        # Call counters let tests verify which lookup path the server used.
        self.list_tools_call_count = 0
        self.get_tool_call_count = 0
        self._tools = tools or []
    async def _list_tools(self) -> list[Tool]:
        self.list_tools_call_count += 1
        return self._tools
    async def _get_tool(
        self, name: str, version: VersionSpec | None = None
    ) -> Tool | None:
        self.get_tool_call_count += 1
        candidates = [tool for tool in self._tools if tool.name == name]
        if version is not None:
            candidates = [
                tool for tool in candidates if version.matches(tool.version)
            ]
        # First match wins (for testing simplicity); None when nothing matches.
        return candidates[0] if candidates else None
class ListOnlyProvider(Provider):
    """A provider that only implements list_tools (uses default get_tool)."""
    def __init__(self, tools: list[Tool]):
        super().__init__()
        self.list_tools_call_count = 0
        self._tools = tools
    async def _list_tools(self) -> list[Tool]:
        # Counted so tests can prove the default get_tool fell back to listing.
        self.list_tools_call_count += 1
        return self._tools
class TestProvider:
    """Tests for Provider."""
    # These tests exercise the FastMCP <-> Provider integration end to end:
    # listing merges static (local) tools with provider tools, and call_tool
    # resolves through provider.get_tool() without re-listing everything.
    @pytest.fixture
    def base_server(self):
        """Create a base FastMCP server with static tools."""
        mcp = FastMCP("BaseServer")
        @mcp.tool
        def static_add(a: int, b: int) -> int:
            """Add two numbers (static tool)."""
            return a + b
        @mcp.tool
        def static_subtract(a: int, b: int) -> int:
            """Subtract two numbers (static tool)."""
            return a - b
        return mcp
    @pytest.fixture
    def dynamic_tools(self) -> list[Tool]:
        """Create dynamic tools for testing."""
        return [
            SimpleTool(
                name="dynamic_multiply",
                description="Multiply two numbers",
                parameters={
                    "type": "object",
                    "properties": {
                        "a": {"type": "integer"},
                        "b": {"type": "integer"},
                    },
                },
                operation="multiply",
            ),
            SimpleTool(
                name="dynamic_add",
                description="Add two numbers with offset",
                parameters={
                    "type": "object",
                    "properties": {
                        "a": {"type": "integer"},
                        "b": {"type": "integer"},
                    },
                },
                operation="add",
                value=100,
            ),
        ]
    async def test_list_tools_includes_dynamic_tools(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that list_tools returns both static and dynamic tools."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        tools = await base_server.list_tools()
        # Should have all tools: 2 static + 2 dynamic
        assert len(tools) == 4
        tool_names = [tool.name for tool in tools]
        assert "static_add" in tool_names
        assert "static_subtract" in tool_names
        assert "dynamic_multiply" in tool_names
        assert "dynamic_add" in tool_names
    async def test_list_tools_calls_provider_each_time(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that provider.list_tools() is called on every list_tools request."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        # Call get_tools multiple times
        await base_server.list_tools()
        await base_server.list_tools()
        await base_server.list_tools()
        # Provider should have been called 3 times (once per get_tools call)
        assert provider.list_tools_call_count == 3
    async def test_call_dynamic_tool(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that dynamic tools can be called successfully."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        result = await base_server.call_tool(
            name="dynamic_multiply", arguments={"a": 7, "b": 6}
        )
        assert result.structured_content is not None
        assert isinstance(result.structured_content, dict)
        assert result.structured_content["result"] == 42
        assert result.structured_content["operation"] == "multiply"
    async def test_call_dynamic_tool_with_config(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that dynamic tool config (like value offset) is used."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        result = await base_server.call_tool(
            name="dynamic_add", arguments={"a": 5, "b": 3}
        )
        assert result.structured_content is not None
        # 5 + 3 + 100 (value offset) = 108
        assert isinstance(result.structured_content, dict)
        assert result.structured_content["result"] == 108
    async def test_call_static_tool_still_works(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that static tools still work after adding dynamic tools."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        result = await base_server.call_tool(
            name="static_add", arguments={"a": 10, "b": 5}
        )
        assert result.structured_content is not None
        assert isinstance(result.structured_content, dict)
        assert result.structured_content["result"] == 15
    async def test_call_tool_uses_get_tool_for_efficient_lookup(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that call_tool uses get_tool() for efficient single-tool lookup."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        await base_server.call_tool(name="dynamic_multiply", arguments={"a": 2, "b": 3})
        # get_tool is called once for efficient lookup:
        # call_tool() calls provider.get_tool() to get the tool and execute it
        # Key point: list_tools is NOT called during tool execution (efficient lookup)
        assert provider.get_tool_call_count == 1
    async def test_default_get_tool_falls_back_to_list(self, base_server: FastMCP):
        """Test that BaseToolProvider's default get_tool calls list_tools."""
        tools = [
            SimpleTool(
                name="test_tool",
                description="A test tool",
                parameters={"type": "object", "properties": {}},
                operation="add",
            ),
        ]
        provider = ListOnlyProvider(tools=tools)
        base_server.add_provider(provider)
        result = await base_server.call_tool(
            name="test_tool", arguments={"a": 1, "b": 2}
        )
        assert result.structured_content is not None
        # Default get_tool should have called list_tools
        assert provider.list_tools_call_count >= 1
    async def test_local_tools_come_first(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that local tools (from LocalProvider) appear before other provider tools."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        tools = await base_server.list_tools()
        tool_names = [tool.name for tool in tools]
        # Local tools should come first (LocalProvider is first in _providers)
        assert tool_names[:2] == ["static_add", "static_subtract"]
    async def test_empty_provider(self, base_server: FastMCP):
        """Test that empty provider doesn't affect behavior."""
        provider = SimpleToolProvider(tools=[])
        base_server.add_provider(provider)
        tools = await base_server.list_tools()
        # Should only have static tools
        assert len(tools) == 2
    async def test_tool_not_found_falls_through_to_static(
        self, base_server: FastMCP, dynamic_tools: list[Tool]
    ):
        """Test that unknown tool name falls through to static tools."""
        provider = SimpleToolProvider(tools=dynamic_tools)
        base_server.add_provider(provider)
        # This tool is static, not in the dynamic provider
        result = await base_server.call_tool(
            name="static_subtract", arguments={"a": 10, "b": 3}
        )
        assert result.structured_content is not None
        assert isinstance(result.structured_content, dict)
        assert result.structured_content["result"] == 7
class TestProviderClass:
    """Tests for the Provider class."""
    async def test_subclass_is_instance(self):
        """Test that subclasses are instances of Provider."""
        assert isinstance(SimpleToolProvider(tools=[]), Provider)
    async def test_default_get_tool_works(self):
        """Test that the default get_tool implementation works."""
        only_tool = SimpleTool(
            name="test",
            description="Test",
            parameters={"type": "object", "properties": {}},
            operation="add",
        )
        provider = ListOnlyProvider(tools=[only_tool])
        # Lookup by name succeeds via the list-based default implementation.
        hit = await provider.get_tool("test")
        assert hit is not None
        assert hit.name == "test"
        # Unknown names resolve to None rather than raising.
        assert await provider.get_tool("unknown") is None
class TestDynamicToolUpdates:
    """Tests demonstrating dynamic tool updates without restart."""
    async def test_tools_update_without_restart(self):
        """Test that tools can be updated dynamically."""
        def make_tool(name: str, description: str, operation: str) -> SimpleTool:
            # Shared shape for every tool in this test: no parameters.
            return SimpleTool(
                name=name,
                description=description,
                parameters={"type": "object", "properties": {}},
                operation=operation,
            )
        mcp = FastMCP("DynamicServer")
        # Start with one tool
        provider = SimpleToolProvider(
            tools=[make_tool("tool_v1", "Version 1", "add")]
        )
        mcp.add_provider(provider)
        listed = await mcp.list_tools()
        assert [t.name for t in listed] == ["tool_v1"]
        # Update the provider's tools (simulating DB update)
        provider._tools = [
            make_tool("tool_v2", "Version 2", "multiply"),
            make_tool("tool_v3", "Version 3", "add"),
        ]
        # List tools again - should see new tools
        listed = await mcp.list_tools()
        assert len(listed) == 2
        names = {t.name for t in listed}
        assert "tool_v1" not in names
        assert names == {"tool_v2", "tool_v3"}
class TestProviderExecutionMethods:
    """Tests for Provider execution methods (call_tool, read_resource, render_prompt)."""
    # Each inner Provider subclass overrides only a _list_* hook; the default
    # execution methods must resolve the item via that listing and run it.
    async def test_call_tool_default_implementation(self):
        """Test that default call_tool uses get_tool and runs the tool."""
        tool = SimpleTool(
            name="test_tool",
            description="Test",
            parameters={"type": "object", "properties": {"a": {}, "b": {}}},
            operation="add",
        )
        provider = SimpleToolProvider(tools=[tool])
        mcp = FastMCP("TestServer")
        mcp.add_provider(provider)
        result = await mcp.call_tool("test_tool", {"a": 1, "b": 2})
        assert result.structured_content is not None
        assert isinstance(result.structured_content, dict)
        assert result.structured_content["result"] == 3
    async def test_read_resource_default_implementation(self):
        """Test that default read_resource uses get_resource and reads it."""
        class ResourceProvider(Provider):
            async def _list_resources(self) -> Sequence[Resource]:
                return [
                    FunctionResource(
                        uri=AnyUrl("test://data"),
                        name="Test Data",
                        fn=lambda: "hello world",
                    )
                ]
        provider = ResourceProvider()
        mcp = FastMCP("TestServer")
        mcp.add_provider(provider)
        result = await mcp.read_resource("test://data")
        assert len(result.contents) == 1
        assert result.contents[0].content == "hello world"
    async def test_read_resource_template_default(self):
        """Test that read_resource_template handles template-based resources."""
        class TemplateProvider(Provider):
            async def _list_resource_templates(self) -> Sequence[ResourceTemplate]:
                return [
                    FunctionResourceTemplate.from_function(
                        fn=lambda name: f"content of {name}",
                        uri_template="data://files/{name}",
                        name="Data Template",
                    )
                ]
        provider = TemplateProvider()
        mcp = FastMCP("TestServer")
        mcp.add_provider(provider)
        # The concrete URI must be matched back to the {name} template.
        result = await mcp.read_resource("data://files/test.txt")
        assert len(result.contents) == 1
        assert result.contents[0].content == "content of test.txt"
    async def test_render_prompt_default_implementation(self):
        """Test that default render_prompt uses get_prompt and renders it."""
        class PromptProvider(Provider):
            async def _list_prompts(self) -> Sequence[Prompt]:
                return [
                    FunctionPrompt.from_function(
                        fn=lambda name: f"Hello, {name}!",
                        name="greeting",
                        description="Greet someone",
                    )
                ]
        provider = PromptProvider()
        mcp = FastMCP("TestServer")
        mcp.add_provider(provider)
        result = await mcp.render_prompt("greeting", {"name": "World"})
        assert len(result.messages) == 1
        assert isinstance(result.messages[0].content, TextContent)
        assert result.messages[0].content.text == "Hello, World!"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_providers.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/sampling/server_fallback.py | # /// script
# dependencies = ["anthropic", "fastmcp", "rich"]
# ///
"""
Server-Side Fallback Handler
Demonstrates configuring a sampling handler on the server. This ensures
sampling works even when the client doesn't provide a handler.
The server runs as an HTTP server that can be connected to by any MCP client.
Run:
uv run examples/sampling/server_fallback.py
Then connect with any MCP client (e.g., Claude Desktop) or test with:
curl http://localhost:8000/mcp/
"""
import asyncio
from rich.console import Console
from rich.panel import Panel
from fastmcp import FastMCP
from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler
from fastmcp.server.context import Context
# Shared Rich console for the server's own log output.
console = Console()
# Create server with a fallback sampling handler
# This handler is used when the client doesn't support sampling
mcp = FastMCP(
    "Server with Fallback Handler",
    sampling_handler=AnthropicSamplingHandler(default_model="claude-sonnet-4-5"),
    sampling_handler_behavior="fallback",  # Use only if client lacks sampling
)
@mcp.tool
async def summarize(text: str, ctx: Context) -> str:
    """Summarize the given text."""
    console.print(f"[bold cyan]SERVER[/] Summarizing text ({len(text)} chars)...")
    request = f"Summarize this text in 1-2 sentences:\n\n{text}"
    completion = await ctx.sample(
        messages=request,
        system_prompt="You are a concise summarizer.",
        max_tokens=150,
    )
    console.print("[bold cyan]SERVER[/] Summary complete")
    return completion.text or ""
@mcp.tool
async def translate(text: str, target_language: str, ctx: Context) -> str:
    """Translate text to the target language."""
    console.print(f"[bold cyan]SERVER[/] Translating to {target_language}...")
    request = f"Translate to {target_language}:\n\n{text}"
    completion = await ctx.sample(
        messages=request,
        system_prompt=f"You are a translator. Output only the {target_language} translation.",
        max_tokens=500,
    )
    console.print("[bold cyan]SERVER[/] Translation complete")
    return completion.text or ""
async def main():
    """Print a banner and serve the MCP server over HTTP until interrupted."""
    console.print(
        Panel.fit(
            "[bold]Server-Side Fallback Handler Demo[/]\n\n"
            "This server has a built-in Anthropic handler that activates\n"
            "when clients don't provide their own sampling support.",
            subtitle="server_fallback.py",
        )
    )
    console.print()
    console.print("[bold yellow]Starting HTTP server on http://localhost:8000[/]")
    console.print("Connect with an MCP client or press Ctrl+C to stop")
    console.print()
    # Blocks here, serving requests, until the process is stopped.
    await mcp.run_http_async(host="localhost", port=8000)


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/sampling/server_fallback.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/sampling/structured_output.py | # /// script
# dependencies = ["anthropic", "fastmcp", "rich"]
# ///
"""
Structured Output Sampling
Demonstrates using `result_type` to get validated Pydantic models from an LLM.
The server exposes a sentiment analysis tool that returns structured data.
Run:
uv run examples/sampling/structured_output.py
"""
import asyncio
from pydantic import BaseModel
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from fastmcp import Client, Context, FastMCP
from fastmcp.client.sampling import SamplingMessage, SamplingParams
from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler
console = Console()
class LoggingAnthropicHandler(AnthropicSamplingHandler):
    """Anthropic sampling handler that logs each API round-trip to the console."""

    async def __call__(
        self, messages: list[SamplingMessage], params: SamplingParams, context
    ):  # type: ignore[override]
        # Bracket the real handler call with console markers so the demo
        # output shows when the LLM is actually invoked.
        console.print(" [bold blue]SAMPLING[/] Calling Claude API...")
        result = await super().__call__(messages, params, context)
        console.print(" [bold blue]SAMPLING[/] Response received")
        return result
# Define a structured output model
class SentimentAnalysis(BaseModel):
    """Validated shape the LLM must produce for one sentiment analysis."""

    sentiment: str  # "positive", "negative", or "neutral"
    confidence: float  # 0.0 to 1.0
    keywords: list[str]  # Keywords that influenced the analysis
    explanation: str  # Brief explanation of the analysis
# Create the MCP server
mcp = FastMCP("Sentiment Analyzer")
@mcp.tool
async def analyze_sentiment(text: str, ctx: Context) -> dict:
    """Analyze the sentiment of the given text."""
    console.print(" [bold cyan]SERVER[/] Analyzing sentiment...")
    # result_type makes the sampling layer coerce/validate the LLM output
    # into a SentimentAnalysis instance.
    prompt = f"Analyze the sentiment of this text:\n\n{text}"
    sampled = await ctx.sample(
        messages=prompt,
        system_prompt="You are a sentiment analysis expert. Analyze text carefully.",
        result_type=SentimentAnalysis,
    )
    console.print(" [bold cyan]SERVER[/] Analysis complete")
    analysis = sampled.result  # type: ignore[attr-defined]
    return analysis.model_dump()
async def main():
    """Run the demo: analyze three sample texts and render structured results."""
    console.print(
        Panel.fit("[bold]MCP Sampling Flow Demo[/]", subtitle="structured_output.py")
    )
    console.print()
    handler = LoggingAnthropicHandler(default_model="claude-sonnet-4-5")
    # In-memory client/server pair; the handler services sampling requests.
    async with Client(mcp, sampling_handler=handler) as client:
        texts = [
            "I absolutely love this product! It exceeded all my expectations.",
            "The service was okay, nothing special but got the job done.",
            "This is the worst experience I've ever had. Never again.",
        ]
        for text in texts:
            console.print(f"[bold green]CLIENT[/] Analyzing: [italic]{text[:50]}...[/]")
            console.print()
            result = await client.call_tool("analyze_sentiment", {"text": text})
            # data is the dict produced by SentimentAnalysis.model_dump()
            data = result.data
            # Display results in a table
            table = Table(show_header=False, box=None, padding=(0, 2))
            table.add_column(style="bold")
            table.add_column()
            # Color-code the panel by sentiment; unknown values fall back to white.
            sentiment_color = {
                "positive": "green",
                "negative": "red",
                "neutral": "yellow",
            }.get(
                data["sentiment"],
                "white",  # type: ignore[union-attr]
            )
            table.add_row("Sentiment", f"[{sentiment_color}]{data['sentiment']}[/]")  # type: ignore[index]
            table.add_row("Confidence", f"{data['confidence']:.0%}")  # type: ignore[index]
            table.add_row("Keywords", ", ".join(data["keywords"]))  # type: ignore[index]
            table.add_row("Explanation", data["explanation"])  # type: ignore[index]
            console.print(Panel(table, border_style=sentiment_color))
            console.print()


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/sampling/structured_output.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/sampling/text.py | # /// script
# dependencies = ["anthropic", "fastmcp", "rich"]
# ///
"""
Simple Text Sampling
Demonstrates the basic MCP sampling flow where a server tool requests
an LLM completion from the client.
Run:
uv run examples/sampling/text.py
"""
import asyncio
from rich.console import Console
from rich.panel import Panel
from fastmcp import Client, Context, FastMCP
from fastmcp.client.sampling import SamplingMessage, SamplingParams
from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler
console = Console()
# Create a wrapper handler that logs when the LLM is called
class LoggingAnthropicHandler(AnthropicSamplingHandler):
    """Anthropic sampling handler that prints a marker around each API call."""

    async def __call__(
        self, messages: list[SamplingMessage], params: SamplingParams, context
    ):  # type: ignore[override]
        # Delegate to the base handler; the prints only add demo visibility.
        console.print(" [bold blue]SAMPLING[/] Calling Claude API...")
        result = await super().__call__(messages, params, context)
        console.print(" [bold blue]SAMPLING[/] Response received")
        return result
# Create the MCP server
mcp = FastMCP("Haiku Generator")
@mcp.tool
async def write_haiku(topic: str, ctx: Context) -> str:
    """Write a haiku about any topic."""
    console.print(
        f" [bold cyan]SERVER[/] Tool 'write_haiku' called with topic: {topic}"
    )
    # Ask the client-side LLM for the poem via MCP sampling.
    sampled = await ctx.sample(
        messages=f"Write a haiku about: {topic}",
        system_prompt="You are a poet. Write only the haiku, nothing else.",
        max_tokens=100,
    )
    console.print(" [bold cyan]SERVER[/] Returning haiku to client")
    haiku = sampled.text
    return haiku if haiku else ""
async def main():
    """Run the demo: call the haiku tool once over an in-memory client."""
    console.print(Panel.fit("[bold]MCP Sampling Flow Demo[/]", subtitle="text.py"))
    console.print()
    # Create the sampling handler
    handler = LoggingAnthropicHandler(default_model="claude-sonnet-4-5")
    # Connect client to server with the sampling handler
    async with Client(mcp, sampling_handler=handler) as client:
        console.print("[bold green]CLIENT[/] Calling tool 'write_haiku'...")
        console.print()
        result = await client.call_tool("write_haiku", {"topic": "Python programming"})
        console.print()
        console.print("[bold green]CLIENT[/] Received result:")
        console.print(Panel(result.data, title="Haiku", border_style="green"))  # type: ignore[arg-type]


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/sampling/text.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/sampling/tool_use.py | # /// script
# dependencies = ["anthropic", "fastmcp", "rich"]
# ///
"""
Sampling with Tools
Demonstrates giving an LLM tools to use during sampling. The LLM can call
helper functions to gather information before responding.
Run:
uv run examples/sampling/tool_use.py
"""
import asyncio
import random
from datetime import datetime
from pydantic import BaseModel, Field
from rich.console import Console
from rich.panel import Panel
from fastmcp import Client, Context, FastMCP
from fastmcp.client.sampling import SamplingMessage, SamplingParams
from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler
console = Console()
class LoggingAnthropicHandler(AnthropicSamplingHandler):
    """Anthropic sampling handler that logs each API round-trip to the console."""

    async def __call__(
        self, messages: list[SamplingMessage], params: SamplingParams, context
    ):  # type: ignore[override]
        # Purely additive logging around the base handler's network call.
        console.print(" [bold blue]SAMPLING[/] Calling Claude API...")
        result = await super().__call__(messages, params, context)
        console.print(" [bold blue]SAMPLING[/] Response received")
        return result
# Define tools available to the LLM during sampling
def add(a: float, b: float) -> str:
    """Add two numbers together."""
    total = a + b
    console.print(f" [bold magenta]TOOL[/] add({a}, {b}) = {total}")
    # Sampling tools return text, so stringify the number.
    return str(total)
def multiply(a: float, b: float) -> str:
    """Multiply two numbers together."""
    product = a * b
    console.print(f" [bold magenta]TOOL[/] multiply({a}, {b}) = {product}")
    # Sampling tools return text, so stringify the number.
    return str(product)
def get_current_time() -> str:
    """Get the current date and time."""
    console.print(" [bold magenta]TOOL[/] get_current_time()")
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
def roll_dice(sides: int = 6) -> str:
    """Roll a die with the specified number of sides."""
    rolled = random.randint(1, sides)
    console.print(f" [bold magenta]TOOL[/] roll_dice({sides}) = {rolled}")
    return str(rolled)
# Structured output for the response
class AssistantResponse(BaseModel):
    """Validated shape the LLM must produce when answering a question."""

    answer: str = Field(description="The answer to the user's question")
    tools_used: list[str] = Field(description="List of tools that were used")
    reasoning: str = Field(
        description="Brief explanation of how the answer was determined"
    )
# Create the MCP server
mcp = FastMCP("Smart Assistant")
@mcp.tool
async def ask_assistant(question: str, ctx: Context) -> dict:
    """Ask the assistant a question. It can use tools to help answer."""
    console.print(" [bold cyan]SERVER[/] Processing question...")
    # Hand the LLM the local helper functions; the sampling layer runs any
    # tool calls and validates the final answer against AssistantResponse.
    helper_tools = [add, multiply, get_current_time, roll_dice]
    sampled = await ctx.sample(
        messages=question,
        system_prompt="You are a helpful assistant with access to tools. Use them when needed to answer questions accurately.",
        tools=helper_tools,
        result_type=AssistantResponse,
    )
    console.print(" [bold cyan]SERVER[/] Response ready")
    structured = sampled.result  # type: ignore[attr-defined]
    return structured.model_dump()
async def main():
    """Run the demo: ask three questions that exercise the sampling tools."""
    console.print(Panel.fit("[bold]MCP Sampling Flow Demo[/]", subtitle="tool_use.py"))
    console.print()
    handler = LoggingAnthropicHandler(default_model="claude-sonnet-4-5")
    # In-memory client/server pair; the handler services sampling requests.
    async with Client(mcp, sampling_handler=handler) as client:
        questions = [
            "What is 15 times 7, plus 23?",
            "Roll a 20-sided dice for me",
            "What time is it right now?",
        ]
        for question in questions:
            console.print(f"[bold green]CLIENT[/] Question: {question}")
            console.print()
            result = await client.call_tool("ask_assistant", {"question": question})
            # data is the dict produced by AssistantResponse.model_dump()
            data = result.data
            console.print(f"[bold green]CLIENT[/] Answer: {data['answer']}")  # type: ignore[index]
            console.print(
                f" Tools used: {', '.join(data['tools_used']) or 'none'}"
            )  # type: ignore[index]
            console.print(f" Reasoning: {data['reasoning']}")  # type: ignore[index]
            console.print()


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/sampling/tool_use.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/client/sampling/handlers/anthropic.py | """Anthropic sampling handler for FastMCP."""
from collections.abc import Iterator, Sequence
from typing import Any
from mcp.types import CreateMessageRequestParams as SamplingParams
from mcp.types import (
CreateMessageResult,
CreateMessageResultWithTools,
ModelPreferences,
SamplingMessage,
SamplingMessageContentBlock,
StopReason,
TextContent,
Tool,
ToolChoice,
ToolResultContent,
ToolUseContent,
)
try:
from anthropic import AsyncAnthropic
from anthropic.types import (
Message,
MessageParam,
TextBlock,
TextBlockParam,
ToolParam,
ToolResultBlockParam,
ToolUseBlock,
ToolUseBlockParam,
)
from anthropic.types.model_param import ModelParam
from anthropic.types.tool_choice_any_param import ToolChoiceAnyParam
from anthropic.types.tool_choice_auto_param import ToolChoiceAutoParam
from anthropic.types.tool_choice_param import ToolChoiceParam
except ImportError as e:
raise ImportError(
"The `anthropic` package is not installed. "
"Install it with `pip install fastmcp[anthropic]` or add `anthropic` to your dependencies."
) from e
__all__ = ["AnthropicSamplingHandler"]
class AnthropicSamplingHandler:
    """Sampling handler that uses the Anthropic API.

    Translates MCP sampling requests (messages, model preferences, tools,
    tool choice) into Anthropic Messages API calls and converts the
    responses back into MCP result types.

    Example:
        ```python
        from anthropic import AsyncAnthropic
        from fastmcp import FastMCP
        from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler

        handler = AnthropicSamplingHandler(
            default_model="claude-sonnet-4-5",
            client=AsyncAnthropic(),
        )
        server = FastMCP(sampling_handler=handler)
        ```
    """

    def __init__(
        self, default_model: ModelParam, client: AsyncAnthropic | None = None
    ) -> None:
        # default_model is used when no acceptable model hint is supplied.
        # A fresh AsyncAnthropic() reads credentials from the environment.
        self.client: AsyncAnthropic = client or AsyncAnthropic()
        self.default_model: ModelParam = default_model

    async def __call__(
        self,
        messages: list[SamplingMessage],
        params: SamplingParams,
        context: Any,
    ) -> CreateMessageResult | CreateMessageResultWithTools:
        """Handle one MCP sampling request via the Anthropic Messages API.

        Returns a ``CreateMessageResultWithTools`` when ``params.tools`` is
        set (the response may contain tool-use blocks), otherwise a plain
        text ``CreateMessageResult``.
        """
        anthropic_messages: list[MessageParam] = self._convert_to_anthropic_messages(
            messages=messages,
        )
        model: ModelParam = self._select_model_from_preferences(params.modelPreferences)
        # Convert MCP tools to Anthropic format
        anthropic_tools: list[ToolParam] | None = None
        if params.tools:
            anthropic_tools = self._convert_tools_to_anthropic(params.tools)
        # Convert tool_choice to Anthropic format
        # Returns None if mode is "none", signaling tools should be omitted
        anthropic_tool_choice: ToolChoiceParam | None = None
        if params.toolChoice:
            converted = self._convert_tool_choice_to_anthropic(params.toolChoice)
            if converted is None:
                # tool_choice="none" means don't use tools
                anthropic_tools = None
            else:
                anthropic_tool_choice = converted
        # Build kwargs to avoid sentinel type compatibility issues across
        # anthropic SDK versions (NotGiven vs Omit)
        kwargs: dict[str, Any] = {
            "model": model,
            "messages": anthropic_messages,
            "max_tokens": params.maxTokens,
        }
        if params.systemPrompt is not None:
            kwargs["system"] = params.systemPrompt
        if params.temperature is not None:
            kwargs["temperature"] = params.temperature
        if params.stopSequences is not None:
            kwargs["stop_sequences"] = params.stopSequences
        if anthropic_tools is not None:
            kwargs["tools"] = anthropic_tools
        if anthropic_tool_choice is not None:
            kwargs["tool_choice"] = anthropic_tool_choice
        response = await self.client.messages.create(**kwargs)
        # Return appropriate result type based on whether tools were provided
        if params.tools:
            return self._message_to_result_with_tools(response)
        return self._message_to_create_message_result(response)

    @staticmethod
    def _iter_models_from_preferences(
        model_preferences: ModelPreferences | str | list[str] | None,
    ) -> Iterator[str]:
        """Yield candidate model names from the caller's preferences, in order.

        Accepts a bare name, a list of names, or an MCP ModelPreferences
        object (whose hints are unwrapped); yields nothing for None.
        """
        if model_preferences is None:
            return
        if isinstance(model_preferences, str):
            yield model_preferences
        elif isinstance(model_preferences, list):
            yield from model_preferences
        elif isinstance(model_preferences, ModelPreferences):
            if not (hints := model_preferences.hints):
                return
            for hint in hints:
                # Hints without a name carry no usable model candidate.
                if not (name := hint.name):
                    continue
                yield name

    @staticmethod
    def _convert_to_anthropic_messages(
        messages: Sequence[SamplingMessage],
    ) -> list[MessageParam]:
        """Convert MCP sampling messages into Anthropic MessageParam dicts.

        Handles four content shapes: a list of content blocks, a single
        ToolUseContent, a single ToolResultContent, and plain TextContent.
        Raises ValueError for any other content type.
        """
        anthropic_messages: list[MessageParam] = []
        for message in messages:
            content = message.content
            # Handle list content (from CreateMessageResultWithTools)
            if isinstance(content, list):
                content_blocks: list[
                    TextBlockParam | ToolUseBlockParam | ToolResultBlockParam
                ] = []
                for item in content:
                    if isinstance(item, ToolUseContent):
                        content_blocks.append(
                            ToolUseBlockParam(
                                type="tool_use",
                                id=item.id,
                                name=item.name,
                                input=item.input,
                            )
                        )
                    elif isinstance(item, TextContent):
                        content_blocks.append(
                            TextBlockParam(type="text", text=item.text)
                        )
                    elif isinstance(item, ToolResultContent):
                        # Extract text content from the result
                        result_content: str | list[TextBlockParam] = ""
                        if item.content:
                            text_blocks: list[TextBlockParam] = []
                            for sub_item in item.content:
                                if isinstance(sub_item, TextContent):
                                    text_blocks.append(
                                        TextBlockParam(type="text", text=sub_item.text)
                                    )
                            # A single text block is flattened to a bare string.
                            if len(text_blocks) == 1:
                                result_content = text_blocks[0]["text"]
                            elif text_blocks:
                                result_content = text_blocks
                        content_blocks.append(
                            ToolResultBlockParam(
                                type="tool_result",
                                tool_use_id=item.toolUseId,
                                content=result_content,
                                is_error=item.isError if item.isError else False,
                            )
                        )
                # Only emit a message if at least one block was convertible.
                if content_blocks:
                    anthropic_messages.append(
                        MessageParam(
                            role=message.role,
                            content=content_blocks,
                        )
                    )
                continue
            # Handle ToolUseContent (assistant's tool calls)
            if isinstance(content, ToolUseContent):
                anthropic_messages.append(
                    MessageParam(
                        role="assistant",
                        content=[
                            ToolUseBlockParam(
                                type="tool_use",
                                id=content.id,
                                name=content.name,
                                input=content.input,
                            )
                        ],
                    )
                )
                continue
            # Handle ToolResultContent (user's tool results)
            if isinstance(content, ToolResultContent):
                result_content_str: str | list[TextBlockParam] = ""
                if content.content:
                    text_parts: list[TextBlockParam] = []
                    for item in content.content:
                        if isinstance(item, TextContent):
                            text_parts.append(
                                TextBlockParam(type="text", text=item.text)
                            )
                    # A single text block is flattened to a bare string.
                    if len(text_parts) == 1:
                        result_content_str = text_parts[0]["text"]
                    elif text_parts:
                        result_content_str = text_parts
                anthropic_messages.append(
                    MessageParam(
                        role="user",
                        content=[
                            ToolResultBlockParam(
                                type="tool_result",
                                tool_use_id=content.toolUseId,
                                content=result_content_str,
                                is_error=content.isError if content.isError else False,
                            )
                        ],
                    )
                )
                continue
            # Handle TextContent
            if isinstance(content, TextContent):
                anthropic_messages.append(
                    MessageParam(
                        role=message.role,
                        content=content.text,
                    )
                )
                continue
            raise ValueError(f"Unsupported content type: {type(content)}")
        return anthropic_messages

    @staticmethod
    def _message_to_create_message_result(
        message: Message,
    ) -> CreateMessageResult:
        """Convert an Anthropic Message into a text-only CreateMessageResult.

        Raises ValueError if the response has no content or no text blocks.
        """
        if len(message.content) == 0:
            raise ValueError("No content in response from Anthropic")
        # Join all text blocks to avoid dropping content
        text = "".join(
            block.text for block in message.content if isinstance(block, TextBlock)
        )
        if text:
            return CreateMessageResult(
                content=TextContent(type="text", text=text),
                role="assistant",
                model=message.model,
            )
        raise ValueError(
            f"No text content in response from Anthropic: {[type(b).__name__ for b in message.content]}"
        )

    def _select_model_from_preferences(
        self, model_preferences: ModelPreferences | str | list[str] | None
    ) -> ModelParam:
        """Pick the first Claude-looking model hint, or fall back to the default."""
        for model_option in self._iter_models_from_preferences(model_preferences):
            # Accept any model that starts with "claude"
            if model_option.startswith("claude"):
                return model_option
        return self.default_model

    @staticmethod
    def _convert_tools_to_anthropic(tools: list[Tool]) -> list[ToolParam]:
        """Convert MCP tools to Anthropic tool format."""
        anthropic_tools: list[ToolParam] = []
        for tool in tools:
            # Build input_schema dict, ensuring required fields
            input_schema: dict[str, Any] = dict(tool.inputSchema)
            if "type" not in input_schema:
                input_schema["type"] = "object"
            anthropic_tools.append(
                ToolParam(
                    name=tool.name,
                    description=tool.description or "",
                    input_schema=input_schema,
                )
            )
        return anthropic_tools

    @staticmethod
    def _convert_tool_choice_to_anthropic(
        tool_choice: ToolChoice,
    ) -> ToolChoiceParam | None:
        """Convert MCP tool_choice to Anthropic format.

        Returns None for "none" mode, signaling that tools should be omitted
        from the request entirely (Anthropic doesn't have an explicit "none" option).

        Raises:
            ValueError: For an unrecognized tool_choice mode.
        """
        if tool_choice.mode == "auto":
            return ToolChoiceAutoParam(type="auto")
        elif tool_choice.mode == "required":
            # MCP "required" maps to Anthropic's "any" (must call some tool).
            return ToolChoiceAnyParam(type="any")
        elif tool_choice.mode == "none":
            # Anthropic doesn't have a "none" option - return None to signal
            # that tools should be omitted from the request entirely
            return None
        else:
            raise ValueError(f"Unsupported tool_choice mode: {tool_choice.mode!r}")

    @staticmethod
    def _message_to_result_with_tools(
        message: Message,
    ) -> CreateMessageResultWithTools:
        """Convert Anthropic response to CreateMessageResultWithTools."""
        if len(message.content) == 0:
            raise ValueError("No content in response from Anthropic")
        # Determine stop reason: map Anthropic's reasons onto MCP's; anything
        # unrecognized (including stop_sequence) collapses to "endTurn".
        stop_reason: StopReason
        if message.stop_reason == "tool_use":
            stop_reason = "toolUse"
        elif message.stop_reason == "end_turn":
            stop_reason = "endTurn"
        elif message.stop_reason == "max_tokens":
            stop_reason = "maxTokens"
        elif message.stop_reason == "stop_sequence":
            stop_reason = "endTurn"
        else:
            stop_reason = "endTurn"
        # Build content list
        content: list[SamplingMessageContentBlock] = []
        for block in message.content:
            if isinstance(block, TextBlock):
                content.append(TextContent(type="text", text=block.text))
            elif isinstance(block, ToolUseBlock):
                # Anthropic returns input as dict directly
                arguments = block.input if isinstance(block.input, dict) else {}
                content.append(
                    ToolUseContent(
                        type="tool_use",
                        id=block.id,
                        name=block.name,
                        input=arguments,
                    )
                )
        # Must have at least some content
        if not content:
            raise ValueError("No content in response from Anthropic")
        return CreateMessageResultWithTools(
            content=content,
            role="assistant",
            model=message.model,
            stopReason=stop_reason,
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/sampling/handlers/anthropic.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/client/sampling/handlers/test_anthropic_handler.py | from unittest.mock import MagicMock
import pytest
from anthropic import AsyncAnthropic
from anthropic.types import Message, TextBlock, ToolUseBlock, Usage
from mcp.types import (
CreateMessageResult,
CreateMessageResultWithTools,
ModelHint,
ModelPreferences,
SamplingMessage,
TextContent,
ToolUseContent,
)
from fastmcp.client.sampling.handlers.anthropic import AnthropicSamplingHandler
def test_convert_sampling_messages_to_anthropic_messages():
    """Plain text messages map to simple role/content string dicts."""
    msgs = AnthropicSamplingHandler._convert_to_anthropic_messages(
        messages=[
            SamplingMessage(
                role="user", content=TextContent(type="text", text="hello")
            ),
            SamplingMessage(
                role="assistant", content=TextContent(type="text", text="ok")
            ),
        ],
    )
    assert msgs == [
        {"role": "user", "content": "hello"},
        {"role": "assistant", "content": "ok"},
    ]


def test_convert_to_anthropic_messages_raises_on_non_text():
    """Unsupported content types (e.g. images) raise ValueError."""
    from fastmcp.utilities.types import Image

    with pytest.raises(ValueError):
        AnthropicSamplingHandler._convert_to_anthropic_messages(
            messages=[
                SamplingMessage(
                    role="user",
                    content=Image(data=b"abc").to_image_content(),
                )
            ],
        )
@pytest.mark.parametrize(
    "prefs,expected",
    [
        # Bare string, ModelPreferences hints, and lists are all accepted;
        # non-Claude or absent hints fall back to the default model.
        ("claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20241022"),
        (
            ModelPreferences(hints=[ModelHint(name="claude-3-5-sonnet-20241022")]),
            "claude-3-5-sonnet-20241022",
        ),
        (["claude-3-5-sonnet-20241022", "other"], "claude-3-5-sonnet-20241022"),
        (None, "fallback-model"),
        (["unknown-model"], "fallback-model"),
    ],
)
def test_select_model_from_preferences(prefs, expected):
    """Model selection honors Claude-prefixed hints, else uses the default."""
    mock_client = MagicMock(spec=AsyncAnthropic)
    handler = AnthropicSamplingHandler(
        default_model="fallback-model", client=mock_client
    )
    assert handler._select_model_from_preferences(prefs) == expected
def test_message_to_create_message_result():
    """A text-only Anthropic Message converts to a text CreateMessageResult."""
    mock_client = MagicMock(spec=AsyncAnthropic)
    handler = AnthropicSamplingHandler(
        default_model="fallback-model", client=mock_client
    )
    message = Message(
        id="msg_123",
        type="message",
        role="assistant",
        content=[TextBlock(type="text", text="HELPFUL CONTENT FROM A VERY SMART LLM")],
        model="claude-3-5-sonnet-20241022",
        stop_reason="end_turn",
        stop_sequence=None,
        usage=Usage(input_tokens=10, output_tokens=20),
    )
    result: CreateMessageResult = handler._message_to_create_message_result(message)
    assert result == CreateMessageResult(
        content=TextContent(type="text", text="HELPFUL CONTENT FROM A VERY SMART LLM"),
        role="assistant",
        model="claude-3-5-sonnet-20241022",
    )


def test_message_to_result_with_tools():
    """Mixed text + tool_use content maps to CreateMessageResultWithTools."""
    message = Message(
        id="msg_123",
        type="message",
        role="assistant",
        content=[
            TextBlock(type="text", text="I'll help you with that."),
            ToolUseBlock(
                type="tool_use",
                id="toolu_123",
                name="get_weather",
                input={"location": "San Francisco"},
            ),
        ],
        model="claude-3-5-sonnet-20241022",
        stop_reason="tool_use",
        stop_sequence=None,
        usage=Usage(input_tokens=10, output_tokens=20),
    )
    result: CreateMessageResultWithTools = (
        AnthropicSamplingHandler._message_to_result_with_tools(message)
    )
    assert result.role == "assistant"
    assert result.model == "claude-3-5-sonnet-20241022"
    # Anthropic's "tool_use" stop reason maps to MCP's "toolUse".
    assert result.stopReason == "toolUse"
    content = result.content_as_list
    assert len(content) == 2
    assert content[0] == TextContent(type="text", text="I'll help you with that.")
    assert content[1] == ToolUseContent(
        type="tool_use",
        id="toolu_123",
        name="get_weather",
        input={"location": "San Francisco"},
    )
def test_convert_tool_choice_auto():
    """MCP "auto" maps to Anthropic's "auto" choice."""
    result = AnthropicSamplingHandler._convert_tool_choice_to_anthropic(
        MagicMock(mode="auto")
    )
    assert result is not None
    assert result["type"] == "auto"


def test_convert_tool_choice_required():
    """MCP "required" maps to Anthropic's "any" choice."""
    result = AnthropicSamplingHandler._convert_tool_choice_to_anthropic(
        MagicMock(mode="required")
    )
    assert result is not None
    assert result["type"] == "any"


def test_convert_tool_choice_none():
    """MCP "none" yields None so tools are dropped from the request."""
    result = AnthropicSamplingHandler._convert_tool_choice_to_anthropic(
        MagicMock(mode="none")
    )
    # Anthropic doesn't have "none", returns None to signal tools should be omitted
    assert result is None


def test_convert_tool_choice_unknown_raises():
    """An unrecognized mode raises ValueError rather than passing through."""
    with pytest.raises(ValueError, match="Unsupported tool_choice mode"):
        AnthropicSamplingHandler._convert_tool_choice_to_anthropic(
            MagicMock(mode="unknown")
        )
def test_convert_tools_to_anthropic():
    """MCP Tool definitions convert to Anthropic ToolParam dicts verbatim."""
    from mcp.types import Tool

    tools = [
        Tool(
            name="get_weather",
            description="Get the current weather",
            inputSchema={
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        )
    ]
    result = AnthropicSamplingHandler._convert_tools_to_anthropic(tools)
    assert len(result) == 1
    assert result[0]["name"] == "get_weather"
    assert result[0]["description"] == "Get the current weather"
    # The JSON schema is passed through unchanged.
    assert result[0]["input_schema"] == {
        "type": "object",
        "properties": {"location": {"type": "string"}},
        "required": ["location"],
    }
def test_convert_messages_with_tool_use_content():
    """Test converting messages that include tool use content from assistant."""
    msgs = AnthropicSamplingHandler._convert_to_anthropic_messages(
        messages=[
            SamplingMessage(
                role="assistant",
                content=ToolUseContent(
                    type="tool_use",
                    id="toolu_123",
                    name="get_weather",
                    input={"location": "NYC"},
                ),
            ),
        ],
    )
    assert len(msgs) == 1
    assert msgs[0]["role"] == "assistant"
    assert msgs[0]["content"] == [
        {
            "type": "tool_use",
            "id": "toolu_123",
            "name": "get_weather",
            "input": {"location": "NYC"},
        }
    ]


def test_convert_messages_with_tool_result_content():
    """Test converting messages that include tool result content from user."""
    from mcp.types import ToolResultContent

    msgs = AnthropicSamplingHandler._convert_to_anthropic_messages(
        messages=[
            SamplingMessage(
                role="user",
                content=ToolResultContent(
                    type="tool_result",
                    toolUseId="toolu_123",
                    content=[TextContent(type="text", text="72F and sunny")],
                ),
            ),
        ],
    )
    assert len(msgs) == 1
    assert msgs[0]["role"] == "user"
    # A single text block is flattened to a bare string in the result.
    assert msgs[0]["content"] == [
        {
            "type": "tool_result",
            "tool_use_id": "toolu_123",
            "content": "72F and sunny",
            "is_error": False,
        }
    ]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/sampling/handlers/test_anthropic_handler.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/client/sampling/handlers/openai.py | """OpenAI sampling handler for FastMCP."""
import json
from collections.abc import Iterator, Sequence
from typing import Any, get_args
from mcp import ClientSession, ServerSession
from mcp.shared.context import LifespanContextT, RequestContext
from mcp.types import CreateMessageRequestParams as SamplingParams
from mcp.types import (
CreateMessageResult,
CreateMessageResultWithTools,
ModelPreferences,
SamplingMessage,
StopReason,
TextContent,
Tool,
ToolChoice,
ToolResultContent,
ToolUseContent,
)
try:
from openai import AsyncOpenAI
from openai.types.chat import (
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionMessageToolCallParam,
ChatCompletionSystemMessageParam,
ChatCompletionToolChoiceOptionParam,
ChatCompletionToolMessageParam,
ChatCompletionToolParam,
ChatCompletionUserMessageParam,
)
from openai.types.shared.chat_model import ChatModel
from openai.types.shared_params import FunctionDefinition
except ImportError as e:
raise ImportError(
"The `openai` package is not installed. "
"Please install `fastmcp[openai]` or add `openai` to your dependencies manually."
) from e
class OpenAISamplingHandler:
"""Sampling handler that uses the OpenAI API."""
    def __init__(
        self,
        default_model: ChatModel,
        client: AsyncOpenAI | None = None,
    ) -> None:
        """Store the fallback model and client (a default AsyncOpenAI if omitted)."""
        self.client: AsyncOpenAI = client or AsyncOpenAI()
        self.default_model: ChatModel = default_model
    async def __call__(
        self,
        messages: list[SamplingMessage],
        params: SamplingParams,
        context: RequestContext[ServerSession, LifespanContextT]
        | RequestContext[ClientSession, LifespanContextT],
    ) -> CreateMessageResult | CreateMessageResultWithTools:
        """Handle one MCP sampling request via the OpenAI chat completions API.

        Returns a tool-aware result when ``params.tools`` is set, otherwise
        a plain ``CreateMessageResult``.
        """
        openai_messages: list[ChatCompletionMessageParam] = (
            self._convert_to_openai_messages(
                system_prompt=params.systemPrompt,
                messages=messages,
            )
        )
        model: ChatModel = self._select_model_from_preferences(params.modelPreferences)
        # Convert MCP tools to OpenAI format
        openai_tools: list[ChatCompletionToolParam] | None = None
        if params.tools:
            openai_tools = self._convert_tools_to_openai(params.tools)
        # Convert tool_choice to OpenAI format
        openai_tool_choice: ChatCompletionToolChoiceOptionParam | None = None
        if params.toolChoice:
            openai_tool_choice = self._convert_tool_choice_to_openai(params.toolChoice)
        # Build kwargs to avoid sentinel type compatibility issues across
        # openai SDK versions (NotGiven vs Omit)
        kwargs: dict[str, Any] = {
            "model": model,
            "messages": openai_messages,
        }
        if params.maxTokens is not None:
            kwargs["max_completion_tokens"] = params.maxTokens
        if params.temperature is not None:
            kwargs["temperature"] = params.temperature
        if params.stopSequences:
            kwargs["stop"] = params.stopSequences
        if openai_tools is not None:
            kwargs["tools"] = openai_tools
        if openai_tool_choice is not None:
            kwargs["tool_choice"] = openai_tool_choice
        response = await self.client.chat.completions.create(**kwargs)
        # Return appropriate result type based on whether tools were provided
        if params.tools:
            return self._chat_completion_to_result_with_tools(response)
        return self._chat_completion_to_create_message_result(response)
@staticmethod
def _iter_models_from_preferences(
model_preferences: ModelPreferences | str | list[str] | None,
) -> Iterator[str]:
if model_preferences is None:
return
if isinstance(model_preferences, str) and model_preferences in get_args(
ChatModel
):
yield model_preferences
elif isinstance(model_preferences, list):
yield from model_preferences
elif isinstance(model_preferences, ModelPreferences):
if not (hints := model_preferences.hints):
return
for hint in hints:
if not (name := hint.name):
continue
yield name
    @staticmethod
    def _convert_to_openai_messages(
        system_prompt: str | None, messages: Sequence[SamplingMessage]
    ) -> list[ChatCompletionMessageParam]:
        """Convert MCP sampling messages to OpenAI chat-completion messages.

        Args:
            system_prompt: Optional system prompt; emitted first when present.
            messages: MCP sampling messages whose content may be a single
                TextContent / ToolUseContent / ToolResultContent block or a
                list mixing those block types.

        Returns:
            OpenAI-format messages, ordered so that any assistant message
            carrying tool_calls is immediately followed by its matching
            role="tool" result messages.

        Raises:
            ValueError: If a message carries an unsupported content type.
        """
        openai_messages: list[ChatCompletionMessageParam] = []
        if system_prompt:
            openai_messages.append(
                ChatCompletionSystemMessageParam(
                    role="system",
                    content=system_prompt,
                )
            )
        for message in messages:
            content = message.content
            # Handle list content (from CreateMessageResultWithTools)
            if isinstance(content, list):
                # Collect tool calls and text from the list
                tool_calls: list[ChatCompletionMessageToolCallParam] = []
                text_parts: list[str] = []
                # Collect tool results separately to maintain correct ordering
                tool_messages: list[ChatCompletionToolMessageParam] = []
                for item in content:
                    if isinstance(item, ToolUseContent):
                        tool_calls.append(
                            ChatCompletionMessageToolCallParam(
                                id=item.id,
                                type="function",
                                function={
                                    "name": item.name,
                                    "arguments": json.dumps(item.input),
                                },
                            )
                        )
                    elif isinstance(item, TextContent):
                        text_parts.append(item.text)
                    elif isinstance(item, ToolResultContent):
                        # Collect tool results (added after assistant message)
                        content_text = ""
                        if item.content:
                            result_texts = []
                            for sub_item in item.content:
                                if isinstance(sub_item, TextContent):
                                    result_texts.append(sub_item.text)
                            content_text = "\n".join(result_texts)
                        tool_messages.append(
                            ChatCompletionToolMessageParam(
                                role="tool",
                                tool_call_id=item.toolUseId,
                                content=content_text,
                            )
                        )
                # Add assistant message with tool calls if present
                # OpenAI requires: assistant (with tool_calls) -> tool messages
                if tool_calls or text_parts:
                    msg_content = "\n".join(text_parts) if text_parts else None
                    if tool_calls:
                        openai_messages.append(
                            ChatCompletionAssistantMessageParam(
                                role="assistant",
                                content=msg_content,
                                tool_calls=tool_calls,
                            )
                        )
                        # Add tool messages AFTER assistant message
                        openai_messages.extend(tool_messages)
                    elif msg_content:
                        # Text-only list content: the message role decides
                        # whether it becomes a user or assistant message.
                        if message.role == "user":
                            openai_messages.append(
                                ChatCompletionUserMessageParam(
                                    role="user",
                                    content=msg_content,
                                )
                            )
                        else:
                            openai_messages.append(
                                ChatCompletionAssistantMessageParam(
                                    role="assistant",
                                    content=msg_content,
                                )
                            )
                elif tool_messages:
                    # Tool results only (assistant message was in previous message)
                    openai_messages.extend(tool_messages)
                continue
            # Handle ToolUseContent (assistant's tool calls)
            if isinstance(content, ToolUseContent):
                openai_messages.append(
                    ChatCompletionAssistantMessageParam(
                        role="assistant",
                        tool_calls=[
                            ChatCompletionMessageToolCallParam(
                                id=content.id,
                                type="function",
                                function={
                                    "name": content.name,
                                    "arguments": json.dumps(content.input),
                                },
                            )
                        ],
                    )
                )
                continue
            # Handle ToolResultContent (user's tool results)
            if isinstance(content, ToolResultContent):
                # Extract text parts from the content list
                result_texts: list[str] = []
                if content.content:
                    for item in content.content:
                        if isinstance(item, TextContent):
                            result_texts.append(item.text)
                openai_messages.append(
                    ChatCompletionToolMessageParam(
                        role="tool",
                        tool_call_id=content.toolUseId,
                        content="\n".join(result_texts),
                    )
                )
                continue
            # Handle TextContent
            if isinstance(content, TextContent):
                if message.role == "user":
                    openai_messages.append(
                        ChatCompletionUserMessageParam(
                            role="user",
                            content=content.text,
                        )
                    )
                else:
                    openai_messages.append(
                        ChatCompletionAssistantMessageParam(
                            role="assistant",
                            content=content.text,
                        )
                    )
                continue
            raise ValueError(f"Unsupported content type: {type(content)}")
        return openai_messages
@staticmethod
def _chat_completion_to_create_message_result(
chat_completion: ChatCompletion,
) -> CreateMessageResult:
if len(chat_completion.choices) == 0:
raise ValueError("No response for completion")
first_choice = chat_completion.choices[0]
if content := first_choice.message.content:
return CreateMessageResult(
content=TextContent(type="text", text=content),
role="assistant",
model=chat_completion.model,
)
raise ValueError("No content in response from completion")
def _select_model_from_preferences(
self, model_preferences: ModelPreferences | str | list[str] | None
) -> ChatModel:
for model_option in self._iter_models_from_preferences(model_preferences):
if model_option in get_args(ChatModel):
chosen_model: ChatModel = model_option # type: ignore[assignment]
return chosen_model
return self.default_model
@staticmethod
def _convert_tools_to_openai(tools: list[Tool]) -> list[ChatCompletionToolParam]:
"""Convert MCP tools to OpenAI tool format."""
openai_tools: list[ChatCompletionToolParam] = []
for tool in tools:
# Build parameters dict, ensuring required fields
parameters: dict[str, Any] = dict(tool.inputSchema)
if "type" not in parameters:
parameters["type"] = "object"
openai_tools.append(
ChatCompletionToolParam(
type="function",
function=FunctionDefinition(
name=tool.name,
description=tool.description or "",
parameters=parameters,
),
)
)
return openai_tools
@staticmethod
def _convert_tool_choice_to_openai(
tool_choice: ToolChoice,
) -> ChatCompletionToolChoiceOptionParam:
"""Convert MCP tool_choice to OpenAI format."""
if tool_choice.mode == "auto":
return "auto"
elif tool_choice.mode == "required":
return "required"
elif tool_choice.mode == "none":
return "none"
else:
raise ValueError(f"Unsupported tool_choice mode: {tool_choice.mode!r}")
@staticmethod
def _chat_completion_to_result_with_tools(
chat_completion: ChatCompletion,
) -> CreateMessageResultWithTools:
"""Convert OpenAI response to CreateMessageResultWithTools."""
if len(chat_completion.choices) == 0:
raise ValueError("No response for completion")
first_choice = chat_completion.choices[0]
message = first_choice.message
# Determine stop reason
stop_reason: StopReason
if first_choice.finish_reason == "tool_calls":
stop_reason = "toolUse"
elif first_choice.finish_reason == "stop":
stop_reason = "endTurn"
elif first_choice.finish_reason == "length":
stop_reason = "maxTokens"
else:
stop_reason = "endTurn"
# Build content list
content: list[TextContent | ToolUseContent] = []
# Add text content if present
if message.content:
content.append(TextContent(type="text", text=message.content))
# Add tool calls if present
if message.tool_calls:
for tool_call in message.tool_calls:
# Skip non-function tool calls
if not hasattr(tool_call, "function"):
continue
func = tool_call.function
# Parse the arguments JSON string
try:
arguments = json.loads(func.arguments) # type: ignore[union-attr]
except json.JSONDecodeError as e:
raise ValueError(
f"Invalid JSON in tool arguments for "
f"'{func.name}': {func.arguments}" # type: ignore[union-attr]
) from e
content.append(
ToolUseContent(
type="tool_use",
id=tool_call.id,
name=func.name, # type: ignore[union-attr]
input=arguments,
)
)
# Must have at least some content
if not content:
raise ValueError("No content in response from completion")
return CreateMessageResultWithTools(
content=content, # type: ignore[arg-type]
role="assistant",
model=chat_completion.model,
stopReason=stop_reason,
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/sampling/handlers/openai.py",
"license": "Apache License 2.0",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/sampling/run.py | """Sampling types and helper functions for FastMCP servers."""
from __future__ import annotations
import inspect
import json
from collections.abc import Callable, Sequence
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Generic, Literal, cast
import anyio
from mcp.types import (
ClientCapabilities,
CreateMessageResult,
CreateMessageResultWithTools,
ModelHint,
ModelPreferences,
SamplingCapability,
SamplingMessage,
SamplingMessageContentBlock,
SamplingToolsCapability,
TextContent,
ToolChoice,
ToolResultContent,
ToolUseContent,
)
from mcp.types import CreateMessageRequestParams as SamplingParams
from mcp.types import Tool as SDKTool
from pydantic import ValidationError
from typing_extensions import TypeVar
from fastmcp import settings
from fastmcp.exceptions import ToolError
from fastmcp.server.sampling.sampling_tool import SamplingTool
from fastmcp.tools.function_tool import FunctionTool
from fastmcp.tools.tool_transform import TransformedTool
from fastmcp.utilities.async_utils import gather
from fastmcp.utilities.json_schema import compress_schema
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.types import get_cached_typeadapter
logger = get_logger(__name__)
if TYPE_CHECKING:
from fastmcp.server.context import Context
ResultT = TypeVar("ResultT")
# Simplified tool choice type - just the mode string instead of the full MCP object
ToolChoiceOption = Literal["auto", "required", "none"]
@dataclass
class SamplingResult(Generic[ResultT]):
    """Result of a sampling operation.

    Attributes:
        text: The text representation of the result (raw text or JSON for structured).
        result: The typed result (str for text, parsed object for structured output).
        history: All messages exchanged during sampling.
    """

    # Raw text of the final response, or its JSON form for structured output.
    text: str | None
    # Parsed result: plain str for text sampling, validated object otherwise.
    result: ResultT
    # Full message transcript accumulated across the sampling loop.
    history: list[SamplingMessage]
@dataclass
class SampleStep:
    """Result of a single sampling call.

    Bundles what the LLM returned in one step with the running message history.
    """

    response: CreateMessageResult | CreateMessageResultWithTools
    history: list[SamplingMessage]

    @property
    def is_tool_use(self) -> bool:
        """True if the LLM is requesting tool execution."""
        return (
            isinstance(self.response, CreateMessageResultWithTools)
            and self.response.stopReason == "toolUse"
        )

    @property
    def text(self) -> str | None:
        """Extract text from the response, if available."""
        blocks = self.response.content
        if isinstance(blocks, TextContent):
            return blocks.text
        if isinstance(blocks, list):
            # First text block wins; None when the list holds no text.
            return next(
                (block.text for block in blocks if isinstance(block, TextContent)),
                None,
            )
        return None

    @property
    def tool_calls(self) -> list[ToolUseContent]:
        """Get the list of tool calls from the response."""
        blocks = self.response.content
        if isinstance(blocks, ToolUseContent):
            return [blocks]
        if isinstance(blocks, list):
            return [block for block in blocks if isinstance(block, ToolUseContent)]
        return []
def _parse_model_preferences(
    model_preferences: ModelPreferences | str | list[str] | None,
) -> ModelPreferences | None:
    """Normalize supported preference inputs into a ModelPreferences object.

    Accepts a ready-made ModelPreferences, a single model-name string, a list
    of model-name strings, or None (which passes through unchanged).

    Raises:
        ValueError: If the input type is unsupported or a list element is not a str.
    """
    if model_preferences is None:
        return None
    if isinstance(model_preferences, ModelPreferences):
        return model_preferences
    if isinstance(model_preferences, str):
        return ModelPreferences(hints=[ModelHint(name=model_preferences)])
    if isinstance(model_preferences, list):
        if any(not isinstance(item, str) for item in model_preferences):
            raise ValueError("All elements of model_preferences list must be strings.")
        return ModelPreferences(hints=[ModelHint(name=item) for item in model_preferences])
    raise ValueError(
        "model_preferences must be one of: ModelPreferences, str, list[str], or None."
    )
# --- Standalone functions for sample_step() ---
def determine_handler_mode(context: Context, needs_tools: bool) -> bool:
    """Determine whether to use fallback handler or client for sampling.

    Args:
        context: The MCP context.
        needs_tools: Whether the sampling request requires tool support.

    Returns:
        True if fallback handler should be used, False to use client.

    Raises:
        ValueError: If client lacks required capability and no fallback configured.
    """
    fastmcp = context.fastmcp
    session = context.session
    # Check what capabilities the client has
    has_sampling = session.check_client_capability(
        capability=ClientCapabilities(sampling=SamplingCapability())
    )
    has_tools_capability = session.check_client_capability(
        capability=ClientCapabilities(
            sampling=SamplingCapability(tools=SamplingToolsCapability())
        )
    )
    if fastmcp.sampling_handler_behavior == "always":
        # 'always': the handler is mandatory regardless of client support.
        if fastmcp.sampling_handler is None:
            raise ValueError(
                "sampling_handler_behavior is 'always' but no handler configured"
            )
        return True
    elif fastmcp.sampling_handler_behavior == "fallback":
        # 'fallback': prefer the client whenever it can satisfy the request.
        client_sufficient = has_sampling and (not needs_tools or has_tools_capability)
        if not client_sufficient:
            if fastmcp.sampling_handler is None:
                # Distinguish "missing tools capability" from "no sampling at all".
                if needs_tools and has_sampling and not has_tools_capability:
                    raise ValueError(
                        "Client does not support sampling with tools. "
                        "The client must advertise the sampling.tools capability."
                    )
                raise ValueError("Client does not support sampling")
            return True
    elif fastmcp.sampling_handler_behavior is not None:
        raise ValueError(
            f"Invalid sampling_handler_behavior: {fastmcp.sampling_handler_behavior!r}. "
            "Must be 'always', 'fallback', or None."
        )
    elif not has_sampling:
        raise ValueError("Client does not support sampling")
    elif needs_tools and not has_tools_capability:
        raise ValueError(
            "Client does not support sampling with tools. "
            "The client must advertise the sampling.tools capability."
        )
    # Default (no handler behavior configured): use the client directly.
    return False
async def call_sampling_handler(
    context: Context,
    messages: list[SamplingMessage],
    *,
    system_prompt: str | None,
    temperature: float | None,
    max_tokens: int,
    model_preferences: ModelPreferences | str | list[str] | None,
    sdk_tools: list[SDKTool] | None,
    tool_choice: ToolChoice | None,
) -> CreateMessageResult | CreateMessageResultWithTools:
    """Make LLM call using the fallback handler.

    Note: This function expects the caller (sample_step) to have validated that
    sampling_handler is set via determine_handler_mode(). The checks below are
    safeguards against internal misuse.
    """
    handler = context.fastmcp.sampling_handler
    if handler is None:
        raise RuntimeError("sampling_handler is None")
    if context.request_context is None:
        raise RuntimeError("request_context is None")
    params = SamplingParams(
        systemPrompt=system_prompt,
        messages=messages,
        temperature=temperature,
        maxTokens=max_tokens,
        modelPreferences=_parse_model_preferences(model_preferences),
        tools=sdk_tools,
        toolChoice=tool_choice,
    )
    outcome = handler(messages, params, context.request_context)
    # Handlers may be sync or async.
    if inspect.isawaitable(outcome):
        outcome = await outcome
    outcome = cast("str | CreateMessageResult | CreateMessageResultWithTools", outcome)
    if isinstance(outcome, str):
        # Promote a bare string to a complete assistant text result.
        return CreateMessageResult(
            role="assistant",
            content=TextContent(type="text", text=outcome),
            model="unknown",
            stopReason="endTurn",
        )
    return outcome
async def execute_tools(
    tool_calls: list[ToolUseContent],
    tool_map: dict[str, SamplingTool],
    mask_error_details: bool = False,
    tool_concurrency: int | None = None,
) -> list[ToolResultContent]:
    """Execute tool calls and return results.

    Args:
        tool_calls: List of tool use requests from the LLM.
        tool_map: Mapping from tool name to SamplingTool.
        mask_error_details: If True, mask detailed error messages from tool execution.
            When masked, only generic error messages are returned to the LLM.
            Tools can explicitly raise ToolError to bypass masking when they want
            to provide specific error messages to the LLM.
        tool_concurrency: Controls parallel execution of tools:
            - None (default): Sequential execution (one at a time)
            - 0: Unlimited parallel execution
            - N > 0: Execute at most N tools concurrently
            If any tool has sequential=True, all tools execute sequentially
            regardless of this setting.

    Returns:
        List of tool result content blocks in the same order as tool_calls.

    Raises:
        ValueError: If tool_concurrency is a negative integer.
    """
    if tool_concurrency is not None and tool_concurrency < 0:
        raise ValueError(
            f"tool_concurrency must be None, 0 (unlimited), or a positive integer, "
            f"got {tool_concurrency}"
        )

    async def _execute_single_tool(tool_use: ToolUseContent) -> ToolResultContent:
        """Execute a single tool and return its result."""
        tool = tool_map.get(tool_use.name)
        if tool is None:
            # Unknown tool names become an error result rather than raising,
            # so the LLM can see and recover from its own mistake.
            return ToolResultContent(
                type="tool_result",
                toolUseId=tool_use.id,
                content=[
                    TextContent(
                        type="text",
                        text=f"Error: Unknown tool '{tool_use.name}'",
                    )
                ],
                isError=True,
            )
        try:
            result_value = await tool.run(tool_use.input)
            return ToolResultContent(
                type="tool_result",
                toolUseId=tool_use.id,
                content=[TextContent(type="text", text=str(result_value))],
            )
        except ToolError as e:
            # ToolError is the escape hatch - always pass message through
            logger.exception(f"Error calling sampling tool '{tool_use.name}'")
            return ToolResultContent(
                type="tool_result",
                toolUseId=tool_use.id,
                content=[TextContent(type="text", text=str(e))],
                isError=True,
            )
        except Exception as e:
            # Generic exceptions - mask based on setting
            logger.exception(f"Error calling sampling tool '{tool_use.name}'")
            if mask_error_details:
                error_text = f"Error executing tool '{tool_use.name}'"
            else:
                error_text = f"Error executing tool '{tool_use.name}': {e}"
            return ToolResultContent(
                type="tool_result",
                toolUseId=tool_use.id,
                content=[TextContent(type="text", text=error_text)],
                isError=True,
            )

    # Check if any tool requires sequential execution
    requires_sequential = any(
        tool.sequential
        for tool_use in tool_calls
        if (tool := tool_map.get(tool_use.name)) is not None
    )
    # Execute sequentially if required or if concurrency is None (default)
    if tool_concurrency is None or requires_sequential:
        tool_results: list[ToolResultContent] = []
        for tool_use in tool_calls:
            result = await _execute_single_tool(tool_use)
            tool_results.append(result)
        return tool_results
    # Execute in parallel
    if tool_concurrency == 0:
        # Unlimited parallel execution
        return await gather(*[_execute_single_tool(tc) for tc in tool_calls])
    else:
        # Bounded parallel execution with semaphore
        semaphore = anyio.Semaphore(tool_concurrency)

        async def bounded_execute(tool_use: ToolUseContent) -> ToolResultContent:
            async with semaphore:
                return await _execute_single_tool(tool_use)

        return await gather(*[bounded_execute(tc) for tc in tool_calls])
# --- Helper functions for sampling ---
def prepare_messages(
    messages: str | Sequence[str | SamplingMessage],
) -> list[SamplingMessage]:
    """Convert various message formats to a list of SamplingMessage objects.

    A bare string becomes a single user text message; strings inside a
    sequence are wrapped the same way, while SamplingMessages pass through.
    """

    def _as_message(item: str | SamplingMessage) -> SamplingMessage:
        if isinstance(item, str):
            return SamplingMessage(
                content=TextContent(text=item, type="text"), role="user"
            )
        return item

    if isinstance(messages, str):
        messages = [messages]
    return [_as_message(item) for item in messages]
def prepare_tools(
    tools: Sequence[SamplingTool | FunctionTool | TransformedTool | Callable[..., Any]]
    | None,
) -> list[SamplingTool] | None:
    """Convert tools to SamplingTool objects.

    Accepts SamplingTool instances, FunctionTool instances, TransformedTool instances,
    or plain callable functions. FunctionTool and TransformedTool are converted using
    from_callable_tool(), while plain functions use from_function().

    Args:
        tools: Sequence of tools to prepare. Can be SamplingTool, FunctionTool,
            TransformedTool, or plain callable functions.

    Returns:
        List of SamplingTool instances, or None if tools is None or empty.

    Raises:
        TypeError: If an element is not a supported tool type or callable.
    """
    if tools is None:
        return None
    prepared: list[SamplingTool] = []
    for candidate in tools:
        if isinstance(candidate, SamplingTool):
            prepared.append(candidate)
        elif isinstance(candidate, (FunctionTool, TransformedTool)):
            prepared.append(SamplingTool.from_callable_tool(candidate))
        elif callable(candidate):
            prepared.append(SamplingTool.from_function(candidate))
        else:
            raise TypeError(
                f"Expected SamplingTool, FunctionTool, TransformedTool, or callable, got {type(candidate)}"
            )
    # An empty sequence normalizes to None, matching the "no tools" case.
    return prepared or None
def extract_tool_calls(
    response: CreateMessageResult | CreateMessageResultWithTools,
) -> list[ToolUseContent]:
    """Extract tool calls from a response.

    Returns an empty list when the response content holds no tool-use blocks.
    """
    content = response.content
    if isinstance(content, ToolUseContent):
        return [content]
    if isinstance(content, list):
        return [block for block in content if isinstance(block, ToolUseContent)]
    return []
def create_final_response_tool(result_type: type) -> SamplingTool:
    """Create a synthetic 'final_response' tool for structured output.

    The tool's parameter schema is derived from ``result_type``; the LLM
    calls the tool to deliver its final structured answer.
    """
    adapter = get_cached_typeadapter(result_type)
    schema = compress_schema(adapter.json_schema(), prune_titles=True)
    if schema.get("type") != "object":
        # Tool parameters must be object-shaped; wrap primitives as {"value": ...}.
        schema = {
            "type": "object",
            "properties": {"value": schema},
            "required": ["value"],
        }

    def final_response(**kwargs: Any) -> dict[str, Any]:
        # Echo the arguments back unchanged; validation happens in the loop.
        return kwargs

    return SamplingTool(
        name="final_response",
        description=(
            "Call this tool to provide your final response. "
            "Use this when you have completed the task and are ready to return the result."
        ),
        parameters=schema,
        fn=final_response,
    )
# --- Implementation functions for Context methods ---
async def sample_step_impl(
    context: Context,
    messages: str | Sequence[str | SamplingMessage],
    *,
    system_prompt: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    model_preferences: ModelPreferences | str | list[str] | None = None,
    tools: Sequence[SamplingTool | FunctionTool | TransformedTool | Callable[..., Any]]
    | None = None,
    tool_choice: ToolChoiceOption | str | None = None,
    auto_execute_tools: bool = True,
    mask_error_details: bool | None = None,
    tool_concurrency: int | None = None,
) -> SampleStep:
    """Implementation of Context.sample_step().

    Make a single LLM sampling call. This is a stateless function that makes
    exactly one LLM call and optionally executes any requested tools.

    Returns:
        A SampleStep holding the raw response plus the updated history
        (assistant reply, and tool results when tools were auto-executed).

    Raises:
        ValueError: If tool_choice is not 'auto', 'required', or 'none', or if
            the client/handler configuration cannot satisfy the request.
    """
    # Convert messages to SamplingMessage objects
    current_messages = prepare_messages(messages)
    # Convert tools to SamplingTools
    sampling_tools = prepare_tools(tools)
    sdk_tools: list[SDKTool] | None = (
        [t._to_sdk_tool() for t in sampling_tools] if sampling_tools else None
    )
    tool_map: dict[str, SamplingTool] = (
        {t.name: t for t in sampling_tools} if sampling_tools else {}
    )
    # Determine whether to use fallback handler or client
    use_fallback = determine_handler_mode(context, bool(sampling_tools))
    # Build tool choice
    effective_tool_choice: ToolChoice | None = None
    if tool_choice is not None:
        if tool_choice not in ("auto", "required", "none"):
            raise ValueError(
                f"Invalid tool_choice: {tool_choice!r}. "
                "Must be 'auto', 'required', or 'none'."
            )
        effective_tool_choice = ToolChoice(
            mode=cast(Literal["auto", "required", "none"], tool_choice)
        )
    # Effective max_tokens
    effective_max_tokens = max_tokens if max_tokens is not None else 512
    # Make the LLM call
    if use_fallback:
        response = await call_sampling_handler(
            context,
            current_messages,
            system_prompt=system_prompt,
            temperature=temperature,
            max_tokens=effective_max_tokens,
            model_preferences=model_preferences,
            sdk_tools=sdk_tools,
            tool_choice=effective_tool_choice,
        )
    else:
        response = await context.session.create_message(
            messages=current_messages,
            system_prompt=system_prompt,
            temperature=temperature,
            max_tokens=effective_max_tokens,
            model_preferences=_parse_model_preferences(model_preferences),
            tools=sdk_tools,
            tool_choice=effective_tool_choice,
            related_request_id=context.request_id,
        )
    # Check if this is a tool use response
    is_tool_use_response = (
        isinstance(response, CreateMessageResultWithTools)
        and response.stopReason == "toolUse"
    )
    # Always include the assistant response in history
    current_messages.append(SamplingMessage(role="assistant", content=response.content))
    # If not a tool use, return immediately
    if not is_tool_use_response:
        return SampleStep(response=response, history=current_messages)
    # If not executing tools, return with assistant message but no tool results
    if not auto_execute_tools:
        return SampleStep(response=response, history=current_messages)
    # Execute tools and add results to history
    step_tool_calls = extract_tool_calls(response)
    if step_tool_calls:
        # Fall back to the global setting when the caller didn't specify.
        effective_mask = (
            mask_error_details
            if mask_error_details is not None
            else settings.mask_error_details
        )
        tool_results: list[ToolResultContent] = await execute_tools(
            step_tool_calls,
            tool_map,
            mask_error_details=effective_mask,
            tool_concurrency=tool_concurrency,
        )
        if tool_results:
            # Tool results go back to the LLM as a user message.
            current_messages.append(
                SamplingMessage(
                    role="user",
                    content=cast(list[SamplingMessageContentBlock], tool_results),
                )
            )
    return SampleStep(response=response, history=current_messages)
async def sample_impl(
    context: Context,
    messages: str | Sequence[str | SamplingMessage],
    *,
    system_prompt: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    model_preferences: ModelPreferences | str | list[str] | None = None,
    tools: Sequence[SamplingTool | FunctionTool | TransformedTool | Callable[..., Any]]
    | None = None,
    result_type: type[ResultT] | None = None,
    mask_error_details: bool | None = None,
    tool_concurrency: int | None = None,
) -> SamplingResult[ResultT]:
    """Implementation of Context.sample().

    Send a sampling request to the client and await the response. This method
    runs to completion automatically, executing a tool loop until the LLM
    provides a final text response.

    Returns:
        A SamplingResult with the final text/structured result and the full
        message history.

    Raises:
        RuntimeError: If the loop exceeds the iteration safety limit, or if a
            structured result_type was requested but the LLM returned plain
            text instead of calling the final_response tool.
    """
    # Safety limit to prevent infinite loops
    max_iterations = 100
    # Convert tools to SamplingTools
    sampling_tools = prepare_tools(tools)
    # Handle structured output with result_type
    tool_choice: str | None = None
    if result_type is not None and result_type is not str:
        final_response_tool = create_final_response_tool(result_type)
        sampling_tools = list(sampling_tools) if sampling_tools else []
        sampling_tools.append(final_response_tool)
        # Always require tool calls when result_type is set - the LLM must
        # eventually call final_response (text responses are not accepted)
        tool_choice = "required"
    # Convert messages for the loop
    current_messages: str | Sequence[str | SamplingMessage] = messages
    for _iteration in range(max_iterations):
        step = await sample_step_impl(
            context,
            messages=current_messages,
            system_prompt=system_prompt,
            temperature=temperature,
            max_tokens=max_tokens,
            model_preferences=model_preferences,
            tools=sampling_tools,
            tool_choice=tool_choice,
            mask_error_details=mask_error_details,
            tool_concurrency=tool_concurrency,
        )
        # Check for final_response tool call for structured output
        if result_type is not None and result_type is not str and step.is_tool_use:
            for tool_call in step.tool_calls:
                if tool_call.name == "final_response":
                    # Validate and return the structured result
                    type_adapter = get_cached_typeadapter(result_type)
                    # Unwrap if we wrapped primitives (non-object schemas)
                    input_data = tool_call.input
                    original_schema = compress_schema(
                        type_adapter.json_schema(), prune_titles=True
                    )
                    if (
                        original_schema.get("type") != "object"
                        and isinstance(input_data, dict)
                        and "value" in input_data
                    ):
                        input_data = input_data["value"]
                    try:
                        validated_result = type_adapter.validate_python(input_data)
                        text = json.dumps(
                            type_adapter.dump_python(validated_result, mode="json")
                        )
                        return SamplingResult(
                            text=text,
                            result=validated_result,
                            history=step.history,
                        )
                    except ValidationError as e:
                        # Validation failed - add error as tool result
                        # so the LLM can retry with corrected data.
                        step.history.append(
                            SamplingMessage(
                                role="user",
                                content=[
                                    ToolResultContent(
                                        type="tool_result",
                                        toolUseId=tool_call.id,
                                        content=[
                                            TextContent(
                                                type="text",
                                                text=(
                                                    f"Validation error: {e}. "
                                                    "Please try again with valid data."
                                                ),
                                            )
                                        ],
                                        isError=True,
                                    )
                                ],
                            )
                        )
        # If not a tool use response, we're done
        if not step.is_tool_use:
            # For structured output, the LLM must use the final_response tool
            if result_type is not None and result_type is not str:
                raise RuntimeError(
                    f"Expected structured output of type {result_type.__name__}, "
                    "but the LLM returned a text response instead of calling "
                    "the final_response tool."
                )
            return SamplingResult(
                text=step.text,
                result=cast(ResultT, step.text if step.text else ""),
                history=step.history,
            )
        # Continue with the updated history
        current_messages = step.history
        # After first iteration, reset tool_choice to auto (unless structured output is required)
        if result_type is None or result_type is str:
            tool_choice = None
    raise RuntimeError(f"Sampling exceeded maximum iterations ({max_iterations})")
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/sampling/run.py",
"license": "Apache License 2.0",
"lines": 602,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/sampling/sampling_tool.py | """SamplingTool for use during LLM sampling requests."""
from __future__ import annotations
import inspect
from collections.abc import Callable
from typing import Any
from mcp.types import TextContent
from mcp.types import Tool as SDKTool
from pydantic import ConfigDict
from fastmcp.tools.function_parsing import ParsedFunction
from fastmcp.tools.function_tool import FunctionTool
from fastmcp.tools.tool import ToolResult
from fastmcp.tools.tool_transform import TransformedTool
from fastmcp.utilities.types import FastMCPBaseModel
class SamplingTool(FastMCPBaseModel):
    """A tool that can be used during LLM sampling.

    SamplingTools bundle a tool's schema (name, description, parameters) with
    an executor function, enabling servers to execute agentic workflows where
    the LLM can request tool calls during sampling.

    In most cases, pass functions directly to ctx.sample():

        def search(query: str) -> str:
            '''Search the web.'''
            return web_search(query)

        result = await context.sample(
            messages="Find info about Python",
            tools=[search],  # Plain functions work directly
        )

    Create a SamplingTool explicitly when you need custom name/description:

        tool = SamplingTool.from_function(search, name="web_search")
    """

    # Tool name advertised to the LLM.
    name: str
    # Optional human-readable description shown to the LLM.
    description: str | None = None
    # JSON schema describing the tool's input parameters.
    parameters: dict[str, Any]
    # Executor invoked with the LLM-supplied arguments (sync or async).
    fn: Callable[..., Any]
    # When True, forces sequential execution of the whole tool batch.
    sequential: bool = False
    model_config = ConfigDict(arbitrary_types_allowed=True)

    async def run(self, arguments: dict[str, Any] | None = None) -> Any:
        """Execute the tool with the given arguments.

        Args:
            arguments: Dictionary of arguments to pass to the tool function.

        Returns:
            The result of executing the tool function.
        """
        if arguments is None:
            arguments = {}
        result = self.fn(**arguments)
        # Support both sync and async executor functions.
        if inspect.isawaitable(result):
            result = await result
        return result

    def _to_sdk_tool(self) -> SDKTool:
        """Convert to an mcp.types.Tool for SDK compatibility.

        This is used internally when passing tools to the MCP SDK's
        create_message() method.
        """
        return SDKTool(
            name=self.name,
            description=self.description,
            inputSchema=self.parameters,
        )

    @classmethod
    def from_function(
        cls,
        fn: Callable[..., Any],
        *,
        name: str | None = None,
        description: str | None = None,
        sequential: bool = False,
    ) -> SamplingTool:
        """Create a SamplingTool from a function.

        The function's signature is analyzed to generate a JSON schema for
        the tool's parameters. Type hints are used to determine parameter types.

        Args:
            fn: The function to create a tool from.
            name: Optional name override. Defaults to the function's name.
            description: Optional description override. Defaults to the function's docstring.
            sequential: If True, this tool requires sequential execution and prevents
                parallel execution of all tools in the batch. Set to True for tools
                with shared state, file writes, or other operations that cannot run
                concurrently. Defaults to False.

        Returns:
            A SamplingTool wrapping the function.

        Raises:
            ValueError: If the function is a lambda without a name override.
        """
        parsed = ParsedFunction.from_function(fn, validate=True)
        # Lambdas have no usable name, so an explicit override is required.
        if name is None and parsed.name == "<lambda>":
            raise ValueError("You must provide a name for lambda functions")
        return cls(
            name=name or parsed.name,
            description=description or parsed.description,
            parameters=parsed.input_schema,
            fn=parsed.fn,
            sequential=sequential,
        )

    @classmethod
    def from_callable_tool(
        cls,
        tool: FunctionTool | TransformedTool,
        *,
        name: str | None = None,
        description: str | None = None,
    ) -> SamplingTool:
        """Create a SamplingTool from a FunctionTool or TransformedTool.

        Reuses existing server tools in sampling contexts. For TransformedTool,
        the tool's .run() method is used to ensure proper argument transformation,
        and the ToolResult is automatically unwrapped.

        Args:
            tool: A FunctionTool or TransformedTool to convert.
            name: Optional name override. Defaults to tool.name.
            description: Optional description override. Defaults to tool.description.

        Raises:
            TypeError: If the tool is not a FunctionTool or TransformedTool.
        """
        # Validate that the tool is a supported type
        if not isinstance(tool, (FunctionTool, TransformedTool)):
            raise TypeError(
                f"Expected FunctionTool or TransformedTool, got {type(tool).__name__}. "
                "Only callable tools can be converted to SamplingTools."
            )

        # Both FunctionTool and TransformedTool need .run() to ensure proper
        # result processing (serializers, output_schema, wrap-result flags)
        async def wrapper(**kwargs: Any) -> Any:
            result = await tool.run(kwargs)
            # Unwrap ToolResult - extract the actual value
            if isinstance(result, ToolResult):
                # If there's structured_content, use that
                if result.structured_content is not None:
                    # Check tool's schema - this is the source of truth
                    if tool.output_schema and tool.output_schema.get(
                        "x-fastmcp-wrap-result"
                    ):
                        # Tool wraps results: {"result": value} -> value
                        return result.structured_content.get("result")
                    else:
                        # No wrapping: use structured_content directly
                        return result.structured_content
                # Otherwise, extract from text content
                if result.content and len(result.content) > 0:
                    first_content = result.content[0]
                    if isinstance(first_content, TextContent):
                        return first_content.text
            # Fallback: return the raw result unchanged.
            return result

        fn = wrapper
        # Extract the callable function, name, description, and parameters
        return cls(
            name=name or tool.name,
            description=description or tool.description,
            parameters=tool.parameters,
            fn=fn,
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/sampling/sampling_tool.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/server/sampling/test_sampling_tool.py | """Tests for SamplingTool."""
import pytest
from fastmcp.server.sampling import SamplingTool
from fastmcp.tools.function_tool import FunctionTool
from fastmcp.tools.tool_transform import ArgTransform, TransformedTool
class TestSamplingToolFromFunction:
    """Unit tests covering SamplingTool.from_function()."""

    def test_from_simple_function(self):
        def search(query: str) -> str:
            """Search the web."""
            return f"Results for: {query}"

        tool = SamplingTool.from_function(search)

        assert tool.name == "search"
        assert tool.description == "Search the web."
        schema_props = tool.parameters.get("properties", {})
        assert "query" in schema_props
        assert tool.fn is search

    def test_from_function_with_overrides(self):
        def search(query: str) -> str:
            return f"Results for: {query}"

        tool = SamplingTool.from_function(
            search,
            name="web_search",
            description="Search the internet",
        )

        # Explicit overrides win over the function's own name/docstring.
        assert tool.name == "web_search"
        assert tool.description == "Search the internet"

    def test_from_lambda_requires_name(self):
        # Lambdas have no usable __name__, so a name must be supplied.
        with pytest.raises(ValueError, match="must provide a name for lambda"):
            SamplingTool.from_function(lambda x: x)

    def test_from_lambda_with_name(self):
        doubler = SamplingTool.from_function(lambda x: x * 2, name="double")
        assert doubler.name == "double"

    def test_from_async_function(self):
        async def async_search(query: str) -> str:
            """Async search."""
            return f"Async results for: {query}"

        tool = SamplingTool.from_function(async_search)

        assert tool.name == "async_search"
        assert tool.description == "Async search."

    def test_multiple_parameters(self):
        def search(query: str, limit: int = 10, include_images: bool = False) -> str:
            """Search with options."""
            return f"Results for: {query}"

        tool = SamplingTool.from_function(search)

        schema_props = tool.parameters.get("properties", {})
        for param in ("query", "limit", "include_images"):
            assert param in schema_props
class TestSamplingToolRun:
    """Unit tests covering SamplingTool.run()."""

    async def test_run_sync_function(self):
        def add(a: int, b: int) -> int:
            """Add two numbers."""
            return a + b

        outcome = await SamplingTool.from_function(add).run({"a": 2, "b": 3})
        assert outcome == 5

    async def test_run_async_function(self):
        async def async_add(a: int, b: int) -> int:
            """Add two numbers asynchronously."""
            return a + b

        outcome = await SamplingTool.from_function(async_add).run({"a": 2, "b": 3})
        assert outcome == 5

    async def test_run_with_no_arguments(self):
        def get_value() -> str:
            """Return a fixed value."""
            return "hello"

        # Omitting the arguments dict entirely is allowed.
        assert await SamplingTool.from_function(get_value).run() == "hello"

    async def test_run_with_none_arguments(self):
        def get_value() -> str:
            """Return a fixed value."""
            return "hello"

        # Passing None is treated the same as passing no arguments.
        assert await SamplingTool.from_function(get_value).run(None) == "hello"
class TestSamplingToolSDKConversion:
    """Tests for the internal SamplingTool._to_sdk_tool() conversion."""

    def test_to_sdk_tool(self):
        def search(query: str) -> str:
            """Search the web."""
            return f"Results for: {query}"

        sdk_tool = SamplingTool.from_function(search)._to_sdk_tool()

        assert sdk_tool.name == "search"
        assert sdk_tool.description == "Search the web."
        assert "query" in sdk_tool.inputSchema.get("properties", {})
class TestSamplingToolFromCallableTool:
    """Tests for SamplingTool.from_callable_tool()."""

    def test_from_function_tool(self):
        """A FunctionTool converts with its name, description, and schema."""

        def search(query: str) -> str:
            """Search the web."""
            return f"Results for: {query}"

        converted = SamplingTool.from_callable_tool(FunctionTool.from_function(search))

        assert converted.name == "search"
        assert converted.description == "Search the web."
        assert "query" in converted.parameters.get("properties", {})
        # fn is a wrapper that routes through tool.run() for result processing
        assert callable(converted.fn)

    def test_from_function_tool_with_overrides(self):
        """Name and description overrides are honored during conversion."""

        def search(query: str) -> str:
            """Search the web."""
            return f"Results for: {query}"

        converted = SamplingTool.from_callable_tool(
            FunctionTool.from_function(search),
            name="web_search",
            description="Search the internet",
        )

        assert converted.name == "web_search"
        assert converted.description == "Search the internet"

    def test_from_transformed_tool(self):
        """A TransformedTool converts with its transformed argument names."""

        def original(query: str, limit: int) -> str:
            """Original tool."""
            return f"Results for: {query} (limit: {limit})"

        base = FunctionTool.from_function(original)
        renamed = TransformedTool.from_tool(
            base,
            name="search_transformed",
            transform_args={"query": ArgTransform(name="q")},
        )

        converted = SamplingTool.from_callable_tool(renamed)

        assert converted.name == "search_transformed"
        assert converted.description == "Original tool."
        props = converted.parameters.get("properties", {})
        # 'query' was renamed to 'q'; 'limit' is untouched
        assert "q" in props
        assert "limit" in props

    async def test_from_function_tool_execution(self):
        """A converted FunctionTool executes and returns the plain value."""

        def add(a: int, b: int) -> int:
            """Add two numbers."""
            return a + b

        converted = SamplingTool.from_callable_tool(FunctionTool.from_function(add))
        assert await converted.run({"a": 2, "b": 3}) == 5

    async def test_from_transformed_tool_execution(self):
        """A converted TransformedTool executes with transformed arguments."""

        def multiply(x: int, y: int) -> int:
            """Multiply two numbers."""
            return x * y

        renamed = TransformedTool.from_tool(
            FunctionTool.from_function(multiply),
            transform_args={"x": ArgTransform(name="a"), "y": ArgTransform(name="b")},
        )
        converted = SamplingTool.from_callable_tool(renamed)

        # Call with the transformed names; the ToolResult is unwrapped for us.
        assert await converted.run({"a": 3, "b": 4}) == 12

    def test_from_invalid_tool_type(self):
        """Objects that are not callable tools are rejected."""

        class NotATool:
            pass

        with pytest.raises(
            TypeError,
            match="Expected FunctionTool or TransformedTool",
        ):
            SamplingTool.from_callable_tool(NotATool())  # type: ignore[arg-type]

    def test_from_plain_function_fails(self):
        """Bare functions must go through from_function, not from_callable_tool."""

        def my_function():
            pass

        with pytest.raises(TypeError, match="Expected FunctionTool or TransformedTool"):
            SamplingTool.from_callable_tool(my_function)  # type: ignore[arg-type]

    async def test_from_function_tool_with_output_schema(self):
        """The x-fastmcp-wrap-result envelope is stripped from structured output."""

        def search(query: str) -> dict:
            """Search for something."""
            return {"results": ["item1", "item2"], "count": 2}

        wrapped = FunctionTool.from_function(
            search,
            output_schema={
                "type": "object",
                "properties": {
                    "results": {"type": "array"},
                    "count": {"type": "integer"},
                },
                "x-fastmcp-wrap-result": True,
            },
        )

        converted = SamplingTool.from_callable_tool(wrapped)
        outcome = await converted.run({"query": "test"})

        # The {"result": ...} envelope is unwrapped to the plain dict.
        assert isinstance(outcome, dict)
        assert outcome == {"results": ["item1", "item2"], "count": 2}

    async def test_from_function_tool_without_wrap_result(self):
        """Without the wrap-result flag, structured content is returned as-is."""

        def get_data() -> dict:
            """Get some data."""
            return {"status": "ok", "value": 42}

        plain = FunctionTool.from_function(
            get_data,
            output_schema={
                "type": "object",
                "properties": {
                    "status": {"type": "string"},
                    "value": {"type": "integer"},
                },
            },
        )

        converted = SamplingTool.from_callable_tool(plain)
        outcome = await converted.run({})

        assert isinstance(outcome, dict)
        assert outcome == {"status": "ok", "value": 42}
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/sampling/test_sampling_tool.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/capabilities.py | """SEP-1686 task capabilities declaration."""
from importlib.util import find_spec
from mcp.types import (
ServerTasksCapability,
ServerTasksRequestsCapability,
TasksCallCapability,
TasksCancelCapability,
TasksListCapability,
TasksToolsCapability,
)
def _is_docket_available() -> bool:
"""Check if pydocket is installed (local to avoid circular import)."""
return find_spec("docket") is not None
def get_task_capabilities() -> ServerTasksCapability | None:
    """Return the SEP-1686 task capabilities, or None without task support.

    Declares support for list, cancel, and request operations per SEP-1686
    as a first-class ServerCapabilities field. When pydocket is not
    installed there is no task support and None is returned.

    Note: prompts/resources are passed via extra_data since the SDK types
    don't include them yet (FastMCP supports them ahead of the spec).
    """
    if _is_docket_available():
        requests = ServerTasksRequestsCapability(
            tools=TasksToolsCapability(call=TasksCallCapability()),
            prompts={"get": {}},  # type: ignore[call-arg] # extra_data for forward compat
            resources={"read": {}},  # type: ignore[call-arg] # extra_data for forward compat
        )
        return ServerTasksCapability(
            list=TasksListCapability(),
            cancel=TasksCancelCapability(),
            requests=requests,
        )
    return None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/capabilities.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/server/event_store.py | """EventStore implementation backed by AsyncKeyValue.
This module provides an EventStore implementation that enables SSE polling/resumability
for Streamable HTTP transports. Events are stored using the key_value package's
AsyncKeyValue protocol, allowing users to configure any compatible backend
(in-memory, Redis, etc.) following the same pattern as ResponseCachingMiddleware.
"""
from __future__ import annotations
from uuid import uuid4
from key_value.aio.adapters.pydantic import PydanticAdapter
from key_value.aio.protocols import AsyncKeyValue
from key_value.aio.stores.memory import MemoryStore
from mcp.server.streamable_http import EventCallback, EventId, EventMessage, StreamId
from mcp.server.streamable_http import EventStore as SDKEventStore
from mcp.types import JSONRPCMessage
from fastmcp.utilities.logging import get_logger
from fastmcp.utilities.types import FastMCPBaseModel
logger = get_logger(__name__)
class EventEntry(FastMCPBaseModel):
    """Stored event entry.

    One entry is persisted per SSE event so it can be replayed when a
    client resumes a stream.
    """

    event_id: str  # unique ID (str(uuid4())) assigned at store time
    stream_id: str  # ID of the stream this event belongs to
    message: dict | None  # JSONRPCMessage serialized to dict; None for priming events
class StreamEventList(FastMCPBaseModel):
    """List of event IDs for a stream.

    Stored once per stream so replay can locate the ordered event IDs
    that follow a given last-event ID.
    """

    event_ids: list[str]  # ordered oldest-to-newest; trimmed to max_events_per_stream
class EventStore(SDKEventStore):
    """EventStore implementation backed by AsyncKeyValue.

    Enables SSE polling/resumability by persisting events so they can be
    replayed when clients reconnect. Works with any AsyncKeyValue backend
    (memory, Redis, etc.), following the same pattern as
    ResponseCachingMiddleware and OAuthProxy.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.event_store import EventStore

        # Default in-memory storage
        event_store = EventStore()

        # Or with a custom backend
        from key_value.aio.stores.redis import RedisStore

        redis_backend = RedisStore(url="redis://localhost")
        event_store = EventStore(storage=redis_backend)

        mcp = FastMCP("MyServer")
        app = mcp.http_app(event_store=event_store, retry_interval=2000)
        ```

    Args:
        storage: AsyncKeyValue backend. Defaults to MemoryStore.
        max_events_per_stream: Maximum events to retain per stream. Default 100.
        ttl: Event TTL in seconds. Default 3600 (1 hour). Set to None for no expiration.
    """

    def __init__(
        self,
        storage: AsyncKeyValue | None = None,
        max_events_per_stream: int = 100,
        ttl: int | None = 3600,
    ):
        backend: AsyncKeyValue = storage or MemoryStore()
        self._storage: AsyncKeyValue = backend
        self._max_events_per_stream = max_events_per_stream
        self._ttl = ttl
        # Typed adapters over the raw backend (following the OAuth proxy
        # pattern): one collection for events, one for per-stream ID lists.
        self._event_store: PydanticAdapter[EventEntry] = PydanticAdapter[EventEntry](
            key_value=backend,
            pydantic_model=EventEntry,
            default_collection="fastmcp_events",
        )
        self._stream_store: PydanticAdapter[StreamEventList] = PydanticAdapter[
            StreamEventList
        ](
            key_value=backend,
            pydantic_model=StreamEventList,
            default_collection="fastmcp_streams",
        )

    async def store_event(
        self, stream_id: StreamId, message: JSONRPCMessage | None
    ) -> EventId:
        """Persist an event and return its generated ID.

        Args:
            stream_id: ID of the stream the event belongs to
            message: The JSON-RPC message to store, or None for priming events

        Returns:
            The generated event ID for the stored event
        """
        new_id = str(uuid4())

        # Write the event itself.
        await self._event_store.put(
            key=new_id,
            value=EventEntry(
                event_id=new_id,
                stream_id=stream_id,
                message=message.model_dump(mode="json") if message else None,
            ),
            ttl=self._ttl,
        )

        # Append to the stream's ordered event-ID list.
        existing = await self._stream_store.get(key=stream_id)
        ids = list(existing.event_ids) if existing else []
        ids.append(new_id)

        # Evict the oldest events once the per-stream cap is exceeded.
        overflow = len(ids) - self._max_events_per_stream
        if overflow > 0:
            for stale_id in ids[:overflow]:
                await self._event_store.delete(key=stale_id)
            ids = ids[overflow:]

        await self._stream_store.put(
            key=stream_id,
            value=StreamEventList(event_ids=ids),
            ttl=self._ttl,
        )
        return new_id

    async def replay_events_after(
        self,
        last_event_id: EventId,
        send_callback: EventCallback,
    ) -> StreamId | None:
        """Re-send every event stored after the specified event ID.

        Args:
            last_event_id: The ID of the last event the client received
            send_callback: A callback function to send events to the client

        Returns:
            The stream ID of the replayed events, or None if the event ID was not found
        """
        # Resolve the anchor event to discover which stream it belongs to.
        anchor = await self._event_store.get(key=last_event_id)
        if not anchor:
            logger.warning(f"Event ID {last_event_id} not found in store")
            return None

        stream_id = anchor.stream_id
        stream_list = await self._stream_store.get(key=stream_id)
        if not stream_list:
            logger.warning(f"Stream {stream_id} not found in store")
            return None

        ordered_ids = stream_list.event_ids
        try:
            resume_from = ordered_ids.index(last_event_id) + 1
        except ValueError:
            logger.warning(f"Event ID {last_event_id} not found in stream {stream_id}")
            return None

        # Deliver everything after the anchor; events with no message
        # (priming events) have nothing to send and are skipped.
        for pending_id in ordered_ids[resume_from:]:
            stored = await self._event_store.get(key=pending_id)
            if stored and stored.message:
                payload = JSONRPCMessage.model_validate(stored.message)
                await send_callback(EventMessage(payload, stored.event_id))

        return stream_id
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/event_store.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/server/test_event_store.py | """Tests for the EventStore implementation."""
import pytest
from mcp.server.streamable_http import EventMessage
from mcp.types import JSONRPCMessage, JSONRPCRequest
from fastmcp.server.event_store import EventEntry, EventStore, StreamEventList
class TestEventEntry:
    def test_event_entry_with_message(self):
        payload = {"jsonrpc": "2.0", "method": "test", "id": 1}
        entry = EventEntry(event_id="event-1", stream_id="stream-1", message=payload)

        assert entry.event_id == "event-1"
        assert entry.stream_id == "stream-1"
        assert entry.message == {"jsonrpc": "2.0", "method": "test", "id": 1}

    def test_event_entry_without_message(self):
        # Priming events carry no message payload.
        entry = EventEntry(event_id="event-1", stream_id="stream-1", message=None)
        assert entry.message is None
class TestStreamEventList:
    def test_stream_event_list(self):
        ids = ["event-1", "event-2", "event-3"]
        assert StreamEventList(event_ids=ids).event_ids == ids

    def test_stream_event_list_empty(self):
        assert StreamEventList(event_ids=[]).event_ids == []
class TestEventStore:
    @pytest.fixture
    def event_store(self):
        # Cap of 5 keeps the trimming tests small; ttl matches the 1-hour default.
        return EventStore(max_events_per_stream=5, ttl=3600)

    @pytest.fixture
    def sample_message(self):
        # Minimal well-formed JSON-RPC request to store as an event payload.
        return JSONRPCMessage(root=JSONRPCRequest(jsonrpc="2.0", method="test", id=1))

    async def test_store_event_returns_event_id(self, event_store, sample_message):
        event_id = await event_store.store_event("stream-1", sample_message)
        assert event_id is not None
        assert isinstance(event_id, str)
        assert len(event_id) > 0

    async def test_store_event_priming_event(self, event_store):
        """Test storing a priming event (message=None)."""
        event_id = await event_store.store_event("stream-1", None)
        assert event_id is not None

    async def test_store_multiple_events(self, event_store, sample_message):
        event_ids = []
        for _ in range(3):
            event_id = await event_store.store_event("stream-1", sample_message)
            event_ids.append(event_id)
        # All event IDs should be unique
        assert len(set(event_ids)) == 3

    async def test_replay_events_after_returns_stream_id(
        self, event_store, sample_message
    ):
        # Store some events
        first_event_id = await event_store.store_event("stream-1", sample_message)
        await event_store.store_event("stream-1", sample_message)
        # Replay events after the first one
        replayed_events: list[EventMessage] = []

        async def callback(event: EventMessage):
            replayed_events.append(event)

        stream_id = await event_store.replay_events_after(first_event_id, callback)
        # Replay is exclusive of the anchor event: only the second event arrives.
        assert stream_id == "stream-1"
        assert len(replayed_events) == 1

    async def test_replay_events_after_skips_priming_events(self, event_store):
        """Priming events (message=None) should not be replayed."""
        # Store a priming event
        priming_id = await event_store.store_event("stream-1", None)
        # Store a real event
        real_message = JSONRPCMessage(
            root=JSONRPCRequest(jsonrpc="2.0", method="test", id=1)
        )
        await event_store.store_event("stream-1", real_message)
        # Replay after priming event
        replayed_events: list[EventMessage] = []

        async def callback(event: EventMessage):
            replayed_events.append(event)

        await event_store.replay_events_after(priming_id, callback)
        # Only the real event should be replayed
        assert len(replayed_events) == 1

    async def test_replay_events_after_unknown_event_id(self, event_store):
        replayed_events: list[EventMessage] = []

        async def callback(event: EventMessage):
            replayed_events.append(event)

        # An ID that was never stored yields None and delivers nothing.
        result = await event_store.replay_events_after("unknown-event-id", callback)
        assert result is None
        assert len(replayed_events) == 0

    async def test_max_events_per_stream_trims_old_events(self, event_store):
        """Test that old events are trimmed when max_events_per_stream is exceeded."""
        # Store more events than the limit
        event_ids = []
        for i in range(7):
            msg = JSONRPCMessage(
                root=JSONRPCRequest(jsonrpc="2.0", method=f"test-{i}", id=i)
            )
            event_id = await event_store.store_event("stream-1", msg)
            event_ids.append(event_id)
        # The first 2 events should have been trimmed (7 - 5 = 2)
        # Trying to replay from the first event should fail
        replayed_events: list[EventMessage] = []

        async def callback(event: EventMessage):
            replayed_events.append(event)

        result = await event_store.replay_events_after(event_ids[0], callback)
        assert result is None  # First event was trimmed
        # But replaying from a more recent event should work
        result = await event_store.replay_events_after(event_ids[3], callback)
        assert result == "stream-1"

    async def test_multiple_streams_are_isolated(self, event_store):
        """Events from different streams should not interfere with each other."""
        msg1 = JSONRPCMessage(
            root=JSONRPCRequest(jsonrpc="2.0", method="stream1-test", id=1)
        )
        msg2 = JSONRPCMessage(
            root=JSONRPCRequest(jsonrpc="2.0", method="stream2-test", id=2)
        )
        stream1_event = await event_store.store_event("stream-1", msg1)
        await event_store.store_event("stream-1", msg1)
        stream2_event = await event_store.store_event("stream-2", msg2)
        await event_store.store_event("stream-2", msg2)
        # Replay stream 1
        stream1_replayed: list[EventMessage] = []

        async def callback1(event: EventMessage):
            stream1_replayed.append(event)

        stream_id = await event_store.replay_events_after(stream1_event, callback1)
        assert stream_id == "stream-1"
        assert len(stream1_replayed) == 1
        # Replay stream 2
        stream2_replayed: list[EventMessage] = []

        async def callback2(event: EventMessage):
            stream2_replayed.append(event)

        stream_id = await event_store.replay_events_after(stream2_event, callback2)
        assert stream_id == "stream-2"
        assert len(stream2_replayed) == 1

    async def test_default_storage_is_memory(self):
        """Test that EventStore defaults to in-memory storage."""
        event_store = EventStore()
        msg = JSONRPCMessage(root=JSONRPCRequest(jsonrpc="2.0", method="test", id=1))
        event_id = await event_store.store_event("stream-1", msg)
        assert event_id is not None
        replayed: list[EventMessage] = []

        async def callback(event: EventMessage):
            replayed.append(event)

        # Store another event and replay
        await event_store.store_event("stream-1", msg)
        await event_store.replay_events_after(event_id, callback)
        assert len(replayed) == 1
class TestEventStoreIntegration:
    """Integration tests exercising EventStore with realistic message types."""

    async def test_roundtrip_jsonrpc_message(self):
        store = EventStore()

        def make_call(req_id: str, arguments: dict) -> JSONRPCMessage:
            # Build a realistic tools/call request wrapped in JSONRPCMessage.
            return JSONRPCMessage(
                root=JSONRPCRequest(
                    jsonrpc="2.0",
                    method="tools/call",
                    id=req_id,
                    params={"name": "my_tool", "arguments": arguments},
                )
            )

        anchor_id = await store.store_event(
            "stream-1", make_call("request-123", {"x": 1, "y": 2})
        )
        # A second event gives the replay something to deliver.
        await store.store_event("stream-1", make_call("request-456", {"x": 3, "y": 4}))

        replayed: list[EventMessage] = []

        async def collect(event: EventMessage):
            replayed.append(event)

        await store.replay_events_after(anchor_id, collect)

        assert len(replayed) == 1
        root = replayed[0].message.root
        assert isinstance(root, JSONRPCRequest)
        assert root.method == "tools/call"
        assert root.id == "request-456"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_event_store.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_mount.py | """
Tests for MCP SEP-1686 task protocol support through mounted servers.
Verifies that tasks work seamlessly when calling tools/prompts/resources
on mounted child servers through a parent server.
"""
import asyncio
import mcp.types as mt
import pytest
from docket import Docket
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.prompts.prompt import PromptResult
from fastmcp.resources.resource import ResourceResult
from fastmcp.server.dependencies import CurrentDocket, CurrentFastMCP
from fastmcp.server.middleware import CallNext, Middleware, MiddlewareContext
from fastmcp.server.tasks import TaskConfig
from fastmcp.tools.tool import ToolResult
@pytest.fixture(autouse=True)
def reset_docket_memory_server():
    """Reset the shared Docket memory server around every test.

    Docket keeps a class-level FakeServer instance for memory:// URLs that
    survives between tests and breaks isolation, so the shared state is
    cleared both before and after each test.
    """

    def _clear_shared_server() -> None:
        # Drop the class-level FakeServer attribute if one was created.
        if hasattr(Docket, "_memory_server"):
            delattr(Docket, "_memory_server")

    _clear_shared_server()
    yield
    _clear_shared_server()
@pytest.fixture
def child_server():
    """Create a child server with task-enabled components."""
    mcp = FastMCP("child-server")

    # NOTE: the nested docstrings below double as the tools'/prompts'/resources'
    # runtime descriptions, so they are part of the server's observable surface.

    # Task-capable tool whose result is asserted by the mounted-tool tests.
    @mcp.tool(task=True)
    async def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    # Slow tool used for status polling and cancellation tests.
    @mcp.tool(task=True)
    async def slow_child_tool(duration: float = 0.1) -> str:
        """A child tool that takes time to execute."""
        await asyncio.sleep(duration)
        return "child completed"

    # Explicitly sync-only: used to test graceful degradation with task=True.
    @mcp.tool(task=False)
    async def sync_child_tool(message: str) -> str:
        """Child tool that only supports synchronous execution."""
        return f"child sync: {message}"

    @mcp.prompt(task=True)
    async def child_prompt(topic: str) -> str:
        """A child prompt that can execute as a task."""
        return f"Here is information about {topic} from the child server."

    # Static resource with a fixed URI.
    @mcp.resource("child://data.txt", task=True)
    async def child_resource() -> str:
        """A child resource that can be read as a task."""
        return "Data from child server"

    # Parameterized resource template ({item_id} is filled from the URI).
    @mcp.resource("child://item/{item_id}.json", task=True)
    async def child_item_resource(item_id: str) -> str:
        """A child resource template that can execute as a task."""
        return f'{{"itemId": "{item_id}", "source": "child"}}'

    return mcp
@pytest.fixture
def parent_server(child_server):
    """Create a parent server with the child mounted."""
    parent = FastMCP("parent-server")

    @parent.tool(task=True)
    async def parent_tool(value: int) -> int:
        """A tool on the parent server."""
        return value * 10

    # Mount child with prefix; tests address child components via the
    # "child" namespace (e.g. the child_multiply tool name).
    parent.mount(child_server, namespace="child")
    return parent
@pytest.fixture
def parent_server_no_prefix(child_server):
    """Create a parent server with child mounted without prefix."""
    parent = FastMCP("parent-no-prefix")
    # No prefix: child components keep their original names (e.g. "multiply").
    parent.mount(child_server)  # No prefix
    return parent
class TestMountedToolTasks:
    """Task execution through tools on a mounted child server."""

    async def test_mounted_tool_task_returns_task_object(self, parent_server):
        """Calling a prefixed child tool with task=True yields a task handle."""
        async with Client(parent_server) as client:
            # Mounted tools are addressed by their prefixed name.
            task = await client.call_tool("child_multiply", {"a": 6, "b": 7}, task=True)

            assert task is not None
            assert hasattr(task, "task_id")
            assert isinstance(task.task_id, str)
            assert task.task_id != ""

    async def test_mounted_tool_task_executes_in_background(self, parent_server):
        """The child tool call is deferred rather than run inline."""
        async with Client(parent_server) as client:
            task = await client.call_tool("child_multiply", {"a": 3, "b": 4}, task=True)
            assert not task.returned_immediately

    async def test_mounted_tool_task_returns_correct_result(
        self, parent_server: FastMCP
    ):
        """The background result matches the child tool's computation."""
        async with Client(parent_server) as client:
            task = await client.call_tool("child_multiply", {"a": 8, "b": 9}, task=True)
            assert (await task.result()).data == 72

    async def test_mounted_tool_task_status(self, parent_server):
        """Status polling works for a mounted tool task."""
        async with Client(parent_server) as client:
            task = await client.call_tool(
                "child_slow_child_tool", {"duration": 0.5}, task=True
            )

            # While in flight the task is either still working or already done.
            in_flight = await task.status()
            assert in_flight.status in ["working", "completed"]

            await task.wait(timeout=2.0)

            finished = await task.status()
            assert finished.status == "completed"

    async def test_mounted_tool_task_cancellation(self, parent_server):
        """A long-running mounted tool task can be cancelled."""
        async with Client(parent_server) as client:
            task = await client.call_tool(
                "child_slow_child_tool", {"duration": 10.0}, task=True
            )
            # Give the task a moment to start before cancelling.
            await asyncio.sleep(0.1)
            await task.cancel()

            assert (await task.status()).status == "cancelled"

    async def test_graceful_degradation_sync_mounted_tool(self, parent_server):
        """task=True on a sync-only child tool degrades to an immediate error."""
        async with Client(parent_server) as client:
            task = await client.call_tool(
                "child_sync_child_tool", {"message": "hello"}, task=True
            )

            assert task.returned_immediately
            outcome = await task.result()
            assert outcome.is_error

    async def test_parent_and_mounted_tools_both_work(self, parent_server):
        """Parent-local and mounted tools can both run as tasks."""
        async with Client(parent_server) as client:
            parent_task = await client.call_tool("parent_tool", {"value": 5}, task=True)
            child_task = await client.call_tool(
                "child_multiply", {"a": 2, "b": 3}, task=True
            )

            assert (await parent_task.result()).data == 50
            assert (await child_task.result()).data == 6
class TestMountedToolTasksNoPrefix:
    """Task execution through tools mounted without a namespace prefix."""

    async def test_mounted_tool_without_prefix_task_works(
        self, parent_server_no_prefix
    ):
        """An unprefixed mounted tool keeps its original name and runs as a task."""
        async with Client(parent_server_no_prefix) as client:
            task = await client.call_tool("multiply", {"a": 5, "b": 6}, task=True)

            assert not task.returned_immediately
            assert (await task.result()).data == 30
class TestMountedPromptTasks:
    """Task execution through prompts on a mounted child server."""

    async def test_mounted_prompt_task_returns_task_object(self, parent_server):
        """Getting a prefixed child prompt with task=True yields a task handle."""
        async with Client(parent_server) as client:
            # Prompt names carry the mount prefix: child_child_prompt.
            task = await client.get_prompt(
                "child_child_prompt", {"topic": "FastMCP"}, task=True
            )

            assert task is not None
            assert hasattr(task, "task_id")
            assert isinstance(task.task_id, str)

    async def test_mounted_prompt_task_executes_in_background(self, parent_server):
        """The prompt render is deferred rather than run inline."""
        async with Client(parent_server) as client:
            task = await client.get_prompt(
                "child_child_prompt", {"topic": "testing"}, task=True
            )
            assert not task.returned_immediately

    async def test_mounted_prompt_task_returns_correct_result(
        self, parent_server: FastMCP
    ):
        """The rendered prompt text reflects the child server's template."""
        async with Client(parent_server) as client:
            task = await client.get_prompt(
                "child_child_prompt", {"topic": "MCP protocol"}, task=True
            )
            rendered = await task.result()

            first_message_text = rendered.messages[0].content.text
            assert "MCP protocol" in first_message_text
            assert "child server" in first_message_text
class TestMountedResourceTasks:
    """Task execution through resources on a mounted child server."""

    async def test_mounted_resource_task_returns_task_object(self, parent_server):
        """Reading a prefixed child resource with task=True yields a task handle."""
        async with Client(parent_server) as client:
            # Resource URIs carry the mount prefix: child://child/...
            task = await client.read_resource("child://child/data.txt", task=True)

            assert task is not None
            assert hasattr(task, "task_id")
            assert isinstance(task.task_id, str)

    async def test_mounted_resource_task_executes_in_background(self, parent_server):
        """The resource read is deferred rather than run inline."""
        async with Client(parent_server) as client:
            task = await client.read_resource("child://child/data.txt", task=True)
            assert not task.returned_immediately

    async def test_mounted_resource_task_returns_correct_result(self, parent_server):
        """The background read returns the child resource's contents."""
        async with Client(parent_server) as client:
            task = await client.read_resource("child://child/data.txt", task=True)
            contents = await task.result()

            assert len(contents) > 0
            assert "Data from child server" in contents[0].text

    async def test_mounted_resource_template_task(self, parent_server):
        """Parameterized resource templates also execute as tasks."""
        async with Client(parent_server) as client:
            task = await client.read_resource("child://child/item/99.json", task=True)
            assert not task.returned_immediately

            body = (await task.result())[0].text
            assert '"itemId": "99"' in body
            assert '"source": "child"' in body
class TestMountedTaskDependencies:
    """Test that dependencies work correctly in mounted task execution.

    Tools declare dependencies via sentinel default values (CurrentDocket,
    CurrentFastMCP); the framework replaces them with live objects when the
    task executes in the worker.
    """
    async def test_mounted_task_receives_docket_dependency(self):
        """Mounted tool task receives CurrentDocket dependency."""
        child = FastMCP("dep-child")
        # Captures the injected value so it can be inspected after the task runs.
        received_docket = []
        # The CurrentDocket() default is a dependency-injection sentinel, not a
        # real default value; the type: ignore silences the checker for that.
        @child.tool(task=True)
        async def tool_with_docket(docket: CurrentDocket = CurrentDocket()) -> str: # type: ignore[invalid-type-form]
            received_docket.append(docket)
            return f"docket available: {docket is not None}"
        parent = FastMCP("dep-parent")
        parent.mount(child, namespace="child")
        async with Client(parent) as client:
            # Tool name is namespaced by the mount: child_<function name>.
            task = await client.call_tool("child_tool_with_docket", {}, task=True)
            result = await task.result()
            assert "docket available: True" in str(result)
            assert len(received_docket) == 1
            assert received_docket[0] is not None
    async def test_mounted_task_receives_server_dependency(self):
        """Mounted tool task receives CurrentFastMCP dependency."""
        child = FastMCP("server-dep-child")
        received_server = []
        @child.tool(task=True)
        async def tool_with_server(server: CurrentFastMCP = CurrentFastMCP()) -> str: # type: ignore[invalid-type-form]
            received_server.append(server)
            return f"server name: {server.name}"
        parent = FastMCP("server-dep-parent")
        parent.mount(child, namespace="child")
        async with Client(parent) as client:
            task = await client.call_tool("child_tool_with_server", {}, task=True)
            await task.result()
            # The server should be the child server since that's where the tool is defined
            assert len(received_server) == 1
            # Note: It might be parent or child depending on implementation
            assert received_server[0] is not None
class TestMultipleMounts:
    """Task execution across several servers mounted on one parent."""

    async def test_tasks_work_with_multiple_mounts(self):
        """Each mounted server's tools run as tasks under their own prefix."""
        child1 = FastMCP("child1")
        child2 = FastMCP("child2")

        @child1.tool(task=True)
        async def add(a: int, b: int) -> int:
            return a + b

        @child2.tool(task=True)
        async def subtract(a: int, b: int) -> int:
            return a - b

        parent = FastMCP("multi-parent")
        parent.mount(child1, namespace="math1")
        parent.mount(child2, namespace="math2")

        async with Client(parent) as client:
            # Both namespaced tools run concurrently as independent tasks.
            add_task = await client.call_tool("math1_add", {"a": 10, "b": 5}, task=True)
            sub_task = await client.call_tool(
                "math2_subtract", {"a": 10, "b": 5}, task=True
            )
            assert (await add_task.result()).data == 15
            assert (await sub_task.result()).data == 5
class TestMountedFunctionNameCollisions:
    """Test task execution when mounted servers have identically-named functions.

    Tool identity is the (possibly prefixed) registered name, not the Python
    function name, so same-named functions on different mounts must stay
    independent when prefixed — and overwrite each other when unprefixed.
    """
    async def test_multiple_mounts_with_same_function_names(self):
        """Two mounted servers with identically-named functions don't collide."""
        child1 = FastMCP("child1")
        child2 = FastMCP("child2")
        @child1.tool(task=True)
        async def process(value: int) -> int:
            return value * 2 # Double
        # Same function name on a second server; F811 (redefinition) is
        # intentional — each server keeps its own registration.
        @child2.tool(task=True)
        async def process(value: int) -> int: # noqa: F811
            return value * 3 # Triple
        parent = FastMCP("parent")
        parent.mount(child1, namespace="c1")
        parent.mount(child2, namespace="c2")
        async with Client(parent) as client:
            # Both should execute their own implementation
            task1 = await client.call_tool("c1_process", {"value": 10}, task=True)
            task2 = await client.call_tool("c2_process", {"value": 10}, task=True)
            result1 = await task1.result()
            result2 = await task2.result()
            assert result1.data == 20 # child1's process (doubles)
            assert result2.data == 30 # child2's process (triples)
    async def test_no_prefix_mount_collision(self):
        """No-prefix mounts with same tool name - last mount wins."""
        child1 = FastMCP("child1")
        child2 = FastMCP("child2")
        @child1.tool(task=True)
        async def process(value: int) -> int:
            return value * 2
        @child2.tool(task=True)
        async def process(value: int) -> int: # noqa: F811
            return value * 3
        parent = FastMCP("parent")
        parent.mount(child1) # No prefix
        parent.mount(child2) # No prefix - overwrites child1's "process"
        async with Client(parent) as client:
            # Last mount wins - child2's process should execute
            task = await client.call_tool("process", {"value": 10}, task=True)
            result = await task.result()
            assert result.data == 30 # child2's process (triples)
    async def test_nested_mount_prefix_accumulation(self):
        """Nested mounts accumulate prefixes correctly for tasks."""
        grandchild = FastMCP("gc")
        child = FastMCP("child")
        parent = FastMCP("parent")
        @grandchild.tool(task=True)
        async def deep_tool() -> str:
            return "deep"
        # Each mount layer prepends its namespace: parent adds "child_",
        # child adds "gc_", yielding child_gc_deep_tool.
        child.mount(grandchild, namespace="gc")
        parent.mount(child, namespace="child")
        async with Client(parent) as client:
            # Tool should be accessible and execute correctly
            task = await client.call_tool("child_gc_deep_tool", {}, task=True)
            result = await task.result()
            assert result.data == "deep"
class TestMountedTaskList:
    """Task listing when some tasks originate from mounted servers."""

    async def test_list_tasks_includes_mounted_tasks(self, parent_server):
        """list_tasks reports tasks from both parent and mounted tools."""
        async with Client(parent_server) as client:
            # Start one task on a parent-level tool and one on a mounted tool.
            local_task = await client.call_tool("parent_tool", {"value": 1}, task=True)
            mounted_task = await client.call_tool(
                "child_multiply", {"a": 2, "b": 2}, task=True
            )
            # Let both finish before asking for the listing.
            await local_task.wait(timeout=2.0)
            await mounted_task.wait(timeout=2.0)
            # The response is a dict whose "tasks" entry holds task records.
            listing = await client.list_tasks()
            listed_ids = {entry["taskId"] for entry in listing["tasks"]}
            assert local_task.task_id in listed_ids
            assert mounted_task.task_id in listed_ids
class TestMountedTaskConfigModes:
    """Test TaskConfig mode enforcement for mounted tools.

    Covers all three SEP-1686 task modes ("optional", "required",
    "forbidden") when the tool is reached through a mount prefix.
    """
    @pytest.fixture
    def child_with_modes(self):
        """Create a child server with tools in all three TaskConfig modes."""
        # NOTE(review): tasks=False appears to disable server-wide task
        # defaults so each tool's explicit TaskConfig governs — confirm
        # against the FastMCP constructor docs.
        mcp = FastMCP("child-modes", tasks=False)
        @mcp.tool(task=TaskConfig(mode="optional"))
        async def optional_tool() -> str:
            """Tool that supports both sync and task execution."""
            return "optional result"
        @mcp.tool(task=TaskConfig(mode="required"))
        async def required_tool() -> str:
            """Tool that requires task execution."""
            return "required result"
        @mcp.tool(task=TaskConfig(mode="forbidden"))
        async def forbidden_tool() -> str:
            """Tool that forbids task execution."""
            return "forbidden result"
        return mcp
    @pytest.fixture
    def parent_with_modes(self, child_with_modes):
        """Create a parent server with the child mounted."""
        parent = FastMCP("parent-modes")
        parent.mount(child_with_modes, namespace="child")
        return parent
    async def test_optional_mode_sync_through_mount(self, parent_with_modes):
        """Optional mode tool works without task through mount."""
        async with Client(parent_with_modes) as client:
            result = await client.call_tool("child_optional_tool", {})
            assert "optional result" in str(result)
    async def test_optional_mode_task_through_mount(self, parent_with_modes):
        """Optional mode tool works with task through mount."""
        async with Client(parent_with_modes) as client:
            task = await client.call_tool("child_optional_tool", {}, task=True)
            assert task is not None
            result = await task.result()
            assert result.data == "optional result"
    async def test_required_mode_with_task_through_mount(self, parent_with_modes):
        """Required mode tool succeeds with task through mount."""
        async with Client(parent_with_modes) as client:
            task = await client.call_tool("child_required_tool", {}, task=True)
            assert task is not None
            result = await task.result()
            assert result.data == "required result"
    async def test_required_mode_without_task_through_mount(self, parent_with_modes):
        """Required mode tool errors without task through mount."""
        from fastmcp.exceptions import ToolError
        async with Client(parent_with_modes) as client:
            with pytest.raises(ToolError) as exc_info:
                await client.call_tool("child_required_tool", {})
            assert "requires task-augmented execution" in str(exc_info.value)
    async def test_forbidden_mode_sync_through_mount(self, parent_with_modes):
        """Forbidden mode tool works without task through mount."""
        async with Client(parent_with_modes) as client:
            result = await client.call_tool("child_forbidden_tool", {})
            assert "forbidden result" in str(result)
    async def test_forbidden_mode_with_task_through_mount(self, parent_with_modes):
        """Forbidden mode tool degrades gracefully with task through mount."""
        async with Client(parent_with_modes) as client:
            task = await client.call_tool("child_forbidden_tool", {}, task=True)
            # Should return immediately (graceful degradation)
            assert task.returned_immediately
            result = await task.result()
            # Result is available but may indicate error or sync execution
            assert result is not None
# -----------------------------------------------------------------------------
# Middleware classes for tracing tests
# -----------------------------------------------------------------------------
class ToolTracingMiddleware(Middleware):
    """Appends '<name>:before' / '<name>:after' markers around tool calls."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        # Shared trace list lets tests interleave markers from several layers.
        self._label = name
        self._trace = calls

    async def on_call_tool(
        self,
        context: MiddlewareContext[mt.CallToolRequestParams],
        call_next: CallNext[mt.CallToolRequestParams, ToolResult],
    ) -> ToolResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class ResourceTracingMiddleware(Middleware):
    """Appends '<name>:before' / '<name>:after' markers around resource reads."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        # Shared trace list lets tests interleave markers from several layers.
        self._label = name
        self._trace = calls

    async def on_read_resource(
        self,
        context: MiddlewareContext[mt.ReadResourceRequestParams],
        call_next: CallNext[mt.ReadResourceRequestParams, ResourceResult],
    ) -> ResourceResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class PromptTracingMiddleware(Middleware):
    """Appends '<name>:before' / '<name>:after' markers around prompt gets."""

    def __init__(self, name: str, calls: list[str]):
        super().__init__()
        # Shared trace list lets tests interleave markers from several layers.
        self._label = name
        self._trace = calls

    async def on_get_prompt(
        self,
        context: MiddlewareContext[mt.GetPromptRequestParams],
        call_next: CallNext[mt.GetPromptRequestParams, PromptResult],
    ) -> PromptResult:
        self._trace.append(f"{self._label}:before")
        outcome = await call_next(context)
        self._trace.append(f"{self._label}:after")
        return outcome
class TestMiddlewareWithMountedTasks:
    """Test that middleware runs at all levels when executing background tasks.
    For background tasks, middleware runs during task submission (wrapping the MCP
    request handling that queues to Docket). The actual function execution happens
    later in the Docket worker, after the middleware chain completes.
    """
    async def test_tool_middleware_runs_with_background_task(self):
        """Middleware runs at parent, child, and grandchild levels for tool tasks."""
        # One shared list so markers from all three layers interleave in order.
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.tool(task=True)
        async def compute(x: int) -> int:
            calls.append("grandchild:tool")
            return x * 2
        grandchild.add_middleware(ToolTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ToolTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ToolTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            task = await client.call_tool("c_gc_compute", {"x": 5}, task=True)
            result = await task.result()
            assert result.data == 10
            # Middleware runs during task submission (before/after queuing to Docket)
            # Function executes later in Docket worker
            assert calls == [
                "parent:before",
                "child:before",
                "grandchild:before",
                "grandchild:after",
                "child:after",
                "parent:after",
                "grandchild:tool", # Executes in Docket after middleware completes
            ]
    async def test_resource_middleware_runs_with_background_task(self):
        """Middleware runs at parent, child, and grandchild levels for resource tasks."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.resource("data://value", task=True)
        async def get_data() -> str:
            calls.append("grandchild:resource")
            return "result"
        grandchild.add_middleware(ResourceTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ResourceTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ResourceTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            # Mount namespaces are inserted into the URI path, not the scheme.
            task = await client.read_resource("data://c/gc/value", task=True)
            result = await task.result()
            assert result[0].text == "result"
            # Middleware runs during task submission, function in Docket
            assert calls == [
                "parent:before",
                "child:before",
                "grandchild:before",
                "grandchild:after",
                "child:after",
                "parent:after",
                "grandchild:resource",
            ]
    async def test_prompt_middleware_runs_with_background_task(self):
        """Middleware runs at parent, child, and grandchild levels for prompt tasks."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.prompt(task=True)
        async def greet(name: str) -> str:
            calls.append("grandchild:prompt")
            return f"Hello, {name}!"
        grandchild.add_middleware(PromptTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(PromptTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(PromptTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            task = await client.get_prompt("c_gc_greet", {"name": "World"}, task=True)
            result = await task.result()
            assert result.messages[0].content.text == "Hello, World!"
            # Middleware runs during task submission, function in Docket
            assert calls == [
                "parent:before",
                "child:before",
                "grandchild:before",
                "grandchild:after",
                "child:after",
                "parent:after",
                "grandchild:prompt",
            ]
    async def test_resource_template_middleware_runs_with_background_task(self):
        """Middleware runs at all levels for resource template tasks."""
        calls: list[str] = []
        grandchild = FastMCP("Grandchild")
        @grandchild.resource("item://{id}", task=True)
        async def get_item(id: str) -> str:
            calls.append("grandchild:template")
            return f"item-{id}"
        grandchild.add_middleware(ResourceTracingMiddleware("grandchild", calls))
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        child.add_middleware(ResourceTracingMiddleware("child", calls))
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        parent.add_middleware(ResourceTracingMiddleware("parent", calls))
        async with Client(parent) as client:
            task = await client.read_resource("item://c/gc/42", task=True)
            result = await task.result()
            assert result[0].text == "item-42"
            # Middleware runs during task submission, function in Docket
            assert calls == [
                "parent:before",
                "child:before",
                "grandchild:before",
                "grandchild:after",
                "child:after",
                "parent:after",
                "grandchild:template",
            ]
class TestMountedTasksWithTaskMetaParameter:
    """Test mounted components called directly with task_meta parameter.
    These tests verify the programmatic API where server.call_tool() or
    server.read_resource() is called with an explicit task_meta parameter,
    as opposed to using the Client with task=True.
    Direct server calls require a running server context, so we use an outer
    tool that makes the direct call internally.
    """
    async def test_mounted_tool_with_task_meta_creates_task(self):
        """Mounted tool called with task_meta returns CreateTaskResult."""
        from fastmcp.server.tasks.config import TaskMeta
        child = FastMCP("Child")
        @child.tool(task=True)
        async def add(a: int, b: int) -> int:
            return a + b
        parent = FastMCP("Parent")
        parent.mount(child, namespace="child")
        # The outer tool closes over `parent` so the direct call happens
        # inside a live request context.
        @parent.tool
        async def outer() -> str:
            # Direct call with task_meta from within server context
            result = await parent.call_tool(
                "child_add", {"a": 2, "b": 3}, task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_mounted_resource_with_task_meta_creates_task(self):
        """Mounted resource called with task_meta returns CreateTaskResult."""
        from fastmcp.server.tasks.config import TaskMeta
        child = FastMCP("Child")
        @child.resource("data://info", task=True)
        async def get_info() -> str:
            return "child info"
        parent = FastMCP("Parent")
        parent.mount(child, namespace="child")
        @parent.tool
        async def outer() -> str:
            result = await parent.read_resource(
                "data://child/info", task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_mounted_template_with_task_meta_creates_task(self):
        """Mounted resource template with task_meta returns CreateTaskResult."""
        from fastmcp.server.tasks.config import TaskMeta
        child = FastMCP("Child")
        @child.resource("item://{id}", task=True)
        async def get_item(id: str) -> str:
            return f"item-{id}"
        parent = FastMCP("Parent")
        parent.mount(child, namespace="child")
        @parent.tool
        async def outer() -> str:
            result = await parent.read_resource(
                "item://child/42", task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_deeply_nested_tool_with_task_meta(self):
        """Three-level nested tool works with task_meta."""
        from fastmcp.server.tasks.config import TaskMeta
        grandchild = FastMCP("Grandchild")
        @grandchild.tool(task=True)
        async def compute(n: int) -> int:
            return n * 3
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        @parent.tool
        async def outer() -> str:
            # Prefixes accumulate through both mounts: c_ then gc_.
            result = await parent.call_tool(
                "c_gc_compute", {"n": 7}, task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_deeply_nested_template_with_task_meta(self):
        """Three-level nested template works with task_meta."""
        from fastmcp.server.tasks.config import TaskMeta
        grandchild = FastMCP("Grandchild")
        @grandchild.resource("doc://{name}", task=True)
        async def get_doc(name: str) -> str:
            return f"doc: {name}"
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        @parent.tool
        async def outer() -> str:
            result = await parent.read_resource(
                "doc://c/gc/readme", task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_mounted_prompt_with_task_meta_creates_task(self):
        """Mounted prompt called with task_meta returns CreateTaskResult."""
        from fastmcp.server.tasks.config import TaskMeta
        child = FastMCP("Child")
        @child.prompt(task=True)
        async def greet(name: str) -> str:
            return f"Hello, {name}!"
        parent = FastMCP("Parent")
        parent.mount(child, namespace="child")
        @parent.tool
        async def outer() -> str:
            result = await parent.render_prompt(
                "child_greet", {"name": "World"}, task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
    async def test_deeply_nested_prompt_with_task_meta(self):
        """Three-level nested prompt works with task_meta."""
        from fastmcp.server.tasks.config import TaskMeta
        grandchild = FastMCP("Grandchild")
        @grandchild.prompt(task=True)
        async def describe(topic: str) -> str:
            return f"Information about {topic}"
        child = FastMCP("Child")
        child.mount(grandchild, namespace="gc")
        parent = FastMCP("Parent")
        parent.mount(child, namespace="c")
        @parent.tool
        async def outer() -> str:
            result = await parent.render_prompt(
                "c_gc_describe", {"topic": "FastMCP"}, task_meta=TaskMeta(ttl=300)
            )
            return f"task:{result.task.taskId}"
        async with Client(parent) as client:
            result = await client.call_tool("outer", {})
            assert "task:" in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_mount.py",
"license": "Apache License 2.0",
"lines": 735,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_proxy.py | """
Tests for MCP SEP-1686 task protocol behavior through proxy servers.
Proxy servers explicitly forbid task-augmented execution. All proxy components
(tools, prompts, resources) have task_config.mode="forbidden".
Clients connecting through proxies can:
- Execute tools/prompts/resources normally (sync execution)
- NOT use task-augmented execution (task=True fails gracefully for tools,
raises McpError for prompts/resources)
"""
import pytest
from mcp.shared.exceptions import McpError
from mcp.types import TextContent, TextResourceContents
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.transports import FastMCPTransport
from fastmcp.server.providers.proxy import ProxyClient
@pytest.fixture
def backend_server() -> FastMCP:
    """Create a backend server with task-enabled components.
    The backend has tasks enabled, but the proxy should NOT forward
    task execution - it should treat all components as forbidden.
    """
    mcp = FastMCP("backend-server")
    # task=True: tool supports both sync and background execution on the
    # backend itself (the proxy is expected to strip this capability).
    @mcp.tool(task=True)
    async def add_numbers(a: int, b: int) -> int:
        """Add two numbers together."""
        return a + b
    # task=False: sync-only even when called on the backend directly.
    @mcp.tool(task=False)
    async def sync_only_tool(message: str) -> str:
        """Tool that only supports synchronous execution."""
        return f"sync: {message}"
    @mcp.prompt(task=True)
    async def greeting_prompt(name: str) -> str:
        """A prompt that can execute as a task."""
        return f"Hello, {name}! Welcome to the system."
    @mcp.resource("data://info.txt", task=True)
    async def info_resource() -> str:
        """A resource that can be read as a task."""
        return "Important information from the backend"
    # Template resource: {user_id} is filled in from the requested URI.
    @mcp.resource("data://user/{user_id}.json", task=True)
    async def user_resource(user_id: str) -> str:
        """A resource template that can execute as a task."""
        return f'{{"id": "{user_id}", "name": "User {user_id}"}}'
    return mcp
@pytest.fixture
def proxy_server(backend_server: FastMCP) -> FastMCP:
    """Build a proxy FastMCP server that fronts the backend fixture."""
    transport = FastMCPTransport(backend_server)
    return FastMCP.as_proxy(ProxyClient(transport=transport))
class TestProxyToolsSyncExecution:
    """Synchronous tool calls succeed when routed through the proxy."""

    async def test_tool_sync_execution_works(self, proxy_server: FastMCP):
        """Calling a tool without task=True is forwarded to the backend."""
        async with Client(proxy_server) as session:
            outcome = await session.call_tool("add_numbers", {"a": 5, "b": 3})
            assert "8" in str(outcome)

    async def test_sync_only_tool_works(self, proxy_server: FastMCP):
        """A sync-only backend tool is also reachable through the proxy."""
        async with Client(proxy_server) as session:
            outcome = await session.call_tool("sync_only_tool", {"message": "test"})
            assert "sync: test" in str(outcome)
class TestProxyToolsTaskForbidden:
    """task=True tool calls degrade to an immediate error through the proxy."""

    async def test_tool_task_returns_error_immediately(self, proxy_server: FastMCP):
        """A task-augmented tool call comes back at once as an error."""
        async with Client(proxy_server) as session:
            handle = await session.call_tool("add_numbers", {"a": 5, "b": 3}, task=True)
            # Forbidden mode: nothing is queued, the response is immediate.
            assert handle.returned_immediately
            outcome = await handle.result()
            assert outcome.is_error

    async def test_sync_only_tool_task_returns_error_immediately(
        self, proxy_server: FastMCP
    ):
        """The same immediate-error behavior applies to sync-only tools."""
        async with Client(proxy_server) as session:
            handle = await session.call_tool(
                "sync_only_tool", {"message": "test"}, task=True
            )
            assert handle.returned_immediately
            outcome = await handle.result()
            assert outcome.is_error
class TestProxyPromptsSyncExecution:
    """Prompts render normally through the proxy without task augmentation."""

    async def test_prompt_sync_execution_works(self, proxy_server: FastMCP):
        """A plain get_prompt call is forwarded and rendered by the backend."""
        async with Client(proxy_server) as session:
            rendered = await session.get_prompt("greeting_prompt", {"name": "Alice"})
            first = rendered.messages[0].content
            assert isinstance(first, TextContent)
            assert "Hello, Alice!" in first.text
class TestProxyPromptsTaskForbidden:
    """task=True prompt requests are rejected outright by the proxy."""

    async def test_prompt_task_raises_mcp_error(self, proxy_server: FastMCP):
        """A task-augmented get_prompt raises McpError with a clear message."""
        async with Client(proxy_server) as session:
            with pytest.raises(McpError) as exc_info:
                await session.get_prompt("greeting_prompt", {"name": "Alice"}, task=True)
            assert "does not support task-augmented execution" in str(exc_info.value)
class TestProxyResourcesSyncExecution:
    """Resources read normally through the proxy without task augmentation."""

    async def test_resource_sync_execution_works(self, proxy_server: FastMCP):
        """A static resource is readable through the proxy."""
        async with Client(proxy_server) as session:
            contents = await session.read_resource("data://info.txt")
            first = contents[0]
            assert isinstance(first, TextResourceContents)
            assert "Important information from the backend" in first.text

    async def test_resource_template_sync_execution_works(self, proxy_server: FastMCP):
        """A templated resource is readable through the proxy."""
        async with Client(proxy_server) as session:
            contents = await session.read_resource("data://user/42.json")
            first = contents[0]
            assert isinstance(first, TextResourceContents)
            assert '"id": "42"' in first.text
class TestProxyResourcesTaskForbidden:
    """task=True resource reads are rejected outright by the proxy."""

    async def test_resource_task_raises_mcp_error(self, proxy_server: FastMCP):
        """A task-augmented read of a static resource raises McpError."""
        async with Client(proxy_server) as session:
            with pytest.raises(McpError) as exc_info:
                await session.read_resource("data://info.txt", task=True)
            assert "does not support task-augmented execution" in str(exc_info.value)

    async def test_resource_template_task_raises_mcp_error(self, proxy_server: FastMCP):
        """A task-augmented read of a templated resource raises McpError."""
        async with Client(proxy_server) as session:
            with pytest.raises(McpError) as exc_info:
                await session.read_resource("data://user/42.json", task=True)
            assert "does not support task-augmented execution" in str(exc_info.value)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_proxy.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/config.py | """TaskConfig for MCP SEP-1686 background task execution modes.
This module defines the configuration for how tools, resources, and prompts
handle task-augmented execution as specified in SEP-1686.
"""
from __future__ import annotations
import functools
import inspect
from collections.abc import Callable
from dataclasses import dataclass
from datetime import timedelta
from typing import Any, Literal
from fastmcp.utilities.async_utils import is_coroutine_function
# Task execution modes per SEP-1686 / MCP ToolExecution.taskSupport
TaskMode = Literal["forbidden", "optional", "required"]
# Default values for task metadata (single source of truth)
DEFAULT_POLL_INTERVAL = timedelta(seconds=5) # Default poll interval
DEFAULT_POLL_INTERVAL_MS = int(DEFAULT_POLL_INTERVAL.total_seconds() * 1000)
DEFAULT_TTL_MS = 60_000 # Default TTL in milliseconds
@dataclass
class TaskMeta:
    """Per-request metadata signalling task-augmented execution.

    Passing an instance to call_tool/read_resource/get_prompt marks the
    operation for submission as a background task instead of running inline.

    Attributes:
        ttl: Requested task TTL in milliseconds; the server default is used
            when None.
        fn_key: Explicit Docket routing key; auto-derived from the component
            name when None.
    """

    ttl: int | None = None
    fn_key: str | None = None
@dataclass
class TaskConfig:
    """Configuration for MCP background task execution (SEP-1686).

    The mode controls how a component responds to task-augmented requests:

    - "forbidden": no task support; a task-augmented request gets -32601.
    - "optional": both synchronous and task execution are accepted.
    - "required": only task execution; a plain request gets -32601.

    Important:
        Task-enabled components must exist at server startup so they are
        registered with every Docket worker; components added afterwards
        are not registered for background execution.

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.tasks import TaskConfig

        mcp = FastMCP("MyServer")

        # Background execution required
        @mcp.tool(task=TaskConfig(mode="required"))
        async def long_running_task(): ...

        # Supports both modes (default when task=True)
        @mcp.tool(task=TaskConfig(mode="optional"))
        async def flexible_task(): ...
        ```
    """

    mode: TaskMode = "optional"
    poll_interval: timedelta = DEFAULT_POLL_INTERVAL

    @classmethod
    def from_bool(cls, value: bool) -> TaskConfig:
        """Build a TaskConfig from a boolean task flag.

        Args:
            value: True maps to "optional" mode, False to "forbidden".

        Returns:
            A TaskConfig with the corresponding mode.
        """
        chosen: TaskMode = "optional" if value else "forbidden"
        return cls(mode=chosen)

    def supports_tasks(self) -> bool:
        """Report whether task execution is allowed at all.

        Returns:
            True for "optional" or "required" mode, False for "forbidden".
        """
        return self.mode in ("optional", "required")

    def validate_function(self, fn: Callable[..., Any], name: str) -> None:
        """Check that *fn* can actually be run as a background task.

        Task execution needs fastmcp[tasks] (pydocket) installed and an
        async function; callable-class instances and staticmethods are
        unwrapped before the coroutine check.

        Args:
            fn: The function (or callable object) to validate.
            name: Component name, used in error messages.

        Raises:
            ImportError: If task execution is enabled but pydocket is missing.
            ValueError: If task execution is enabled but the function is sync.
        """
        if self.mode == "forbidden":
            # Nothing to validate when the component never runs as a task.
            return

        # Lazy import to avoid a circular chain:
        # dependencies.py → http.py → tasks/__init__.py → config.py
        from fastmcp.server.dependencies import require_docket

        require_docket(f"`task=True` on function '{name}'")

        # Callable-class instances expose the real function on __call__;
        # staticmethod wrappers expose it on __func__.
        target = fn
        if callable(fn) and not (
            inspect.isroutine(fn) or isinstance(fn, functools.partial)
        ):
            target = fn.__call__
        if isinstance(target, staticmethod):
            target = target.__func__

        if is_coroutine_function(target):
            # Context is available inside background workers (SEP-1686):
            # _CurrentContext wires a task-aware Context with task_id and
            # session from the registry, so no warning is needed here.
            return
        raise ValueError(
            f"'{name}' uses a sync function but has task execution enabled. "
            "Background tasks require async functions."
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/config.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:examples/tasks/client.py | """
FastMCP Tasks Example Client
Demonstrates calling tools both immediately and as background tasks,
with real-time progress updates via status callbacks.
Usage:
# Make sure environment is configured (source .envrc or use direnv)
source .envrc
# Background task execution with progress callbacks (default)
python client.py --duration 10
# Immediate execution (blocks until complete)
python client.py immediate --duration 5
"""
import asyncio
import sys
from pathlib import Path
from typing import Annotated
import cyclopts
from mcp.types import GetTaskResult, TextContent
from rich.console import Console
from fastmcp.client import Client
console = Console()
app = cyclopts.App(name="tasks-client", help="FastMCP Tasks Example Client")
def load_server():
    """Import the example server module and return its FastMCP instance.

    Inserts the repository root (three levels up from this file) into
    sys.path so that ``examples.tasks.server`` is importable regardless
    of the working directory.
    """
    repo_root = str(Path(__file__).parent.parent.parent)
    if repo_root not in sys.path:
        sys.path.insert(0, repo_root)

    import examples.tasks.server as server_module

    return server_module.mcp
# Track last message to deduplicate consecutive identical notifications.
# Note: Docket fires separate events for progress.increment() and progress.set_message(),
# but MCP's statusMessage field only carries the text message (no numerical progress).
# This means we often get duplicate notifications with identical messages.
_last_notification_message: str | None = None
def print_notification(status: GetTaskResult) -> None:
    """Callback function for push notifications from server.

    This is called automatically when the server sends notifications/tasks/status.
    Deduplicates identical consecutive messages to keep output clean.

    Args:
        status: The task status payload pushed by the server.
    """
    global _last_notification_message

    # Skip if this is the same message we just printed
    if status.statusMessage == _last_notification_message:
        return
    _last_notification_message = status.statusMessage

    color = {
        "working": "yellow",
        "completed": "green",
        "failed": "red",
    }.get(status.status, "yellow")
    # NOTE(review): the original icon literals were mojibake (one was even
    # split across lines, a syntax error); restored to the most plausible
    # glyphs — confirm against upstream.
    icon = {
        "working": "🔄",
        "completed": "✅",
        "failed": "❌",
    }.get(status.status, "⚠️")
    console.print(
        f"[{color}]📢 Notification: {status.status} {icon} - {status.statusMessage}[/{color}]"
    )
@app.default
async def task(
    duration: Annotated[
        int,
        cyclopts.Parameter(help="Duration of computation in seconds (1-60)"),
    ] = 10,
):
    """Execute as background task with real-time progress callbacks."""
    if not 1 <= duration <= 60:
        console.print("[red]Error: Duration must be between 1 and 60 seconds[/red]")
        sys.exit(1)

    server = load_server()

    console.print(f"\n[bold]Calling slow_computation(duration={duration})[/bold]")
    console.print("Mode: [cyan]Background task[/cyan]\n")

    async with Client(server) as client:
        background = await client.call_tool(
            "slow_computation",
            arguments={"duration": duration},
            task=True,
        )
        console.print(f"Task started: [cyan]{background.task_id}[/cyan]\n")

        # Subscribe to server-pushed status updates as they arrive
        background.on_status_change(print_notification)
        console.print(
            "[dim]Notifications will appear as the server sends them...[/dim]\n"
        )

        # Demonstrate that the client stays responsive while the task runs
        for step in range(3):
            await asyncio.sleep(0.5)
            console.print(f"[dim]Client doing other work... ({step + 1}/3)[/dim]")
        console.print()

        # Block until the background task finishes and fetch its result
        console.print("[dim]Waiting for final result...[/dim]")
        result = await background.result()

        console.print("\n[bold]Result:[/bold]")
        assert isinstance(result.content[0], TextContent)
        console.print(f" {result.content[0].text}")
@app.command
async def immediate(
    duration: Annotated[
        int,
        cyclopts.Parameter(help="Duration of computation in seconds (1-60)"),
    ] = 5,
):
    """Execute the tool immediately (blocks until complete)."""
    if not 1 <= duration <= 60:
        console.print("[red]Error: Duration must be between 1 and 60 seconds[/red]")
        sys.exit(1)

    server = load_server()

    console.print(f"\n[bold]Calling slow_computation(duration={duration})[/bold]")
    console.print("Mode: [cyan]Immediate execution[/cyan]\n")

    async with Client(server) as client:
        # Plain call_tool: no task=True, so this waits for the tool to finish
        outcome = await client.call_tool(
            "slow_computation",
            arguments={"duration": duration},
        )
        console.print("\n[bold]Result:[/bold]")
        assert isinstance(outcome.content[0], TextContent)
        console.print(f" {outcome.content[0].text}")
if __name__ == "__main__":
    # Dispatch to the cyclopts CLI (default command runs the background demo).
    app()
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/tasks/client.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/tasks/server.py | """
FastMCP Tasks Example Server
Demonstrates background task execution with progress tracking using Docket.
Setup:
1. Start Redis: docker compose up -d
2. Load environment: source .envrc
3. Run server: fastmcp run server.py
The example uses Redis by default to demonstrate distributed task execution
and the fastmcp tasks CLI commands.
"""
import asyncio
import logging
from typing import Annotated
from docket import Logged
from fastmcp import FastMCP
from fastmcp.dependencies import Progress
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Create server
mcp = FastMCP("Tasks Example")
@mcp.tool(task=True)
async def slow_computation(
    duration: Annotated[int, Logged],
    progress: Progress = Progress(),
) -> str:
    """Simulate a long-running computation lasting `duration` seconds.

    Demonstrates progress tracking with background tasks: every second the
    task sleeps, bumps the Docket progress counter, and refreshes the status
    message; milestones are logged every two seconds and at completion.

    Args:
        duration: Number of seconds the computation should take (1-60)

    Returns:
        A completion message with the total duration
    """
    if not 1 <= duration <= 60:
        raise ValueError("Duration must be between 1 and 60 seconds")

    logger.info(f"Starting slow computation for {duration} seconds")

    # One progress unit per simulated second of work
    await progress.set_total(duration)

    for elapsed in range(1, duration + 1):
        await asyncio.sleep(1)

        remaining = duration - elapsed
        await progress.increment()
        await progress.set_message(
            f"Working... {elapsed}/{duration}s ({remaining}s remaining)"
        )

        # Log at completion and on every even second
        if elapsed == duration or elapsed % 2 == 0:
            logger.info(f"Progress: {elapsed}/{duration}s")

    logger.info(f"Completed computation in {duration} seconds")
    return f"Computation completed successfully in {duration} seconds!"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/tasks/server.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/cli/tasks.py | """FastMCP tasks CLI for Docket task management."""
import asyncio
import sys
from typing import Annotated
import cyclopts
from rich.console import Console
from fastmcp.utilities.cli import load_and_merge_config
from fastmcp.utilities.logging import get_logger
logger = get_logger("cli.tasks")
console = Console()
tasks_app = cyclopts.App(
name="tasks",
help="Manage FastMCP background tasks using Docket",
)
def check_distributed_backend() -> None:
    """Check if Docket is configured with a distributed backend.

    The CLI worker runs as a separate process, so it needs Redis/Valkey
    to coordinate with the main server process.

    Raises:
        SystemExit: If using memory:// URL
    """
    import fastmcp

    docket_url = fastmcp.settings.docket.url

    # Anything other than memory:// is a distributed backend — nothing to do.
    if not docket_url.startswith("memory://"):
        return

    # NOTE(review): the leading glyph in the original header was mojibake;
    # restored to a cross mark — confirm against upstream.
    console.print(
        "[bold red]❌ In-memory backend not supported by CLI[/bold red]\n\n"
        "Your Docket configuration uses an in-memory backend (memory://) which\n"
        "only works within a single process.\n\n"
        "To use [cyan]fastmcp tasks[/cyan] CLI commands (which run in separate\n"
        "processes), you need a distributed backend:\n\n"
        "[bold]1. Install Redis or Valkey:[/bold]\n"
        " [dim]macOS:[/dim] brew install redis\n"
        " [dim]Ubuntu:[/dim] apt install redis-server\n"
        " [dim]Valkey:[/dim] See https://valkey.io/\n\n"
        "[bold]2. Start the service:[/bold]\n"
        " redis-server\n\n"
        "[bold]3. Configure Docket URL:[/bold]\n"
        " [dim]Environment variable:[/dim]\n"
        " export FASTMCP_DOCKET_URL=redis://localhost:6379/0\n\n"
        "[bold]4. Try again[/bold]\n\n"
        "The memory backend works great for single-process servers, but the CLI\n"
        "commands need a distributed backend to coordinate across processes.\n\n"
        "Need help? See: [cyan]https://gofastmcp.com/docs/tasks[/cyan]"
    )
    sys.exit(1)
@tasks_app.command
def worker(
    server_spec: Annotated[
        str | None,
        cyclopts.Parameter(
            help="Python file to run, optionally with :object suffix, or None to auto-detect fastmcp.json"
        ),
    ] = None,
) -> None:
    """Start an additional worker to process background tasks.

    Connects to your Docket backend and processes tasks in parallel with
    any other running workers. Configure via environment variables
    (FASTMCP_DOCKET_*).

    Example:
        fastmcp tasks worker server.py
        fastmcp tasks worker examples/tasks/server.py
    """
    import fastmcp

    # Fail fast if the backend cannot coordinate across processes
    check_distributed_backend()

    # Load server config to get task functions
    try:
        config, _resolved_spec = load_and_merge_config(server_spec)
    except FileNotFoundError:
        sys.exit(1)

    # Load the server
    server = asyncio.run(config.source.load_server())

    async def run_worker():
        """Enter server lifespan and camp forever."""
        async with server._lifespan_manager():
            # NOTE(review): leading glyph was mojibake in the original;
            # restored to a check mark — confirm against upstream.
            console.print(
                f"[bold green]✓[/bold green] Starting worker for [cyan]{server.name}[/cyan]"
            )
            console.print(f" Docket: {fastmcp.settings.docket.name}")
            console.print(f" Backend: {fastmcp.settings.docket.url}")
            console.print(f" Concurrency: {fastmcp.settings.docket.concurrency}")
            # Server's lifespan has started its worker - just camp here forever
            while True:
                await asyncio.sleep(3600)

    try:
        asyncio.run(run_worker())
    except KeyboardInterrupt:
        console.print("\n[yellow]Worker stopped[/yellow]")
        sys.exit(0)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/cli/tasks.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/client/tasks.py | """SEP-1686 client Task classes."""
from __future__ import annotations
import abc
import asyncio
import inspect
import time
import weakref
from collections.abc import Awaitable, Callable
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Generic, TypeVar
import mcp.types
from mcp.types import GetTaskResult, TaskStatusNotification
from fastmcp.client.messages import Message, MessageHandler
from fastmcp.utilities.logging import get_logger
logger = get_logger(__name__)
if TYPE_CHECKING:
from fastmcp.client.client import CallToolResult, Client
class TaskNotificationHandler(MessageHandler):
    """Message handler that forwards task status notifications to the Client.

    Holds only a weak reference to the client so this handler never keeps
    a discarded client alive.
    """

    def __init__(self, client: Client):
        super().__init__()
        self._client_ref: weakref.ref[Client] = weakref.ref(client)

    async def dispatch(self, message: Message) -> None:
        """Route task status notifications, then defer to the base dispatcher."""
        is_task_status = isinstance(
            message, mcp.types.ServerNotification
        ) and isinstance(message.root, TaskStatusNotification)
        if is_task_status:
            owner = self._client_ref()
            if owner:
                owner._handle_task_status_notification(message.root)
        await super().dispatch(message)
TaskResultT = TypeVar("TaskResultT")
class Task(abc.ABC, Generic[TaskResultT]):
    """
    Abstract base class for MCP background tasks (SEP-1686).

    Provides a uniform API whether the server accepts background execution
    or executes synchronously (graceful degradation per SEP-1686).

    Subclasses:
        - ToolTask: For tool calls (result type: CallToolResult)
        - PromptTask: For prompts (future, result type: GetPromptResult)
        - ResourceTask: For resources (future, result type: ReadResourceResult)
    """

    def __init__(
        self,
        client: Client,
        task_id: str,
        immediate_result: TaskResultT | None = None,
    ):
        """
        Create a Task wrapper.

        Args:
            client: The FastMCP client
            task_id: The task identifier
            immediate_result: If server executed synchronously, the immediate result
        """
        self._client = client
        self._task_id = task_id
        self._immediate_result = immediate_result
        # Immediate mode: the server degraded to synchronous execution, so
        # there is no server-side task to poll or cancel.
        self._is_immediate = immediate_result is not None
        # Notification-based optimization (SEP-1686 notifications/tasks/status)
        self._status_cache: GetTaskResult | None = None
        self._status_event: asyncio.Event | None = None  # Lazy init
        self._status_callbacks: list[
            Callable[[GetTaskResult], None | Awaitable[None]]
        ] = []
        # Memoized final result; populated by subclasses' result().
        self._cached_result: TaskResultT | None = None

    def _check_client_connected(self) -> None:
        """Validate that client context is still active.

        Raises:
            RuntimeError: If accessed outside client context (unless immediate)
        """
        if self._is_immediate:
            return  # Already resolved, no client needed
        try:
            # Accessing .session raises RuntimeError once the client's
            # 'async with' context has exited.
            _ = self._client.session
        except RuntimeError as e:
            raise RuntimeError(
                "Cannot access task results outside client context. "
                "Task futures must be used within 'async with client:' block."
            ) from e

    @property
    def task_id(self) -> str:
        """Get the task ID."""
        return self._task_id

    @property
    def returned_immediately(self) -> bool:
        """Check if server executed the task immediately.

        Returns:
            True if server executed synchronously (graceful degradation or no task support)
            False if server accepted background execution
        """
        return self._is_immediate

    def _handle_status_notification(self, status: GetTaskResult) -> None:
        """Process incoming notifications/tasks/status (internal).

        Called by Client when a notification is received for this task.
        Updates cache, triggers events, and invokes user callbacks.

        Args:
            status: Task status from notification
        """
        # Update cache for next status() call
        self._status_cache = status
        # Wake up any wait() calls
        if self._status_event is not None:
            self._status_event.set()
        # Invoke user callbacks; a failing callback must not break dispatch,
        # so exceptions are logged and swallowed.
        for callback in self._status_callbacks:
            try:
                result = callback(status)
                if inspect.isawaitable(result):
                    # Fire and forget async callbacks
                    asyncio.create_task(result)  # type: ignore[arg-type] # noqa: RUF006
            except Exception as e:
                logger.warning(f"Task callback error: {e}", exc_info=True)

    def on_status_change(
        self,
        callback: Callable[[GetTaskResult], None | Awaitable[None]],
    ) -> None:
        """Register callback for status change notifications.

        The callback will be invoked when a notifications/tasks/status is received
        for this task (optional server feature per SEP-1686 lines 436-444).
        Supports both sync and async callbacks (auto-detected).

        Args:
            callback: Function to call with GetTaskResult when status changes.
                Can return None (sync) or Awaitable[None] (async).

        Example:
            >>> task = await client.call_tool("slow_operation", {}, task=True)
            >>>
            >>> def on_update(status: GetTaskResult):
            ...     print(f"Task {status.taskId} is now {status.status}")
            >>>
            >>> task.on_status_change(on_update)
            >>> result = await task  # Callback fires when status changes
        """
        self._status_callbacks.append(callback)

    async def status(self) -> GetTaskResult:
        """Get current task status.

        If server executed immediately, returns synthetic completed status.
        Otherwise queries the server for current status.
        """
        self._check_client_connected()
        if self._is_immediate:
            # Return synthetic completed status
            now = datetime.now(timezone.utc)
            return GetTaskResult(
                taskId=self._task_id,
                status="completed",
                createdAt=now,
                lastUpdatedAt=now,
                ttl=None,
                pollInterval=1000,
            )
        # Return cached status if available (from notification)
        if self._status_cache is not None:
            cached = self._status_cache
            # Don't clear cache - keep it for next call
            return cached
        # Query server and cache the result
        self._status_cache = await self._client.get_task_status(self._task_id)
        return self._status_cache

    @abc.abstractmethod
    async def result(self) -> TaskResultT:
        """Wait for and return the task result.

        Must be implemented by subclasses to return the appropriate result type.
        """
        ...

    async def wait(
        self, *, state: str | None = None, timeout: float = 300.0
    ) -> GetTaskResult:
        """Wait for task to reach a specific state or complete.

        Uses event-based waiting when notifications are available (fast),
        with fallback to polling (reliable). Optimally wakes up immediately
        on status changes when server sends notifications/tasks/status.

        Args:
            state: Desired state ('submitted', 'working', 'completed', 'failed').
                If None, waits for any terminal state (completed/failed)
            timeout: Maximum time to wait in seconds

        Returns:
            GetTaskResult: Final task status

        Raises:
            TimeoutError: If desired state not reached within timeout
        """
        self._check_client_connected()
        if self._is_immediate:
            # Already done
            return await self.status()
        # Initialize event for notification wake-ups
        if self._status_event is None:
            self._status_event = asyncio.Event()
        start = time.time()
        terminal_states = {"completed", "failed", "cancelled"}
        poll_interval = 0.5  # Fallback polling interval (500ms)
        while True:
            # Check cached status first (updated by notifications)
            if self._status_cache:
                current = self._status_cache.status
                if state is None:
                    if current in terminal_states:
                        return self._status_cache
                elif current == state:
                    return self._status_cache
            # Check timeout
            elapsed = time.time() - start
            if elapsed >= timeout:
                raise TimeoutError(
                    f"Task {self._task_id} did not reach {state or 'terminal state'} within {timeout}s"
                )
            remaining = timeout - elapsed
            # Wait for notification event OR poll timeout
            try:
                await asyncio.wait_for(
                    self._status_event.wait(), timeout=min(poll_interval, remaining)
                )
                self._status_event.clear()
            except asyncio.TimeoutError:
                # Fallback: poll server (notification didn't arrive in time)
                self._status_cache = await self._client.get_task_status(self._task_id)

    async def cancel(self) -> None:
        """Cancel this task, transitioning it to cancelled state.

        Sends a tasks/cancel protocol request. The server will attempt to halt
        execution and move the task to cancelled state.

        Note: If server executed immediately (graceful degradation), this is a no-op
        as there's no server-side task to cancel.
        """
        if self._is_immediate:
            # No server-side task to cancel
            return
        self._check_client_connected()
        await self._client.cancel_task(self._task_id)
        # Invalidate cache to force fresh status fetch
        self._status_cache = None

    def __await__(self):
        """Allow 'await task' to get result."""
        return self.result().__await__()
class ToolTask(Task["CallToolResult"]):
    """
    Represents a tool call that may execute in background or immediately.

    Provides a uniform API whether the server accepts background execution
    or executes synchronously (graceful degradation per SEP-1686).

    Usage:
        task = await client.call_tool_as_task("analyze", args)
        # Check status
        status = await task.status()
        # Wait for completion
        await task.wait()
        # Get result (waits if needed)
        result = await task.result()  # Returns CallToolResult
        # Or just await the task directly
        result = await task
    """

    def __init__(
        self,
        client: Client,
        task_id: str,
        tool_name: str,
        immediate_result: CallToolResult | None = None,
    ):
        """
        Create a ToolTask wrapper.

        Args:
            client: The FastMCP client
            task_id: The task identifier
            tool_name: Name of the tool being executed
            immediate_result: If server executed synchronously, the immediate result
        """
        super().__init__(client, task_id, immediate_result)
        # Retained so the result can be re-parsed with the tool's output schema.
        self._tool_name = tool_name

    async def result(self) -> CallToolResult:
        """Wait for and return the tool result.

        If server executed immediately, returns the immediate result.
        Otherwise waits for background task to complete and retrieves result.

        Returns:
            CallToolResult: The parsed tool result (same as call_tool returns)
        """
        # Check cache first
        if self._cached_result is not None:
            return self._cached_result
        if self._is_immediate:
            assert self._immediate_result is not None  # Type narrowing
            result = self._immediate_result
        else:
            # Check client connected
            self._check_client_connected()
            # Wait for completion using event-based wait (respects notifications)
            await self.wait()
            # Get the raw result (dict or CallToolResult)
            raw_result = await self._client.get_task_result(self._task_id)
            # Convert to CallToolResult if needed and parse
            if isinstance(raw_result, dict):
                # Raw dict from get_task_result - parse as CallToolResult
                mcp_result = mcp.types.CallToolResult.model_validate(raw_result)
                result = await self._client._parse_call_tool_result(
                    self._tool_name, mcp_result, raise_on_error=True
                )
            elif isinstance(raw_result, mcp.types.CallToolResult):
                # Already a CallToolResult from MCP protocol - parse it
                result = await self._client._parse_call_tool_result(
                    self._tool_name, raw_result, raise_on_error=True
                )
            else:
                # Legacy ToolResult format - convert to MCP type.
                # Duck-typed: any object exposing both attributes is accepted.
                if hasattr(raw_result, "content") and hasattr(
                    raw_result, "structured_content"
                ):
                    mcp_result = mcp.types.CallToolResult(
                        content=raw_result.content,
                        structuredContent=raw_result.structured_content,
                        _meta=raw_result.meta,  # type: ignore[call-arg] # _meta is Pydantic alias for meta field
                    )
                    result = await self._client._parse_call_tool_result(
                        self._tool_name, mcp_result, raise_on_error=True
                    )
                else:
                    # Unknown type - just return it
                    result = raw_result
        # Cache before returning
        self._cached_result = result
        return result
class PromptTask(Task[mcp.types.GetPromptResult]):
    """
    A prompt request that may run in the background or resolve immediately.

    Exposes the uniform Task API: callers use the same surface whether the
    server accepted background execution or degraded gracefully to a
    synchronous call (SEP-1686).

    Usage:
        task = await client.get_prompt_as_task("analyze", args)
        result = await task  # Returns GetPromptResult
    """

    def __init__(
        self,
        client: Client,
        task_id: str,
        prompt_name: str,
        immediate_result: mcp.types.GetPromptResult | None = None,
    ):
        """
        Create a PromptTask wrapper.

        Args:
            client: The FastMCP client
            task_id: The task identifier
            prompt_name: Name of the prompt being executed
            immediate_result: If server executed synchronously, the immediate result
        """
        super().__init__(client, task_id, immediate_result)
        self._prompt_name = prompt_name

    async def result(self) -> mcp.types.GetPromptResult:
        """Return the prompt result, waiting for completion when required.

        Returns:
            GetPromptResult: The prompt result with messages and description
        """
        # Serve from cache when a previous call already resolved the result
        cached = self._cached_result
        if cached is not None:
            return cached

        if not self._is_immediate:
            self._check_client_connected()
            # Event-driven wait with polling fallback
            await self.wait()
            raw = await self._client.get_task_result(self._task_id)
            outcome = mcp.types.GetPromptResult.model_validate(raw)
        else:
            assert self._immediate_result is not None
            outcome = self._immediate_result

        self._cached_result = outcome
        return outcome
class ResourceTask(
    Task[list[mcp.types.TextResourceContents | mcp.types.BlobResourceContents]]
):
    """
    A resource read that may run in the background or resolve immediately.

    Exposes the uniform Task API: callers use the same surface whether the
    server accepted background execution or degraded gracefully to a
    synchronous call (SEP-1686).

    Usage:
        task = await client.read_resource_as_task("file://data.txt")
        contents = await task  # Returns list[ReadResourceContents]
    """

    def __init__(
        self,
        client: Client,
        task_id: str,
        uri: str,
        immediate_result: list[
            mcp.types.TextResourceContents | mcp.types.BlobResourceContents
        ]
        | None = None,
    ):
        """
        Create a ResourceTask wrapper.

        Args:
            client: The FastMCP client
            task_id: The task identifier
            uri: URI of the resource being read
            immediate_result: If server executed synchronously, the immediate result
        """
        super().__init__(client, task_id, immediate_result)
        self._uri = uri

    @staticmethod
    def _parse_content_item(item):
        """Validate one raw contents entry into a typed resource content.

        Dicts with a "blob" key become BlobResourceContents, other dicts
        become TextResourceContents; non-dict items pass through untouched.
        """
        if not isinstance(item, dict):
            return item
        if "blob" in item:
            return mcp.types.BlobResourceContents.model_validate(item)
        return mcp.types.TextResourceContents.model_validate(item)

    async def result(
        self,
    ) -> list[mcp.types.TextResourceContents | mcp.types.BlobResourceContents]:
        """Return the resource contents, waiting for completion when required.

        Returns:
            list[ReadResourceContents]: The resource contents
        """
        # Serve from cache when a previous call already resolved the result
        cached = self._cached_result
        if cached is not None:
            return cached

        if self._is_immediate:
            assert self._immediate_result is not None
            outcome = self._immediate_result
        else:
            self._check_client_connected()
            # Event-driven wait with polling fallback
            await self.wait()
            raw = await self._client.get_task_result(self._task_id)
            if isinstance(raw, mcp.types.ReadResourceResult):
                # Already parsed by TasksResponse - extract contents
                outcome = list(raw.contents)
            elif isinstance(raw, dict) and "contents" in raw:
                # Dict format - validate each content item individually
                outcome = [self._parse_content_item(item) for item in raw["contents"]]
            elif isinstance(raw, list):
                # Fallback - already the contents list
                outcome = raw
            else:
                # Fallback - wrap a single value
                outcome = [raw]

        self._cached_result = outcome
        return outcome
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/client/tasks.py",
"license": "Apache License 2.0",
"lines": 446,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/dependencies.py | """Dependency injection exports for FastMCP.
This module re-exports dependency injection symbols to provide a clean,
centralized import location for all dependency-related functionality.
DI features (Depends, CurrentContext, CurrentFastMCP) work without pydocket
using the uncalled-for DI engine. Only task-related dependencies (CurrentDocket,
CurrentWorker) and background task execution require fastmcp[tasks].
"""
from uncalled_for import Dependency, Depends, Shared
from fastmcp.server.dependencies import (
CurrentAccessToken,
CurrentContext,
CurrentDocket,
CurrentFastMCP,
CurrentHeaders,
CurrentRequest,
CurrentWorker,
Progress,
ProgressLike,
TokenClaim,
)
__all__ = [
"CurrentAccessToken",
"CurrentContext",
"CurrentDocket",
"CurrentFastMCP",
"CurrentHeaders",
"CurrentRequest",
"CurrentWorker",
"Dependency",
"Depends",
"Progress",
"ProgressLike",
"Shared",
"TokenClaim",
]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/dependencies.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/handlers.py | """SEP-1686 task execution handlers.
Handles queuing tool/prompt/resource executions to Docket as background tasks.
"""
from __future__ import annotations
import uuid
from contextlib import suppress
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Literal
import mcp.types
from mcp.shared.exceptions import McpError
from mcp.types import INTERNAL_ERROR, ErrorData
from fastmcp.server.dependencies import _current_docket, get_access_token, get_context
from fastmcp.server.tasks.config import TaskMeta
from fastmcp.server.tasks.keys import build_task_key
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from fastmcp.prompts.prompt import Prompt
from fastmcp.resources.resource import Resource
from fastmcp.resources.template import ResourceTemplate
from fastmcp.tools.tool import Tool
logger = get_logger(__name__)
# Redis mapping TTL buffer: Add 15 minutes to Docket's execution_ttl
TASK_MAPPING_TTL_BUFFER_SECONDS = 15 * 60
async def submit_to_docket(
task_type: Literal["tool", "resource", "template", "prompt"],
key: str,
component: Tool | Resource | ResourceTemplate | Prompt,
arguments: dict[str, Any] | None = None,
task_meta: TaskMeta | None = None,
) -> mcp.types.CreateTaskResult:
"""Submit any component to Docket for background execution (SEP-1686).
Unified handler for all component types. Called by component's internal
methods (_run, _read, _render) when task metadata is present and mode allows.
Queues the component's method to Docket, stores raw return values,
and converts to MCP types on retrieval.
Args:
task_type: Component type for task key construction
key: The component key as seen by MCP layer (with namespace prefix)
component: The component instance (Tool, Resource, ResourceTemplate, Prompt)
arguments: Arguments/params (None for Resource which has no args)
task_meta: Task execution metadata. If task_meta.ttl is provided, it
overrides the server default (docket.execution_ttl).
Returns:
CreateTaskResult: Task stub with proper Task object
"""
# Generate server-side task ID per SEP-1686 final spec (line 375-377)
# Server MUST generate task IDs, clients no longer provide them
server_task_id = str(uuid.uuid4())
# Record creation timestamp per SEP-1686 final spec (line 430)
created_at = datetime.now(timezone.utc)
# Get session ID - use "internal" for programmatic calls without MCP session
ctx = get_context()
try:
session_id = ctx.session_id
except RuntimeError:
session_id = "internal"
docket = _current_docket.get()
if docket is None:
raise McpError(
ErrorData(
code=INTERNAL_ERROR,
message="Background tasks require a running FastMCP server context",
)
)
# Build full task key with embedded metadata
task_key = build_task_key(session_id, server_task_id, task_type, key)
# Determine TTL: use task_meta.ttl if provided, else docket default
if task_meta is not None and task_meta.ttl is not None:
ttl_ms = task_meta.ttl
else:
ttl_ms = int(docket.execution_ttl.total_seconds() * 1000)
ttl_seconds = int(ttl_ms / 1000) + TASK_MAPPING_TTL_BUFFER_SECONDS
# Store task metadata in Redis for protocol handlers
task_meta_key = docket.key(f"fastmcp:task:{session_id}:{server_task_id}")
created_at_key = docket.key(
f"fastmcp:task:{session_id}:{server_task_id}:created_at"
)
poll_interval_key = docket.key(
f"fastmcp:task:{session_id}:{server_task_id}:poll_interval"
)
origin_request_id_key = docket.key(
f"fastmcp:task:{session_id}:{server_task_id}:origin_request_id"
)
poll_interval_ms = int(component.task_config.poll_interval.total_seconds() * 1000)
origin_request_id = (
str(ctx.request_context.request_id) if ctx.request_context is not None else None
)
# Snapshot the current access token (if any) for background task access (#3095)
access_token = get_access_token()
access_token_key = docket.key(
f"fastmcp:task:{session_id}:{server_task_id}:access_token"
)
async with docket.redis() as redis:
await redis.set(task_meta_key, task_key, ex=ttl_seconds)
await redis.set(created_at_key, created_at.isoformat(), ex=ttl_seconds)
await redis.set(poll_interval_key, str(poll_interval_ms), ex=ttl_seconds)
if origin_request_id is not None:
await redis.set(origin_request_id_key, origin_request_id, ex=ttl_seconds)
if access_token is not None:
await redis.set(
access_token_key, access_token.model_dump_json(), ex=ttl_seconds
)
# Register session for Context access in background workers (SEP-1686)
# This enables elicitation/sampling from background tasks via weakref
# Skip for "internal" sessions (programmatic calls without MCP session)
if session_id != "internal":
from fastmcp.server.dependencies import register_task_session
register_task_session(session_id, ctx.session)
# Send an initial tasks/status notification before queueing.
# This guarantees clients can observe task creation immediately.
notification = mcp.types.TaskStatusNotification.model_validate(
{
"method": "notifications/tasks/status",
"params": {
"taskId": server_task_id,
"status": "working",
"statusMessage": "Task submitted",
"createdAt": created_at,
"lastUpdatedAt": created_at,
"ttl": ttl_ms,
"pollInterval": poll_interval_ms,
},
"_meta": {
"io.modelcontextprotocol/related-task": {
"taskId": server_task_id,
}
},
}
)
server_notification = mcp.types.ServerNotification(notification)
with suppress(Exception):
# Don't let notification failures break task creation
await ctx.session.send_notification(server_notification)
# Queue function to Docket by key (result storage via execution_ttl)
# Use component.add_to_docket() which handles calling conventions
# `fn_key` is the function lookup key (e.g., "child_multiply")
# `task_key` is the task result key (e.g., "fastmcp:task:{session}:{task_id}:tool:child_multiply")
# Resources don't take arguments; tools/prompts/templates always pass arguments (even if None/empty)
if task_type == "resource":
await component.add_to_docket(docket, fn_key=key, task_key=task_key) # type: ignore[call-arg]
else:
await component.add_to_docket(docket, arguments, fn_key=key, task_key=task_key) # type: ignore[call-arg]
# Spawn subscription task to send status notifications (SEP-1686 optional feature)
from fastmcp.server.tasks.subscriptions import subscribe_to_task_updates
# Start subscription in session's task group (persists for connection lifetime)
if hasattr(ctx.session, "_subscription_task_group"):
tg = ctx.session._subscription_task_group
if tg:
tg.start_soon( # type: ignore[union-attr]
subscribe_to_task_updates,
server_task_id,
task_key,
ctx.session,
docket,
poll_interval_ms,
)
# Start notification subscriber for distributed elicitation (idempotent)
# This enables ctx.elicit() to work when workers run in separate processes
# Subscriber forwards notifications from Redis queue to client session
from fastmcp.server.tasks.notifications import (
ensure_subscriber_running,
stop_subscriber,
)
try:
await ensure_subscriber_running(session_id, ctx.session, docket, ctx.fastmcp)
# Register cleanup callback on session exit (once per session)
# This ensures subscriber is stopped when the session disconnects
if (
hasattr(ctx.session, "_exit_stack")
and ctx.session._exit_stack is not None
and not getattr(ctx.session, "_notification_cleanup_registered", False)
):
async def _cleanup_subscriber() -> None:
await stop_subscriber(session_id)
ctx.session._exit_stack.push_async_callback(_cleanup_subscriber)
ctx.session._notification_cleanup_registered = True # type: ignore[attr-defined]
except Exception as e:
# Non-fatal: elicitation will still work via polling fallback
logger.debug("Failed to start notification subscriber: %s", e)
# Return CreateTaskResult with proper Task object
# Tasks MUST begin in "working" status per SEP-1686 final spec (line 381)
return mcp.types.CreateTaskResult(
task=mcp.types.Task(
taskId=server_task_id,
status="working",
createdAt=created_at,
lastUpdatedAt=created_at,
ttl=ttl_ms,
pollInterval=poll_interval_ms,
)
)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/handlers.py",
"license": "Apache License 2.0",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/keys.py | """Task key management for SEP-1686 background tasks.
Task keys encode security scoping and metadata in the Docket key format:
`{session_id}:{client_task_id}:{task_type}:{component_identifier}`
This format provides:
- Session-based security scoping (prevents cross-session access)
- Task type identification (tool/prompt/resource)
- Component identification (name or URI for result conversion)
"""
from urllib.parse import quote, unquote
def build_task_key(
    session_id: str,
    client_task_id: str,
    task_type: str,
    component_identifier: str,
) -> str:
    """Construct the Docket task key that embeds task metadata.

    The key layout is `{session_id}:{client_task_id}:{task_type}:{component_identifier}`,
    where the component identifier is percent-encoded so colons, slashes, and
    other special characters cannot break the colon-delimited layout.

    Args:
        session_id: Session ID used for security scoping
        client_task_id: Task ID supplied by the client
        task_type: Type of task ("tool", "prompt", "resource")
        component_identifier: Tool name, prompt name, or resource URI

    Returns:
        Encoded task key for Docket

    Examples:
        >>> build_task_key("session123", "task456", "tool", "my_tool")
        'session123:task456:tool:my_tool'
        >>> build_task_key("session123", "task456", "resource", "file://data.txt")
        'session123:task456:resource:file%3A%2F%2Fdata.txt'
    """
    # Encode everything, including "/" (safe=""), so the identifier is one segment.
    safe_identifier = quote(component_identifier, safe="")
    return ":".join((session_id, client_task_id, task_type, safe_identifier))
def parse_task_key(task_key: str) -> dict[str, str]:
    """Decode a Docket task key back into its metadata fields.

    Args:
        task_key: Encoded task key from Docket

    Returns:
        Dict with keys: session_id, client_task_id, task_type, component_identifier

    Raises:
        ValueError: If the key does not contain all four colon-separated segments.

    Examples:
        >>> parse_task_key("session123:task456:tool:my_tool")
        `{'session_id': 'session123', 'client_task_id': 'task456', 'task_type': 'tool', 'component_identifier': 'my_tool'}`
        >>> parse_task_key("session123:task456:resource:file%3A%2F%2Fdata.txt")
        `{'session_id': 'session123', 'client_task_id': 'task456', 'task_type': 'resource', 'component_identifier': 'file://data.txt'}`
    """
    # maxsplit=3 keeps any colons inside the (encoded) identifier segment intact.
    segments = task_key.split(":", 3)
    if len(segments) < 4:
        raise ValueError(
            f"Invalid task key format: {task_key}. "
            f"Expected: {{session_id}}:{{client_task_id}}:{{task_type}}:{{component_identifier}}"
        )
    session_id, client_task_id, task_type, encoded_identifier = segments
    return {
        "session_id": session_id,
        "client_task_id": client_task_id,
        "task_type": task_type,
        "component_identifier": unquote(encoded_identifier),
    }
def get_client_task_id_from_key(task_key: str) -> str:
    """Return only the client task ID embedded in a task key.

    Args:
        task_key: Full encoded task key

    Returns:
        Client-provided task ID (second segment)

    Example:
        >>> get_client_task_id_from_key("session123:task456:tool:my_tool")
        'task456'
    """
    segments = task_key.split(":", 3)
    return segments[1]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/keys.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/fastmcp:src/fastmcp/server/tasks/subscriptions.py | """Task subscription helpers for sending MCP notifications (SEP-1686).
Subscribes to Docket execution state changes and sends notifications/tasks/status
to clients when their tasks change state.
This module requires fastmcp[tasks] (pydocket). It is only imported when docket is available.
"""
from __future__ import annotations
from contextlib import suppress
from datetime import datetime, timezone
from typing import TYPE_CHECKING
from docket.execution import ExecutionState
from mcp.types import TaskStatusNotification, TaskStatusNotificationParams
from fastmcp.server.tasks.config import DEFAULT_TTL_MS
from fastmcp.server.tasks.keys import parse_task_key
from fastmcp.server.tasks.requests import DOCKET_TO_MCP_STATE
from fastmcp.utilities.logging import get_logger
if TYPE_CHECKING:
from docket import Docket
from docket.execution import Execution
from mcp.server.session import ServerSession
logger = get_logger(__name__)
async def subscribe_to_task_updates(
    task_id: str,
    task_key: str,
    session: ServerSession,
    docket: Docket,
    poll_interval_ms: int = 5000,
) -> None:
    """Relay Docket execution events to the client as task status notifications.

    Per SEP-1686 lines 436-444, sending notifications/tasks/status on state
    changes is optional, but it lets clients poll far less aggressively.

    Args:
        task_id: Client-visible task ID (server-generated UUID)
        task_key: Internal Docket execution key (includes session, type, component)
        session: MCP ServerSession for sending notifications
        docket: Docket instance for subscribing to execution events
        poll_interval_ms: Poll interval in milliseconds to include in notifications
    """
    try:
        execution = await docket.get_execution(task_key)
        if execution is None:
            logger.warning(f"No execution found for task {task_id}")
            return
        # Forward each Docket event as the matching MCP notification.
        async for update in execution.subscribe():
            kind = update["type"]
            if kind == "state":
                # State transition -> notifications/tasks/status with new status.
                await _send_status_notification(
                    session=session,
                    task_id=task_id,
                    task_key=task_key,
                    docket=docket,
                    state=ExecutionState(update["state"]),
                    poll_interval_ms=poll_interval_ms,
                )
            elif kind == "progress":
                # Progress message changed -> notification carrying the message.
                await _send_progress_notification(
                    session=session,
                    task_id=task_id,
                    task_key=task_key,
                    docket=docket,
                    execution=execution,
                    poll_interval_ms=poll_interval_ms,
                )
    except Exception as exc:
        # Notifications are best-effort; clients can always fall back to polling.
        logger.warning(f"Subscription task failed for {task_id}: {exc}", exc_info=True)
async def _send_status_notification(
    session: ServerSession,
    task_id: str,
    task_key: str,
    docket: Docket,
    state: ExecutionState,
    poll_interval_ms: int = 5000,
) -> None:
    """Send notifications/tasks/status for a state change.

    Per SEP-1686 line 454: the notification SHOULD NOT include related-task
    metadata (the taskId already travels in params).

    Args:
        session: MCP ServerSession
        task_id: Client-visible task ID
        task_key: Internal task key (for metadata lookup)
        docket: Docket instance
        state: Docket execution state (enum)
        poll_interval_ms: Poll interval in milliseconds
    """
    # Unknown Docket states map to "failed" on the MCP side.
    mcp_status = DOCKET_TO_MCP_STATE.get(state, "failed")
    # The session id is embedded in the task key; we need it to find the
    # created_at record the task handler stored in Redis.
    session_id = parse_task_key(task_key)["session_id"]
    created_at_key = docket.key(f"fastmcp:task:{session_id}:{task_id}:created_at")
    async with docket.redis() as redis:
        raw_created_at = await redis.get(created_at_key)
    if raw_created_at:
        created_at = raw_created_at.decode("utf-8")
    else:
        # Fallback: record is missing/expired, approximate with "now".
        created_at = datetime.now(timezone.utc).isoformat()
    # Human-readable message only for terminal states.
    terminal_messages = {
        ExecutionState.COMPLETED: "Task completed successfully",
        ExecutionState.FAILED: "Task failed",
        ExecutionState.CANCELLED: "Task cancelled",
    }
    params_dict = {
        "taskId": task_id,
        "status": mcp_status,
        "createdAt": created_at,
        "lastUpdatedAt": datetime.now(timezone.utc).isoformat(),
        "ttl": DEFAULT_TTL_MS,
        "pollInterval": poll_interval_ms,
    }
    status_message = terminal_messages.get(state)
    if status_message:
        params_dict["statusMessage"] = status_message
    # No related-task metadata per spec line 454.
    notification = TaskStatusNotification(
        params=TaskStatusNotificationParams.model_validate(params_dict),
    )
    # Best-effort delivery: a failed send must not kill the subscription loop.
    with suppress(Exception):
        await session.send_notification(notification)  # type: ignore[arg-type]
async def _send_progress_notification(
    session: ServerSession,
    task_id: str,
    task_key: str,
    docket: Docket,
    execution: Execution,
    poll_interval_ms: int = 5000,
) -> None:
    """Send notifications/tasks/status carrying the latest progress message.

    Args:
        session: MCP ServerSession
        task_id: Client-visible task ID
        task_key: Internal task key
        docket: Docket instance
        execution: Execution object with current progress
        poll_interval_ms: Poll interval in milliseconds
    """
    # Refresh the execution so progress reflects the newest worker update.
    await execution.sync()
    progress = execution.progress
    # Nothing to report without a progress message.
    if not progress or not progress.message:
        return
    # Unknown Docket states map to "failed" on the MCP side.
    mcp_status = DOCKET_TO_MCP_STATE.get(execution.state, "failed")
    # Session id from the task key locates the stored created_at record.
    session_id = parse_task_key(task_key)["session_id"]
    created_at_key = docket.key(f"fastmcp:task:{session_id}:{task_id}:created_at")
    async with docket.redis() as redis:
        raw_created_at = await redis.get(created_at_key)
    if raw_created_at:
        created_at = raw_created_at.decode("utf-8")
    else:
        created_at = datetime.now(timezone.utc).isoformat()
    params_dict = {
        "taskId": task_id,
        "status": mcp_status,
        "createdAt": created_at,
        "lastUpdatedAt": datetime.now(timezone.utc).isoformat(),
        "ttl": DEFAULT_TTL_MS,
        "pollInterval": poll_interval_ms,
        "statusMessage": progress.message,
    }
    notification = TaskStatusNotification(
        params=TaskStatusNotificationParams.model_validate(params_dict),
    )
    # Best-effort delivery: swallow send failures.
    with suppress(Exception):
        await session.send_notification(notification)  # type: ignore[arg-type]
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/tasks/subscriptions.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:tests/cli/test_tasks.py | """Tests for the fastmcp tasks CLI."""
import pytest
from fastmcp.cli.tasks import check_distributed_backend, tasks_app
from fastmcp.utilities.tests import temporary_settings
class TestCheckDistributedBackend:
    """Tests for check_distributed_backend()."""

    def test_succeeds_with_redis_url(self):
        """A Redis URL is accepted as a distributed backend."""
        with temporary_settings(docket__url="redis://localhost:6379/0"):
            check_distributed_backend()

    def test_exits_with_helpful_error_for_memory_url(self):
        """A memory:// URL is rejected with exit code 1."""
        with temporary_settings(docket__url="memory://test-123"):
            with pytest.raises(SystemExit) as exc_info:
                check_distributed_backend()
        assert isinstance(exc_info.value, SystemExit)
        assert exc_info.value.code == 1
class TestWorkerCommand:
    """Tests for the `worker` subcommand."""

    def test_worker_command_parsing(self):
        """The worker command binds the positional server spec argument."""
        command, bound_args, _ = tasks_app.parse_args(["worker", "server.py"])
        assert callable(command)
        assert command.__name__ == "worker"  # type: ignore[attr-defined]
        assert bound_args.arguments["server_spec"] == "server.py"
class TestTasksAppIntegration:
    """Smoke tests for the tasks CLI app wiring."""

    def test_tasks_app_exists(self):
        """The app advertises itself as the Docket tasks CLI."""
        assert "tasks" in tasks_app.name
        assert "Docket" in tasks_app.help

    def test_tasks_app_has_commands(self):
        """The app carries a name and help text; individual commands are
        exercised in their own test classes."""
        assert "tasks" in tasks_app.name
        assert tasks_app.help
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/cli/test_tasks.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_client_prompt_tasks.py | """
Tests for client-side prompt task methods.
Tests the client's get_prompt_as_task method.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import PromptTask
@pytest.fixture
async def prompt_server():
    """Build a FastMCP server exposing two background-enabled prompts."""
    server = FastMCP("prompt-client-test")

    @server.prompt(task=True)
    async def analysis_prompt(topic: str, style: str = "formal") -> str:
        """Generate an analysis prompt."""
        return f"Analyze {topic} in a {style} style"

    @server.prompt(task=True)
    async def creative_prompt(theme: str) -> str:
        """Generate a creative writing prompt."""
        return f"Write a story about {theme}"

    return server
async def test_get_prompt_as_task_returns_prompt_task(prompt_server):
    """get_prompt with task=True hands back a PromptTask object."""
    async with Client(prompt_server) as client:
        task = await client.get_prompt("analysis_prompt", {"topic": "AI"}, task=True)
        assert isinstance(task, PromptTask)
        assert isinstance(task.task_id, str)
async def test_prompt_task_server_generated_id(prompt_server):
    """The server mints a UUID-style task ID for background prompts."""
    async with Client(prompt_server) as client:
        task = await client.get_prompt(
            "creative_prompt",
            {"theme": "future"},
            task=True,
        )
        generated = task.task_id
        assert generated is not None
        assert isinstance(generated, str)
        # Server-generated UUIDs contain hyphens.
        assert "-" in generated
async def test_prompt_task_result_returns_get_prompt_result(prompt_server):
    """PromptTask.result() resolves to a GetPromptResult with rendered messages."""
    async with Client(prompt_server) as client:
        task = await client.get_prompt(
            "analysis_prompt", {"topic": "Robotics", "style": "casual"}, task=True
        )
        # The call must have gone through the background path.
        assert not task.returned_immediately
        rendered = await task.result()
        # GetPromptResult shape: description plus rendered messages.
        assert hasattr(rendered, "description")
        assert hasattr(rendered, "messages")
        # Inspect the rendered message content rather than the description.
        assert rendered.messages
        assert "Analyze Robotics" in rendered.messages[0].content.text
async def test_prompt_task_await_syntax(prompt_server):
    """Awaiting the PromptTask directly resolves to the prompt result."""
    async with Client(prompt_server) as client:
        task = await client.get_prompt("creative_prompt", {"theme": "ocean"}, task=True)
        rendered = await task
        assert "Write a story about ocean" in rendered.messages[0].content.text
async def test_prompt_task_status_and_wait(prompt_server):
    """status() reports progress and wait() blocks until the prompt completes."""
    async with Client(prompt_server) as client:
        task = await client.get_prompt("analysis_prompt", {"topic": "Space"}, task=True)
        snapshot = await task.status()
        assert snapshot.status in ("working", "completed")
        # Block until the background execution finishes, then read the result.
        await task.wait(timeout=2.0)
        rendered = await task.result()
        assert "Analyze Space" in rendered.messages[0].content.text
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_client_prompt_tasks.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_client_resource_tasks.py | """
Tests for client-side resource task methods.
Tests the client's read_resource_as_task method.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import ResourceTask
@pytest.fixture
async def resource_server():
    """Build a FastMCP server exposing background-enabled resources."""
    server = FastMCP("resource-client-test")

    @server.resource("file://document.txt", task=True)
    async def document() -> str:
        """A document resource."""
        return "Document content here"

    @server.resource("file://data/{id}.json", task=True)
    async def data_file(id: str) -> str:
        """A parameterized data resource."""
        return f'{{"id": "{id}", "value": 42}}'

    return server
async def test_read_resource_as_task_returns_resource_task(resource_server):
    """read_resource with task=True hands back a ResourceTask object."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://document.txt", task=True)
        assert isinstance(task, ResourceTask)
        assert isinstance(task.task_id, str)
async def test_resource_task_server_generated_id(resource_server):
    """The server mints a UUID-style task ID for background reads."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://document.txt", task=True)
        generated = task.task_id
        assert generated is not None
        assert isinstance(generated, str)
        # Server-generated UUIDs contain hyphens.
        assert "-" in generated
async def test_resource_task_result_returns_read_resource_result(resource_server):
    """ResourceTask.result() yields the list of ReadResourceContents."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://document.txt", task=True)
        # The call must have gone through the background path.
        assert not task.returned_immediately
        contents = await task.result()
        assert isinstance(contents, list)
        assert contents
        assert contents[0].text == "Document content here"
async def test_resource_task_await_syntax(resource_server):
    """Awaiting the ResourceTask directly resolves to the contents."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://document.txt", task=True)
        contents = await task
        assert contents[0].text == "Document content here"
async def test_resource_template_task(resource_server):
    """Parameterized resource templates also support background reads."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://data/999.json", task=True)
        # Templates go through the same background path as static resources.
        assert not task.returned_immediately
        contents = await task.result()
        assert '"id": "999"' in contents[0].text
async def test_resource_task_status_and_wait(resource_server):
    """status() reports progress and wait() blocks until the read completes."""
    async with Client(resource_server) as client:
        task = await client.read_resource("file://document.txt", task=True)
        snapshot = await task.status()
        assert snapshot.status in ("working", "completed")
        # Block until done, then read the final contents.
        await task.wait(timeout=2.0)
        contents = await task.result()
        assert "Document content" in contents[0].text
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_client_resource_tasks.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_client_task_notifications.py | """
Tests for client-side handling of notifications/tasks/status (SEP-1686 lines 436-444).
Verifies that Task objects receive notifications, update their cache, wake up wait() calls,
and invoke user callbacks.
"""
import asyncio
import time
import pytest
from mcp.types import GetTaskResult
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def task_notification_server():
    """Build a server whose background tools trigger status notifications."""
    server = FastMCP("task-notification-test")

    @server.tool(task=True)
    async def quick_task(value: int) -> int:
        """Quick background task."""
        await asyncio.sleep(0.05)
        return value * 2

    @server.tool(task=True)
    async def slow_task(duration: float = 0.2) -> str:
        """Slow background task."""
        await asyncio.sleep(duration)
        return "done"

    @server.tool(task=True)
    async def failing_task() -> str:
        """Task that fails."""
        raise ValueError("Intentional failure")

    return server
async def test_task_receives_status_notification(task_notification_server):
    """A Task object receives status notifications and resolves wait()."""
    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 5}, task=True)
        # The completion notification should resolve the wait.
        final = await task.wait(timeout=2.0)
        assert final.status == "completed"
async def test_status_cache_updated_by_notification(task_notification_server):
    """Incoming notifications populate the task's cached status."""
    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 10}, task=True)
        # Completing should cache the final status via notification.
        await task.wait(timeout=2.0)
        # Consecutive status() calls should serve the identical cached
        # object instead of re-querying the server.
        first = await task.status()
        second = await task.status()
        assert first is second
        assert first.status == "completed"
async def test_callback_invoked_on_notification(task_notification_server):
    """A registered sync callback fires when a notification arrives."""
    received = []

    def record_status(status: GetTaskResult):
        """Sync callback."""
        received.append(status)

    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 7}, task=True)
        task.on_status_change(record_status)
        await task.wait(timeout=2.0)
        # Give dispatch a moment to run the callback.
        await asyncio.sleep(0.1)
        assert received
        # At least one delivery must carry the terminal "completed" status.
        assert any(s.status == "completed" for s in received)
async def test_async_callback_invoked(task_notification_server):
    """A registered async callback fires when a notification arrives."""
    received = []

    async def record_status(status: GetTaskResult):
        """Async callback."""
        await asyncio.sleep(0.01)  # Simulate async work
        received.append(status)

    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 3}, task=True)
        task.on_status_change(record_status)
        await task.wait(timeout=2.0)
        # Async callbacks need extra time to finish after the notification.
        await asyncio.sleep(0.2)
        assert received
async def test_multiple_callbacks_all_invoked(task_notification_server):
    """Every registered callback fires on a status notification."""
    first_calls = []
    second_calls = []

    def first_callback(status: GetTaskResult):
        first_calls.append(status.status)

    def second_callback(status: GetTaskResult):
        second_calls.append(status.status)

    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 8}, task=True)
        task.on_status_change(first_callback)
        task.on_status_change(second_callback)
        await task.wait(timeout=2.0)
        await asyncio.sleep(0.1)
        # Both registrations must have been invoked.
        assert first_calls
        assert second_calls
async def test_callback_error_doesnt_break_notification(task_notification_server):
    """A raising callback does not stop later callbacks from running."""
    broken_calls = []
    healthy_calls = []

    def broken_callback(status: GetTaskResult):
        broken_calls.append("called")
        raise ValueError("Callback intentionally fails")

    def healthy_callback(status: GetTaskResult):
        healthy_calls.append(status.status)

    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 12}, task=True)
        # Register the failing callback first so its error could shadow the other.
        task.on_status_change(broken_callback)
        task.on_status_change(healthy_callback)
        await task.wait(timeout=2.0)
        await asyncio.sleep(0.1)
        # The broken callback ran (and raised) ...
        assert broken_calls
        # ... yet the healthy one was still invoked.
        assert healthy_calls
async def test_wait_wakes_early_on_notification(task_notification_server):
    """wait() returns as soon as a notification lands, not at the next poll."""
    async with Client(task_notification_server) as client:
        task = await client.call_tool("quick_task", {"value": 15}, task=True)
        started = time.time()
        status = await task.wait(timeout=5.0)
        elapsed = time.time() - started
        # With push notifications the quick task finishes well under the
        # 500ms polling fallback; 1.0s is a deliberately generous ceiling.
        assert elapsed < 1.0
        assert status.status == "completed"
async def test_notification_with_failed_task(task_notification_server):
    """Failed tasks also propagate their status through notifications."""
    async with Client(task_notification_server) as client:
        task = await client.call_tool("failing_task", {}, task=True)
        with pytest.raises(Exception):
            await task
        status = await task.status()
        assert status.status == "failed"
        # Per spec, error details travel in statusMessage.
        assert status.statusMessage is not None
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_client_task_notifications.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_client_task_protocol.py | """
Tests for client-side task protocol.
Generic protocol tests that use tools as test fixtures.
"""
import asyncio
from fastmcp import FastMCP
from fastmcp.client import Client
async def test_end_to_end_task_flow():
    """Submit a task, observe it running, then release it and collect the result."""
    started = asyncio.Event()
    release = asyncio.Event()
    mcp = FastMCP("protocol-test")

    @mcp.tool(task=True)
    async def controlled_tool(message: str) -> str:
        """Tool with controlled execution."""
        started.set()
        await release.wait()
        return f"Processed: {message}"

    async with Client(mcp) as client:
        task = await client.call_tool(
            "controlled_tool", {"message": "integration test"}, task=True
        )
        # Block until the tool body is actually executing.
        await asyncio.wait_for(started.wait(), timeout=2.0)
        # While the tool is paused the task must report "working".
        snapshot = await task.status()
        assert snapshot.status == "working"
        # Let the tool finish, then fetch the final result.
        release.set()
        result = await task.result()
        assert result.data == "Processed: integration test"
async def test_multiple_concurrent_tasks():
    """Several tasks may be in flight at once, each with its own result."""
    mcp = FastMCP("concurrent-test")

    @mcp.tool(task=True)
    async def multiply(a: int, b: int) -> int:
        return a * b

    async with Client(mcp) as client:
        # Submit five tasks, remembering each expected product.
        submitted = []
        for i in range(5):
            handle = await client.call_tool("multiply", {"a": i, "b": 2}, task=True)
            submitted.append((handle, i * 2))
        # Collect and verify every result.
        for handle, expected in submitted:
            result = await handle.result()
            assert result.data == expected
async def test_task_id_auto_generation():
    """Submissions without an explicit task ID receive distinct generated ones."""
    mcp = FastMCP("id-test")

    @mcp.tool(task=True)
    async def echo(message: str) -> str:
        return f"Echo: {message}"

    async with Client(mcp) as client:
        first = await client.call_tool("echo", {"message": "first"}, task=True)
        second = await client.call_tool("echo", {"message": "second"}, task=True)
        # IDs are non-empty and unique per submission.
        assert first.task_id != second.task_id
        assert first.task_id
        assert second.task_id
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_client_task_protocol.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_client_tool_tasks.py | """
Tests for client-side tool task methods.
Tests the client's tool-specific task functionality, parallel to
test_client_prompt_tasks.py and test_client_resource_tasks.py.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import ToolTask
@pytest.fixture
async def tool_task_server():
    """Build a FastMCP server exposing two task-enabled tools."""
    server = FastMCP("tool-task-test")

    @server.tool(task=True)
    async def echo(message: str) -> str:
        """Echo back the message."""
        return f"Echo: {message}"

    @server.tool(task=True)
    async def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    return server
async def test_call_tool_as_task_returns_tool_task(tool_task_server):
    """call_tool with task=True hands back a ToolTask object."""
    async with Client(tool_task_server) as client:
        task = await client.call_tool("echo", {"message": "hello"}, task=True)
        assert isinstance(task, ToolTask)
        assert isinstance(task.task_id, str)
        assert task.task_id
async def test_tool_task_server_generated_id(tool_task_server):
    """The server mints a UUID-style task ID for background tool calls."""
    async with Client(tool_task_server) as client:
        task = await client.call_tool("echo", {"message": "test"}, task=True)
        generated = task.task_id
        assert generated is not None
        assert isinstance(generated, str)
        # Server-generated UUIDs contain hyphens.
        assert "-" in generated
async def test_tool_task_result_returns_call_tool_result(tool_task_server):
    """ToolTask.result() resolves to a CallToolResult carrying the output."""
    async with Client(tool_task_server) as client:
        task = await client.call_tool("multiply", {"a": 6, "b": 7}, task=True)
        # The call must have gone through the background path.
        assert not task.returned_immediately
        outcome = await task.result()
        assert outcome.data == 42
async def test_tool_task_await_syntax(tool_task_server):
"""Tool tasks can be awaited directly to get result."""
async with Client(tool_task_server) as client:
task = await client.call_tool("multiply", {"a": 7, "b": 6}, task=True)
# Can await task directly (syntactic sugar for task.result())
result = await task
assert result.data == 42
async def test_tool_task_status_and_wait(tool_task_server):
"""ToolTask.status() returns GetTaskResult."""
async with Client(tool_task_server) as client:
task = await client.call_tool("echo", {"message": "test"}, task=True)
status = await task.status()
assert status.taskId == task.task_id
assert status.status in ["working", "completed"]
# Wait for completion
await task.wait(timeout=2.0)
final_status = await task.status()
assert final_status.status == "completed"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_client_tool_tasks.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_task_context_validation.py | """
Tests for Task client context validation.
Verifies that Task methods properly validate client context and that
cached results remain accessible outside context.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def task_server():
"""Create a test server with background tasks."""
mcp = FastMCP("context-test-server")
@mcp.tool(task=True)
async def background_tool(value: str) -> str:
"""Tool that runs in background."""
return f"Result: {value}"
@mcp.prompt(task=True)
async def background_prompt(topic: str) -> str:
"""Prompt that runs in background."""
return f"Prompt about {topic}"
@mcp.resource("file://background.txt", task=True)
async def background_resource() -> str:
"""Resource that runs in background."""
return "Background resource content"
return mcp
async def test_task_status_outside_context_raises(task_server):
"""Calling task.status() outside client context raises error."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Now outside context
with pytest.raises(RuntimeError, match="outside client context"):
await task.status()
async def test_task_result_outside_context_raises(task_server):
"""Calling task.result() outside context raises error."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Now outside context
with pytest.raises(RuntimeError, match="outside client context"):
await task.result()
async def test_task_wait_outside_context_raises(task_server):
"""Calling task.wait() outside context raises error."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Now outside context
with pytest.raises(RuntimeError, match="outside client context"):
await task.wait()
async def test_task_cancel_outside_context_raises(task_server):
"""Calling task.cancel() outside context raises error."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Now outside context
with pytest.raises(RuntimeError, match="outside client context"):
await task.cancel()
async def test_cached_tool_task_accessible_outside_context(task_server):
"""Tool tasks with cached results work outside context."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Get result once to cache it
result1 = await task.result()
assert result1.data == "Result: test"
# Now outside context
# Should work because result is cached
result2 = await task.result()
assert result2 is result1 # Same object
assert result2.data == "Result: test"
async def test_cached_prompt_task_accessible_outside_context(task_server):
"""Prompt tasks with cached results work outside context."""
task = None
async with Client(task_server) as client:
task = await client.get_prompt(
"background_prompt", {"topic": "test"}, task=True
)
assert not task.returned_immediately
# Get result once to cache it
result1 = await task.result()
assert result1.description == "Prompt that runs in background."
# Now outside context
# Should work because result is cached
result2 = await task.result()
assert result2 is result1 # Same object
assert result2.description == "Prompt that runs in background."
async def test_cached_resource_task_accessible_outside_context(task_server):
"""Resource tasks with cached results work outside context."""
task = None
async with Client(task_server) as client:
task = await client.read_resource("file://background.txt", task=True)
assert not task.returned_immediately
# Get result once to cache it
result1 = await task.result()
assert len(result1) > 0
# Now outside context
# Should work because result is cached
result2 = await task.result()
assert result2 is result1 # Same object
async def test_uncached_status_outside_context_raises(task_server):
"""Even after caching result, status() still requires client context."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Cache the result
await task.result()
# Now outside context
# result() works (cached)
result = await task.result()
assert result.data == "Result: test"
# But status() still needs client connection
with pytest.raises(RuntimeError, match="outside client context"):
await task.status()
async def test_task_await_syntax_outside_context_raises(task_server):
"""Using await task syntax outside context raises error for background tasks."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
assert not task.returned_immediately
# Now outside context
with pytest.raises(RuntimeError, match="outside client context"):
await task # Same as await task.result()
async def test_task_await_syntax_works_for_cached_results(task_server):
"""Using await task syntax works outside context when result is cached."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
result1 = await task # Cache it
# Now outside context
result2 = await task # Should work (cached)
assert result2 is result1
assert result2.data == "Result: test"
async def test_multiple_result_calls_return_same_cached_object(task_server):
"""Multiple result() calls return the same cached object."""
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# Should all be the same object (cached)
assert result1 is result2
assert result2 is result3
async def test_background_task_properties_accessible_outside_context(task_server):
"""Background task properties like task_id accessible outside context."""
task = None
async with Client(task_server) as client:
task = await client.call_tool("background_tool", {"value": "test"}, task=True)
task_id_inside = task.task_id
assert not task.returned_immediately
# Now outside context
# Properties should still be accessible (they don't need client connection)
assert task.task_id == task_id_inside
assert task.returned_immediately is False
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_task_context_validation.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/client/tasks/test_task_result_caching.py | """
Tests for Task result caching behavior.
Verifies that Task.result() and await task cache results properly to avoid
redundant server calls and ensure consistent object identity.
"""
from fastmcp import FastMCP
from fastmcp.client import Client
async def test_tool_task_result_cached_on_first_call():
"""First call caches result, subsequent calls return cached value."""
call_count = 0
mcp = FastMCP("test")
@mcp.tool(task=True)
async def counting_tool() -> int:
nonlocal call_count
call_count += 1
return call_count
async with Client(mcp) as client:
task = await client.call_tool("counting_tool", task=True)
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# All should return 1 (first execution value)
assert result1.data == 1
assert result2.data == 1
assert result3.data == 1
# Verify they're the same object (cached)
assert result1 is result2 is result3
async def test_prompt_task_result_cached():
"""PromptTask caches results on first call."""
call_count = 0
mcp = FastMCP("test")
@mcp.prompt(task=True)
async def counting_prompt() -> str:
nonlocal call_count
call_count += 1
return f"Call number: {call_count}"
async with Client(mcp) as client:
task = await client.get_prompt("counting_prompt", task=True)
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# All should return same content
assert result1.messages[0].content.text == "Call number: 1"
assert result2.messages[0].content.text == "Call number: 1"
assert result3.messages[0].content.text == "Call number: 1"
# Verify they're the same object (cached)
assert result1 is result2 is result3
async def test_resource_task_result_cached():
"""ResourceTask caches results on first call."""
call_count = 0
mcp = FastMCP("test")
@mcp.resource("file://counter.txt", task=True)
async def counting_resource() -> str:
nonlocal call_count
call_count += 1
return f"Count: {call_count}"
async with Client(mcp) as client:
task = await client.read_resource("file://counter.txt", task=True)
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# All should return same content
assert result1[0].text == "Count: 1"
assert result2[0].text == "Count: 1"
assert result3[0].text == "Count: 1"
# Verify they're the same object (cached)
assert result1 is result2 is result3
async def test_multiple_await_returns_same_object():
"""Multiple await task calls return identical object."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def sample_tool() -> str:
return "result"
async with Client(mcp) as client:
task = await client.call_tool("sample_tool", task=True)
result1 = await task
result2 = await task
result3 = await task
# Should be exact same object in memory
assert result1 is result2 is result3
assert id(result1) == id(result2) == id(result3)
async def test_result_and_await_share_cache():
"""task.result() and await task share the same cache."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def sample_tool() -> str:
return "cached"
async with Client(mcp) as client:
task = await client.call_tool("sample_tool", task=True)
# Call result() first
result_via_method = await task.result()
# Then await directly
result_via_await = await task
# Should be the same cached object
assert result_via_method is result_via_await
assert id(result_via_method) == id(result_via_await)
async def test_forbidden_mode_tool_caches_error_result():
"""Tools with task=False (mode=forbidden) cache error results."""
mcp = FastMCP("test")
@mcp.tool(task=False)
async def non_task_tool() -> int:
return 1
async with Client(mcp) as client:
# Request as task, but mode="forbidden" will reject with error
task = await client.call_tool("non_task_tool", task=True)
# Should be immediate (error returned immediately)
assert task.returned_immediately
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# All should return cached error
assert result1.is_error
assert "does not support task-augmented execution" in str(result1)
# Verify they're the same object (cached)
assert result1 is result2 is result3
async def test_forbidden_mode_prompt_raises_error():
"""Prompts with task=False (mode=forbidden) raise error."""
import pytest
from mcp.shared.exceptions import McpError
mcp = FastMCP("test")
@mcp.prompt(task=False)
async def non_task_prompt() -> str:
return "Immediate"
async with Client(mcp) as client:
# Prompts with mode="forbidden" raise McpError when called with task=True
with pytest.raises(McpError):
await client.get_prompt("non_task_prompt", task=True)
async def test_forbidden_mode_resource_raises_error():
"""Resources with task=False (mode=forbidden) raise error."""
import pytest
from mcp.shared.exceptions import McpError
mcp = FastMCP("test")
@mcp.resource("file://immediate.txt", task=False)
async def non_task_resource() -> str:
return "Immediate"
async with Client(mcp) as client:
# Resources with mode="forbidden" raise McpError when called with task=True
with pytest.raises(McpError):
await client.read_resource("file://immediate.txt", task=True)
async def test_immediate_task_caches_result():
"""Immediate tasks (optional mode called without background) cache results."""
call_count = 0
mcp = FastMCP("test", tasks=True)
# Tool with task=True (optional mode) - but without docket will execute immediately
@mcp.tool(task=True)
async def task_tool() -> int:
nonlocal call_count
call_count += 1
return call_count
async with Client(mcp) as client:
# Call with task=True
task = await client.call_tool("task_tool", task=True)
# Get result multiple times
result1 = await task.result()
result2 = await task.result()
result3 = await task.result()
# All should return cached value
assert result1.data == 1
assert result2.data == 1
assert result3.data == 1
# Verify they're the same object (cached)
assert result1 is result2 is result3
async def test_cache_persists_across_mixed_access_patterns():
"""Cache works correctly when mixing result() and await."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def mixed_tool() -> str:
return "mixed"
async with Client(mcp) as client:
task = await client.call_tool("mixed_tool", task=True)
# Access in various orders
result1 = await task
result2 = await task.result()
result3 = await task
result4 = await task.result()
# All should be the same cached object
assert result1 is result2 is result3 is result4
async def test_different_tasks_have_separate_caches():
"""Different task instances maintain separate caches."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def separate_tool(value: str) -> str:
return f"Result: {value}"
async with Client(mcp) as client:
task1 = await client.call_tool("separate_tool", {"value": "A"}, task=True)
task2 = await client.call_tool("separate_tool", {"value": "B"}, task=True)
result1 = await task1.result()
result2 = await task2.result()
# Different results
assert result1.data == "Result: A"
assert result2.data == "Result: B"
# Not the same object
assert result1 is not result2
# But each task's cache works independently
result1_again = await task1.result()
result2_again = await task2.result()
assert result1 is result1_again
assert result2 is result2_again
async def test_cache_survives_status_checks():
"""Calling status() doesn't affect result caching."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def status_check_tool() -> str:
return "status"
async with Client(mcp) as client:
task = await client.call_tool("status_check_tool", task=True)
# Check status multiple times
await task.status()
await task.status()
result1 = await task.result()
# Check status again
await task.status()
result2 = await task.result()
# Cache should still work
assert result1 is result2
async def test_cache_survives_wait_calls():
"""Calling wait() doesn't affect result caching."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def wait_test_tool() -> str:
return "waited"
async with Client(mcp) as client:
task = await client.call_tool("wait_test_tool", task=True)
# Wait for completion
await task.wait()
result1 = await task.result()
# Wait again (no-op since completed)
await task.wait()
result2 = await task.result()
# Cache should still work
assert result1 is result2
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/client/tasks/test_task_result_caching.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_progress_dependency.py | """Tests for FastMCP Progress dependency."""
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.dependencies import Progress
async def test_progress_in_immediate_execution():
"""Test Progress dependency when calling tool immediately with Docket enabled."""
mcp = FastMCP("test")
@mcp.tool()
async def test_tool(progress: Progress = Progress()) -> str:
await progress.set_total(10)
await progress.increment()
await progress.set_message("Testing")
return "done"
async with Client(mcp) as client:
result = await client.call_tool("test_tool", {})
from mcp.types import TextContent
assert isinstance(result.content[0], TextContent)
assert result.content[0].text == "done"
async def test_progress_in_background_task():
"""Test Progress dependency in background task execution."""
mcp = FastMCP("test")
@mcp.tool(task=True)
async def test_task(progress: Progress = Progress()) -> str:
await progress.set_total(5)
await progress.increment()
await progress.set_message("Step 1")
return "done"
async with Client(mcp) as client:
task = await client.call_tool("test_task", {}, task=True)
result = await task.result()
from mcp.types import TextContent
assert isinstance(result.content[0], TextContent)
assert result.content[0].text == "done"
async def test_progress_tracks_multiple_increments():
"""Test that Progress correctly tracks multiple increment calls."""
mcp = FastMCP("test")
@mcp.tool()
async def count_to_ten(progress: Progress = Progress()) -> str:
await progress.set_total(10)
for i in range(10):
await progress.increment()
return "counted"
async with Client(mcp) as client:
result = await client.call_tool("count_to_ten", {})
from mcp.types import TextContent
assert isinstance(result.content[0], TextContent)
assert result.content[0].text == "counted"
async def test_progress_status_message_in_background_task():
"""Regression test: TaskStatusResponse must include statusMessage field."""
import asyncio
mcp = FastMCP("test")
step_started = asyncio.Event()
@mcp.tool(task=True)
async def task_with_progress(progress: Progress = Progress()) -> str:
await progress.set_total(3)
await progress.set_message("Step 1 of 3")
await progress.increment()
step_started.set()
# Give test time to poll status
await asyncio.sleep(0.2)
await progress.set_message("Step 2 of 3")
await progress.increment()
await progress.set_message("Step 3 of 3")
await progress.increment()
return "done"
async with Client(mcp) as client:
task = await client.call_tool("task_with_progress", {}, task=True)
# Wait for first step to start
await step_started.wait()
# Get status and verify progress message
status = await task.status()
# Verify statusMessage field is accessible and contains progress info
# Should not raise AttributeError
msg = status.statusMessage
assert msg is None or msg.startswith("Step")
# Wait for completion
result = await task.result()
from mcp.types import TextContent
assert isinstance(result.content[0], TextContent)
assert result.content[0].text == "done"
async def test_inmemory_progress_state():
"""Test that in-memory progress stores and returns state correctly."""
mcp = FastMCP("test")
@mcp.tool()
async def test_tool(progress: Progress = Progress()) -> dict:
# Initial state
assert progress.current is None
assert progress.total == 1
assert progress.message is None
# Set total
await progress.set_total(10)
assert progress.total == 10
# Increment
await progress.increment()
assert progress.current == 1
# Increment again
await progress.increment(2)
assert progress.current == 3
# Set message
await progress.set_message("Testing")
assert progress.message == "Testing"
return {
"current": progress.current,
"total": progress.total,
"message": progress.message,
}
async with Client(mcp) as client:
result = await client.call_tool("test_tool", {})
from mcp.types import TextContent
assert isinstance(result.content[0], TextContent)
# The tool returns a dict showing the final state
import json
state = json.loads(result.content[0].text)
assert state["current"] == 3
assert state["total"] == 10
assert state["message"] == "Testing"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_progress_dependency.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_server_tasks_parameter.py | """
Tests for server `tasks` parameter default inheritance.
Verifies that the server's `tasks` parameter correctly sets defaults for all
components (tools, prompts, resources), and that explicit component-level
settings properly override the server default.
"""
from fastmcp import FastMCP
from fastmcp.client import Client
async def test_server_tasks_true_defaults_all_components():
"""Server with tasks=True makes all components default to supporting tasks."""
mcp = FastMCP("test", tasks=True)
@mcp.tool()
async def my_tool() -> str:
return "tool result"
@mcp.prompt()
async def my_prompt() -> str:
return "prompt result"
@mcp.resource("test://resource")
async def my_resource() -> str:
return "resource result"
async with Client(mcp) as client:
# Verify all task-enabled components are registered with docket
# Components use prefixed keys: tool:name, prompt:name, resource:uri
docket = mcp.docket
assert docket is not None
assert "tool:my_tool@" in docket.tasks
assert "prompt:my_prompt@" in docket.tasks
assert "resource:test://resource@" in docket.tasks
# Tool should support background execution
tool_task = await client.call_tool("my_tool", task=True)
assert not tool_task.returned_immediately
# Prompt should support background execution
prompt_task = await client.get_prompt("my_prompt", task=True)
assert not prompt_task.returned_immediately
# Resource should support background execution
resource_task = await client.read_resource("test://resource", task=True)
assert not resource_task.returned_immediately
async def test_server_tasks_false_defaults_all_components():
"""Server with tasks=False makes all components default to mode=forbidden."""
import pytest
from mcp.shared.exceptions import McpError
mcp = FastMCP("test", tasks=False)
@mcp.tool()
async def my_tool() -> str:
return "tool result"
@mcp.prompt()
async def my_prompt() -> str:
return "prompt result"
@mcp.resource("test://resource")
async def my_resource() -> str:
return "resource result"
async with Client(mcp) as client:
# Tool with mode="forbidden" returns error when called with task=True
tool_task = await client.call_tool("my_tool", task=True)
assert tool_task.returned_immediately
result = await tool_task.result()
assert result.is_error
assert "does not support task-augmented execution" in str(result)
# Prompt with mode="forbidden" raises McpError when called with task=True
with pytest.raises(McpError):
await client.get_prompt("my_prompt", task=True)
# Resource with mode="forbidden" raises McpError when called with task=True
with pytest.raises(McpError):
await client.read_resource("test://resource", task=True)
async def test_server_tasks_none_defaults_to_false():
"""Server with tasks=None (or omitted) defaults to False."""
mcp = FastMCP("test") # tasks=None, defaults to False
@mcp.tool()
async def my_tool() -> str:
return "tool result"
async with Client(mcp) as client:
# Tool should NOT support background execution (mode="forbidden" from default)
tool_task = await client.call_tool("my_tool", task=True)
assert tool_task.returned_immediately
result = await tool_task.result()
assert result.is_error
assert "does not support task-augmented execution" in str(result)
async def test_component_explicit_false_overrides_server_true():
"""Component with task=False overrides server default of tasks=True."""
mcp = FastMCP("test", tasks=True)
@mcp.tool(task=False)
async def no_task_tool() -> str:
return "immediate result"
@mcp.tool()
async def default_tool() -> str:
return "background result"
async with Client(mcp) as client:
# Verify docket registration matches task settings (prefixed keys)
docket = mcp.docket
assert docket is not None
assert (
"tool:no_task_tool@" not in docket.tasks
) # task=False means not registered
assert "tool:default_tool@" in docket.tasks # Inherits tasks=True
# Explicit False (mode="forbidden") returns error when called with task=True
no_task = await client.call_tool("no_task_tool", task=True)
assert no_task.returned_immediately
result = await no_task.result()
assert result.is_error
assert "does not support task-augmented execution" in str(result)
# Default should support background execution
default_task = await client.call_tool("default_tool", task=True)
assert not default_task.returned_immediately
async def test_component_explicit_true_overrides_server_false():
"""Component with task=True overrides server default of tasks=False."""
mcp = FastMCP("test", tasks=False)
@mcp.tool(task=True)
async def task_tool() -> str:
return "background result"
@mcp.tool()
async def default_tool() -> str:
return "immediate result"
async with Client(mcp) as client:
# Verify docket registration matches task settings (prefixed keys)
docket = mcp.docket
assert docket is not None
assert "tool:task_tool@" in docket.tasks # task=True means registered
assert "tool:default_tool@" not in docket.tasks # Inherits tasks=False
# Explicit True should support background execution despite server default
task = await client.call_tool("task_tool", task=True)
assert not task.returned_immediately
# Default (mode="forbidden") returns error when called with task=True
default = await client.call_tool("default_tool", task=True)
assert default.returned_immediately
result = await default.result()
assert result.is_error
async def test_mixed_explicit_and_inherited():
"""Mix of explicit True/False/None on different components."""
import pytest
from mcp.shared.exceptions import McpError
mcp = FastMCP("test", tasks=True) # Server default is True
@mcp.tool()
async def inherited_tool() -> str:
return "inherits True"
@mcp.tool(task=True)
async def explicit_true_tool() -> str:
return "explicit True"
@mcp.tool(task=False)
async def explicit_false_tool() -> str:
return "explicit False"
@mcp.prompt()
async def inherited_prompt() -> str:
return "inherits True"
@mcp.prompt(task=False)
async def explicit_false_prompt() -> str:
return "explicit False"
@mcp.resource("test://inherited")
async def inherited_resource() -> str:
return "inherits True"
@mcp.resource("test://explicit_false", task=False)
async def explicit_false_resource() -> str:
return "explicit False"
async with Client(mcp) as client:
# Verify docket registration matches task settings
# Components use prefixed keys: tool:name, prompt:name, resource:uri
docket = mcp.docket
assert docket is not None
# task=True (explicit or inherited) means registered (with prefixed keys)
assert "tool:inherited_tool@" in docket.tasks
assert "tool:explicit_true_tool@" in docket.tasks
assert "prompt:inherited_prompt@" in docket.tasks
assert "resource:test://inherited@" in docket.tasks
# task=False means NOT registered
assert "tool:explicit_false_tool@" not in docket.tasks
assert "prompt:explicit_false_prompt@" not in docket.tasks
assert "resource:test://explicit_false@" not in docket.tasks
# Tools
inherited = await client.call_tool("inherited_tool", task=True)
assert not inherited.returned_immediately
explicit_true = await client.call_tool("explicit_true_tool", task=True)
assert not explicit_true.returned_immediately
# Explicit False (mode="forbidden") returns error
explicit_false = await client.call_tool("explicit_false_tool", task=True)
assert explicit_false.returned_immediately
result = await explicit_false.result()
assert result.is_error
# Prompts
inherited_prompt_task = await client.get_prompt("inherited_prompt", task=True)
assert not inherited_prompt_task.returned_immediately
# Explicit False prompt (mode="forbidden") raises McpError
with pytest.raises(McpError):
await client.get_prompt("explicit_false_prompt", task=True)
# Resources
inherited_resource_task = await client.read_resource(
"test://inherited", task=True
)
assert not inherited_resource_task.returned_immediately
# Explicit False resource (mode="forbidden") raises McpError
with pytest.raises(McpError):
await client.read_resource("test://explicit_false", task=True)
async def test_server_tasks_parameter_sets_component_defaults():
"""Server tasks parameter sets component defaults."""
# Server tasks=True sets component defaults
mcp = FastMCP("test", tasks=True)
@mcp.tool()
async def tool_inherits_true() -> str:
return "tool result"
async with Client(mcp) as client:
# Tool inherits tasks=True from server
tool_task = await client.call_tool("tool_inherits_true", task=True)
assert not tool_task.returned_immediately
# Server tasks=False sets component defaults
mcp2 = FastMCP("test2", tasks=False)
@mcp2.tool()
async def tool_inherits_false() -> str:
return "tool result"
async with Client(mcp2) as client:
# Tool inherits tasks=False (mode="forbidden") - returns error
tool_task = await client.call_tool("tool_inherits_false", task=True)
assert tool_task.returned_immediately
result = await tool_task.result()
assert result.is_error
async def test_resource_template_inherits_server_tasks_default():
"""Resource templates inherit server tasks default."""
mcp = FastMCP("test", tasks=True)
@mcp.resource("test://{item_id}")
async def templated_resource(item_id: str) -> str:
return f"resource {item_id}"
async with Client(mcp) as client:
# Template should support background execution
resource_task = await client.read_resource("test://123", task=True)
assert not resource_task.returned_immediately
async def test_multiple_components_same_name_different_tasks():
"""Different component types with same name can have different task settings."""
import pytest
from mcp.shared.exceptions import McpError
mcp = FastMCP("test", tasks=False)
@mcp.tool(task=True)
async def shared_name() -> str:
return "tool result"
@mcp.prompt()
async def shared_name_prompt() -> str:
return "prompt result"
async with Client(mcp) as client:
# Tool with explicit True should support background execution
tool_task = await client.call_tool("shared_name", task=True)
assert not tool_task.returned_immediately
# Prompt inheriting False (mode="forbidden") raises McpError
with pytest.raises(McpError):
await client.get_prompt("shared_name_prompt", task=True)
async def test_task_with_custom_tool_name():
"""Tools with custom names work correctly as tasks (issue #2642).
When a tool is registered with a custom name different from the function
name, task execution should use the custom name for Docket lookup.
"""
mcp = FastMCP("test", tasks=True)
async def my_function() -> str:
return "result from custom-named tool"
mcp.tool(my_function, name="custom-tool-name")
async with Client(mcp) as client:
# Verify the tool is registered with its custom name in Docket (prefixed key)
docket = mcp.docket
assert docket is not None
assert "tool:custom-tool-name@" in docket.tasks
# Call the tool as a task using its custom name
task = await client.call_tool("custom-tool-name", task=True)
assert not task.returned_immediately
result = await task
assert result.data == "result from custom-named tool"
async def test_task_with_custom_resource_name():
    """Resources with custom names work correctly as tasks.

    Resources are registered/looked up by their .key (URI), not their name.
    """
    server = FastMCP("test", tasks=True)

    @server.resource("test://resource", name="custom-resource-name")
    async def my_resource_func() -> str:
        return "result from custom-named resource"

    async with Client(server) as client:
        # Docket keys resources by prefixed URI, not by display name.
        docket = server.docket
        assert docket is not None
        assert "resource:test://resource@" in docket.tasks

        submitted = await client.read_resource("test://resource", task=True)
        assert not submitted.returned_immediately
        contents = await submitted.result()
        assert contents[0].text == "result from custom-named resource"
async def test_task_with_custom_template_name():
    """Resource templates with custom names work correctly as tasks.

    Templates are registered/looked up by their .key (uri_template), not their name.
    """
    server = FastMCP("test", tasks=True)

    @server.resource("test://{item_id}", name="custom-template-name")
    async def my_template_func(item_id: str) -> str:
        return f"result for {item_id}"

    async with Client(server) as client:
        # Docket keys templates by their prefixed uri_template.
        docket = server.docket
        assert docket is not None
        assert "template:test://{item_id}@" in docket.tasks

        submitted = await client.read_resource("test://123", task=True)
        assert not submitted.returned_immediately
        contents = await submitted.result()
        assert contents[0].text == "result for 123"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_server_tasks_parameter.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_sync_function_task_disabled.py | """
Tests that synchronous functions cannot be used as background tasks.
Docket requires async functions for background execution. FastMCP raises
ValueError when task=True is used with a sync function.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.prompts.function_prompt import FunctionPrompt
from fastmcp.resources.function_resource import FunctionResource
from fastmcp.tools.function_tool import FunctionTool
async def test_sync_tool_with_explicit_task_true_raises():
    """Sync tool with task=True raises ValueError."""
    server = FastMCP("test")

    def sync_tool(x: int) -> int:
        """A synchronous tool."""
        return x * 2

    # Registration itself fails: Docket can only execute async functions.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.tool(task=True)(sync_tool)
async def test_sync_tool_with_inherited_task_true_raises():
    """Sync tool inheriting task=True from server raises ValueError."""
    server = FastMCP("test", tasks=True)

    def sync_tool(x: int) -> int:
        """A synchronous tool."""
        return x * 2

    # No explicit task flag: the tool inherits task=True from the server,
    # which is incompatible with a sync function.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.tool()(sync_tool)
async def test_sync_prompt_with_explicit_task_true_raises():
    """Sync prompt with task=True raises ValueError."""
    server = FastMCP("test")

    def sync_prompt() -> str:
        """A synchronous prompt."""
        return "Hello"

    # Registration itself fails: Docket can only execute async functions.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.prompt(task=True)(sync_prompt)
async def test_sync_prompt_with_inherited_task_true_raises():
    """Sync prompt inheriting task=True from server raises ValueError."""
    server = FastMCP("test", tasks=True)

    def sync_prompt() -> str:
        """A synchronous prompt."""
        return "Hello"

    # The prompt inherits task=True from the server, which is
    # incompatible with a sync function.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.prompt()(sync_prompt)
async def test_sync_resource_with_explicit_task_true_raises():
    """Sync resource with task=True raises ValueError."""
    server = FastMCP("test")

    def sync_resource() -> str:
        """A synchronous resource."""
        return "data"

    # Registration itself fails: Docket can only execute async functions.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.resource("test://sync", task=True)(sync_resource)
async def test_sync_resource_with_inherited_task_true_raises():
    """Sync resource inheriting task=True from server raises ValueError."""
    server = FastMCP("test", tasks=True)

    def sync_resource() -> str:
        """A synchronous resource."""
        return "data"

    # The resource inherits task=True from the server, which is
    # incompatible with a sync function.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        server.resource("test://sync")(sync_resource)
async def test_async_tool_with_task_true_remains_enabled():
    """Async tools with task=True keep task support enabled."""
    server = FastMCP("test")

    @server.tool(task=True)
    async def async_tool(x: int) -> int:
        """An async tool."""
        return x * 2

    # Async functions are accepted: the registered FunctionTool keeps
    # task mode "optional".
    registered = await server.get_tool("async_tool")
    assert isinstance(registered, FunctionTool)
    assert registered.task_config.mode == "optional"
async def test_async_prompt_with_task_true_remains_enabled():
    """Async prompts with task=True keep task support enabled."""
    server = FastMCP("test")

    @server.prompt(task=True)
    async def async_prompt() -> str:
        """An async prompt."""
        return "Hello"

    # Async functions are accepted: the registered FunctionPrompt keeps
    # task mode "optional".
    registered = await server.get_prompt("async_prompt")
    assert isinstance(registered, FunctionPrompt)
    assert registered.task_config.mode == "optional"
async def test_async_resource_with_task_true_remains_enabled():
    """Async resources with task=True keep task support enabled."""
    server = FastMCP("test")

    @server.resource("test://async", task=True)
    async def async_resource() -> str:
        """An async resource."""
        return "data"

    # Async functions are accepted: the registered FunctionResource keeps
    # task mode "optional".
    registered = await server.get_resource("test://async")
    assert isinstance(registered, FunctionResource)
    assert registered.task_config.mode == "optional"
async def test_sync_tool_with_task_false_works():
    """Sync tool with explicit task=False works (no error)."""
    server = FastMCP("test", tasks=True)

    @server.tool(task=False)  # Explicit opt-out overrides the server default
    def sync_tool(x: int) -> int:
        """A synchronous tool."""
        return x * 2

    # Registration succeeds and task mode ends up "forbidden".
    registered = await server.get_tool("sync_tool")
    assert isinstance(registered, FunctionTool)
    assert registered.task_config.mode == "forbidden"
async def test_sync_prompt_with_task_false_works():
    """Sync prompt with explicit task=False works (no error)."""
    server = FastMCP("test", tasks=True)

    @server.prompt(task=False)  # Explicit opt-out overrides the server default
    def sync_prompt() -> str:
        """A synchronous prompt."""
        return "Hello"

    # Registration succeeds and task mode ends up "forbidden".
    registered = await server.get_prompt("sync_prompt")
    assert isinstance(registered, FunctionPrompt)
    assert registered.task_config.mode == "forbidden"
async def test_sync_resource_with_task_false_works():
    """Sync resource with explicit task=False works (no error)."""
    server = FastMCP("test", tasks=True)

    @server.resource("test://sync", task=False)  # Explicit opt-out
    def sync_resource() -> str:
        """A synchronous resource."""
        return "data"

    # Registration succeeds and task mode ends up "forbidden".
    registered = await server.get_resource("test://sync")
    assert isinstance(registered, FunctionResource)
    assert registered.task_config.mode == "forbidden"
# =============================================================================
# Callable classes and staticmethods with async __call__
# =============================================================================
async def test_async_callable_class_tool_with_task_true_works():
    """Callable class with async __call__ and task=True should work."""
    from fastmcp.tools import Tool

    class AsyncCallableTool:
        async def __call__(self, x: int) -> int:
            return x * 2

    # Callable instances are converted via Tool.from_function() directly;
    # an async __call__ keeps task mode "optional".
    converted = Tool.from_function(AsyncCallableTool(), task=True)
    assert converted.task_config.mode == "optional"
async def test_async_callable_class_prompt_with_task_true_works():
    """Callable class with async __call__ and task=True should work."""
    from fastmcp.prompts import Prompt

    class AsyncCallablePrompt:
        async def __call__(self) -> str:
            return "Hello"

    # Callable instances are converted via Prompt.from_function() directly;
    # an async __call__ keeps task mode "optional".
    converted = Prompt.from_function(AsyncCallablePrompt(), task=True)
    assert converted.task_config.mode == "optional"
async def test_sync_callable_class_tool_with_task_true_raises():
    """Callable class with sync __call__ and task=True should raise."""
    from fastmcp.tools import Tool

    class SyncCallableTool:
        def __call__(self, x: int) -> int:
            return x * 2

    # A sync __call__ cannot run in the background, so conversion fails.
    with pytest.raises(
        ValueError, match="uses a sync function but has task execution enabled"
    ):
        Tool.from_function(SyncCallableTool(), task=True)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_sync_function_task_disabled.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_capabilities.py | """
Tests for SEP-1686 task capabilities declaration.
Verifies that the server correctly advertises task support.
Task protocol is now always enabled.
"""
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.server.tasks import get_task_capabilities
async def test_capabilities_include_tasks():
    """Server capabilities always include tasks in first-class field (SEP-1686)."""
    server = FastMCP("capability-test")

    @server.tool()
    async def test_tool() -> str:
        return "test"

    async with Client(server) as client:
        caps = client.initialize_result.capabilities
        # Tasks are advertised as a first-class capability field...
        assert caps.tasks is not None
        assert caps.tasks == get_task_capabilities()
        # ...and never tucked into the experimental bucket.
        assert "tasks" not in (caps.experimental or {})
async def test_client_uses_task_capable_session():
    """Client uses task-capable initialization."""
    server = FastMCP("client-cap-test")

    @server.tool()
    async def test_tool() -> str:
        return "test"

    async with Client(server) as client:
        # A successful connect implies task-capable initialization worked.
        assert client.initialize_result is not None
        # Task-capable init still uses the standard ClientSession.
        assert type(client.session).__name__ == "ClientSession"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_capabilities.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_dependencies.py | """Tests for dependency injection in background tasks.
These tests verify that Docket's dependency system works correctly when
user functions are queued as background tasks. Dependencies like CurrentDocket(),
CurrentFastMCP(), and Depends() should be resolved in the worker context.
"""
from contextlib import asynccontextmanager
from typing import Any, cast
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.dependencies import CurrentDocket, CurrentFastMCP, Depends
@pytest.fixture
async def dependency_server():
    """Create a FastMCP server with dependency-using background tasks.

    Each component appends the value it received to ``injected_values`` so
    tests can assert what the Docket worker actually injected. The list is
    exposed on the server instance as ``_injected_values``.
    """
    mcp = FastMCP("dependency-test-server")
    # Track dependency injection
    injected_values = []
    @mcp.tool(task=True)
    async def tool_with_docket_dependency(docket=CurrentDocket()) -> str:
        """Background tool that uses CurrentDocket dependency."""
        injected_values.append(("docket", docket))
        return f"Docket: {docket is not None}"
    @mcp.tool(task=True)
    async def tool_with_server_dependency(server=CurrentFastMCP()) -> str:
        """Background tool that uses CurrentFastMCP dependency."""
        injected_values.append(("server", server))
        return f"Server: {server.name}"
    @mcp.tool(task=True)
    async def tool_with_custom_dependency(
        value: int, multiplier: int = Depends(lambda: 10)
    ) -> int:
        """Background tool with custom Depends()."""
        injected_values.append(("multiplier", multiplier))
        return value * multiplier
    @mcp.tool(task=True)
    async def tool_with_multiple_dependencies(
        name: str,
        docket=CurrentDocket(),
        server=CurrentFastMCP(),
    ) -> str:
        """Background tool with multiple dependencies."""
        # Both injected values are recorded so tests can check each one.
        injected_values.append(("multi_docket", docket))
        injected_values.append(("multi_server", server))
        return f"{name} on {server.name}"
    @mcp.prompt(task=True)
    async def prompt_with_server_dependency(topic: str, server=CurrentFastMCP()) -> str:
        """Background prompt that uses CurrentFastMCP dependency."""
        injected_values.append(("prompt_server", server))
        return f"Prompt from {server.name} about {topic}"
    @mcp.resource("file://data.txt", task=True)
    async def resource_with_docket_dependency(docket=CurrentDocket()) -> str:
        """Background resource that uses CurrentDocket dependency."""
        injected_values.append(("resource_docket", docket))
        return f"Resource via Docket: {docket is not None}"
    # Expose for test assertions
    mcp._injected_values = injected_values  # type: ignore[attr-defined]
    return mcp
async def test_background_tool_receives_docket_dependency(dependency_server):
    """Background tools can use CurrentDocket() and it resolves correctly."""
    async with Client(dependency_server) as client:
        submitted = await client.call_tool("tool_with_docket_dependency", {}, task=True)
        assert not submitted.returned_immediately

        # Awaiting executes the tool in the Docket worker.
        outcome = await submitted

        # Exactly one injection should have been recorded, with a live docket.
        assert len(dependency_server._injected_values) == 1
        kind, value = dependency_server._injected_values[0]
        assert kind == "docket"
        assert value is not None
        assert "Docket: True" in outcome.data
async def test_background_tool_receives_server_dependency(dependency_server):
    """Background tools can use CurrentFastMCP() and get the actual FastMCP server."""
    dependency_server._injected_values.clear()
    async with Client(dependency_server) as client:
        submitted = await client.call_tool("tool_with_server_dependency", {}, task=True)
        assert not submitted.returned_immediately

        outcome = await submitted

        # The exact same server instance must have been injected.
        assert len(dependency_server._injected_values) == 1
        kind, value = dependency_server._injected_values[0]
        assert kind == "server"
        assert value is dependency_server
        assert f"Server: {dependency_server.name}" in outcome.data
async def test_background_tool_receives_custom_depends(dependency_server):
    """Background tools can use Depends() with custom functions."""
    dependency_server._injected_values.clear()
    async with Client(dependency_server) as client:
        submitted = await client.call_tool(
            "tool_with_custom_dependency", {"value": 5}, task=True
        )
        assert not submitted.returned_immediately

        outcome = await submitted

        # The lambda-backed Depends() resolved to its return value.
        assert len(dependency_server._injected_values) == 1
        kind, value = dependency_server._injected_values[0]
        assert kind == "multiplier"
        assert value == 10
        assert outcome.data == 50  # 5 * 10
async def test_background_tool_with_multiple_dependencies(dependency_server):
    """Background tools can have multiple dependencies injected simultaneously."""
    dependency_server._injected_values.clear()
    async with Client(dependency_server) as client:
        submitted = await client.call_tool(
            "tool_with_multiple_dependencies", {"name": "test"}, task=True
        )
        assert not submitted.returned_immediately
        await submitted

        # Both the docket and the server dependency were recorded.
        assert len(dependency_server._injected_values) == 2
        injected = dict(dependency_server._injected_values)
        assert "multi_docket" in injected
        assert "multi_server" in injected
        # The server dependency is the exact same instance.
        assert injected["multi_server"] is dependency_server
async def test_background_prompt_receives_dependencies(dependency_server):
    """Background prompts can use dependency injection."""
    dependency_server._injected_values.clear()
    async with Client(dependency_server) as client:
        submitted = await client.get_prompt(
            "prompt_with_server_dependency", {"topic": "AI"}, task=True
        )
        assert not submitted.returned_immediately
        await submitted

        # The prompt saw the real server instance.
        assert len(dependency_server._injected_values) == 1
        kind, value = dependency_server._injected_values[0]
        assert kind == "prompt_server"
        assert value is dependency_server
async def test_background_resource_receives_dependencies(dependency_server):
    """Background resources can use dependency injection."""
    dependency_server._injected_values.clear()
    async with Client(dependency_server) as client:
        submitted = await client.read_resource("file://data.txt", task=True)
        assert not submitted.returned_immediately
        await submitted

        # The resource received a live Docket instance.
        assert len(dependency_server._injected_values) == 1
        kind, value = dependency_server._injected_values[0]
        assert kind == "resource_docket"
        assert value is not None
async def test_foreground_tool_dependencies_unaffected(dependency_server):
    """Synchronous tools (task=False) still get dependencies as before."""
    dependency_server._injected_values.clear()

    @dependency_server.tool()  # task=False
    async def sync_tool(server=CurrentFastMCP()) -> str:
        dependency_server._injected_values.append(("sync_server", server))
        return f"Sync: {server.name}"

    async with Client(dependency_server) as client:
        await client.call_tool("sync_tool", {})

        # The tool ran inline and its dependency resolved immediately.
        assert len(dependency_server._injected_values) == 1
        assert dependency_server._injected_values[0][1] is dependency_server
async def test_dependency_context_managers_cleaned_up_in_background():
    """Context manager dependencies are properly cleaned up after background task."""
    # Records enter/exit so ordering can be verified across the worker boundary.
    cleanup_called = []
    mcp = FastMCP("cleanup-test")
    @asynccontextmanager
    async def tracked_connection():
        try:
            cleanup_called.append("enter")
            yield "connection"
        finally:
            cleanup_called.append("exit")
    @mcp.tool(task=True)
    async def use_connection(name: str, conn: str = Depends(tracked_connection)) -> str:
        # These asserts run inside the worker while the dependency is open.
        assert conn == "connection"
        assert "enter" in cleanup_called
        assert "exit" not in cleanup_called  # Still open during execution
        return f"Used: {conn}"
    async with Client(mcp) as client:
        task = await client.call_tool("use_connection", {"name": "test"}, task=True)
        result = await task
        # After task completes, cleanup should have been called
        assert cleanup_called == ["enter", "exit"]
        assert "Used: connection" in result.data
async def test_dependency_errors_propagate_to_task_failure():
    """If dependency resolution fails, the background task should fail."""
    server = FastMCP("error-test")

    async def failing_dependency():
        raise ValueError("Dependency failed!")

    @server.tool(task=True)
    async def tool_with_failing_dep(
        value: str, dep: str = cast(Any, Depends(failing_dependency))
    ) -> str:
        return f"Got: {dep}"

    from fastmcp.exceptions import ToolError

    async with Client(server) as client:
        submitted = await client.call_tool(
            "tool_with_failing_dep", {"value": "test"}, task=True
        )

        # Dependency failure surfaces as a ToolError from the result call...
        with pytest.raises(ToolError, match="Failed to resolve dependencies"):
            await submitted.result()

        # ...and the task lands in the "failed" state.
        final = await submitted.status()
        assert final.status == "failed"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_dependencies.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_metadata.py | """
Tests for SEP-1686 related-task metadata in protocol responses.
Per the spec, all task-related responses MUST include
io.modelcontextprotocol/related-task in _meta.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def metadata_server():
    """Create a server with a single task-enabled doubling tool."""
    server = FastMCP("metadata-test")

    @server.tool(task=True)
    async def test_tool(value: int) -> int:
        return value * 2

    return server
async def test_tasks_get_includes_related_task_metadata(metadata_server: FastMCP):
    """tasks/get response includes io.modelcontextprotocol/related-task in _meta."""
    async with Client(metadata_server) as client:
        submitted = await client.call_tool("test_tool", {"value": 5}, task=True)
        tid = submitted.task_id

        # Query the status through the protocol-aware client helper.
        status = await client.get_task_status(tid)

        # A well-formed response implies the related-task metadata was attached.
        assert status.taskId == tid
        assert status.status in ["working", "completed"]
async def test_tasks_result_includes_related_task_metadata(metadata_server: FastMCP):
    """tasks/result response includes io.modelcontextprotocol/related-task in _meta."""
    async with Client(metadata_server) as client:
        submitted = await client.call_tool("test_tool", {"value": 7}, task=True)
        outcome = await submitted.result()

        # The result has content and carries the computed value (7 doubled).
        assert outcome.content
        assert outcome.data == 14
async def test_tasks_list_includes_related_task_metadata(metadata_server: FastMCP):
    """tasks/list response includes io.modelcontextprotocol/related-task in _meta."""
    async with Client(metadata_server) as client:
        listing = await client.list_tasks()

        # list_tasks returns a mapping with a "tasks" list.
        assert "tasks" in listing
        assert isinstance(listing["tasks"], list)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_metadata.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_methods.py | """
Tests for task protocol methods.
Tests the tasks/get, tasks/result, and tasks/list JSON-RPC protocol methods.
"""
import asyncio
import pytest
from mcp.shared.exceptions import McpError
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def endpoint_server():
    """Create a server exposing task-enabled tools for protocol-method tests."""
    server = FastMCP("endpoint-test-server")

    @server.tool(task=True)
    async def quick_tool(value: int) -> int:
        """Returns the value immediately."""
        return value * 2

    @server.tool(task=True)
    async def error_tool() -> str:
        """Always raises an error."""
        raise RuntimeError("Task failed!")

    @server.tool(task=True)
    async def slow_tool() -> str:
        """A slow tool for testing cancellation."""
        await asyncio.sleep(10)
        return "done"

    return server
async def test_tasks_get_endpoint_returns_status(endpoint_server):
    """tasks/get returns the task status."""
    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("quick_tool", {"value": 21}, task=True)

        # Right after submission the task is either running or already done.
        first = await submitted.status()
        assert first.taskId == submitted.task_id
        assert first.status in ["working", "completed"]

        # Once the task finishes, the reported state must be "completed".
        await submitted.wait(timeout=2.0)
        final = await submitted.status()
        assert final.status == "completed"
async def test_tasks_get_endpoint_includes_poll_interval(endpoint_server):
    """Task status includes a polling-frequency hint."""
    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("quick_tool", {"value": 42}, task=True)
        current = await submitted.status()

        # The server suggests how often clients should poll.
        assert current.pollInterval is not None
        assert isinstance(current.pollInterval, int)
async def test_tasks_result_endpoint_returns_result_when_completed(endpoint_server):
    """tasks/result returns the tool result when completed."""
    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("quick_tool", {"value": 21}, task=True)

        # result() waits for completion before returning the payload.
        outcome = await submitted.result()
        assert outcome.data == 42  # 21 * 2
async def test_tasks_result_endpoint_errors_if_not_completed(endpoint_server):
    """tasks/result returns an error if the task has not completed yet."""
    # A gate the tool waits on, so it stays running until released.
    completion_signal = asyncio.Event()

    @endpoint_server.tool(task=True)
    async def blocked_tool() -> str:
        await completion_signal.wait()
        return "done"

    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("blocked_tool", task=True)

        # Asking for the result while the task is still running must fail.
        with pytest.raises(Exception):
            await client.get_task_result(submitted.task_id)

        # Release the tool so the task can finish cleanly.
        completion_signal.set()
async def test_tasks_result_endpoint_errors_if_task_not_found(endpoint_server):
    """tasks/result returns an error for a non-existent task."""
    async with Client(endpoint_server) as client:
        # An unknown task id must be rejected.
        with pytest.raises(Exception):
            await client.get_task_result("non-existent-task-id")
async def test_tasks_result_endpoint_returns_error_for_failed_task(endpoint_server):
    """tasks/result returns error information for failed tasks."""
    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("error_tool", task=True)
        await submitted.wait(state="failed", timeout=2.0)

        # Fetching the result of a failed task surfaces the failure.
        with pytest.raises(Exception) as exc_info:
            await submitted.result()
        message = str(exc_info.value).lower()
        assert "failed" in message or "error" in message
async def test_tasks_list_endpoint_session_isolation(endpoint_server):
    """list_tasks returns only tasks submitted by this client."""
    # The client tracks its tasks locally, so this exercises client-side tracking.
    async with Client(endpoint_server) as client:
        submitted = [
            await client.call_tool("quick_tool", {"value": i}, task=True)
            for i in range(3)
        ]
        for item in submitted:
            await item.wait(timeout=2.0)

        # All three tasks should be listed, and nothing unknown.
        listing = await client.list_tasks()
        listed_ids = [entry["taskId"] for entry in listing["tasks"]]
        known_ids = [item.task_id for item in submitted]
        assert len(listed_ids) == 3
        assert all(tid in known_ids for tid in listed_ids)
async def test_get_status_nonexistent_task_raises_error(endpoint_server):
    """Getting status for nonexistent task raises MCP error (per SEP-1686 SDK behavior)."""
    async with Client(endpoint_server) as client:
        # The SDK raises ValueError server-side, surfaced as a JSON-RPC error.
        with pytest.raises(McpError, match="Task nonexistent-task-id not found"):
            await client.get_task_status("nonexistent-task-id")
async def test_task_cancellation_workflow(endpoint_server):
    """Task can be cancelled, transitioning to cancelled state."""
    async with Client(endpoint_server) as client:
        submitted = await client.call_tool("slow_tool", {}, task=True)

        # Let the task begin executing before cancelling it.
        await asyncio.sleep(0.1)
        await submitted.cancel()

        # Allow the cancellation to be processed.
        await asyncio.sleep(0.1)
        final = await submitted.status()
        assert final.status == "cancelled"
async def test_task_cancellation_interrupts_running_coroutine(endpoint_server):
    """Task cancellation actually interrupts the running coroutine.

    This verifies that when a task is cancelled, the underlying asyncio
    coroutine receives CancelledError rather than continuing to completion.
    Requires pydocket >= 0.16.2.

    See: https://github.com/PrefectHQ/fastmcp/issues/2679
    """
    # Events synchronize the test with the tool's lifecycle.
    started = asyncio.Event()
    was_interrupted = asyncio.Event()
    completed_normally = asyncio.Event()
    @endpoint_server.tool(task=True)
    async def interruptible_tool() -> str:
        started.set()
        try:
            await asyncio.sleep(60)
            completed_normally.set()
            return "completed"
        except asyncio.CancelledError:
            # Record the interruption, then re-raise so cancellation proceeds.
            was_interrupted.set()
            raise
    async with Client(endpoint_server) as client:
        task = await client.call_tool("interruptible_tool", {}, task=True)
        # Wait for the tool to actually start executing
        await asyncio.wait_for(started.wait(), timeout=5.0)
        # Cancel the task
        await task.cancel()
        # Wait for cancellation to propagate
        await asyncio.wait_for(was_interrupted.wait(), timeout=5.0)
        # The coroutine should have been interrupted, not completed normally
        assert was_interrupted.is_set(), "Task was not interrupted by cancellation"
        assert not completed_normally.is_set(), (
            "Task completed instead of being cancelled"
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_methods.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_prompts.py | """
Tests for SEP-1686 background task support for prompts.
Tests that prompts with task=True can execute in background.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import PromptTask
@pytest.fixture
async def prompt_server():
    """Create a FastMCP server with task-enabled prompts."""
    server = FastMCP("prompt-test-server")

    @server.prompt()
    async def simple_prompt(topic: str) -> str:
        """A simple prompt template."""
        return f"Write about: {topic}"

    @server.prompt(task=True)
    async def background_prompt(topic: str, depth: str = "detailed") -> str:
        """A prompt that can execute in background."""
        return f"Write a {depth} analysis of: {topic}"

    return server
async def test_synchronous_prompt_unchanged(prompt_server):
    """Prompts without task metadata execute synchronously as before."""
    async with Client(prompt_server) as client:
        # A plain get_prompt call runs inline and returns the rendered prompt.
        rendered = await client.get_prompt("simple_prompt", {"topic": "AI"})
        assert "Write about: AI" in str(rendered)
async def test_prompt_with_task_metadata_returns_immediately(prompt_server):
    """Prompts with task metadata return immediately with PromptTask object."""
    async with Client(prompt_server) as client:
        submitted = await client.get_prompt(
            "background_prompt", {"topic": "AI"}, task=True
        )

        # The call hands back a PromptTask carrying a non-empty task id.
        assert isinstance(submitted, PromptTask)
        assert isinstance(submitted.task_id, str)
        assert submitted.task_id != ""
async def test_prompt_task_executes_in_background(prompt_server):
    """Prompt task executes via Docket in background."""
    async with Client(prompt_server) as client:
        submitted = await client.get_prompt(
            "background_prompt",
            {"topic": "Machine Learning", "depth": "comprehensive"},
            task=True,
        )
        assert not submitted.returned_immediately

        # The rendered prompt should reflect the "depth" argument.
        rendered = await submitted.result()
        assert "comprehensive" in rendered.messages[0].content.text.lower()
async def test_forbidden_mode_prompt_rejects_task_calls(prompt_server):
    """Prompts with task=False (mode=forbidden) reject task-augmented calls.

    Registering with task=False sets mode="forbidden", so a task-augmented
    get_prompt must fail with METHOD_NOT_FOUND instead of running.
    """
    from mcp.shared.exceptions import McpError
    from mcp.types import METHOD_NOT_FOUND

    @prompt_server.prompt(task=False)  # Explicitly disable task support
    async def sync_only_prompt(topic: str) -> str:
        return f"Sync prompt: {topic}"

    async with Client(prompt_server) as client:
        # Calling with task=True when task=False should raise McpError.
        # (pytest is imported at module level; the previous redundant
        # in-function import was removed.)
        with pytest.raises(McpError) as exc_info:
            await client.get_prompt("sync_only_prompt", {"topic": "test"}, task=True)
        # mode="forbidden" surfaces as METHOD_NOT_FOUND with a clear message.
        assert exc_info.value.error.code == METHOD_NOT_FOUND
        assert (
            "does not support task-augmented execution" in exc_info.value.error.message
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_prompts.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_protocol.py | """
Tests for SEP-1686 protocol-level task handling.
Generic protocol tests that use tools as test fixtures.
Tests metadata, notifications, and error handling at the protocol level.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def task_enabled_server():
    """Build a FastMCP server exposing two task-enabled tools: one that
    succeeds and one that always raises."""
    server = FastMCP("task-test-server")

    @server.tool(task=True)
    async def simple_tool(message: str) -> str:
        """A simple tool for testing."""
        return f"Processed: {message}"

    @server.tool(task=True)
    async def failing_tool() -> str:
        """A tool that always fails."""
        raise ValueError("This tool always fails")

    return server
async def test_task_metadata_includes_task_id_and_ttl(task_enabled_server):
    """Task metadata properly includes server-generated taskId and ttl."""
    async with Client(task_enabled_server) as client:
        # The client only requests a ttl; the server mints the task ID.
        submitted = await client.call_tool(
            "simple_tool",
            {"message": "test"},
            task=True,
            ttl=30000,
        )

        assert submitted
        assert not submitted.returned_immediately

        # A string task ID must have been generated on the server side.
        generated_id = submitted.task_id
        assert generated_id is not None
        assert isinstance(generated_id, str)
async def test_task_notification_sent_after_submission(task_enabled_server):
    """Server sends an initial task status notification after submission."""

    @task_enabled_server.tool(task=True)
    async def background_tool(message: str) -> str:
        return f"Processed: {message}"

    async with Client(task_enabled_server) as client:
        submitted = await client.call_tool(
            "background_tool", {"message": "test"}, task=True
        )
        assert submitted
        assert not submitted.returned_immediately

        # The task must be queryable, and the status must echo its own ID.
        current = await submitted.status()
        assert current.taskId == submitted.task_id
async def test_failed_task_stores_error(task_enabled_server):
    """Failed tasks store the error in results."""

    @task_enabled_server.tool(task=True)
    async def failing_task_tool() -> str:
        raise ValueError("This tool always fails")

    async with Client(task_enabled_server) as client:
        submitted = await client.call_tool("failing_task_tool", task=True)
        assert submitted
        assert not submitted.returned_immediately

        # Block until the failure is recorded, then confirm the terminal state.
        terminal = await submitted.wait(state="failed", timeout=2.0)
        assert terminal.status == "failed"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_protocol.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_resources.py | """
Tests for SEP-1686 background task support for resources.
Tests that resources with task=True can execute in background.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import ResourceTask
@pytest.fixture
async def resource_server():
    """Build a FastMCP server with a plain resource, a task-enabled
    resource, and a task-enabled resource template."""
    server = FastMCP("resource-test-server")

    @server.resource("file://data.txt")
    async def simple_resource() -> str:
        """A simple resource."""
        return "Simple content"

    @server.resource("file://large.txt", task=True)
    async def background_resource() -> str:
        """A resource that can execute in background."""
        return "Large file content that takes time to load"

    @server.resource("file://user/{user_id}/data.json", task=True)
    async def template_resource(user_id: str) -> str:
        """A resource template that can execute in background."""
        return f'{{"userId": "{user_id}", "data": "value"}}'

    return server
async def test_synchronous_resource_unchanged(resource_server):
    """Resources without task metadata execute synchronously as before."""
    async with Client(resource_server) as client:
        # A plain read (no task flag) resolves inline with the content.
        contents = await client.read_resource("file://data.txt")
        assert "Simple content" in str(contents)
async def test_resource_with_task_metadata_returns_immediately(resource_server):
    """Resources with task metadata return immediately with ResourceTask object."""
    async with Client(resource_server) as client:
        handle = await client.read_resource("file://large.txt", task=True)

        # The client receives a ResourceTask handle, not the content itself.
        assert isinstance(handle, ResourceTask)
        assert isinstance(handle.task_id, str)
        assert handle.task_id != ""
async def test_resource_task_executes_in_background(resource_server):
    """Resource task executes via Docket in background."""
    async with Client(resource_server) as client:
        handle = await client.read_resource("file://large.txt", task=True)
        # The read must have been deferred to the background worker.
        assert not handle.returned_immediately

        # Awaiting the result surfaces the resource content blocks.
        contents = await handle.result()
        assert contents
        assert contents[0].text == "Large file content that takes time to load"
async def test_resource_template_with_task(resource_server):
    """Resource templates with task=True execute in background."""
    async with Client(resource_server) as client:
        handle = await client.read_resource("file://user/123/data.json", task=True)
        assert not handle.returned_immediately

        # The template parameter must have been substituted into the payload.
        contents = await handle.result()
        assert '"userId": "123"' in contents[0].text
async def test_forbidden_mode_resource_rejects_task_calls(resource_server):
    """Resources with task=False (mode=forbidden) reject task-augmented calls.

    Registers a resource with task support explicitly disabled, then verifies
    that a task-augmented read is rejected with METHOD_NOT_FOUND.
    """
    # pytest is already imported at module level; only the MCP error types
    # need a local import here.
    from mcp.shared.exceptions import McpError
    from mcp.types import METHOD_NOT_FOUND

    # NOTE(review): registered as "file://sync.txt/" (trailing slash) but read
    # as "file://sync.txt" — presumably URI normalization makes these match;
    # confirm against FastMCP's resource URI handling.
    @resource_server.resource(
        "file://sync.txt/", task=False
    )  # Explicitly disable task support
    async def sync_only_resource() -> str:
        return "Sync content"

    async with Client(resource_server) as client:
        # Calling with task=True when task=False should raise McpError
        with pytest.raises(McpError) as exc_info:
            await client.read_resource("file://sync.txt", task=True)

        # mode="forbidden" returns METHOD_NOT_FOUND with an explanatory message.
        assert exc_info.value.error.code == METHOD_NOT_FOUND
        assert (
            "does not support task-augmented execution" in exc_info.value.error.message
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_resources.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_return_types.py | """
Tests to verify all return types work identically with task=True.
These tests ensure that enabling background task support doesn't break
existing functionality - any tool/prompt/resource should work exactly
the same whether task=True or task=False.
"""
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any
from uuid import UUID
import pytest
from pydantic import BaseModel
from typing_extensions import TypedDict
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.utilities.types import Audio, File, Image
class UserData(BaseModel):
    """Example structured (Pydantic) output returned by the ``return_model`` tool."""
    # Fields chosen to exercise string, int, and bool round-tripping.
    name: str
    age: int
    active: bool
@pytest.fixture
async def return_type_server():
    """Server exposing one task-enabled tool per basic return type."""
    server = FastMCP("return-type-test")

    @server.tool(task=True)
    async def return_string() -> str:
        return "Hello, World!"

    @server.tool(task=True)
    async def return_int() -> int:
        return 42

    @server.tool(task=True)
    async def return_float() -> float:
        return 3.14159

    @server.tool(task=True)
    async def return_bool() -> bool:
        return True

    @server.tool(task=True)
    async def return_dict() -> dict[str, int]:
        return {"count": 100, "total": 500}

    @server.tool(task=True)
    async def return_list() -> list[str]:
        return ["apple", "banana", "cherry"]

    # Structured output via a Pydantic model.
    @server.tool(task=True)
    async def return_model() -> UserData:
        return UserData(name="Alice", age=30, active=True)

    @server.tool(task=True)
    async def return_none() -> None:
        return None

    return server
@pytest.mark.parametrize(
    "tool_name,expected_type,expected_value",
    [
        ("return_string", str, "Hello, World!"),
        ("return_int", int, 42),
        ("return_float", float, 3.14159),
        ("return_bool", bool, True),
        ("return_dict", dict, {"count": 100, "total": 500}),
        ("return_list", list, ["apple", "banana", "cherry"]),
        ("return_none", type(None), None),
    ],
)
async def test_task_basic_types(
    return_type_server: FastMCP,
    tool_name: str,
    expected_type: type,
    expected_value: Any,
):
    """Task mode returns basic types correctly.

    Each parametrized case submits a task-enabled tool with task=True, awaits
    the task handle, and checks that ``result.data`` round-trips with exactly
    the type and value the tool returned.
    """
    async with Client(return_type_server) as client:
        task = await client.call_tool(tool_name, task=True)
        # Awaiting the handle resolves to the final tool-call result.
        result = await task
        assert isinstance(result.data, expected_type)
        assert result.data == expected_value
async def test_task_model_return(return_type_server):
    """Task mode returns same BaseModel (as dict) as immediate mode."""
    async with Client(return_type_server) as client:
        handle = await client.call_tool("return_model", task=True)
        outcome = await handle

        payload = outcome.data
        # Client deserializes to dynamic class (type name lost with title pruning)
        assert type(payload).__name__ == "Root"
        assert payload.name == "Alice"
        assert payload.age == 30
        assert payload.active is True
async def test_task_vs_immediate_equivalence(return_type_server):
    """Verify task mode and immediate mode return identical results."""
    async with Client(return_type_server) as client:
        # Compare a representative subset of return types.
        for tool_name in ("return_string", "return_int", "return_dict"):
            # Background path: submit as a task, then await the handle.
            handle = await client.call_tool(tool_name, task=True)
            background = await handle

            # Immediate path: plain call with no task metadata.
            immediate = await client.call_tool(tool_name)

            assert background.data == immediate.data, f"Mismatch for {tool_name}"
@pytest.fixture
async def prompt_return_server():
    """Server whose prompts return one message or a list of messages."""
    server = FastMCP("prompt-return-test")

    @server.prompt(task=True)
    async def single_message_prompt() -> str:
        """Return a single string message."""
        return "Single message content"

    @server.prompt(task=True)
    async def multi_message_prompt() -> list[str]:
        """Return multiple messages."""
        return [
            "First message",
            "Second message",
            "Third message",
        ]

    return server
async def test_prompt_task_single_message(prompt_return_server):
    """Prompt task returns single message correctly."""
    async with Client(prompt_return_server) as client:
        handle = await client.get_prompt("single_message_prompt", task=True)
        outcome = await handle

        # Exactly one message, carrying the prompt's string return.
        assert len(outcome.messages) == 1
        assert outcome.messages[0].content.text == "Single message content"
async def test_prompt_task_multiple_messages(prompt_return_server):
    """Prompt task returns multiple messages correctly."""
    async with Client(prompt_return_server) as client:
        handle = await client.get_prompt("multi_message_prompt", task=True)
        outcome = await handle

        # All three messages come back, in order.
        texts = [message.content.text for message in outcome.messages]
        assert texts == ["First message", "Second message", "Third message"]
@pytest.fixture
async def resource_return_server():
    """Server whose resources return plain text and a JSON payload."""
    server = FastMCP("resource-return-test")

    @server.resource("text://simple", task=True)
    async def simple_text() -> str:
        """Return simple text content."""
        return "Simple text resource"

    @server.resource("data://json", task=True)
    async def json_data() -> str:
        """Return JSON-like data."""
        import json

        return json.dumps({"key": "value", "count": 123})

    return server
async def test_resource_task_text_content(resource_return_server):
    """Resource task returns text content correctly."""
    async with Client(resource_return_server) as client:
        handle = await client.read_resource("text://simple", task=True)
        contents = await handle

        # One text block with the resource's exact content.
        assert len(contents) == 1
        assert contents[0].text == "Simple text resource"
async def test_resource_task_json_content(resource_return_server):
    """Resource task returns structured content correctly."""
    import json

    async with Client(resource_return_server) as client:
        handle = await client.read_resource("data://json", task=True)
        contents = await handle

        # The payload arrives as a single JSON-serialized text block.
        assert len(contents) == 1
        assert json.loads(contents[0].text) == {"key": "value", "count": 123}
# ==============================================================================
# Binary & Special Types
# ==============================================================================
@pytest.fixture
async def binary_type_server():
    """Server exposing tools that return binary and special stdlib types."""
    server = FastMCP("binary-test")

    @server.tool(task=True)
    async def return_bytes() -> bytes:
        return b"Hello bytes!"

    @server.tool(task=True)
    async def return_uuid() -> UUID:
        return UUID("12345678-1234-5678-1234-567812345678")

    @server.tool(task=True)
    async def return_path() -> Path:
        return Path("/tmp/test.txt")

    @server.tool(task=True)
    async def return_datetime() -> datetime:
        return datetime(2025, 11, 5, 12, 30, 45)

    return server
@pytest.mark.parametrize(
    "tool_name,expected_type,assertion_fn",
    [
        # bytes may arrive raw or base64-encoded ("SGVsbG8gYnl0ZXMh") —
        # the predicate accepts either encoding.
        (
            "return_bytes",
            str,
            lambda r: "Hello bytes!" in r.data or "SGVsbG8gYnl0ZXMh" in r.data,
        ),
        # UUIDs serialize to their canonical string form.
        (
            "return_uuid",
            str,
            lambda r: r.data == "12345678-1234-5678-1234-567812345678",
        ),
        # Paths serialize to strings; only check the components survive.
        (
            "return_path",
            str,
            lambda r: "tmp" in r.data and "test.txt" in r.data,
        ),
        # datetimes round-trip back to a real datetime on the client.
        (
            "return_datetime",
            datetime,
            lambda r: r.data == datetime(2025, 11, 5, 12, 30, 45),
        ),
    ],
)
async def test_task_binary_types(
    binary_type_server: FastMCP,
    tool_name: str,
    expected_type: type,
    assertion_fn: Any,
):
    """Task mode handles binary and special types.

    Each case submits the tool as a background task, awaits the handle, and
    checks both the deserialized type and a case-specific predicate.
    """
    async with Client(binary_type_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert isinstance(result.data, expected_type)
        assert assertion_fn(result)
# ==============================================================================
# Collection Varieties
# ==============================================================================
@pytest.fixture
async def collection_server():
    """Server exposing tools that return assorted collection types,
    including empty ones."""
    server = FastMCP("collection-test")

    @server.tool(task=True)
    async def return_tuple() -> tuple[int, str, bool]:
        return (42, "hello", True)

    @server.tool(task=True)
    async def return_set() -> set[int]:
        return {1, 2, 3}

    @server.tool(task=True)
    async def return_empty_list() -> list[str]:
        return []

    @server.tool(task=True)
    async def return_empty_dict() -> dict[str, Any]:
        return {}

    return server
@pytest.mark.parametrize(
    "tool_name,expected_type,expected_value",
    [
        # Tuples have no JSON equivalent and deserialize as lists.
        ("return_tuple", list, [42, "hello", True]),
        # Sets round-trip back to sets on the client.
        ("return_set", set, {1, 2, 3}),
        ("return_empty_list", list, []),
    ],
)
async def test_task_collection_types(
    collection_server: FastMCP,
    tool_name: str,
    expected_type: type,
    expected_value: Any,
):
    """Task mode handles collection types.

    Verifies that collection returns from background tasks deserialize to
    the expected client-side type and value.
    """
    async with Client(collection_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert isinstance(result.data, expected_type)
        assert result.data == expected_value
async def test_task_empty_dict_return(collection_server):
    """Task mode handles empty dict return."""
    async with Client(collection_server) as client:
        handle = await client.call_tool("return_empty_dict", task=True)
        outcome = await handle

        # An empty structured payload deserializes to data=None...
        assert outcome.data is None
        # ...while the raw structured content is preserved as {}.
        assert outcome.structured_content == {}
# ==============================================================================
# Media Types (Image, Audio, File)
# ==============================================================================
@pytest.fixture
async def media_server(tmp_path):
    """Server exposing tools that return Image, Audio, and File media types."""
    server = FastMCP("media-test")

    # Fabricate minimal on-disk fixtures for each media kind.
    test_image = tmp_path / "test.png"
    test_image.write_bytes(b"\x89PNG\r\n\x1a\n" + b"fake png data")

    test_audio = tmp_path / "test.mp3"
    test_audio.write_bytes(b"ID3" + b"fake mp3 data")

    test_file = tmp_path / "test.txt"
    test_file.write_text("test file content")

    @server.tool(task=True)
    async def return_image_path() -> Image:
        return Image(path=str(test_image))

    @server.tool(task=True)
    async def return_image_data() -> Image:
        return Image(data=test_image.read_bytes(), format="png")

    @server.tool(task=True)
    async def return_audio() -> Audio:
        return Audio(path=str(test_audio))

    @server.tool(task=True)
    async def return_file() -> File:
        return File(path=str(test_file))

    return server
@pytest.mark.parametrize(
    "tool_name,assertion_fn",
    [
        (
            "return_image_path",
            lambda r: len(r.content) == 1 and r.content[0].type == "image",
        ),
        (
            "return_image_data",
            lambda r: (
                len(r.content) == 1
                and r.content[0].type == "image"
                and r.content[0].mimeType == "image/png"
            ),
        ),
        # Accepts "text" or "audio" — presumably depends on whether audio
        # serialization is available; confirm against the server's handling.
        (
            "return_audio",
            lambda r: len(r.content) == 1 and r.content[0].type in ["text", "audio"],
        ),
        (
            "return_file",
            lambda r: len(r.content) == 1 and r.content[0].type == "resource",
        ),
    ],
)
async def test_task_media_types(
    media_server: FastMCP,
    tool_name: str,
    assertion_fn: Any,
):
    """Task mode handles media types (Image, Audio, File).

    Each case runs the tool as a background task and applies a per-case
    predicate against the resulting content blocks.
    """
    async with Client(media_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert assertion_fn(result)
# ==============================================================================
# Structured Types (TypedDict, dataclass, unions)
# ==============================================================================
class PersonTypedDict(TypedDict):
    """Example TypedDict used to exercise structured dict-shaped returns."""
    name: str
    age: int
@dataclass
class PersonDataclass:
    """Example dataclass used to exercise structured object returns."""
    name: str
    age: int
@pytest.fixture
async def structured_type_server():
    """Server exposing tools returning TypedDict, dataclass, union, and
    optional types."""
    server = FastMCP("structured-test")

    @server.tool(task=True)
    async def return_typeddict() -> PersonTypedDict:
        return {"name": "Bob", "age": 25}

    @server.tool(task=True)
    async def return_dataclass() -> PersonDataclass:
        return PersonDataclass(name="Charlie", age=35)

    @server.tool(task=True)
    async def return_union() -> str | int:
        return "string value"

    @server.tool(task=True)
    async def return_union_int() -> str | int:
        return 123

    @server.tool(task=True)
    async def return_optional() -> str | None:
        return "has value"

    @server.tool(task=True)
    async def return_optional_none() -> str | None:
        return None

    return server
@pytest.mark.parametrize(
    "tool_name,expected_name,expected_age",
    [
        ("return_typeddict", "Bob", 25),
        ("return_dataclass", "Charlie", 35),
    ],
)
async def test_task_structured_dict_types(
    structured_type_server: FastMCP,
    tool_name: str,
    expected_name: str,
    expected_age: int,
):
    """Task mode handles TypedDict and dataclass returns.

    Both shapes deserialize on the client into a dynamic model whose
    attributes mirror the original fields.
    """
    async with Client(structured_type_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        # Both deserialize to dynamic Root class
        assert result.data.name == expected_name
        assert result.data.age == expected_age
@pytest.mark.parametrize(
    "tool_name,expected_type,expected_value",
    [
        ("return_union", str, "string value"),
        ("return_union_int", int, 123),
    ],
)
async def test_task_union_types(
    structured_type_server: FastMCP,
    tool_name: str,
    expected_type: type,
    expected_value: Any,
):
    """Task mode handles union type branches.

    Each case hits one branch of a ``str | int`` return annotation and
    checks the deserialized value keeps its concrete type.
    """
    async with Client(structured_type_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert isinstance(result.data, expected_type)
        assert result.data == expected_value
@pytest.mark.parametrize(
    "tool_name,expected_type,expected_value",
    [
        ("return_optional", str, "has value"),
        ("return_optional_none", type(None), None),
    ],
)
async def test_task_optional_types(
    structured_type_server: FastMCP,
    tool_name: str,
    expected_type: type,
    expected_value: Any,
):
    """Task mode handles Optional types.

    Covers both the present-value and None branches of ``str | None``.
    """
    async with Client(structured_type_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert isinstance(result.data, expected_type)
        assert result.data == expected_value
# ==============================================================================
# MCP Content Blocks
# ==============================================================================
@pytest.fixture
async def mcp_content_server(tmp_path):
    """Server with tools returning MCP content blocks.

    Covers TextContent, ImageContent, EmbeddedResource, ResourceLink, and a
    mixed list of blocks — the tools return protocol types directly rather
    than plain Python values.
    """
    import base64
    from mcp.types import (
        AnyUrl,
        EmbeddedResource,
        ImageContent,
        ResourceLink,
        TextContent,
        TextResourceContents,
    )
    mcp = FastMCP("content-test")
    # Minimal PNG-prefixed bytes for the image-returning tools.
    test_image = tmp_path / "content.png"
    test_image.write_bytes(b"\x89PNG\r\n\x1a\n" + b"content")
    @mcp.tool(task=True)
    async def return_text_content() -> TextContent:
        return TextContent(type="text", text="Direct text content")
    @mcp.tool(task=True)
    async def return_image_content() -> ImageContent:
        return ImageContent(
            type="image",
            data=base64.b64encode(test_image.read_bytes()).decode(),
            mimeType="image/png",
        )
    @mcp.tool(task=True)
    async def return_embedded_resource() -> EmbeddedResource:
        return EmbeddedResource(
            type="resource",
            resource=TextResourceContents(
                uri=AnyUrl("test://resource"), text="embedded"
            ),
        )
    @mcp.tool(task=True)
    async def return_resource_link() -> ResourceLink:
        return ResourceLink(
            type="resource_link", uri=AnyUrl("test://linked"), name="Test Resource"
        )
    @mcp.tool(task=True)
    async def return_mixed_content() -> list[TextContent | ImageContent]:
        return [
            TextContent(type="text", text="First block"),
            ImageContent(
                type="image",
                data=base64.b64encode(test_image.read_bytes()).decode(),
                mimeType="image/png",
            ),
            TextContent(type="text", text="Third block"),
        ]
    return mcp
@pytest.mark.parametrize(
    "tool_name,assertion_fn",
    [
        (
            "return_text_content",
            lambda r: (
                len(r.content) == 1
                and r.content[0].type == "text"
                and r.content[0].text == "Direct text content"
            ),
        ),
        (
            "return_image_content",
            lambda r: (
                len(r.content) == 1
                and r.content[0].type == "image"
                and r.content[0].mimeType == "image/png"
            ),
        ),
        (
            "return_embedded_resource",
            lambda r: len(r.content) == 1 and r.content[0].type == "resource",
        ),
        (
            "return_resource_link",
            lambda r: (
                len(r.content) == 1
                and r.content[0].type == "resource_link"
                and str(r.content[0].uri) == "test://linked"
            ),
        ),
    ],
)
async def test_task_mcp_content_types(
    mcp_content_server: FastMCP,
    tool_name: str,
    assertion_fn: Any,
):
    """Task mode handles MCP content block types.

    Each case runs a content-block-returning tool as a background task and
    checks the content blocks survive the round trip unchanged.
    """
    async with Client(mcp_content_server) as client:
        task = await client.call_tool(tool_name, task=True)
        result = await task
        assert assertion_fn(result)
async def test_task_mixed_content_return(mcp_content_server):
    """Task mode handles mixed content list return."""
    async with Client(mcp_content_server) as client:
        handle = await client.call_tool("return_mixed_content", task=True)
        outcome = await handle

        blocks = outcome.content
        # Three blocks, in order: text, image, text.
        assert [block.type for block in blocks] == ["text", "image", "text"]
        assert blocks[0].text == "First block"
        assert blocks[2].text == "Third block"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_return_types.py",
"license": "Apache License 2.0",
"lines": 540,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_security.py | """
Tests for session-based task ID isolation (CRITICAL SECURITY).
Ensures that tasks are properly scoped to sessions and clients cannot
access each other's tasks.
"""
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def task_server():
    """Build a server with a single background-capable tool."""
    server = FastMCP("security-test-server")

    @server.tool(task=True)  # Enable background execution
    async def secret_tool(data: str) -> str:
        """A tool that processes sensitive data."""
        return f"Secret result: {data}"

    return server
async def test_same_session_can_access_all_its_tasks(task_server):
    """A single session can access all tasks it created."""
    async with Client(task_server) as client:
        # Submit two tasks with explicit, distinct task IDs.
        first = await client.call_tool(
            "secret_tool", {"data": "first"}, task=True, task_id="task-1"
        )
        second = await client.call_tool(
            "secret_tool", {"data": "second"}, task=True, task_id="task-2"
        )

        # Let both run to completion.
        await first.wait(timeout=2.0)
        await second.wait(timeout=2.0)

        # Both results are retrievable from the session that created them.
        first_result = await first.result()
        second_result = await second.result()
        assert "first" in str(first_result.data)
        assert "second" in str(second_result.data)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_security.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_status_notifications.py | """
Tests for notifications/tasks/status subscription mechanism (SEP-1686 lines 436-444).
Per the spec, servers MAY send notifications/tasks/status when task state changes.
This is an optional optimization that reduces client polling frequency.
These tests verify that the subscription mechanism works correctly without breaking
existing functionality. Notification delivery is best-effort and clients MUST NOT
rely on receiving them.
"""
import asyncio
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def notification_server():
    """Build a server with task-enabled tools, a prompt, and a resource for
    exercising status-notification subscriptions."""
    server = FastMCP("notification-test")

    @server.tool(task=True)
    async def quick_task(value: int) -> int:
        """Quick task that completes immediately."""
        return value * 2

    @server.tool(task=True)
    async def slow_task(duration: float = 0.1) -> str:
        """Slow task for testing working status."""
        await asyncio.sleep(duration)
        return "completed"

    @server.tool(task=True)
    async def failing_task() -> str:
        """Task that always fails."""
        raise ValueError("Task failed intentionally")

    @server.prompt(task=True)
    async def test_prompt(name: str) -> str:
        """Test prompt for background execution."""
        await asyncio.sleep(0.05)
        return f"Hello, {name}!"

    @server.resource("test://resource", task=True)
    async def test_resource() -> str:
        """Test resource for background execution."""
        await asyncio.sleep(0.05)
        return "resource content"

    return server
async def test_subscription_spawned_for_tool_task(notification_server: FastMCP):
    """Subscription task is spawned when tool task is created."""
    async with Client(notification_server) as client:
        # Creating the task should spawn a status-notification subscription.
        handle = await client.call_tool("quick_task", {"value": 5}, task=True)

        outcome = await handle
        assert outcome.data == 10
        # The subscription has no direct observation point; completing
        # cleanly without errors is the check that cleanup worked.
async def test_subscription_handles_task_completion(notification_server: FastMCP):
    """Subscription properly handles task completion and cleanup."""
    async with Client(notification_server) as client:
        # Each submitted task gets its own subscription.
        handles = [
            await client.call_tool("quick_task", {"value": v}, task=True)
            for v in (1, 2, 3)
        ]

        # All three resolve to doubled values.
        outcomes = [await handle for handle in handles]
        assert [outcome.data for outcome in outcomes] == [2, 4, 6]

        # Give the per-task subscriptions a moment to clean up.
        await asyncio.sleep(0.1)
async def test_subscription_handles_task_failure(notification_server: FastMCP):
    """Subscription properly handles task failure."""
    async with Client(notification_server) as client:
        handle = await client.call_tool("failing_task", {}, task=True)

        # Awaiting a failed task surfaces the failure as an exception.
        with pytest.raises(Exception):
            await handle

        # The subscription must absorb the failure and clean up quietly.
        await asyncio.sleep(0.1)
async def test_subscription_for_prompt_tasks(notification_server: FastMCP):
    """Subscriptions work for prompt tasks."""
    async with Client(notification_server) as client:
        handle = await client.get_prompt("test_prompt", {"name": "World"}, task=True)

        # A truthy result means the prompt rendered in the background.
        rendered = await handle
        assert rendered

        # Let the subscription finish its cleanup.
        await asyncio.sleep(0.1)
async def test_subscription_for_resource_tasks(notification_server: FastMCP):
    """Subscriptions work for resource tasks."""
    async with Client(notification_server) as client:
        handle = await client.read_resource("test://resource", task=True)

        # Truthy contents mean the resource was read in the background.
        contents = await handle
        assert contents

        # Let the subscription finish its cleanup.
        await asyncio.sleep(0.1)
async def test_subscriptions_cleanup_on_session_disconnect(
    notification_server: FastMCP,
):
    """Subscriptions are cleaned up when session disconnects."""
    async with Client(notification_server) as client:
        # Start a task that deliberately outlives the session.
        handle = await client.call_tool("slow_task", {"duration": 1.0}, task=True)
        captured_id = handle.task_id
        # Leaving the context closes the session; __aexit__ cancels the
        # notification subscriptions while the task keeps running in Docket.

    # Surviving to this point without a crash during cleanup is the real
    # assertion; the captured ID confirms the task was actually created.
    assert captured_id
async def test_multiple_concurrent_subscriptions(notification_server: FastMCP):
    """Multiple concurrent tasks each have their own subscription."""
    async with Client(notification_server) as client:
        # Submit ten tasks back to back.
        handles = [
            await client.call_tool("quick_task", {"value": i}, task=True)
            for i in range(10)
        ]

        # gather awaits every handle; all of them must resolve.
        outcomes = await asyncio.gather(*handles)
        assert len(outcomes) == 10

        # Allow the per-task subscriptions to wind down.
        await asyncio.sleep(0.1)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_status_notifications.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_tools.py | """
Tests for server-side tool task behavior.
Tests tool-specific task handling, parallel to test_task_prompts.py
and test_task_resources.py.
"""
import asyncio
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.client.tasks import ToolTask
@pytest.fixture
async def tool_server():
    """Build a server with one task-enabled tool and one that forbids tasks."""
    server = FastMCP("tool-task-server")

    @server.tool(task=True)
    async def simple_tool(message: str) -> str:
        """A simple tool for testing."""
        return f"Processed: {message}"

    @server.tool(task=False)
    async def sync_only_tool(message: str) -> str:
        """Tool with task=False."""
        return f"Sync: {message}"

    return server
async def test_synchronous_tool_call_unchanged(tool_server):
    """Tools without task metadata execute synchronously as before."""
    async with Client(tool_server) as client:
        # No task flag: the call resolves inline with the tool output.
        outcome = await client.call_tool("simple_tool", {"message": "hello"})
        assert "Processed: hello" in str(outcome)
async def test_tool_with_task_metadata_returns_immediately(tool_server):
    """Tools with task metadata return immediately with ToolTask object."""
    async with Client(tool_server) as client:
        handle = await client.call_tool("simple_tool", {"message": "test"}, task=True)

        assert handle
        # The call was deferred, and the handle is a ToolTask with a real ID.
        assert not handle.returned_immediately
        assert isinstance(handle, ToolTask)
        assert isinstance(handle.task_id, str)
        assert handle.task_id != ""
async def test_tool_task_executes_in_background(tool_server):
    """Tool task is submitted to Docket and executes in background.

    Two events coordinate with the tool body: the test waits until the tool
    has actually started, checks the intermediate status, then releases the
    tool and verifies the final result.
    """
    execution_started = asyncio.Event()
    execution_completed = asyncio.Event()

    @tool_server.tool(task=True)
    async def coordinated_tool() -> str:
        """Tool with coordination points."""
        execution_started.set()
        await execution_completed.wait()
        return "completed"

    async with Client(tool_server) as client:
        task = await client.call_tool("coordinated_tool", task=True)
        assert task
        assert not task.returned_immediately

        # Wait for the background worker to actually enter the tool body.
        await asyncio.wait_for(execution_started.wait(), timeout=2.0)

        # The tool is blocked on the event, so the task must still be working.
        # (Direct equality — the original single-element `in ["working"]`
        # membership test said the same thing less clearly.)
        status = await task.status()
        assert status.status == "working"

        # Unblock the tool and let the task run to completion.
        execution_completed.set()
        await task.wait(timeout=2.0)
        result = await task.result()
        assert result.data == "completed"
async def test_forbidden_mode_tool_rejects_task_calls(tool_server):
    """Tools with task=False (mode=forbidden) reject task-augmented calls."""
    async with Client(tool_server) as client:
        # task=True against a task=False tool resolves immediately ...
        task = await client.call_tool("sync_only_tool", {"message": "test"}, task=True)
        assert task
        assert task.returned_immediately
        # ... and its result is an error, per mode="forbidden" semantics.
        outcome = await task.result()
        assert outcome.is_error
        assert "does not support task-augmented execution" in str(outcome)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_tools.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/tasks/test_task_ttl.py | """
Tests for SEP-1686 ttl parameter handling.
Per the spec, servers MUST return ttl in all tasks/get responses,
and results should be retained for ttl milliseconds after completion.
"""
import asyncio
import pytest
from fastmcp import FastMCP
from fastmcp.client import Client
@pytest.fixture
async def keepalive_server():
    """Create a server for testing ttl behavior."""
    server = FastMCP("keepalive-test")

    @server.tool(task=True)
    async def quick_task(value: int) -> int:
        # Finishes almost instantly so tests can observe the completed state.
        return value * 2

    @server.tool(task=True)
    async def slow_task() -> str:
        # Sleeps long enough for tests to observe the working state.
        await asyncio.sleep(1)
        return "done"

    return server
async def test_keepalive_returned_in_submitted_state(keepalive_server: FastMCP):
    """ttl is returned in tasks/get even when task is submitted/working."""
    async with Client(keepalive_server) as client:
        # Submit with an explicit client-requested ttl.
        task = await client.call_tool(
            "slow_task",
            {},
            task=True,
            ttl=30000,  # 30 seconds (client-requested)
        )

        # Immediately after submission the slow task is still in flight.
        snapshot = await task.status()
        assert snapshot.status in ["working"]

        # ttl should be present per spec (MUST return in all responses)
        # TODO: Docket uses a global execution_ttl for all tasks, not per-task TTLs.
        # The spec allows servers to override client-requested TTL (line 431).
        # FastMCP returns the server's actual global TTL (60000ms default from Docket).
        # If Docket gains per-task TTL support, update this to verify client-requested TTL is respected.
        assert snapshot.ttl == 60000  # Server's global TTL, not client-requested 30000
async def test_keepalive_returned_in_completed_state(keepalive_server: FastMCP):
    """ttl is returned in tasks/get after task completes."""
    async with Client(keepalive_server) as client:
        # Submit a fast task and let it run to completion.
        task = await client.call_tool(
            "quick_task",
            {"value": 5},
            task=True,
            ttl=45000,  # Client-requested TTL
        )
        await task.wait(timeout=2.0)

        snapshot = await task.status()
        assert snapshot.status == "completed"
        # TODO: Docket uses global execution_ttl, not per-task TTLs.
        # Server returns its global TTL (60000ms), not the client-requested 45000ms.
        # This is spec-compliant - servers MAY override requested TTL (spec line 431).
        assert snapshot.ttl == 60000  # Server's global TTL, not client-requested 45000
async def test_default_keepalive_when_not_specified(keepalive_server: FastMCP):
    """Default ttl is used when client doesn't specify."""
    async with Client(keepalive_server) as client:
        # No ttl argument: the server's default applies.
        task = await client.call_tool("quick_task", {"value": 3}, task=True)
        await task.wait(timeout=2.0)

        snapshot = await task.status()
        # Default ttl is 60000ms (60 seconds).
        assert snapshot.ttl == 60000
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/tasks/test_task_ttl.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_dependencies.py | """Tests for Docket-style dependency injection in FastMCP."""
from contextlib import asynccontextmanager, contextmanager
import mcp.types as mcp_types
import pytest
from mcp.types import TextContent, TextResourceContents
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.dependencies import CurrentContext, Depends, Shared
from fastmcp.server.context import Context
# Sentinel return value asserted by the context-dependency tests below.
HUZZAH = "huzzah!"
class Connection:
    """Test connection that tracks whether it's currently open."""

    def __init__(self):
        # Starts closed; toggled by the async context-manager protocol.
        self.is_open = False

    async def __aenter__(self):
        self.is_open = True
        return self

    async def __aexit__(self, exc_kind, exc, trace):
        self.is_open = False
@asynccontextmanager
async def get_connection():
    """Dependency that provides an open connection."""
    # The connection is open for exactly the lifetime of the yield.
    async with Connection() as connection:
        yield connection
@pytest.fixture
def mcp():
    """Create a FastMCP server for testing."""
    server = FastMCP("test-server")
    return server
async def test_depends_with_sync_function(mcp: FastMCP):
    """Test that Depends works with sync dependency functions."""

    def get_config() -> dict[str, str]:
        return {"api_key": "secret123", "endpoint": "https://api.example.com"}

    @mcp.tool()
    def fetch_data(query: str, config: dict[str, str] = Depends(get_config)) -> str:
        return (
            f"Fetching '{query}' from {config['endpoint']} with key {config['api_key']}"
        )

    res = await mcp.call_tool("fetch_data", {"query": "users"})
    assert res.structured_content is not None
    rendered = res.structured_content["result"]
    # Both dependency-provided values must show up in the output.
    assert "Fetching 'users' from https://api.example.com" in rendered
    assert "secret123" in rendered
async def test_depends_with_async_function(mcp: FastMCP):
    """Test that Depends works with async dependency functions."""

    async def get_user_id() -> int:
        return 42

    @mcp.tool()
    async def greet_user(name: str, user_id: int = Depends(get_user_id)) -> str:
        return f"Hello {name}, your ID is {user_id}"

    res = await mcp.call_tool("greet_user", {"name": "Alice"})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "Hello Alice, your ID is 42"
async def test_depends_with_async_context_manager(mcp: FastMCP):
    """Test that Depends works with async context managers for resource management."""
    cleanup_called = False

    @asynccontextmanager
    async def get_database():
        db = "db_connection"
        try:
            yield db
        finally:
            # Record that teardown ran after the tool finished.
            nonlocal cleanup_called
            cleanup_called = True

    @mcp.tool()
    async def query_db(sql: str, db: str = Depends(get_database)) -> str:
        return f"Executing '{sql}' on {db}"

    res = await mcp.call_tool("query_db", {"sql": "SELECT * FROM users"})
    assert res.structured_content is not None
    rendered = res.structured_content["result"]
    assert "Executing 'SELECT * FROM users' on db_connection" in rendered
    assert cleanup_called
async def test_nested_dependencies(mcp: FastMCP):
    """Test that dependencies can depend on other dependencies."""

    def get_base_url() -> str:
        return "https://api.example.com"

    # This dependency itself declares a dependency.
    def get_api_client(base_url: str = Depends(get_base_url)) -> dict[str, str]:
        return {"base_url": base_url, "version": "v1"}

    @mcp.tool()
    async def call_api(
        endpoint: str, client: dict[str, str] = Depends(get_api_client)
    ) -> str:
        return f"Calling {client['base_url']}/{client['version']}/{endpoint}"

    res = await mcp.call_tool("call_api", {"endpoint": "users"})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "Calling https://api.example.com/v1/users"
async def test_dependencies_excluded_from_schema(mcp: FastMCP):
    """Test that dependency parameters don't appear in the tool schema."""

    def get_config() -> dict[str, str]:
        return {"key": "value"}

    @mcp.tool()
    async def my_tool(
        name: str, age: int, config: dict[str, str] = Depends(get_config)
    ) -> str:
        return f"{name} is {age} years old"

    listing = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
    tool = next(t for t in listing.tools if t.name == "my_tool")
    props = tool.inputSchema["properties"]
    # Only the two user-facing parameters may be advertised.
    assert "name" in props
    assert "age" in props
    assert "config" not in props
    assert len(props) == 2
async def test_current_context_dependency(mcp: FastMCP):
    """Test that CurrentContext dependency provides access to FastMCP Context."""

    @mcp.tool()
    def use_context(ctx: Context = CurrentContext()) -> str:
        assert isinstance(ctx, Context)
        return HUZZAH

    res = await mcp.call_tool("use_context", {})
    assert res.structured_content is not None
    assert res.structured_content["result"] == HUZZAH
async def test_current_context_and_legacy_context_coexist(mcp: FastMCP):
    """Test that CurrentContext dependency and legacy Context injection work together."""

    @mcp.tool()
    def use_both_contexts(
        legacy_ctx: Context,
        dep_ctx: Context = CurrentContext(),
    ) -> str:
        assert isinstance(legacy_ctx, Context)
        assert isinstance(dep_ctx, Context)
        # Both injection styles must yield the very same Context object.
        assert legacy_ctx is dep_ctx
        return HUZZAH

    res = await mcp.call_tool("use_both_contexts", {})
    assert res.structured_content is not None
    assert res.structured_content["result"] == HUZZAH
async def test_backward_compat_context_still_works(mcp: FastMCP):
    """Test that existing Context injection via type annotation still works."""

    @mcp.tool()
    async def get_request_id(ctx: Context) -> str:
        return ctx.request_id

    async with Client(mcp) as client:
        res = await client.call_tool("get_request_id", {})
        assert len(res.content) == 1
        part = res.content[0]
        assert isinstance(part, TextContent)
        # Request id must be a non-empty string.
        assert len(part.text) > 0
async def test_sync_tool_with_async_dependency(mcp: FastMCP):
    """Test that sync tools work with async dependencies."""

    async def fetch_config() -> str:
        return "loaded_config"

    @mcp.tool()
    def process_data(value: int, config: str = Depends(fetch_config)) -> str:
        return f"Processing {value} with {config}"

    res = await mcp.call_tool("process_data", {"value": 100})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "Processing 100 with loaded_config"
async def test_dependency_caching(mcp: FastMCP):
    """Test that dependencies are cached within a single tool call."""
    invocations = 0

    def expensive_dependency() -> int:
        nonlocal invocations
        invocations += 1
        return 42

    @mcp.tool()
    async def tool_with_cached_dep(
        dep1: int = Depends(expensive_dependency),
        dep2: int = Depends(expensive_dependency),
    ) -> str:
        return f"{dep1} + {dep2} = {dep1 + dep2}"

    res = await mcp.call_tool("tool_with_cached_dep", {})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "42 + 42 = 84"
    # Two parameters, one factory call: the value was cached within the call.
    assert invocations == 1
async def test_context_and_depends_together(mcp: FastMCP):
    """Test that Context type injection and Depends can be used together."""

    def get_multiplier() -> int:
        return 10

    @mcp.tool()
    async def mixed_deps(
        value: int, ctx: Context, multiplier: int = Depends(get_multiplier)
    ) -> str:
        assert isinstance(ctx, Context)
        assert ctx.request_id
        assert len(ctx.request_id) > 0
        return (
            f"Request {ctx.request_id}: {value} * {multiplier} = {value * multiplier}"
        )

    async with Client(mcp) as client:
        res = await client.call_tool("mixed_deps", {"value": 5})
        assert len(res.content) == 1
        part = res.content[0]
        assert isinstance(part, TextContent)
        # Output carries both the computed product and the request id prefix.
        assert "5 * 10 = 50" in part.text
        assert "Request " in part.text
async def test_resource_with_dependency(mcp: FastMCP):
    """Test that resources support dependency injection."""

    def get_storage_path() -> str:
        return "/data/config"

    @mcp.resource("config://settings")
    async def get_settings(storage: str = Depends(get_storage_path)) -> str:
        return f"Settings loaded from {storage}"

    res = await mcp.read_resource("config://settings")
    assert len(res.contents) == 1
    assert res.contents[0].content == "Settings loaded from /data/config"
async def test_resource_with_context_and_dependency(mcp: FastMCP):
    """Test that resources can use both Context and Depends."""

    def get_prefix() -> str:
        return "DATA"

    @mcp.resource("config://info")
    async def get_info(ctx: Context, prefix: str = Depends(get_prefix)) -> str:
        return f"{prefix}: Request {ctx.request_id}"

    async with Client(mcp) as client:
        res = await client.read_resource("config://info")
        assert len(res) == 1
        part = res[0]
        assert isinstance(part, TextResourceContents)
        assert "DATA: Request " in part.text
        # There must be a non-empty request id after the prefix.
        assert len(part.text.split("Request ")[1]) > 0
async def test_prompt_with_dependency(mcp: FastMCP):
    """Test that prompts support dependency injection."""

    def get_tone() -> str:
        return "friendly and helpful"

    @mcp.prompt()
    async def custom_prompt(topic: str, tone: str = Depends(get_tone)) -> str:
        return f"Write about {topic} in a {tone} tone"

    res = await mcp.render_prompt("custom_prompt", {"topic": "Python"})
    assert len(res.messages) == 1
    body = res.messages[0].content
    assert isinstance(body, TextContent)
    assert body.text == "Write about Python in a friendly and helpful tone"
async def test_prompt_with_context_and_dependency(mcp: FastMCP):
    """Test that prompts can use both Context and Depends."""

    def get_style() -> str:
        return "concise"

    @mcp.prompt()
    async def styled_prompt(
        query: str, ctx: Context, style: str = Depends(get_style)
    ) -> str:
        assert isinstance(ctx, Context)
        assert ctx.request_id
        return f"Answer '{query}' in a {style} style"

    async with Client(mcp) as client:
        res = await client.get_prompt("styled_prompt", {"query": "What is MCP?"})
        assert len(res.messages) == 1
        body = res.messages[0].content
        assert isinstance(body, TextContent)
        assert body.text == "Answer 'What is MCP?' in a concise style"
async def test_resource_template_with_dependency(mcp: FastMCP):
    """Test that resource templates support dependency injection.

    Fix: the template URI and the returned f-string had their `{filename}`
    placeholder corrupted to a literal "(unknown)". The assertions below
    (reading `data://config.txt` and expecting "Reading /var/data/config.txt")
    only hold when the `{filename}` URI parameter is restored in both places.
    """

    def get_storage_root() -> str:
        return "/var/data"

    # {filename} is extracted from the URI; base_path comes from the dependency.
    @mcp.resource("data://{filename}")
    async def get_file(filename: str, base_path: str = Depends(get_storage_root)) -> str:
        return f"Reading {base_path}/{filename}"

    result = await mcp.read_resource("data://config.txt")
    assert len(result.contents) == 1
    assert result.contents[0].content == "Reading /var/data/config.txt"
async def test_resource_template_with_context_and_dependency(mcp: FastMCP):
    """Test that resource templates can use both Context and Depends."""

    def get_version() -> str:
        return "v2"

    @mcp.resource("api://{endpoint}")
    async def call_endpoint(
        endpoint: str, ctx: Context, version: str = Depends(get_version)
    ) -> str:
        assert isinstance(ctx, Context)
        assert ctx.request_id
        return f"Calling {version}/{endpoint}"

    async with Client(mcp) as client:
        res = await client.read_resource("api://users")
        assert len(res) == 1
        part = res[0]
        assert isinstance(part, TextResourceContents)
        assert part.text == "Calling v2/users"
async def test_async_tool_context_manager_stays_open(mcp: FastMCP):
    """Test that context manager dependencies stay open during async tool execution.

    Context managers must remain open while the async function executes, not just
    while it's being called (which only returns a coroutine).
    """

    @mcp.tool()
    async def query_data(
        query: str,
        connection: Connection = Depends(get_connection),
    ) -> str:
        # The connection must still be open inside the tool body.
        assert connection.is_open
        return f"open={connection.is_open}"

    res = await mcp.call_tool("query_data", {"query": "test"})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "open=True"
async def test_async_resource_context_manager_stays_open(mcp: FastMCP):
    """Test that context manager dependencies stay open during async resource execution."""

    @mcp.resource("data://config")
    async def load_config(connection: Connection = Depends(get_connection)) -> str:
        assert connection.is_open
        return f"open={connection.is_open}"

    res = await mcp.read_resource("data://config")
    assert res.contents[0].content == "open=True"
async def test_async_resource_template_context_manager_stays_open(mcp: FastMCP):
    """Test that context manager dependencies stay open during async resource template execution."""

    @mcp.resource("user://{user_id}")
    async def get_user(
        user_id: str,
        connection: Connection = Depends(get_connection),
    ) -> str:
        assert connection.is_open
        return f"open={connection.is_open},user={user_id}"

    res = await mcp.read_resource("user://123")
    body = res.contents[0].content
    assert isinstance(body, str)
    assert "open=True" in body
async def test_async_prompt_context_manager_stays_open(mcp: FastMCP):
    """Test that context manager dependencies stay open during async prompt execution."""

    @mcp.prompt()
    async def research_prompt(
        topic: str,
        connection: Connection = Depends(get_connection),
    ) -> str:
        assert connection.is_open
        return f"open={connection.is_open},topic={topic}"

    res = await mcp.render_prompt("research_prompt", {"topic": "AI"})
    body = res.messages[0].content
    assert isinstance(body, TextContent)
    assert "open=True" in body.text
async def test_argument_validation_with_dependencies(mcp: FastMCP):
    """Test that user arguments are still validated when dependencies are present."""

    def get_config() -> dict[str, str]:
        return {"key": "value"}

    @mcp.tool()
    async def validated_tool(
        age: int,  # Should validate type
        config: dict[str, str] = Depends(get_config),
    ) -> str:
        return f"age={age}"

    # A well-typed argument passes through.
    res = await mcp.call_tool("validated_tool", {"age": 25})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "age=25"

    # A mistyped argument is rejected by pydantic validation.
    import pydantic

    with pytest.raises(pydantic.ValidationError):
        await mcp.call_tool("validated_tool", {"age": "not a number"})
async def test_connection_dependency_excluded_from_tool_schema(mcp: FastMCP):
    """Test that Connection dependency parameter is excluded from tool schema."""

    @mcp.tool()
    async def with_connection(
        name: str,
        connection: Connection = Depends(get_connection),
    ) -> str:
        return name

    listing = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
    tool = next(t for t in listing.tools if t.name == "with_connection")
    props = tool.inputSchema["properties"]
    assert "name" in props
    assert "connection" not in props
async def test_sync_tool_context_manager_stays_open(mcp: FastMCP):
    """Test that sync context manager dependencies work with tools."""
    conn = Connection()

    @contextmanager
    def get_sync_connection():
        conn.is_open = True
        try:
            yield conn
        finally:
            conn.is_open = False

    @mcp.tool()
    async def query_sync(
        query: str,
        connection: Connection = Depends(get_sync_connection),
    ) -> str:
        assert connection.is_open
        return f"open={connection.is_open}"

    res = await mcp.call_tool("query_sync", {"query": "test"})
    assert res.structured_content is not None
    assert res.structured_content["result"] == "open=True"
    # The context manager closed the connection after the call.
    assert not conn.is_open
async def test_sync_resource_context_manager_stays_open(mcp: FastMCP):
    """Test that sync context manager dependencies work with resources."""
    conn = Connection()

    @contextmanager
    def get_sync_connection():
        conn.is_open = True
        try:
            yield conn
        finally:
            conn.is_open = False

    @mcp.resource("data://sync")
    async def load_sync(connection: Connection = Depends(get_sync_connection)) -> str:
        assert connection.is_open
        return f"open={connection.is_open}"

    res = await mcp.read_resource("data://sync")
    assert res.contents[0].content == "open=True"
    # Teardown ran once the resource finished rendering.
    assert not conn.is_open
async def test_sync_resource_template_context_manager_stays_open(mcp: FastMCP):
    """Test that sync context manager dependencies work with resource templates."""
    conn = Connection()

    @contextmanager
    def get_sync_connection():
        conn.is_open = True
        try:
            yield conn
        finally:
            conn.is_open = False

    @mcp.resource("item://{item_id}")
    async def get_item(
        item_id: str,
        connection: Connection = Depends(get_sync_connection),
    ) -> str:
        assert connection.is_open
        return f"open={connection.is_open},item={item_id}"

    res = await mcp.read_resource("item://456")
    body = res.contents[0].content
    assert isinstance(body, str)
    assert "open=True" in body
    # Teardown ran once the template finished rendering.
    assert not conn.is_open
async def test_sync_prompt_context_manager_stays_open(mcp: FastMCP):
    """Test that sync context manager dependencies work with prompts."""
    conn = Connection()

    @contextmanager
    def get_sync_connection():
        conn.is_open = True
        try:
            yield conn
        finally:
            conn.is_open = False

    @mcp.prompt()
    async def sync_prompt(
        topic: str,
        connection: Connection = Depends(get_sync_connection),
    ) -> str:
        assert connection.is_open
        return f"open={connection.is_open},topic={topic}"

    res = await mcp.render_prompt("sync_prompt", {"topic": "test"})
    body = res.messages[0].content
    assert isinstance(body, TextContent)
    assert "open=True" in body.text
    # Teardown ran once the prompt finished rendering.
    assert not conn.is_open
async def test_external_user_cannot_override_dependency(mcp: FastMCP):
    """Test that external MCP clients cannot override dependency parameters."""

    def get_admin_status() -> str:
        return "not_admin"

    @mcp.tool()
    async def check_permission(
        action: str, admin: str = Depends(get_admin_status)
    ) -> str:
        return f"action={action},admin={admin}"

    # The dependency parameter must not be advertised in the schema.
    listing = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
    tool = next(t for t in listing.tools if t.name == "check_permission")
    assert "admin" not in tool.inputSchema["properties"]

    # A normal call resolves the dependency server-side.
    res = await mcp.call_tool("check_permission", {"action": "read"})
    assert res.structured_content is not None
    assert "admin=not_admin" in res.structured_content["result"]

    # Passing the dependency name as an argument is rejected (not in schema).
    import pydantic

    with pytest.raises(pydantic.ValidationError):
        await mcp.call_tool("check_permission", {"action": "read", "admin": "hacker"})
async def test_prompt_dependency_cannot_be_overridden_externally(mcp: FastMCP):
    """Test that external callers cannot override prompt dependencies.

    This is a security test - dependencies should NEVER be overridable from
    outside the server, even for prompts which don't validate against strict schemas.
    """

    def get_secret() -> str:
        return "real_secret"

    @mcp.prompt()
    async def secure_prompt(topic: str, secret: str = Depends(get_secret)) -> str:
        return f"Topic: {topic}, Secret: {secret}"

    # Without an override attempt the dependency value is used.
    res = await mcp.render_prompt("secure_prompt", {"topic": "test"})
    body = res.messages[0].content
    assert isinstance(body, TextContent)
    assert "Secret: real_secret" in body.text

    # An attempted override of the dependency argument must be ignored.
    res = await mcp.render_prompt(
        "secure_prompt",
        {"topic": "test", "secret": "HACKED"},  # Attempt override
    )
    body = res.messages[0].content
    assert isinstance(body, TextContent)
    # The real dependency value wins; the injected value never appears.
    assert "Secret: real_secret" in body.text
    assert "HACKED" not in body.text
async def test_resource_dependency_cannot_be_overridden_externally(mcp: FastMCP):
    """Test that external callers cannot override resource dependencies."""

    def get_api_key() -> str:
        return "real_api_key"

    @mcp.resource("data://config")
    async def get_config(api_key: str = Depends(get_api_key)) -> str:
        return f"API Key: {api_key}"

    body = (await mcp.read_resource("data://config")).contents[0].content
    assert isinstance(body, str)
    assert "API Key: real_api_key" in body
    # Resources don't accept arguments from clients (static URI)
    # so this scenario is less of a concern, but documenting it
async def test_resource_template_dependency_cannot_be_overridden_externally(
    mcp: FastMCP,
):
    """Test that external callers cannot override resource template dependencies.

    Resource templates extract parameters from the URI path, so there's a risk
    that a dependency parameter name could match a URI parameter.
    """

    def get_auth_token() -> str:
        return "real_token"

    @mcp.resource("user://{user_id}")
    async def get_user(user_id: str, token: str = Depends(get_auth_token)) -> str:
        return f"User: {user_id}, Token: {token}"

    body = (await mcp.read_resource("user://123")).contents[0].content
    assert isinstance(body, str)
    assert "User: 123, Token: real_token" in body
    # Try to inject token via URI (shouldn't be possible with this pattern)
    # But if URI was user://{token}, it could extract it
async def test_resource_template_uri_cannot_match_dependency_name(mcp: FastMCP):
    """Test that URI parameters cannot have the same name as dependencies.

    If a URI template tries to use a parameter name that's also a dependency,
    the template creation should fail because the dependency is excluded from
    the user-facing signature.
    """

    def get_token() -> str:
        return "real_token"

    # {token} in the URI collides with the dependency parameter: must raise.
    with pytest.raises(ValueError, match="URI parameters.*must be a subset"):

        @mcp.resource("auth://{token}/validate")
        async def validate(token: str = Depends(get_token)) -> str:
            return f"Validating with: {token}"
async def test_toolerror_propagates_from_dependency(mcp: FastMCP):
    """ToolError raised in a dependency should propagate unchanged (issue #2633).

    When a dependency raises ToolError, it should not be wrapped in RuntimeError.
    This allows developers to use ToolError for validation in dependencies.
    """
    from fastmcp.exceptions import ToolError

    def validate_client_id() -> str:
        raise ToolError("Client ID is required - select a client first")

    @mcp.tool()
    async def my_tool(client_id: str = Depends(validate_client_id)) -> str:
        return f"Working with client: {client_id}"

    async with Client(mcp) as client:
        # The server converts the ToolError into an error result.
        res = await client.call_tool("my_tool", {}, raise_on_error=False)
        assert res.is_error
        # The original message survives intact (no RuntimeError wrapping).
        assert isinstance(res.content[0], TextContent)
        assert res.content[0].text == "Client ID is required - select a client first"
async def test_validation_error_propagates_from_dependency(mcp: FastMCP):
    """ValidationError raised in a dependency should propagate unchanged."""
    from fastmcp.exceptions import ValidationError

    def validate_input() -> str:
        raise ValidationError("Invalid input format")

    @mcp.tool()
    async def tool_with_validation(val: str = Depends(validate_input)) -> str:
        return val

    async with Client(mcp) as client:
        # The server re-raises the ValidationError as an error result,
        # preserving the original message (no RuntimeError wrapping).
        res = await client.call_tool("tool_with_validation", {}, raise_on_error=False)
        assert res.is_error
        assert isinstance(res.content[0], TextContent)
        assert res.content[0].text == "Invalid input format"
class TestDependencyInjection:
    """Tests for the uncalled-for DI engine."""

    def test_is_docket_available(self):
        """Test is_docket_available returns True when docket is installed."""
        from fastmcp.server.dependencies import is_docket_available

        assert is_docket_available() is True

    def test_require_docket_passes_when_installed(self):
        """Test require_docket doesn't raise when docket is installed."""
        from fastmcp.server.dependencies import require_docket

        # Must not raise.
        require_docket("test feature")

    def test_dependency_class_exists(self):
        """Test Dependency and Depends are importable from fastmcp."""
        from fastmcp.dependencies import Dependency, Depends

        assert Dependency is not None
        assert Depends is not None

    def test_depends_works(self):
        """Test Depends() creates proper dependency wrapper."""
        from uncalled_for.resolution import _Depends

        from fastmcp.dependencies import Depends

        def get_value() -> str:
            return "test_value"

        wrapper = Depends(get_value)
        assert isinstance(wrapper, _Depends)
        # The wrapper keeps a reference to the original factory.
        assert wrapper.factory is get_value

    async def test_depends_import_from_fastmcp(self):
        """Test that Depends can be imported from fastmcp.dependencies."""
        from fastmcp.dependencies import Depends

        def get_config() -> dict:
            return {"key": "value"}

        assert Depends(get_config) is not None

    def test_get_dependency_parameters(self):
        """Test get_dependency_parameters finds dependency defaults."""
        from uncalled_for import get_dependency_parameters
        from uncalled_for.resolution import _Depends

        from fastmcp.dependencies import Depends

        def get_db() -> str:
            return "database"

        def my_func(name: str, db: str = Depends(get_db)) -> str:
            return f"{name}: {db}"

        found = get_dependency_parameters(my_func)
        assert "db" in found
        assert isinstance(found["db"], _Depends)
        assert found["db"].factory is get_db
class TestAuthDependencies:
    """Tests for authentication dependencies (CurrentAccessToken, TokenClaim)."""

    def test_current_access_token_is_importable(self):
        """Test that CurrentAccessToken can be imported."""
        from fastmcp.server.dependencies import CurrentAccessToken

        assert CurrentAccessToken is not None

    def test_token_claim_is_importable(self):
        """Test that TokenClaim can be imported."""
        from fastmcp.server.dependencies import TokenClaim

        assert TokenClaim is not None

    def test_current_access_token_is_dependency(self):
        """Test that CurrentAccessToken is a Dependency instance."""
        from fastmcp.dependencies import Dependency
        from fastmcp.server.dependencies import _CurrentAccessToken

        assert isinstance(_CurrentAccessToken(), Dependency)

    def test_token_claim_creates_dependency(self):
        """Test that TokenClaim creates a Dependency instance."""
        from fastmcp.dependencies import Dependency
        from fastmcp.server.dependencies import TokenClaim, _TokenClaim

        claim = TokenClaim("oid")
        assert isinstance(claim, _TokenClaim)
        assert isinstance(claim, Dependency)
        assert claim.claim_name == "oid"

    async def test_current_access_token_raises_without_token(self):
        """Test that CurrentAccessToken raises when no token is available."""
        from fastmcp.server.dependencies import _CurrentAccessToken

        with pytest.raises(RuntimeError, match="No access token found"):
            await _CurrentAccessToken().__aenter__()

    async def test_token_claim_raises_without_token(self):
        """Test that TokenClaim raises when no token is available."""
        from fastmcp.server.dependencies import _TokenClaim

        with pytest.raises(RuntimeError, match="No access token available"):
            await _TokenClaim("oid").__aenter__()

    async def test_current_access_token_excluded_from_tool_schema(self, mcp: FastMCP):
        """Test that CurrentAccessToken dependency is excluded from tool schema."""
        # Local import: the module-level name `mcp` is shadowed by the fixture.
        import mcp.types as mcp_types

        from fastmcp.server.auth import AccessToken
        from fastmcp.server.dependencies import CurrentAccessToken

        @mcp.tool()
        async def tool_with_token(
            name: str,
            token: AccessToken = CurrentAccessToken(),
        ) -> str:
            return name

        listing = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        tool = next(t for t in listing.tools if t.name == "tool_with_token")
        assert "name" in tool.inputSchema["properties"]
        assert "token" not in tool.inputSchema["properties"]

    async def test_token_claim_excluded_from_tool_schema(self, mcp: FastMCP):
        """Test that TokenClaim dependency is excluded from tool schema."""
        # Local import: the module-level name `mcp` is shadowed by the fixture.
        import mcp.types as mcp_types

        from fastmcp.server.dependencies import TokenClaim

        @mcp.tool()
        async def tool_with_claim(
            name: str,
            user_id: str = TokenClaim("oid"),
        ) -> str:
            return name

        listing = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        tool = next(t for t in listing.tools if t.name == "tool_with_claim")
        assert "name" in tool.inputSchema["properties"]
        assert "user_id" not in tool.inputSchema["properties"]

    def test_current_access_token_exported_from_all(self):
        """Test that CurrentAccessToken is exported from __all__."""
        from fastmcp.server import dependencies

        assert "CurrentAccessToken" in dependencies.__all__

    def test_token_claim_exported_from_all(self):
        """Test that TokenClaim is exported from __all__."""
        from fastmcp.server import dependencies

        assert "TokenClaim" in dependencies.__all__
class TestSharedDependencies:
    """Tests for Shared() dependencies that resolve once and are reused."""

    async def test_shared_sync_function(self, mcp: FastMCP):
        """Shared dependency from a sync function resolves and is reused."""
        call_count = 0

        def get_config() -> dict[str, str]:
            nonlocal call_count
            call_count += 1
            return {"key": "value"}

        @mcp.tool()
        async def tool_a(config: dict[str, str] = Shared(get_config)) -> str:
            return config["key"]

        @mcp.tool()
        async def tool_b(config: dict[str, str] = Shared(get_config)) -> str:
            return config["key"]

        async with Client(mcp) as client:
            result_a = await client.call_tool("tool_a", {})
            result_b = await client.call_tool("tool_b", {})
            assert result_a.content[0].text == "value"
            assert result_b.content[0].text == "value"
            # Factory ran once even though two tools both depend on it.
            assert call_count == 1

    async def test_shared_async_function(self, mcp: FastMCP):
        """Shared dependency from an async function resolves and is reused."""
        call_count = 0

        async def get_session() -> str:
            nonlocal call_count
            call_count += 1
            return "session-abc"

        @mcp.tool()
        async def tool_a(session: str = Shared(get_session)) -> str:
            return session

        @mcp.tool()
        async def tool_b(session: str = Shared(get_session)) -> str:
            return session

        async with Client(mcp) as client:
            result_a = await client.call_tool("tool_a", {})
            result_b = await client.call_tool("tool_b", {})
            assert result_a.content[0].text == "session-abc"
            assert result_b.content[0].text == "session-abc"
            # Async factory also resolved exactly once.
            assert call_count == 1

    async def test_shared_async_context_manager(self, mcp: FastMCP):
        """Shared dependency from an async context manager stays open across calls."""
        enter_count = 0

        @asynccontextmanager
        async def get_connection():
            nonlocal enter_count
            enter_count += 1
            conn = Connection()
            async with conn:
                yield conn

        @mcp.tool()
        async def tool_a(conn: Connection = Shared(get_connection)) -> bool:
            return conn.is_open

        @mcp.tool()
        async def tool_b(conn: Connection = Shared(get_connection)) -> bool:
            return conn.is_open

        async with Client(mcp) as client:
            result_a = await client.call_tool("tool_a", {})
            result_b = await client.call_tool("tool_b", {})
            # Both tools saw a live (entered, not yet exited) connection.
            assert result_a.content[0].text == "true"
            assert result_b.content[0].text == "true"
            assert enter_count == 1

    async def test_shared_with_depends(self, mcp: FastMCP):
        """Shared and Depends can coexist in the same tool."""
        shared_calls = 0
        depends_calls = 0

        def get_config() -> str:
            nonlocal shared_calls
            shared_calls += 1
            return "shared-config"

        def get_request_id() -> str:
            nonlocal depends_calls
            depends_calls += 1
            return "request-123"

        @mcp.tool()
        async def my_tool(
            config: str = Shared(get_config),
            request_id: str = Depends(get_request_id),
        ) -> str:
            return f"{config}/{request_id}"

        async with Client(mcp) as client:
            result1 = await client.call_tool("my_tool", {})
            result2 = await client.call_tool("my_tool", {})
            assert result1.content[0].text == "shared-config/request-123"
            assert result2.content[0].text == "shared-config/request-123"
            # Shared resolves once; Depends resolves on every call.
            assert shared_calls == 1
            assert depends_calls == 2

    async def test_shared_excluded_from_schema(self, mcp: FastMCP):
        """Shared dependencies are not exposed in the tool schema."""

        def get_db() -> str:
            return "db"

        @mcp.tool()
        async def my_tool(name: str, db: str = Shared(get_db)) -> str:
            return name

        result = await mcp._list_tools_mcp(mcp_types.ListToolsRequest())
        tool = next(t for t in result.tools if t.name == "my_tool")
        assert "name" in tool.inputSchema["properties"]
        assert "db" not in tool.inputSchema["properties"]

    async def test_shared_in_resource(self, mcp: FastMCP):
        """Shared dependencies work in resource functions."""
        call_count = 0

        def get_config() -> str:
            nonlocal call_count
            call_count += 1
            return "resource-config"

        @mcp.resource("test://config")
        async def config_resource(config: str = Shared(get_config)) -> str:
            return config

        async with Client(mcp) as client:
            result = await client.read_resource("test://config")
            assert result[0].text == "resource-config"
            # Second read reuses the cached value rather than re-resolving.
            result = await client.read_resource("test://config")
            assert result[0].text == "resource-config"
            assert call_count == 1

    async def test_shared_in_prompt(self, mcp: FastMCP):
        """Shared dependencies work in prompt functions."""
        call_count = 0

        def get_system_prompt() -> str:
            nonlocal call_count
            call_count += 1
            return "You are a helpful assistant."

        @mcp.prompt()
        async def my_prompt(topic: str, system: str = Shared(get_system_prompt)) -> str:
            return f"{system} Talk about {topic}."

        async with Client(mcp) as client:
            result = await client.get_prompt("my_prompt", {"topic": "dogs"})
            assert (
                "You are a helpful assistant. Talk about dogs."
                in result.messages[0].content.text
            )
            result = await client.get_prompt("my_prompt", {"topic": "cats"})
            assert (
                "You are a helpful assistant. Talk about cats."
                in result.messages[0].content.text
            )
            # Both prompt renders shared the single resolved system prompt.
            assert call_count == 1
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_dependencies.py",
"license": "Apache License 2.0",
"lines": 811,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/server/test_server_docket.py | """Tests for Docket integration in FastMCP."""
import asyncio
from contextlib import asynccontextmanager
from docket import Docket
from docket.worker import Worker
from fastmcp import FastMCP
from fastmcp.client import Client
from fastmcp.dependencies import CurrentDocket, CurrentWorker
from fastmcp.server.dependencies import get_context
HUZZAH = "huzzah!"  # sentinel returned by test tools and asserted in results
async def test_docket_not_initialized_without_task_components():
    """Docket is only initialized when task-enabled components exist."""
    mcp = FastMCP("test-server")

    @mcp.tool()
    def regular_tool() -> str:
        return "no docket needed"

    async with Client(mcp) as client:
        # Docket should not be initialized
        assert mcp._docket is None

        # Regular tools still work
        result = await client.call_tool("regular_tool", {})
        assert result.data == "no docket needed"
async def test_current_docket():
    """CurrentDocket dependency provides access to Docket instance."""
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.tool()
    def check_docket(docket: Docket = CurrentDocket()) -> str:
        # This assertion executes server-side during the call_tool below.
        assert isinstance(docket, Docket)
        return HUZZAH

    async with Client(mcp) as client:
        result = await client.call_tool("check_docket", {})
        assert HUZZAH in str(result)
async def test_current_worker():
    """CurrentWorker dependency provides access to Worker instance."""
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.tool()
    def check_worker(
        worker: Worker = CurrentWorker(),
        docket: Docket = CurrentDocket(),
    ) -> str:
        assert isinstance(worker, Worker)
        # The injected worker must be bound to the same Docket instance.
        assert worker.docket is docket
        return HUZZAH

    async with Client(mcp) as client:
        result = await client.call_tool("check_worker", {})
        assert HUZZAH in str(result)
async def test_worker_executes_background_tasks():
    """Verify that the Docket Worker is running and executes tasks."""
    task_completed = asyncio.Event()
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.tool()
    async def schedule_work(
        task_name: str,
        docket: Docket = CurrentDocket(),
    ) -> str:
        """Schedule a background task."""

        async def background_task(name: str):
            """Simple background task that signals completion."""
            task_completed.set()

        # Schedule the task (Worker running in background will execute it)
        await docket.add(background_task)(task_name)
        return f"Scheduled {task_name}"

    async with Client(mcp) as client:
        result = await client.call_tool("schedule_work", {"task_name": "test-task"})
        assert "Scheduled test-task" in str(result)

        # Wait for background task to execute (max 2 seconds)
        await asyncio.wait_for(task_completed.wait(), timeout=2.0)
async def test_current_docket_in_resource():
    """CurrentDocket works in resources."""
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.resource("docket://info")
    def get_docket_info(docket: Docket = CurrentDocket()) -> str:
        # Runs server-side when the resource is read below.
        assert isinstance(docket, Docket)
        return HUZZAH

    async with Client(mcp) as client:
        result = await client.read_resource("docket://info")
        assert HUZZAH in str(result)
async def test_current_docket_in_prompt():
    """CurrentDocket works in prompts."""
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.prompt()
    def task_prompt(task_type: str, docket: Docket = CurrentDocket()) -> str:
        # Runs server-side when the prompt is rendered below.
        assert isinstance(docket, Docket)
        return HUZZAH

    async with Client(mcp) as client:
        result = await client.get_prompt("task_prompt", {"task_type": "background"})
        assert HUZZAH in str(result)
async def test_current_docket_in_resource_template():
    """CurrentDocket works in resource templates."""
    mcp = FastMCP("test-server")

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    # Templated URI: {task_id} is filled from the read_resource path below.
    @mcp.resource("docket://tasks/{task_id}")
    def get_task_status(task_id: str, docket: Docket = CurrentDocket()) -> str:
        assert isinstance(docket, Docket)
        return HUZZAH

    async with Client(mcp) as client:
        result = await client.read_resource("docket://tasks/123")
        assert HUZZAH in str(result)
async def test_concurrent_calls_maintain_isolation():
    """Multiple concurrent calls each get the same Docket instance."""
    mcp = FastMCP("test-server")
    docket_ids = []

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.tool()
    def capture_docket_id(call_num: int, docket: Docket = CurrentDocket()) -> str:
        # Record the identity of the injected Docket for each call.
        docket_ids.append((call_num, id(docket)))
        return HUZZAH

    async with Client(mcp) as client:
        results = await asyncio.gather(
            client.call_tool("capture_docket_id", {"call_num": 1}),
            client.call_tool("capture_docket_id", {"call_num": 2}),
            client.call_tool("capture_docket_id", {"call_num": 3}),
        )
        for result in results:
            assert HUZZAH in str(result)

    # All calls should see the same Docket instance
    assert len(docket_ids) == 3
    first_id = docket_ids[0][1]
    assert all(docket_id == first_id for _, docket_id in docket_ids)
async def test_user_lifespan_still_works_with_docket():
    """User-provided lifespan works correctly alongside Docket."""
    lifespan_entered = False

    @asynccontextmanager
    async def custom_lifespan(server: FastMCP):
        nonlocal lifespan_entered
        lifespan_entered = True
        # Value yielded here becomes the request's lifespan_context.
        yield {"custom_data": "test_value"}

    mcp = FastMCP("test-server", lifespan=custom_lifespan)

    # Need a task-enabled component to trigger Docket initialization
    @mcp.tool(task=True)
    async def _trigger_docket() -> str:
        return "trigger"

    @mcp.tool()
    def check_both(docket: Docket = CurrentDocket()) -> str:
        assert isinstance(docket, Docket)
        ctx = get_context()
        assert ctx.request_context is not None
        # The custom lifespan's payload must survive Docket integration.
        lifespan_data = ctx.request_context.lifespan_context
        assert lifespan_data.get("custom_data") == "test_value"
        return HUZZAH

    async with Client(mcp) as client:
        assert lifespan_entered
        result = await client.call_tool("check_both", {})
        assert HUZZAH in str(result)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/server/test_server_docket.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/auth/mounted/client.py | """Mounted OAuth servers client example for FastMCP.
This example demonstrates connecting to multiple mounted OAuth-protected MCP servers.
To run:
python client.py
"""
import asyncio
from fastmcp.client import Client
GITHUB_URL = "http://127.0.0.1:8000/api/mcp/github/mcp"  # GitHub-protected MCP endpoint
GOOGLE_URL = "http://127.0.0.1:8000/api/mcp/google/mcp"  # Google-protected MCP endpoint
async def main():
    """Authenticate against both mounted OAuth servers and list their tools.

    Raises:
        Exception: Re-raises any failure from ping/authentication so the
            script exits non-zero when a server cannot be reached.
    """

    async def check_server(label: str, url: str) -> None:
        """Connect to one OAuth-protected server, verify auth, and list tools."""
        print(f"\n--- {label} Server ---")
        try:
            async with Client(url, auth="oauth") as client:
                assert await client.ping()
                print("✅ Successfully authenticated!")

                tools = await client.list_tools()
                print(f"🔧 Available tools ({len(tools)}):")
                for tool in tools:
                    print(f"  - {tool.name}: {tool.description}")
        except Exception as e:
            print(f"❌ Authentication failed: {e}")
            raise

    # Fixed: the status messages were mojibake that split string literals
    # across physical lines (a syntax error); restored as single literals.
    # The previously duplicated GitHub/Google blocks now share one helper.
    await check_server("GitHub", GITHUB_URL)
    await check_server("Google", GOOGLE_URL)
if __name__ == "__main__":
    # Script entry point: drive the async client flow to completion.
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/auth/mounted/client.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/fastmcp:examples/auth/mounted/server.py | """Mounted OAuth servers example for FastMCP.
This example demonstrates mounting multiple OAuth-protected MCP servers in a single
application, each with its own provider. It showcases RFC 8414 path-aware discovery
where each server has its own authorization server metadata endpoint.
URL structure:
- GitHub MCP: http://localhost:8000/api/mcp/github/mcp
- Google MCP: http://localhost:8000/api/mcp/google/mcp
- GitHub discovery: http://localhost:8000/.well-known/oauth-authorization-server/api/mcp/github
- Google discovery: http://localhost:8000/.well-known/oauth-authorization-server/api/mcp/google
Required environment variables:
- FASTMCP_SERVER_AUTH_GITHUB_CLIENT_ID: Your GitHub OAuth app client ID
- FASTMCP_SERVER_AUTH_GITHUB_CLIENT_SECRET: Your GitHub OAuth app client secret
- FASTMCP_SERVER_AUTH_GOOGLE_CLIENT_ID: Your Google OAuth client ID
- FASTMCP_SERVER_AUTH_GOOGLE_CLIENT_SECRET: Your Google OAuth client secret
To run:
python server.py
"""
import os
import uvicorn
from starlette.applications import Starlette
from starlette.routing import Mount
from fastmcp import FastMCP
from fastmcp.server.auth.providers.github import GitHubProvider
from fastmcp.server.auth.providers.google import GoogleProvider
# Configuration
ROOT_URL = "http://localhost:8000"  # public origin used in OAuth metadata URLs
API_PREFIX = "/api/mcp"  # mount prefix shared by both MCP servers

# --- GitHub OAuth Server ---
# Credentials come from the environment; the `or ""` fallback keeps
# import-time construction from raising when they are unset.
github_auth = GitHubProvider(
    client_id=os.getenv("FASTMCP_SERVER_AUTH_GITHUB_CLIENT_ID") or "",
    client_secret=os.getenv("FASTMCP_SERVER_AUTH_GITHUB_CLIENT_SECRET") or "",
    base_url=f"{ROOT_URL}{API_PREFIX}/github",
    redirect_path="/auth/callback/github",
)
github_mcp = FastMCP("GitHub Server", auth=github_auth)
# Minimal tool used to exercise the GitHub OAuth flow end to end.
@github_mcp.tool
def github_echo(message: str) -> str:
    """Echo from the GitHub-authenticated server."""
    return f"[GitHub] {message}"
# Static identification tool for the GitHub-protected server.
@github_mcp.tool
def github_info() -> str:
    """Get info about the GitHub server."""
    return "This is the GitHub OAuth protected MCP server"
# --- Google OAuth Server ---
# Mirrors the GitHub setup above, with Google-specific env vars and paths.
google_auth = GoogleProvider(
    client_id=os.getenv("FASTMCP_SERVER_AUTH_GOOGLE_CLIENT_ID") or "",
    client_secret=os.getenv("FASTMCP_SERVER_AUTH_GOOGLE_CLIENT_SECRET") or "",
    base_url=f"{ROOT_URL}{API_PREFIX}/google",
    redirect_path="/auth/callback/google",
)
google_mcp = FastMCP("Google Server", auth=google_auth)
# Minimal tool used to exercise the Google OAuth flow end to end.
@google_mcp.tool
def google_echo(message: str) -> str:
    """Echo from the Google-authenticated server."""
    return f"[Google] {message}"
# Static identification tool for the Google-protected server.
@google_mcp.tool
def google_info() -> str:
    """Get info about the Google server."""
    return "This is the Google OAuth protected MCP server"
# --- Create ASGI apps ---
github_app = github_mcp.http_app(path="/mcp")
google_app = google_mcp.http_app(path="/mcp")

# Get well-known routes for each provider (path-aware per RFC 8414)
github_well_known = github_auth.get_well_known_routes(mcp_path="/mcp")
google_well_known = google_auth.get_well_known_routes(mcp_path="/mcp")

# --- Combine into single application ---
# Note: Each provider has its own path-aware discovery endpoint:
#   - /.well-known/oauth-authorization-server/api/mcp/github
#   - /.well-known/oauth-authorization-server/api/mcp/google
app = Starlette(
    routes=[
        # Well-known routes at root level (path-aware)
        *github_well_known,
        *google_well_known,
        # MCP servers under /api/mcp prefix
        Mount(f"{API_PREFIX}/github", app=github_app),
        Mount(f"{API_PREFIX}/google", app=google_app),
    ],
    # Use one of the app lifespans (they're functionally equivalent)
    lifespan=github_app.lifespan,
)
if __name__ == "__main__":
    # Print the endpoint map before serving, so users know where to point
    # clients and where OAuth discovery metadata lives.
    print("Starting mounted OAuth servers...")
    print(f"  GitHub MCP: {ROOT_URL}{API_PREFIX}/github/mcp")
    print(f"  Google MCP: {ROOT_URL}{API_PREFIX}/google/mcp")
    print()
    print("Discovery endpoints (RFC 8414 path-aware):")
    print(
        f"  GitHub: {ROOT_URL}/.well-known/oauth-authorization-server{API_PREFIX}/github"
    )
    print(
        f"  Google: {ROOT_URL}/.well-known/oauth-authorization-server{API_PREFIX}/google"
    )
    print()
    uvicorn.run(app, host="0.0.0.0", port=8000)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/auth/mounted/server.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:tests/deprecated/test_openapi_deprecations.py | """Tests for OpenAPI-related deprecations in 2.14."""
import importlib
import warnings
import pytest
pytestmark = pytest.mark.filterwarnings("default::DeprecationWarning")
class TestExperimentalOpenAPIImportDeprecation:
    """Test experimental OpenAPI import path deprecations."""

    def test_experimental_server_openapi_import_warns(self):
        """Importing from fastmcp.experimental.server.openapi should warn."""
        import fastmcp.experimental.server.openapi

        # reload() re-executes the module body so the import-time
        # DeprecationWarning fires even if the module is already cached.
        with pytest.warns(
            DeprecationWarning,
            match=r"Importing from fastmcp\.experimental\.server\.openapi is deprecated",
        ):
            importlib.reload(fastmcp.experimental.server.openapi)

    def test_experimental_utilities_openapi_import_warns(self):
        """Importing from fastmcp.experimental.utilities.openapi should warn."""
        import fastmcp.experimental.utilities.openapi

        # Same reload trick as above for the utilities shim module.
        with pytest.warns(
            DeprecationWarning,
            match=r"Importing from fastmcp\.experimental\.utilities\.openapi is deprecated",
        ):
            importlib.reload(fastmcp.experimental.utilities.openapi)

    def test_experimental_imports_resolve_to_same_classes(self):
        """Experimental imports should resolve to the same classes as main imports."""
        # Suppress the expected deprecation warnings; this test only checks
        # object identity between the old and new import paths.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            from fastmcp.experimental.server.openapi import (
                FastMCPOpenAPI as ExpFastMCPOpenAPI,
            )
            from fastmcp.experimental.server.openapi import MCPType as ExpMCPType
            from fastmcp.experimental.server.openapi import RouteMap as ExpRouteMap
            from fastmcp.experimental.utilities.openapi import (
                HTTPRoute as ExpHTTPRoute,
            )

        from fastmcp.server.openapi import FastMCPOpenAPI, MCPType, RouteMap
        from fastmcp.utilities.openapi import HTTPRoute

        assert FastMCPOpenAPI is ExpFastMCPOpenAPI
        assert RouteMap is ExpRouteMap
        assert MCPType is ExpMCPType
        assert HTTPRoute is ExpHTTPRoute
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/deprecated/test_openapi_deprecations.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:tests/utilities/openapi/test_legacy_compatibility.py | """Tests to ensure OpenAPI schema generation works correctly."""
import pytest
from fastmcp.utilities.openapi.models import (
HTTPRoute,
ParameterInfo,
RequestBodyInfo,
)
from fastmcp.utilities.openapi.schemas import (
_combine_schemas_and_map_params,
)
class TestSchemaGeneration:
    """Test that OpenAPI schema generation produces correct schemas."""

    def test_optional_parameter_nullable_behavior(self):
        """Test that optional parameters are not made nullable - they can simply be omitted."""
        route = HTTPRoute(
            method="GET",
            path="/test",
            operation_id="test_op",
            parameters=[
                ParameterInfo(
                    name="required_param",
                    location="query",
                    required=True,
                    schema={"type": "string"},
                ),
                ParameterInfo(
                    name="optional_param",
                    location="query",
                    required=False,
                    schema={"type": "string"},
                ),
            ],
        )

        schema, _ = _combine_schemas_and_map_params(route)

        # Required parameter should have simple type
        assert schema["properties"]["required_param"]["type"] == "string"
        assert "anyOf" not in schema["properties"]["required_param"]

        # Optional parameters should preserve original schema without making it nullable
        assert "anyOf" not in schema["properties"]["optional_param"]
        assert schema["properties"]["optional_param"]["type"] == "string"

        # Required list should only include required parameters
        assert "required_param" in schema["required"]
        assert "optional_param" not in schema["required"]

    def test_parameter_collision_handling(self):
        """Test that parameter collisions are handled with suffixes."""
        # "id" appears both as a path parameter and a body property, so the
        # combiner must disambiguate them.
        route = HTTPRoute(
            method="PUT",
            path="/users/{id}",
            operation_id="update_user",
            parameters=[
                ParameterInfo(
                    name="id",
                    location="path",
                    required=True,
                    schema={"type": "integer"},
                )
            ],
            request_body=RequestBodyInfo(
                required=True,
                content_schema={
                    "application/json": {
                        "type": "object",
                        "properties": {
                            "id": {"type": "integer"},
                            "name": {"type": "string"},
                        },
                        "required": ["name"],
                    }
                },
            ),
        )

        schema, param_map = _combine_schemas_and_map_params(route)

        # Should have path parameter with suffix
        assert "id__path" in schema["properties"]
        # Should have body parameter without suffix
        assert "id" in schema["properties"]
        # Should have name parameter from body
        assert "name" in schema["properties"]

        # Required should include path param (suffixed) and required body params
        required = set(schema["required"])
        assert "id__path" in required
        assert "name" in required

        # Parameter map should correctly map suffixed parameter
        assert param_map["id__path"]["location"] == "path"
        assert param_map["id__path"]["openapi_name"] == "id"
        assert param_map["id"]["location"] == "body"
        assert param_map["name"]["location"] == "body"

    @pytest.mark.parametrize(
        "param_type",
        [
            {"type": "integer"},
            {"type": "number"},
            {"type": "boolean"},
            {"type": "array", "items": {"type": "string"}},
            {"type": "object", "properties": {"name": {"type": "string"}}},
        ],
    )
    def test_nullable_behavior_different_types(self, param_type):
        """Test nullable behavior works for all parameter types."""
        route = HTTPRoute(
            method="GET",
            path="/test",
            operation_id="test_op",
            parameters=[
                ParameterInfo(
                    name="optional_param",
                    location="query",
                    required=False,
                    schema=param_type,
                )
            ],
        )

        schema, _ = _combine_schemas_and_map_params(route)

        # Should preserve original schema without making it nullable
        param = schema["properties"]["optional_param"]
        assert "anyOf" not in param
        # Should match the original parameter schema
        for key, value in param_type.items():
            assert param[key] == value

    def test_no_parameters_no_body(self):
        """Test schema generation when there are no parameters or body."""
        route = HTTPRoute(
            method="GET",
            path="/health",
            operation_id="health_check",
        )

        schema, param_map = _combine_schemas_and_map_params(route)

        # Should have empty object schema
        assert schema["type"] == "object"
        assert schema["properties"] == {}
        assert schema["required"] == []
        assert param_map == {}

    def test_body_only_no_parameters(self):
        """Test schema generation with only request body, no parameters."""
        body_schema = {
            "application/json": {
                "type": "object",
                "properties": {
                    "title": {"type": "string"},
                    "description": {"type": "string"},
                },
                "required": ["title"],
            }
        }
        route = HTTPRoute(
            method="POST",
            path="/items",
            operation_id="create_item",
            request_body=RequestBodyInfo(
                required=True,
                content_schema=body_schema,
            ),
        )

        schema, param_map = _combine_schemas_and_map_params(route)

        # Should have body properties
        assert "title" in schema["properties"]
        assert "description" in schema["properties"]

        # Required should match body requirements
        assert "title" in schema["required"]
        assert "description" not in schema["required"]

        # Parameter map should map body properties
        assert param_map["title"]["location"] == "body"
        assert param_map["description"]["location"] == "body"
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "tests/utilities/openapi/test_legacy_compatibility.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/fastmcp:examples/auth/discord_oauth/client.py | """Discord OAuth client example for connecting to FastMCP servers.
This example demonstrates how to connect to a Discord OAuth-protected FastMCP server.
To run:
python client.py
"""
import asyncio
from fastmcp.client import Client
SERVER_URL = "http://127.0.0.1:8000/mcp"  # MCP endpoint of the local example server
async def main():
    """Authenticate against the Discord-protected server and list its tools.

    Raises:
        Exception: Re-raises any ping/authentication failure so the script
            exits non-zero when the server cannot be reached.
    """
    try:
        async with Client(SERVER_URL, auth="oauth") as client:
            assert await client.ping()
            # Fixed: this message was mojibake that split the string literal
            # across physical lines (a syntax error); restored as one literal.
            print("✅ Successfully authenticated!")

            tools = await client.list_tools()
            print(f"🔧 Available tools ({len(tools)}):")
            for tool in tools:
                print(f"  - {tool.name}: {tool.description}")
    except Exception as e:
        print(f"❌ Authentication failed: {e}")
        raise
if __name__ == "__main__":
    # Script entry point: drive the async client flow to completion.
    asyncio.run(main())
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/auth/discord_oauth/client.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:examples/auth/discord_oauth/server.py | """Discord OAuth server example for FastMCP.
This example demonstrates how to protect a FastMCP server with Discord OAuth.
Required environment variables:
- FASTMCP_SERVER_AUTH_DISCORD_CLIENT_ID: Your Discord OAuth app client ID
- FASTMCP_SERVER_AUTH_DISCORD_CLIENT_SECRET: Your Discord OAuth app client secret
To run:
python server.py
"""
import os
from fastmcp import FastMCP
from fastmcp.server.auth.providers.discord import DiscordProvider
# Credentials come from the environment; the `or ""` fallback keeps
# import-time construction from raising when they are unset.
auth = DiscordProvider(
    client_id=os.getenv("FASTMCP_SERVER_AUTH_DISCORD_CLIENT_ID") or "",
    client_secret=os.getenv("FASTMCP_SERVER_AUTH_DISCORD_CLIENT_SECRET") or "",
    base_url="http://localhost:8000",
    # redirect_path="/auth/callback",  # Default path - change if using a different callback URL
)

mcp = FastMCP("Discord OAuth Example Server", auth=auth)
# Minimal tool used to exercise the OAuth flow end to end.
@mcp.tool
def echo(message: str) -> str:
    """Echo the provided message."""
    return message
if __name__ == "__main__":
    # Serve over streamable HTTP so the OAuth redirect flow can reach us.
    mcp.run(transport="http", port=8000)
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "examples/auth/discord_oauth/server.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/fastmcp:src/fastmcp/server/auth/providers/discord.py | """Discord OAuth provider for FastMCP.
This module provides a complete Discord OAuth integration that's ready to use
with just a client ID and client secret. It handles all the complexity of
Discord's OAuth flow, token validation, and user management.
Example:
```python
from fastmcp import FastMCP
from fastmcp.server.auth.providers.discord import DiscordProvider
# Simple Discord OAuth protection
auth = DiscordProvider(
client_id="your-discord-client-id",
client_secret="your-discord-client-secret"
)
mcp = FastMCP("My Protected Server", auth=auth)
```
"""
from __future__ import annotations
import contextlib
import time
from datetime import datetime
import httpx
from key_value.aio.protocols import AsyncKeyValue
from pydantic import AnyHttpUrl
from fastmcp.server.auth import TokenVerifier
from fastmcp.server.auth.auth import AccessToken
from fastmcp.server.auth.oauth_proxy import OAuthProxy
from fastmcp.utilities.auth import parse_scopes
from fastmcp.utilities.logging import get_logger
logger = get_logger(__name__)
class DiscordTokenVerifier(TokenVerifier):
    """Token verifier for Discord OAuth tokens.

    Discord OAuth tokens are opaque (not JWTs), so we verify them
    by calling Discord's tokeninfo API to check if they're valid and get user info.
    """

    def __init__(
        self,
        *,
        expected_client_id: str,
        required_scopes: list[str] | None = None,
        timeout_seconds: int = 10,
        http_client: httpx.AsyncClient | None = None,
    ):
        """Initialize the Discord token verifier.

        Args:
            expected_client_id: Expected Discord OAuth client ID for audience binding
            required_scopes: Required OAuth scopes (e.g., ['email'])
            timeout_seconds: HTTP request timeout
            http_client: Optional httpx.AsyncClient for connection pooling. When provided,
                the client is reused across calls and the caller is responsible for its
                lifecycle. When None (default), a fresh client is created per call.
        """
        super().__init__(required_scopes=required_scopes)
        self.expected_client_id = expected_client_id
        self.timeout_seconds = timeout_seconds
        # May be None; verify_token then creates a short-lived client per call.
        self._http_client = http_client

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify Discord OAuth token by calling Discord's tokeninfo API.

        Returns:
            An AccessToken carrying Discord user claims on success, or None
            for any invalid, expired, or mismatched token. Network and
            parsing errors are logged and treated as verification failure
            rather than raised.
        """
        try:
            # nullcontext wraps the caller-owned client so exiting the
            # `async with` does not close it; otherwise a fresh client is
            # created and closed for this call.
            async with (
                contextlib.nullcontext(self._http_client)
                if self._http_client is not None
                else httpx.AsyncClient(timeout=self.timeout_seconds)
            ) as client:
                # Use Discord's tokeninfo endpoint to validate the token
                headers = {
                    "Authorization": f"Bearer {token}",
                    "User-Agent": "FastMCP-Discord-OAuth",
                }
                response = await client.get(
                    "https://discord.com/api/oauth2/@me",
                    headers=headers,
                )

                if response.status_code != 200:
                    logger.debug(
                        "Discord token verification failed: %d",
                        response.status_code,
                    )
                    return None

                token_info = response.json()

                # Check if token is expired (Discord returns ISO timestamp)
                expires_str = token_info.get("expires")
                expires_at = None
                if expires_str:
                    # Normalize trailing "Z" for fromisoformat compatibility.
                    expires_dt = datetime.fromisoformat(
                        expires_str.replace("Z", "+00:00")
                    )
                    expires_at = int(expires_dt.timestamp())
                    if expires_at <= int(time.time()):
                        logger.debug("Discord token has expired")
                        return None

                token_scopes = token_info.get("scopes", [])

                # Check required scopes
                if self.required_scopes:
                    token_scopes_set = set(token_scopes)
                    required_scopes_set = set(self.required_scopes)
                    if not required_scopes_set.issubset(token_scopes_set):
                        # Only counts are logged, not scope names.
                        logger.debug(
                            "Discord token missing required scopes. Has %d, needs %d",
                            len(token_scopes_set),
                            len(required_scopes_set),
                        )
                        return None

                user_data = token_info.get("user", {})
                application = token_info.get("application") or {}
                client_id = str(application.get("id", "unknown"))

                # Audience binding: reject tokens issued to a different app.
                if client_id != self.expected_client_id:
                    logger.debug(
                        "Discord token app ID mismatch: expected %s, got %s",
                        self.expected_client_id,
                        client_id,
                    )
                    return None

                # Create AccessToken with Discord user info
                access_token = AccessToken(
                    token=token,
                    client_id=client_id,
                    scopes=token_scopes,
                    # None when Discord did not report an expiry.
                    expires_at=expires_at,
                    claims={
                        "sub": user_data.get("id"),
                        "username": user_data.get("username"),
                        "discriminator": user_data.get("discriminator"),
                        "avatar": user_data.get("avatar"),
                        "email": user_data.get("email"),
                        "verified": user_data.get("verified"),
                        "locale": user_data.get("locale"),
                        "discord_user": user_data,
                        "discord_token_info": token_info,
                    },
                )

                logger.debug("Discord token verified successfully")
                return access_token

        except httpx.RequestError as e:
            logger.debug("Failed to verify Discord token: %s", e)
            return None
        except Exception as e:
            # Defensive catch-all: any unexpected parsing/validation error is
            # treated as an auth failure instead of propagating to the caller.
            logger.debug("Discord token verification error: %s", e)
            return None
class DiscordProvider(OAuthProxy):
    """Discord OAuth provider for FastMCP servers.

    Wires :class:`OAuthProxy` to Discord's OAuth2 endpoints and pairs it with
    a :class:`DiscordTokenVerifier`, so adding Discord login to a FastMCP
    server only requires the OAuth app credentials and a public base URL.

    Features:
    - Transparent OAuth proxy to Discord
    - Automatic token validation via Discord's API
    - User information extraction from Discord APIs
    - Minimal configuration required

    Example:
        ```python
        from fastmcp import FastMCP
        from fastmcp.server.auth.providers.discord import DiscordProvider

        auth = DiscordProvider(
            client_id="123456789",
            client_secret="discord-client-secret-abc123...",
            base_url="https://my-server.com"
        )

        mcp = FastMCP("My App", auth=auth)
        ```
    """

    def __init__(
        self,
        *,
        client_id: str,
        client_secret: str,
        base_url: AnyHttpUrl | str,
        issuer_url: AnyHttpUrl | str | None = None,
        redirect_path: str | None = None,
        required_scopes: list[str] | None = None,
        timeout_seconds: int = 10,
        allowed_client_redirect_uris: list[str] | None = None,
        client_storage: AsyncKeyValue | None = None,
        jwt_signing_key: str | bytes | None = None,
        require_authorization_consent: bool = True,
        consent_csp_policy: str | None = None,
        http_client: httpx.AsyncClient | None = None,
    ):
        """Initialize the Discord OAuth provider.

        Args:
            client_id: Discord OAuth client ID (e.g., "123456789").
            client_secret: Discord OAuth client secret (e.g., "S....").
            base_url: Public URL where the OAuth endpoints will be reachable
                (include any mount path).
            issuer_url: Issuer URL for OAuth metadata; defaults to ``base_url``.
                Use a root-level URL to avoid 404s during discovery when the
                server is mounted under a path.
            redirect_path: Redirect path configured in the Discord OAuth app
                (defaults to "/auth/callback").
            required_scopes: Required Discord scopes (defaults to ["identify"]).
                Common scopes include:
                - "identify" for profile info (default)
                - "email" for email access
                - "guilds" for server membership info
            timeout_seconds: HTTP request timeout for Discord API calls
                (defaults to 10).
            allowed_client_redirect_uris: Allowed redirect URI patterns for MCP
                clients. None (default) allows all URIs; an empty list allows
                none.
            client_storage: Storage backend for OAuth state (client
                registrations, encrypted tokens). If None, an encrypted file
                store is created in the data directory (via `platformdirs`).
            jwt_signing_key: Secret for signing FastMCP JWT tokens (string or
                bytes). Bytes are used as-is; a string is derived into a
                32-byte key. If omitted, a 32-byte key is derived from the
                upstream client secret using PBKDF2.
            require_authorization_consent: Whether users must confirm a consent
                screen before being redirected to Discord (default True). When
                False, authorization proceeds without confirmation.
                SECURITY WARNING: only disable for local development/testing.
            consent_csp_policy: Optional Content-Security-Policy value for the
                consent screen; forwarded unchanged to the underlying
                OAuthProxy.
            http_client: Optional httpx.AsyncClient reused across
                ``verify_token`` calls for connection pooling; the caller owns
                its lifecycle. When None (default), a fresh client is created
                per call.
        """
        # Normalize the scope list; "identify" is the minimal default scope.
        scopes = (
            ["identify"]
            if required_scopes is None
            else parse_scopes(required_scopes)
        )

        # Verifier checks incoming bearer tokens against Discord's API.
        verifier = DiscordTokenVerifier(
            expected_client_id=client_id,
            required_scopes=scopes,
            timeout_seconds=timeout_seconds,
            http_client=http_client,
        )

        # Delegate all OAuth proxying to the base class, pointed at Discord.
        super().__init__(
            upstream_authorization_endpoint="https://discord.com/oauth2/authorize",
            upstream_token_endpoint="https://discord.com/api/oauth2/token",
            upstream_client_id=client_id,
            upstream_client_secret=client_secret,
            token_verifier=verifier,
            base_url=base_url,
            redirect_path=redirect_path,
            # Fall back to base_url when no dedicated issuer is configured.
            issuer_url=issuer_url or base_url,
            allowed_client_redirect_uris=allowed_client_redirect_uris,
            client_storage=client_storage,
            jwt_signing_key=jwt_signing_key,
            require_authorization_consent=require_authorization_consent,
            consent_csp_policy=consent_csp_policy,
        )

        logger.debug(
            "Initialized Discord OAuth provider for client %s with scopes: %s",
            client_id,
            scopes,
        )
| {
"repo_id": "PrefectHQ/fastmcp",
"file_path": "src/fastmcp/server/auth/providers/discord.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.