sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
modelcontextprotocol/python-sdk:tests/experimental/tasks/test_message_queue.py | """Tests for TaskMessageQueue and InMemoryTaskMessageQueue."""
from collections import deque
from datetime import datetime, timezone
import anyio
import pytest
from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue, QueuedMessage
from mcp.shared.experimental.tasks.resolver import Resolver
from mcp.types import JSONRPCNotification, JSONRPCRequest
@pytest.fixture
def queue() -> InMemoryTaskMessageQueue:
    """Provide a fresh in-memory message queue for each test."""
    return InMemoryTaskMessageQueue()
def make_request(id: int = 1, method: str = "test/method") -> JSONRPCRequest:
    """Build a minimal JSON-RPC request for queue tests."""
    return JSONRPCRequest(method=method, id=id, jsonrpc="2.0")
def make_notification(method: str = "test/notify") -> JSONRPCNotification:
    """Build a minimal JSON-RPC notification for queue tests."""
    return JSONRPCNotification(method=method, jsonrpc="2.0")
class TestInMemoryTaskMessageQueue:
    """Behavioral tests for the in-memory TaskMessageQueue implementation."""

    @pytest.mark.anyio
    async def test_enqueue_and_dequeue(self, queue: InMemoryTaskMessageQueue) -> None:
        """A message pushed onto a task's queue comes back out on dequeue."""
        tid = "task-1"
        await queue.enqueue(tid, QueuedMessage(type="request", message=make_request()))

        popped = await queue.dequeue(tid)

        assert popped is not None
        assert popped.type == "request"
        assert popped.message.method == "test/method"

    @pytest.mark.anyio
    async def test_dequeue_empty_returns_none(self, queue: InMemoryTaskMessageQueue) -> None:
        """Dequeuing a task with no pending messages yields None."""
        assert await queue.dequeue("nonexistent-task") is None

    @pytest.mark.anyio
    async def test_fifo_ordering(self, queue: InMemoryTaskMessageQueue) -> None:
        """Messages come out in the same order they went in."""
        tid = "task-1"
        for req_id, method in ((1, "first"), (2, "second"), (3, "third")):
            await queue.enqueue(tid, QueuedMessage(type="request", message=make_request(req_id, method)))

        first = await queue.dequeue(tid)
        second = await queue.dequeue(tid)
        third = await queue.dequeue(tid)

        assert first is not None and first.message.method == "first"
        assert second is not None and second.message.method == "second"
        assert third is not None and third.message.method == "third"

    @pytest.mark.anyio
    async def test_separate_queues_per_task(self, queue: InMemoryTaskMessageQueue) -> None:
        """Queues are isolated per task id."""
        await queue.enqueue("task-1", QueuedMessage(type="request", message=make_request(1, "task1-msg")))
        await queue.enqueue("task-2", QueuedMessage(type="request", message=make_request(2, "task2-msg")))

        from_first = await queue.dequeue("task-1")
        from_second = await queue.dequeue("task-2")

        assert from_first is not None and from_first.message.method == "task1-msg"
        assert from_second is not None and from_second.message.method == "task2-msg"

    @pytest.mark.anyio
    async def test_peek_does_not_remove(self, queue: InMemoryTaskMessageQueue) -> None:
        """Peeking leaves the message in place for a later dequeue."""
        tid = "task-1"
        await queue.enqueue(tid, QueuedMessage(type="request", message=make_request()))

        peeked = await queue.peek(tid)
        popped = await queue.dequeue(tid)

        assert peeked is not None
        assert popped is not None
        assert isinstance(peeked.message, JSONRPCRequest)
        assert isinstance(popped.message, JSONRPCRequest)
        assert peeked.message.id == popped.message.id

    @pytest.mark.anyio
    async def test_is_empty(self, queue: InMemoryTaskMessageQueue) -> None:
        """is_empty tracks the enqueue/dequeue cycle."""
        tid = "task-1"
        assert await queue.is_empty(tid) is True

        await queue.enqueue(tid, QueuedMessage(type="notification", message=make_notification()))
        assert await queue.is_empty(tid) is False

        await queue.dequeue(tid)
        assert await queue.is_empty(tid) is True

    @pytest.mark.anyio
    async def test_clear_returns_all_messages(self, queue: InMemoryTaskMessageQueue) -> None:
        """clear drains the queue and hands back everything that was queued."""
        tid = "task-1"
        for req_id in (1, 2, 3):
            await queue.enqueue(tid, QueuedMessage(type="request", message=make_request(req_id)))

        drained = await queue.clear(tid)

        assert len(drained) == 3
        assert await queue.is_empty(tid) is True

    @pytest.mark.anyio
    async def test_clear_empty_queue(self, queue: InMemoryTaskMessageQueue) -> None:
        """Clearing an unknown task yields an empty list."""
        assert await queue.clear("nonexistent") == []

    @pytest.mark.anyio
    async def test_notification_messages(self, queue: InMemoryTaskMessageQueue) -> None:
        """Notifications round-trip through the queue just like requests."""
        tid = "task-1"
        await queue.enqueue(tid, QueuedMessage(type="notification", message=make_notification("log/message")))

        popped = await queue.dequeue(tid)

        assert popped is not None
        assert popped.type == "notification"
        assert popped.message.method == "log/message"

    @pytest.mark.anyio
    async def test_message_timestamp(self, queue: InMemoryTaskMessageQueue) -> None:
        """A freshly constructed message is stamped with the current UTC time."""
        lower = datetime.now(timezone.utc)
        queued = QueuedMessage(type="request", message=make_request())
        upper = datetime.now(timezone.utc)
        assert lower <= queued.timestamp <= upper

    @pytest.mark.anyio
    async def test_message_with_resolver(self, queue: InMemoryTaskMessageQueue) -> None:
        """Resolver and original request id survive the round trip."""
        tid = "task-1"
        resolver: Resolver[dict[str, str]] = Resolver()
        await queue.enqueue(
            tid,
            QueuedMessage(
                type="request",
                message=make_request(),
                resolver=resolver,
                original_request_id=42,
            ),
        )

        popped = await queue.dequeue(tid)

        assert popped is not None
        assert popped.resolver is resolver
        assert popped.original_request_id == 42

    @pytest.mark.anyio
    async def test_cleanup_specific_task(self, queue: InMemoryTaskMessageQueue) -> None:
        """Targeted cleanup only drops the named task's state."""
        await queue.enqueue("task-1", QueuedMessage(type="request", message=make_request(1)))
        await queue.enqueue("task-2", QueuedMessage(type="request", message=make_request(2)))

        queue.cleanup("task-1")

        assert await queue.is_empty("task-1") is True
        assert await queue.is_empty("task-2") is False

    @pytest.mark.anyio
    async def test_cleanup_all(self, queue: InMemoryTaskMessageQueue) -> None:
        """Cleanup with no task id wipes every task's state."""
        await queue.enqueue("task-1", QueuedMessage(type="request", message=make_request(1)))
        await queue.enqueue("task-2", QueuedMessage(type="request", message=make_request(2)))

        queue.cleanup()

        assert await queue.is_empty("task-1") is True
        assert await queue.is_empty("task-2") is True

    @pytest.mark.anyio
    async def test_wait_for_message_returns_immediately_if_message_exists(
        self, queue: InMemoryTaskMessageQueue
    ) -> None:
        """No blocking when a message is already waiting."""
        tid = "task-1"
        await queue.enqueue(tid, QueuedMessage(type="request", message=make_request()))
        # A pending message means wait_for_message must not block.
        with anyio.fail_after(1):
            await queue.wait_for_message(tid)

    @pytest.mark.anyio
    async def test_wait_for_message_blocks_until_message(self, queue: InMemoryTaskMessageQueue) -> None:
        """A waiter parks until something is enqueued for its task."""
        tid = "task-1"
        received = False
        waiter_started = anyio.Event()

        async def producer() -> None:
            # Hold off enqueueing until the consumer has started.
            await waiter_started.wait()
            await queue.enqueue(tid, QueuedMessage(type="request", message=make_request()))

        async def consumer() -> None:
            nonlocal received
            # Let the producer know we are about to block.
            waiter_started.set()
            await queue.wait_for_message(tid)
            received = True

        async with anyio.create_task_group() as tg:
            tg.start_soon(consumer)
            tg.start_soon(producer)

        assert received is True

    @pytest.mark.anyio
    async def test_notify_message_available_wakes_waiter(self, queue: InMemoryTaskMessageQueue) -> None:
        """An explicit notify wakes a parked waiter."""
        tid = "task-1"
        notified = False
        waiter_started = anyio.Event()

        async def notifier() -> None:
            # Only notify once the waiter has started.
            await waiter_started.wait()
            await queue.notify_message_available(tid)

        async def waiter() -> None:
            nonlocal notified
            # Let the notifier know we are about to block.
            waiter_started.set()
            await queue.wait_for_message(tid)
            notified = True

        async with anyio.create_task_group() as tg:
            tg.start_soon(waiter)
            tg.start_soon(notifier)

        assert notified is True

    @pytest.mark.anyio
    async def test_peek_empty_queue_returns_none(self, queue: InMemoryTaskMessageQueue) -> None:
        """Peeking at an unknown task yields None."""
        assert await queue.peek("nonexistent-task") is None

    @pytest.mark.anyio
    async def test_wait_for_message_double_check_race_condition(self, queue: InMemoryTaskMessageQueue) -> None:
        """wait_for_message notices a message that slips in just before it blocks."""
        tid = "task-1"
        # Exercise the double-check path: a message must appear after the wake-up
        # event is created but before the waiter actually blocks on it. We fake
        # this by planting a message right before the second is_empty check.
        real_is_empty = queue.is_empty
        checks = 0

        async def racing_is_empty(task: str) -> bool:
            nonlocal checks
            checks += 1
            if checks == 2 and task == tid:
                # Simulates a message landing between event creation and the
                # double-check.
                queue._queues[tid] = deque([QueuedMessage(type="request", message=make_request())])
            return await real_is_empty(task)

        queue.is_empty = racing_is_empty  # type: ignore[method-assign]

        # The double-check should spot the planted message and return at once.
        with anyio.fail_after(1):
            await queue.wait_for_message(tid)
class TestResolver:
    """Tests for the one-shot Resolver future-like helper."""

    @pytest.mark.anyio
    async def test_set_result_and_wait(self) -> None:
        """A stored result is delivered to wait() and marks the resolver done."""
        resolver: Resolver[str] = Resolver()
        resolver.set_result("hello")
        assert await resolver.wait() == "hello"
        assert resolver.done()

    @pytest.mark.anyio
    async def test_set_exception_and_wait(self) -> None:
        """A stored exception is re-raised from wait()."""
        resolver: Resolver[str] = Resolver()
        resolver.set_exception(ValueError("test error"))
        with pytest.raises(ValueError, match="test error"):
            await resolver.wait()
        assert resolver.done()

    @pytest.mark.anyio
    async def test_set_result_when_already_completed_raises(self) -> None:
        """Setting a second result is rejected."""
        resolver: Resolver[str] = Resolver()
        resolver.set_result("first")
        with pytest.raises(RuntimeError, match="already completed"):
            resolver.set_result("second")

    @pytest.mark.anyio
    async def test_set_exception_when_already_completed_raises(self) -> None:
        """Setting an exception after a result is rejected."""
        resolver: Resolver[str] = Resolver()
        resolver.set_result("done")
        with pytest.raises(RuntimeError, match="already completed"):
            resolver.set_exception(ValueError("too late"))

    @pytest.mark.anyio
    async def test_done_returns_false_before_completion(self) -> None:
        """A fresh resolver reports done() as False."""
        resolver: Resolver[str] = Resolver()
        assert resolver.done() is False
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/experimental/tasks/test_message_queue.py",
"license": "MIT License",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/experimental/tasks/test_request_context.py | """Tests for the RequestContext.experimental (Experimental class) task validation helpers."""
import pytest
from mcp.server.experimental.request_context import Experimental
from mcp.shared.exceptions import MCPError
from mcp.types import (
METHOD_NOT_FOUND,
TASK_FORBIDDEN,
TASK_OPTIONAL,
TASK_REQUIRED,
ClientCapabilities,
ClientTasksCapability,
TaskMetadata,
Tool,
ToolExecution,
)
def test_is_task_true_when_metadata_present() -> None:
    """A request carrying task metadata is treated as a task."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    assert ctx.is_task is True


def test_is_task_false_when_no_metadata() -> None:
    """Without task metadata the request is not a task."""
    ctx = Experimental(task_metadata=None)
    assert ctx.is_task is False


def test_client_supports_tasks_true() -> None:
    """A declared tasks capability means the client supports tasks."""
    ctx = Experimental(_client_capabilities=ClientCapabilities(tasks=ClientTasksCapability()))
    assert ctx.client_supports_tasks is True


def test_client_supports_tasks_false_no_tasks() -> None:
    """Capabilities without a tasks entry mean no task support."""
    ctx = Experimental(_client_capabilities=ClientCapabilities())
    assert ctx.client_supports_tasks is False


def test_client_supports_tasks_false_no_capabilities() -> None:
    """Missing capabilities entirely mean no task support."""
    ctx = Experimental(_client_capabilities=None)
    assert ctx.client_supports_tasks is False
def test_validate_task_mode_required_with_task_is_valid() -> None:
    """TASK_REQUIRED passes when the request is task-augmented."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    assert ctx.validate_task_mode(TASK_REQUIRED, raise_error=False) is None


def test_validate_task_mode_required_without_task_returns_error() -> None:
    """TASK_REQUIRED without a task produces a METHOD_NOT_FOUND error."""
    ctx = Experimental(task_metadata=None)
    failure = ctx.validate_task_mode(TASK_REQUIRED, raise_error=False)
    assert failure is not None
    assert failure.code == METHOD_NOT_FOUND
    assert "requires task-augmented" in failure.message


def test_validate_task_mode_required_without_task_raises_by_default() -> None:
    """By default, TASK_REQUIRED without a task raises MCPError."""
    ctx = Experimental(task_metadata=None)
    with pytest.raises(MCPError) as exc_info:
        ctx.validate_task_mode(TASK_REQUIRED)
    assert exc_info.value.error.code == METHOD_NOT_FOUND


def test_validate_task_mode_forbidden_without_task_is_valid() -> None:
    """TASK_FORBIDDEN passes for a plain (non-task) request."""
    ctx = Experimental(task_metadata=None)
    assert ctx.validate_task_mode(TASK_FORBIDDEN, raise_error=False) is None


def test_validate_task_mode_forbidden_with_task_returns_error() -> None:
    """TASK_FORBIDDEN with a task produces a METHOD_NOT_FOUND error."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    failure = ctx.validate_task_mode(TASK_FORBIDDEN, raise_error=False)
    assert failure is not None
    assert failure.code == METHOD_NOT_FOUND
    assert "does not support task-augmented" in failure.message


def test_validate_task_mode_forbidden_with_task_raises_by_default() -> None:
    """By default, TASK_FORBIDDEN with a task raises MCPError."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    with pytest.raises(MCPError) as exc_info:
        ctx.validate_task_mode(TASK_FORBIDDEN)
    assert exc_info.value.error.code == METHOD_NOT_FOUND


def test_validate_task_mode_none_treated_as_forbidden() -> None:
    """A missing task mode behaves like TASK_FORBIDDEN."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    failure = ctx.validate_task_mode(None, raise_error=False)
    assert failure is not None
    assert "does not support task-augmented" in failure.message


def test_validate_task_mode_optional_with_task_is_valid() -> None:
    """TASK_OPTIONAL passes with a task."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    assert ctx.validate_task_mode(TASK_OPTIONAL, raise_error=False) is None


def test_validate_task_mode_optional_without_task_is_valid() -> None:
    """TASK_OPTIONAL passes without a task."""
    ctx = Experimental(task_metadata=None)
    assert ctx.validate_task_mode(TASK_OPTIONAL, raise_error=False) is None
def test_validate_for_tool_with_execution_required() -> None:
    """A non-task call to a task-required tool is rejected."""
    ctx = Experimental(task_metadata=None)
    tool = Tool(
        name="test",
        description="test",
        input_schema={"type": "object"},
        execution=ToolExecution(task_support=TASK_REQUIRED),
    )
    failure = ctx.validate_for_tool(tool, raise_error=False)
    assert failure is not None
    assert "requires task-augmented" in failure.message


def test_validate_for_tool_without_execution() -> None:
    """A task call to a tool with no execution block is rejected."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    tool = Tool(
        name="test",
        description="test",
        input_schema={"type": "object"},
        execution=None,
    )
    failure = ctx.validate_for_tool(tool, raise_error=False)
    assert failure is not None
    assert "does not support task-augmented" in failure.message


def test_validate_for_tool_optional_with_task() -> None:
    """A task call to a task-optional tool is accepted."""
    ctx = Experimental(task_metadata=TaskMetadata(ttl=60000))
    tool = Tool(
        name="test",
        description="test",
        input_schema={"type": "object"},
        execution=ToolExecution(task_support=TASK_OPTIONAL),
    )
    assert ctx.validate_for_tool(tool, raise_error=False) is None
def test_can_use_tool_required_with_task_support() -> None:
    """Task-required tools are usable by task-capable clients."""
    ctx = Experimental(_client_capabilities=ClientCapabilities(tasks=ClientTasksCapability()))
    assert ctx.can_use_tool(TASK_REQUIRED) is True


def test_can_use_tool_required_without_task_support() -> None:
    """Task-required tools are unusable without client task support."""
    ctx = Experimental(_client_capabilities=ClientCapabilities())
    assert ctx.can_use_tool(TASK_REQUIRED) is False


def test_can_use_tool_optional_without_task_support() -> None:
    """Task-optional tools remain usable without task support."""
    ctx = Experimental(_client_capabilities=ClientCapabilities())
    assert ctx.can_use_tool(TASK_OPTIONAL) is True


def test_can_use_tool_forbidden_without_task_support() -> None:
    """Task-forbidden tools are usable without task support."""
    ctx = Experimental(_client_capabilities=ClientCapabilities())
    assert ctx.can_use_tool(TASK_FORBIDDEN) is True


def test_can_use_tool_none_without_task_support() -> None:
    """Tools with no declared task mode are usable without task support."""
    ctx = Experimental(_client_capabilities=ClientCapabilities())
    assert ctx.can_use_tool(None) is True
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/experimental/tasks/test_request_context.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/experimental/tasks/test_spec_compliance.py | """Tasks Spec Compliance Tests
===========================
Test structure mirrors: https://modelcontextprotocol.io/specification/draft/basic/utilities/tasks.md
Each section contains tests for normative requirements (MUST/SHOULD/MAY).
"""
from datetime import datetime, timezone
import pytest
from mcp.server import Server, ServerRequestContext
from mcp.server.lowlevel import NotificationOptions
from mcp.shared.experimental.tasks.helpers import MODEL_IMMEDIATE_RESPONSE_KEY
from mcp.types import (
CancelTaskRequestParams,
CancelTaskResult,
CreateTaskResult,
GetTaskRequestParams,
GetTaskResult,
ListTasksResult,
PaginatedRequestParams,
ServerCapabilities,
Task,
)
# Fixed timestamp shared by tests that need a deterministic datetime.
TEST_DATETIME = datetime(2025, 1, 1, tzinfo=timezone.utc)


def _get_capabilities(server: Server) -> ServerCapabilities:
    """Return the server's advertised capabilities using default options."""
    options = NotificationOptions()
    return server.get_capabilities(notification_options=options, experimental_capabilities={})
def test_server_without_task_handlers_has_no_tasks_capability() -> None:
    """A server that registers no task handlers advertises no tasks capability."""
    server: Server = Server("test")
    assert _get_capabilities(server).tasks is None
async def _noop_get_task(ctx: ServerRequestContext, params: GetTaskRequestParams) -> GetTaskResult:
    """Placeholder tasks/get handler; registered only to toggle capabilities."""
    raise NotImplementedError


async def _noop_list_tasks(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListTasksResult:
    """Placeholder tasks/list handler; registered only to toggle capabilities."""
    raise NotImplementedError


async def _noop_cancel_task(ctx: ServerRequestContext, params: CancelTaskRequestParams) -> CancelTaskResult:
    """Placeholder tasks/cancel handler; registered only to toggle capabilities."""
    raise NotImplementedError
def test_server_with_list_tasks_handler_declares_list_capability() -> None:
    """Registering a list_tasks handler turns on tasks.list."""
    server: Server = Server("test")
    server.experimental.enable_tasks(on_list_tasks=_noop_list_tasks)
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.list is not None


def test_server_with_cancel_task_handler_declares_cancel_capability() -> None:
    """Registering a cancel_task handler turns on tasks.cancel."""
    server: Server = Server("test")
    server.experimental.enable_tasks(on_cancel_task=_noop_cancel_task)
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.cancel is not None


def test_server_with_get_task_handler_declares_requests_tools_call_capability() -> None:
    """Registering a get_task handler turns on tasks.requests.tools.call.

    (get_task is required for task-augmented tools/call support)
    """
    server: Server = Server("test")
    server.experimental.enable_tasks(on_get_task=_noop_get_task)
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.requests is not None
    assert caps.tasks.requests.tools is not None
@pytest.mark.skip(
    reason="TODO(maxisbey): enable_tasks registers default handlers for all task methods, "
    "so partial capabilities aren't possible yet. Low-level API should support "
    "selectively enabling/disabling task capabilities."
)
def test_server_without_list_handler_has_no_list_capability() -> None:  # pragma: no cover
    """Without a list_tasks handler, tasks.list stays unset."""
    server: Server = Server("test")
    server.experimental.enable_tasks(on_get_task=_noop_get_task)
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.list is None


@pytest.mark.skip(
    reason="TODO(maxisbey): enable_tasks registers default handlers for all task methods, "
    "so partial capabilities aren't possible yet. Low-level API should support "
    "selectively enabling/disabling task capabilities."
)
def test_server_without_cancel_handler_has_no_cancel_capability() -> None:  # pragma: no cover
    """Without a cancel_task handler, tasks.cancel stays unset."""
    server: Server = Server("test")
    server.experimental.enable_tasks(on_get_task=_noop_get_task)
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.cancel is None
def test_server_with_all_task_handlers_has_full_capability() -> None:
    """Registering every task handler yields the complete tasks capability."""
    server: Server = Server("test")
    server.experimental.enable_tasks(
        on_list_tasks=_noop_list_tasks,
        on_cancel_task=_noop_cancel_task,
        on_get_task=_noop_get_task,
    )
    caps = _get_capabilities(server)
    assert caps.tasks is not None
    assert caps.tasks.list is not None
    assert caps.tasks.cancel is not None
    assert caps.tasks.requests is not None
    assert caps.tasks.requests.tools is not None
class TestClientCapabilities:
    """Clients declare:

    - tasks.list -> supports listing operations
    - tasks.cancel -> supports cancellation
    - tasks.requests.sampling.createMessage -> task-augmented sampling
    - tasks.requests.elicitation.create -> task-augmented elicitation
    """

    def test_client_declares_tasks_capability(self) -> None:
        """Client can declare tasks capability."""
        pytest.skip("TODO")
class TestToolLevelNegotiation:
    """Tools in tools/list responses carry execution.taskSupport:

    - Absent or "forbidden": no task augmentation allowed
    - "optional": task augmentation at the requestor's discretion
    - "required": task augmentation is mandatory
    """

    def test_tool_execution_task_forbidden_rejects_task_augmented_call(self) -> None:
        """Tool with execution.taskSupport="forbidden" MUST reject task-augmented calls (-32601)."""
        pytest.skip("TODO")

    def test_tool_execution_task_absent_rejects_task_augmented_call(self) -> None:
        """Tool without execution.taskSupport MUST reject task-augmented calls (-32601)."""
        pytest.skip("TODO")

    def test_tool_execution_task_optional_accepts_normal_call(self) -> None:
        """Tool with execution.taskSupport="optional" accepts normal calls."""
        pytest.skip("TODO")

    def test_tool_execution_task_optional_accepts_task_augmented_call(self) -> None:
        """Tool with execution.taskSupport="optional" accepts task-augmented calls."""
        pytest.skip("TODO")

    def test_tool_execution_task_required_rejects_normal_call(self) -> None:
        """Tool with execution.taskSupport="required" MUST reject non-task calls (-32601)."""
        pytest.skip("TODO")

    def test_tool_execution_task_required_accepts_task_augmented_call(self) -> None:
        """Tool with execution.taskSupport="required" accepts task-augmented calls."""
        pytest.skip("TODO")
class TestCapabilityNegotiation:
    """Requestors SHOULD only task-augment a request when the receiver has
    declared the matching capability.

    Receivers that have not declared the task capability for a request type
    MUST handle such requests normally and ignore any task-augmentation
    metadata present.
    """

    def test_receiver_without_capability_ignores_task_metadata(self) -> None:
        """Receiver without task capability MUST process request normally,
        ignoring task-augmentation metadata.
        """
        pytest.skip("TODO")

    def test_receiver_with_capability_may_require_task_augmentation(self) -> None:
        """Receivers that declare task capability MAY return error (-32600)
        for non-task-augmented requests, requiring task augmentation.
        """
        pytest.skip("TODO")
class TestTaskStatusLifecycle:
    """Tasks begin in working status and follow valid transitions:

    working -> input_required -> working -> terminal
    working -> terminal (directly)
    input_required -> terminal (directly)

    Terminal states (no further transitions allowed):
    - completed
    - failed
    - cancelled
    """

    def test_task_begins_in_working_status(self) -> None:
        """Tasks MUST begin in working status."""
        pytest.skip("TODO")

    def test_working_to_completed_transition(self) -> None:
        """working -> completed is valid."""
        pytest.skip("TODO")

    def test_working_to_failed_transition(self) -> None:
        """working -> failed is valid."""
        pytest.skip("TODO")

    def test_working_to_cancelled_transition(self) -> None:
        """working -> cancelled is valid."""
        pytest.skip("TODO")

    def test_working_to_input_required_transition(self) -> None:
        """working -> input_required is valid."""
        pytest.skip("TODO")

    def test_input_required_to_working_transition(self) -> None:
        """input_required -> working is valid."""
        pytest.skip("TODO")

    def test_input_required_to_terminal_transition(self) -> None:
        """input_required -> terminal is valid."""
        pytest.skip("TODO")

    def test_terminal_state_no_further_transitions(self) -> None:
        """Terminal states allow no further transitions."""
        pytest.skip("TODO")

    def test_completed_is_terminal(self) -> None:
        """completed is a terminal state."""
        pytest.skip("TODO")

    def test_failed_is_terminal(self) -> None:
        """failed is a terminal state."""
        pytest.skip("TODO")

    def test_cancelled_is_terminal(self) -> None:
        """cancelled is a terminal state."""
        pytest.skip("TODO")
class TestInputRequiredStatus:
    """A receiver needing more information moves the task to input_required.

    The requestor should call tasks/result to retrieve input requests, and the
    task must attach io.modelcontextprotocol/related-task metadata to its
    associated requests.
    """

    def test_input_required_status_retrievable_via_tasks_get(self) -> None:
        """Task in input_required status is retrievable via tasks/get."""
        pytest.skip("TODO")

    def test_input_required_related_task_metadata_in_requests(self) -> None:
        """Task MUST include io.modelcontextprotocol/related-task metadata
        in associated requests.
        """
        pytest.skip("TODO")
class TestCreatingTask:
    """Request structure:

    {"method": "tools/call", "params": {"name": "...", "arguments": {...}, "task": {"ttl": 60000}}}

    Response (CreateTaskResult):

    {"result": {"task": {"taskId": "...", "status": "working", ...}}}

    Receivers may include io.modelcontextprotocol/model-immediate-response in _meta.
    """

    def test_task_augmented_request_returns_create_task_result(self) -> None:
        """Task-augmented request MUST return CreateTaskResult immediately."""
        pytest.skip("TODO")

    def test_create_task_result_contains_task_id(self) -> None:
        """CreateTaskResult MUST contain taskId."""
        pytest.skip("TODO")

    def test_create_task_result_contains_status_working(self) -> None:
        """CreateTaskResult MUST have status=working initially."""
        pytest.skip("TODO")

    def test_create_task_result_contains_created_at(self) -> None:
        """CreateTaskResult MUST contain createdAt timestamp."""
        pytest.skip("TODO")

    def test_create_task_result_created_at_is_iso8601(self) -> None:
        """createdAt MUST be ISO 8601 formatted."""
        pytest.skip("TODO")

    def test_create_task_result_may_contain_ttl(self) -> None:
        """CreateTaskResult MAY contain ttl."""
        pytest.skip("TODO")

    def test_create_task_result_may_contain_poll_interval(self) -> None:
        """CreateTaskResult MAY contain pollInterval."""
        pytest.skip("TODO")

    def test_create_task_result_may_contain_status_message(self) -> None:
        """CreateTaskResult MAY contain statusMessage."""
        pytest.skip("TODO")

    def test_receiver_may_override_requested_ttl(self) -> None:
        """Receiver MAY override requested ttl but MUST return actual value."""
        pytest.skip("TODO")

    def test_model_immediate_response_in_meta(self) -> None:
        """Receiver MAY include io.modelcontextprotocol/model-immediate-response
        in _meta to provide immediate response while task executes.
        """
        # The constant must match the key name mandated by the spec.
        assert MODEL_IMMEDIATE_RESPONSE_KEY == "io.modelcontextprotocol/model-immediate-response"

        # Build a CreateTaskResult carrying a model-immediate-response in _meta.
        task = Task(
            task_id="test-123",
            status="working",
            created_at=TEST_DATETIME,
            last_updated_at=TEST_DATETIME,
            ttl=60000,
        )
        immediate_msg = "Task started, processing your request..."
        # Note: Must use _meta= (alias) not meta= due to Pydantic alias handling
        result = CreateTaskResult(
            task=task,
            **{"_meta": {MODEL_IMMEDIATE_RESPONSE_KEY: immediate_msg}},
        )

        # The metadata must be present on the model...
        assert result.meta is not None
        assert MODEL_IMMEDIATE_RESPONSE_KEY in result.meta
        assert result.meta[MODEL_IMMEDIATE_RESPONSE_KEY] == immediate_msg

        # ...and must round-trip through serialization under the _meta alias.
        serialized = result.model_dump(by_alias=True)
        assert "_meta" in serialized
        assert MODEL_IMMEDIATE_RESPONSE_KEY in serialized["_meta"]
        assert serialized["_meta"][MODEL_IMMEDIATE_RESPONSE_KEY] == immediate_msg
class TestGettingTaskStatus:
    """Request: {"method": "tasks/get", "params": {"taskId": "..."}}

    Response: the full Task object with current status and pollInterval.
    """

    def test_tasks_get_returns_task_object(self) -> None:
        """tasks/get MUST return full Task object."""
        pytest.skip("TODO")

    def test_tasks_get_returns_current_status(self) -> None:
        """tasks/get MUST return current status."""
        pytest.skip("TODO")

    def test_tasks_get_may_return_poll_interval(self) -> None:
        """tasks/get MAY return pollInterval."""
        pytest.skip("TODO")

    def test_tasks_get_invalid_task_id_returns_error(self) -> None:
        """tasks/get with invalid taskId MUST return -32602."""
        pytest.skip("TODO")

    def test_tasks_get_nonexistent_task_id_returns_error(self) -> None:
        """tasks/get with nonexistent taskId MUST return -32602."""
        pytest.skip("TODO")
class TestRetrievingResults:
    """Request: {"method": "tasks/result", "params": {"taskId": "..."}}

    Response: the actual operation result structure (e.g., CallToolResult).
    The call blocks until the task reaches a terminal status.
    """

    def test_tasks_result_returns_underlying_result(self) -> None:
        """tasks/result MUST return exactly what underlying request would return."""
        pytest.skip("TODO")

    def test_tasks_result_blocks_until_terminal(self) -> None:
        """tasks/result MUST block for non-terminal tasks."""
        pytest.skip("TODO")

    def test_tasks_result_unblocks_on_terminal(self) -> None:
        """tasks/result MUST unblock upon reaching terminal status."""
        pytest.skip("TODO")

    def test_tasks_result_includes_related_task_metadata(self) -> None:
        """tasks/result MUST include io.modelcontextprotocol/related-task in _meta."""
        pytest.skip("TODO")

    def test_tasks_result_returns_error_for_failed_task(self) -> None:
        """tasks/result returns the same error the underlying request
        would have produced for failed tasks.
        """
        pytest.skip("TODO")

    def test_tasks_result_invalid_task_id_returns_error(self) -> None:
        """tasks/result with invalid taskId MUST return -32602."""
        pytest.skip("TODO")
class TestListingTasks:
    """Request: {"method": "tasks/list", "params": {"cursor": "optional"}}

    Response: array of tasks, paginated via nextCursor.
    """

    def test_tasks_list_returns_array_of_tasks(self) -> None:
        """tasks/list MUST return array of tasks."""
        pytest.skip("TODO")

    def test_tasks_list_pagination_with_cursor(self) -> None:
        """tasks/list supports pagination via cursor."""
        pytest.skip("TODO")

    def test_tasks_list_returns_next_cursor_when_more_results(self) -> None:
        """tasks/list MUST return nextCursor when more results available."""
        pytest.skip("TODO")

    def test_tasks_list_cursors_are_opaque(self) -> None:
        """Implementers MUST treat cursors as opaque tokens."""
        pytest.skip("TODO")

    def test_tasks_list_invalid_cursor_returns_error(self) -> None:
        """tasks/list with invalid cursor MUST return -32602."""
        pytest.skip("TODO")
class TestCancellingTasks:
"""Request: {"method": "tasks/cancel", "params": {"taskId": "..."}}
Response: Returns the task object with status: "cancelled".
"""
def test_tasks_cancel_returns_cancelled_task(self) -> None:
"""tasks/cancel MUST return task with status=cancelled."""
pytest.skip("TODO")
def test_tasks_cancel_terminal_task_returns_error(self) -> None:
"""Cancelling already-terminal task MUST return -32602."""
pytest.skip("TODO")
def test_tasks_cancel_completed_task_returns_error(self) -> None:
"""Cancelling completed task MUST return -32602."""
pytest.skip("TODO")
def test_tasks_cancel_failed_task_returns_error(self) -> None:
"""Cancelling failed task MUST return -32602."""
pytest.skip("TODO")
def test_tasks_cancel_already_cancelled_task_returns_error(self) -> None:
"""Cancelling already-cancelled task MUST return -32602."""
pytest.skip("TODO")
def test_tasks_cancel_invalid_task_id_returns_error(self) -> None:
"""tasks/cancel with invalid taskId MUST return -32602."""
pytest.skip("TODO")
class TestStatusNotifications:
"""Receivers MAY send: {"method": "notifications/tasks/status", "params": {...}}
These are optional; requestors MUST NOT rely on them and SHOULD continue polling.
"""
def test_receiver_may_send_status_notification(self) -> None:
"""Receiver MAY send notifications/tasks/status."""
pytest.skip("TODO")
def test_status_notification_contains_task_id(self) -> None:
"""Status notification MUST contain taskId."""
pytest.skip("TODO")
def test_status_notification_contains_status(self) -> None:
"""Status notification MUST contain status."""
pytest.skip("TODO")
class TestTaskManagement:
"""- Receivers generate unique task IDs as strings
- Tasks must begin in working status
- createdAt timestamps must be ISO 8601 formatted
- Receivers may override requested ttl but must return actual value
- Receivers may delete tasks after TTL expires
- All task-related messages must include io.modelcontextprotocol/related-task
in _meta except for tasks/get, tasks/list, tasks/cancel operations
"""
def test_task_ids_are_unique_strings(self) -> None:
"""Receivers MUST generate unique task IDs as strings."""
pytest.skip("TODO")
def test_multiple_tasks_have_unique_ids(self) -> None:
"""Multiple tasks MUST have unique IDs."""
pytest.skip("TODO")
def test_receiver_may_delete_tasks_after_ttl(self) -> None:
"""Receivers MAY delete tasks after TTL expires."""
pytest.skip("TODO")
def test_related_task_metadata_in_task_messages(self) -> None:
"""All task-related messages MUST include io.modelcontextprotocol/related-task
in _meta.
"""
pytest.skip("TODO")
def test_tasks_get_does_not_require_related_task_metadata(self) -> None:
"""tasks/get does not require related-task metadata."""
pytest.skip("TODO")
def test_tasks_list_does_not_require_related_task_metadata(self) -> None:
"""tasks/list does not require related-task metadata."""
pytest.skip("TODO")
def test_tasks_cancel_does_not_require_related_task_metadata(self) -> None:
"""tasks/cancel does not require related-task metadata."""
pytest.skip("TODO")
class TestResultHandling:
"""- Receivers must return CreateTaskResult immediately upon accepting task-augmented requests
- tasks/result must return exactly what the underlying request would return
- tasks/result blocks for non-terminal tasks; must unblock upon reaching terminal status
"""
def test_create_task_result_returned_immediately(self) -> None:
"""Receiver MUST return CreateTaskResult immediately (not after work completes)."""
pytest.skip("TODO")
def test_tasks_result_matches_underlying_result_structure(self) -> None:
"""tasks/result MUST return same structure as underlying request."""
pytest.skip("TODO")
def test_tasks_result_for_tool_call_returns_call_tool_result(self) -> None:
"""tasks/result for tools/call returns CallToolResult."""
pytest.skip("TODO")
class TestProgressTracking:
"""Task-augmented requests support progress notifications using the progressToken
mechanism, which remains valid throughout the task lifetime.
"""
def test_progress_token_valid_throughout_task_lifetime(self) -> None:
"""progressToken remains valid throughout task lifetime."""
pytest.skip("TODO")
def test_progress_notifications_sent_during_task_execution(self) -> None:
"""Progress notifications can be sent during task execution."""
pytest.skip("TODO")
class TestProtocolErrors:
"""Protocol Errors (JSON-RPC standard codes):
- -32600 (Invalid request): Non-task requests to endpoint requiring task augmentation
- -32602 (Invalid params): Invalid/nonexistent taskId, invalid cursor, cancel terminal task
- -32603 (Internal error): Server-side execution failures
"""
def test_invalid_request_for_required_task_augmentation(self) -> None:
"""Non-task request to task-required endpoint returns -32600."""
pytest.skip("TODO")
def test_invalid_params_for_invalid_task_id(self) -> None:
"""Invalid taskId returns -32602."""
pytest.skip("TODO")
def test_invalid_params_for_nonexistent_task_id(self) -> None:
"""Nonexistent taskId returns -32602."""
pytest.skip("TODO")
def test_invalid_params_for_invalid_cursor(self) -> None:
"""Invalid cursor in tasks/list returns -32602."""
pytest.skip("TODO")
def test_invalid_params_for_cancel_terminal_task(self) -> None:
"""Attempt to cancel terminal task returns -32602."""
pytest.skip("TODO")
def test_internal_error_for_server_failure(self) -> None:
"""Server-side execution failure returns -32603."""
pytest.skip("TODO")
class TestTaskExecutionErrors:
"""When underlying requests fail, the task moves to failed status.
- tasks/get response should include statusMessage explaining failure
- tasks/result returns same error the underlying request would have produced
- For tool calls, isError: true moves task to failed status
"""
def test_underlying_failure_moves_task_to_failed(self) -> None:
"""Underlying request failure moves task to failed status."""
pytest.skip("TODO")
def test_failed_task_has_status_message(self) -> None:
"""Failed task SHOULD include statusMessage explaining failure."""
pytest.skip("TODO")
def test_tasks_result_returns_underlying_error(self) -> None:
"""tasks/result returns same error underlying request would produce."""
pytest.skip("TODO")
def test_tool_call_is_error_true_moves_to_failed(self) -> None:
"""Tool call with isError: true moves task to failed status."""
pytest.skip("TODO")
class TestTaskObject:
"""Task Object fields:
- taskId: String identifier
- status: Current execution state
- statusMessage: Optional human-readable description
- createdAt: ISO 8601 timestamp of creation
- ttl: Milliseconds before potential deletion
- pollInterval: Suggested milliseconds between polls
"""
def test_task_has_task_id_string(self) -> None:
"""Task MUST have taskId as string."""
pytest.skip("TODO")
def test_task_has_status(self) -> None:
"""Task MUST have status."""
pytest.skip("TODO")
def test_task_status_message_is_optional(self) -> None:
"""Task statusMessage is optional."""
pytest.skip("TODO")
def test_task_has_created_at(self) -> None:
"""Task MUST have createdAt."""
pytest.skip("TODO")
def test_task_ttl_is_optional(self) -> None:
"""Task ttl is optional."""
pytest.skip("TODO")
def test_task_poll_interval_is_optional(self) -> None:
"""Task pollInterval is optional."""
pytest.skip("TODO")
class TestRelatedTaskMetadata:
"""Related Task Metadata structure:
{"_meta": {"io.modelcontextprotocol/related-task": {"taskId": "..."}}}
"""
def test_related_task_metadata_structure(self) -> None:
"""Related task metadata has correct structure."""
pytest.skip("TODO")
def test_related_task_metadata_contains_task_id(self) -> None:
"""Related task metadata contains taskId."""
pytest.skip("TODO")
class TestAccessAndIsolation:
"""- Task IDs enable access to sensitive results
- Authorization context binding is essential where available
- For non-authorized environments: strong entropy IDs, strict TTL limits
"""
def test_task_bound_to_authorization_context(self) -> None:
"""Receivers receiving authorization context MUST bind tasks to that context."""
pytest.skip("TODO")
def test_reject_task_operations_outside_authorization_context(self) -> None:
"""Receivers MUST reject task operations for tasks outside
requestor's authorization context.
"""
pytest.skip("TODO")
def test_non_authorized_environments_use_secure_ids(self) -> None:
"""For non-authorized environments, receivers SHOULD use
cryptographically secure IDs.
"""
pytest.skip("TODO")
def test_non_authorized_environments_use_shorter_ttls(self) -> None:
"""For non-authorized environments, receivers SHOULD use shorter TTLs."""
pytest.skip("TODO")
class TestResourceLimits:
"""Receivers should:
- Enforce concurrent task limits per requestor
- Implement maximum TTL constraints
- Clean up expired tasks promptly
"""
def test_concurrent_task_limit_enforced(self) -> None:
"""Receiver SHOULD enforce concurrent task limits per requestor."""
pytest.skip("TODO")
def test_maximum_ttl_constraint_enforced(self) -> None:
"""Receiver SHOULD implement maximum TTL constraints."""
pytest.skip("TODO")
def test_expired_tasks_cleaned_up(self) -> None:
"""Receiver SHOULD clean up expired tasks promptly."""
pytest.skip("TODO")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/experimental/tasks/test_spec_compliance.py",
"license": "MIT License",
"lines": 541,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/test_validation.py | """Tests for server validation functions."""
import pytest
from mcp.server.validation import (
check_sampling_tools_capability,
validate_sampling_tools,
validate_tool_use_result_messages,
)
from mcp.shared.exceptions import MCPError
from mcp.types import (
ClientCapabilities,
SamplingCapability,
SamplingMessage,
SamplingToolsCapability,
TextContent,
Tool,
ToolChoice,
ToolResultContent,
ToolUseContent,
)
# Tests for check_sampling_tools_capability function
def test_check_sampling_tools_capability_returns_false_when_caps_none() -> None:
"""Returns False when client_caps is None."""
assert check_sampling_tools_capability(None) is False
def test_check_sampling_tools_capability_returns_false_when_sampling_none() -> None:
"""Returns False when client_caps.sampling is None."""
caps = ClientCapabilities()
assert check_sampling_tools_capability(caps) is False
def test_check_sampling_tools_capability_returns_false_when_tools_none() -> None:
"""Returns False when client_caps.sampling.tools is None."""
caps = ClientCapabilities(sampling=SamplingCapability())
assert check_sampling_tools_capability(caps) is False
def test_check_sampling_tools_capability_returns_true_when_tools_present() -> None:
"""Returns True when sampling.tools is present."""
caps = ClientCapabilities(sampling=SamplingCapability(tools=SamplingToolsCapability()))
assert check_sampling_tools_capability(caps) is True
# Tests for validate_sampling_tools function
def test_validate_sampling_tools_no_error_when_tools_none() -> None:
"""No error when tools and tool_choice are None."""
validate_sampling_tools(None, None, None) # Should not raise
def test_validate_sampling_tools_raises_when_tools_provided_but_no_capability() -> None:
"""Raises MCPError when tools provided but client doesn't support."""
tool = Tool(name="test", input_schema={"type": "object"})
with pytest.raises(MCPError) as exc_info:
validate_sampling_tools(None, [tool], None)
assert "sampling tools capability" in str(exc_info.value)
def test_validate_sampling_tools_raises_when_tool_choice_provided_but_no_capability() -> None:
"""Raises MCPError when tool_choice provided but client doesn't support."""
with pytest.raises(MCPError) as exc_info:
validate_sampling_tools(None, None, ToolChoice(mode="auto"))
assert "sampling tools capability" in str(exc_info.value)
def test_validate_sampling_tools_no_error_when_capability_present() -> None:
"""No error when client has sampling.tools capability."""
caps = ClientCapabilities(sampling=SamplingCapability(tools=SamplingToolsCapability()))
tool = Tool(name="test", input_schema={"type": "object"})
validate_sampling_tools(caps, [tool], ToolChoice(mode="auto")) # Should not raise
# Tests for validate_tool_use_result_messages function
def test_validate_tool_use_result_messages_no_error_for_empty_messages() -> None:
"""No error when messages list is empty."""
validate_tool_use_result_messages([]) # Should not raise
def test_validate_tool_use_result_messages_no_error_for_simple_text_messages() -> None:
"""No error for simple text messages."""
messages = [
SamplingMessage(role="user", content=TextContent(type="text", text="Hello")),
SamplingMessage(role="assistant", content=TextContent(type="text", text="Hi")),
]
validate_tool_use_result_messages(messages) # Should not raise
def test_validate_tool_use_result_messages_raises_when_tool_result_mixed_with_other_content() -> None:
"""Raises when tool_result is mixed with other content types."""
messages = [
SamplingMessage(
role="user",
content=[
ToolResultContent(type="tool_result", tool_use_id="123"),
TextContent(type="text", text="also this"),
],
),
]
with pytest.raises(ValueError, match="only tool_result content"):
validate_tool_use_result_messages(messages)
def test_validate_tool_use_result_messages_raises_when_tool_result_without_previous_tool_use() -> None:
"""Raises when tool_result appears without preceding tool_use."""
messages = [
SamplingMessage(
role="user",
content=ToolResultContent(type="tool_result", tool_use_id="123"),
),
]
with pytest.raises(ValueError, match="previous message containing tool_use"):
validate_tool_use_result_messages(messages)
def test_validate_tool_use_result_messages_raises_when_tool_result_ids_dont_match_tool_use() -> None:
"""Raises when tool_result IDs don't match tool_use IDs."""
messages = [
SamplingMessage(
role="assistant",
content=ToolUseContent(type="tool_use", id="tool-1", name="test", input={}),
),
SamplingMessage(
role="user",
content=ToolResultContent(type="tool_result", tool_use_id="tool-2"),
),
]
with pytest.raises(ValueError, match="do not match"):
validate_tool_use_result_messages(messages)
def test_validate_tool_use_result_messages_no_error_when_tool_result_matches_tool_use() -> None:
"""No error when tool_result IDs match tool_use IDs."""
messages = [
SamplingMessage(
role="assistant",
content=ToolUseContent(type="tool_use", id="tool-1", name="test", input={}),
),
SamplingMessage(
role="user",
content=ToolResultContent(type="tool_result", tool_use_id="tool-1"),
),
]
validate_tool_use_result_messages(messages) # Should not raise
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_validation.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/snippets/clients/url_elicitation_client.py | """URL Elicitation Client Example.
Demonstrates how clients handle URL elicitation requests from servers.
This is the Python equivalent of TypeScript SDK's elicitationUrlExample.ts,
focused on URL elicitation patterns without OAuth complexity.
Features demonstrated:
1. Client elicitation capability declaration
2. Handling elicitation requests from servers via callback
3. Catching UrlElicitationRequiredError from tool calls
4. Browser interaction with security warnings
5. Interactive CLI for testing
Run with:
cd examples/snippets
uv run elicitation-client
Requires a server with URL elicitation tools running. Start the elicitation
server first:
uv run server elicitation sse
"""
from __future__ import annotations
import asyncio
import json
import webbrowser
from typing import Any
from urllib.parse import urlparse
from mcp import ClientSession, types
from mcp.client.context import ClientRequestContext
from mcp.client.sse import sse_client
from mcp.shared.exceptions import MCPError, UrlElicitationRequiredError
from mcp.types import URL_ELICITATION_REQUIRED
async def handle_elicitation(
context: ClientRequestContext,
params: types.ElicitRequestParams,
) -> types.ElicitResult | types.ErrorData:
"""Handle elicitation requests from the server.
This callback is invoked when the server sends an elicitation/request.
For URL mode, we prompt the user and optionally open their browser.
"""
if params.mode == "url":
return await handle_url_elicitation(params)
else:
# We only support URL mode in this example
return types.ErrorData(
code=types.INVALID_REQUEST,
message=f"Unsupported elicitation mode: {params.mode}",
)
ALLOWED_SCHEMES = {"http", "https"}
async def handle_url_elicitation(
params: types.ElicitRequestParams,
) -> types.ElicitResult:
"""Handle URL mode elicitation - show security warning and optionally open browser.
This function demonstrates the security-conscious approach to URL elicitation:
1. Validate the URL scheme before prompting the user
2. Display the full URL and domain for user inspection
3. Show the server's reason for requesting this interaction
4. Require explicit user consent before opening any URL
"""
# Extract URL parameters - these are available on URL mode requests
url = getattr(params, "url", None)
elicitation_id = getattr(params, "elicitationId", None)
message = params.message
if not url:
print("Error: No URL provided in elicitation request")
return types.ElicitResult(action="cancel")
# Reject dangerous URL schemes before prompting the user
parsed = urlparse(str(url))
if parsed.scheme.lower() not in ALLOWED_SCHEMES:
print(f"\nRejecting URL with disallowed scheme '{parsed.scheme}': {url}")
return types.ElicitResult(action="decline")
# Extract domain for security display
domain = extract_domain(url)
# Security warning - always show the user what they're being asked to do
print("\n" + "=" * 60)
print("SECURITY WARNING: External URL Request")
print("=" * 60)
print("\nThe server is requesting you to open an external URL.")
print(f"\n Domain: {domain}")
print(f" Full URL: {url}")
print("\n Server's reason:")
print(f" {message}")
print(f"\n Elicitation ID: {elicitation_id}")
print("\n" + "-" * 60)
# Get explicit user consent
try:
response = input("\nOpen this URL in your browser? (y/n): ").strip().lower()
except EOFError:
return types.ElicitResult(action="cancel")
if response in ("n", "no"):
print("URL navigation declined.")
return types.ElicitResult(action="decline")
elif response not in ("y", "yes"):
print("Invalid response. Cancelling.")
return types.ElicitResult(action="cancel")
# Open the browser
print(f"\nOpening browser to: {url}")
try:
webbrowser.open(url)
except Exception as e:
print(f"Failed to open browser: {e}")
print(f"Please manually open: {url}")
print("Waiting for you to complete the interaction in your browser...")
print("(The server will continue once you've finished)")
return types.ElicitResult(action="accept")
def extract_domain(url: str) -> str:
"""Extract domain from URL for security display."""
try:
return urlparse(url).netloc
except Exception:
return "unknown"
async def call_tool_with_error_handling(
session: ClientSession,
tool_name: str,
arguments: dict[str, Any],
) -> types.CallToolResult | None:
"""Call a tool, handling UrlElicitationRequiredError if raised.
When a server tool needs URL elicitation before it can proceed,
it can either:
1. Send an elicitation request directly (handled by elicitation_callback)
2. Return an error with code -32042 (URL_ELICITATION_REQUIRED)
This function demonstrates handling case 2 - catching the error
and processing the required URL elicitations.
"""
try:
result = await session.call_tool(tool_name, arguments)
# Check if the tool returned an error in the result
if result.is_error:
print(f"Tool returned error: {result.content}")
return None
return result
except MCPError as e:
# Check if this is a URL elicitation required error
if e.code == URL_ELICITATION_REQUIRED:
print("\n[Tool requires URL elicitation to proceed]")
# Convert to typed error to access elicitations
url_error = UrlElicitationRequiredError.from_error(e.error)
# Process each required elicitation
for elicitation in url_error.elicitations:
await handle_url_elicitation(elicitation)
return None
else:
# Re-raise other MCP errors
print(f"MCP Error: {e.error.message} (code: {e.error.code})")
return None
def print_help() -> None:
"""Print available commands."""
print("\nAvailable commands:")
print(" list-tools - List available tools")
print(" call <name> [json-args] - Call a tool with optional JSON arguments")
print(" secure-payment - Test URL elicitation via ctx.elicit_url()")
print(" connect-service - Test URL elicitation via UrlElicitationRequiredError")
print(" help - Show this help")
print(" quit - Exit the program")
def print_tool_result(result: types.CallToolResult | None) -> None:
"""Print a tool call result."""
if not result:
return
print("\nTool result:")
for content in result.content:
if isinstance(content, types.TextContent):
print(f" {content.text}")
else:
print(f" [{content.type}]")
async def handle_list_tools(session: ClientSession) -> None:
"""Handle the list-tools command."""
tools = await session.list_tools()
if tools.tools:
print("\nAvailable tools:")
for tool in tools.tools:
print(f" - {tool.name}: {tool.description or 'No description'}")
else:
print("No tools available")
async def handle_call_command(session: ClientSession, command: str) -> None:
"""Handle the call command."""
parts = command.split(maxsplit=2)
if len(parts) < 2:
print("Usage: call <tool-name> [json-args]")
return
tool_name = parts[1]
args: dict[str, Any] = {}
if len(parts) > 2:
try:
args = json.loads(parts[2])
except json.JSONDecodeError as e:
print(f"Invalid JSON arguments: {e}")
return
print(f"\nCalling tool '{tool_name}' with args: {args}")
result = await call_tool_with_error_handling(session, tool_name, args)
print_tool_result(result)
async def process_command(session: ClientSession, command: str) -> bool:
"""Process a single command. Returns False if should exit."""
if command in {"quit", "exit"}:
print("Goodbye!")
return False
if command == "help":
print_help()
elif command == "list-tools":
await handle_list_tools(session)
elif command.startswith("call "):
await handle_call_command(session, command)
elif command == "secure-payment":
print("\nTesting secure_payment tool (uses ctx.elicit_url())...")
result = await call_tool_with_error_handling(session, "secure_payment", {"amount": 99.99})
print_tool_result(result)
elif command == "connect-service":
print("\nTesting connect_service tool (raises UrlElicitationRequiredError)...")
result = await call_tool_with_error_handling(session, "connect_service", {"service_name": "github"})
print_tool_result(result)
else:
print(f"Unknown command: {command}")
print("Type 'help' for available commands.")
return True
async def run_command_loop(session: ClientSession) -> None:
"""Run the interactive command loop."""
while True:
try:
command = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print("\n")
break
if not command:
continue
if not await process_command(session, command):
break
async def main() -> None:
"""Run the interactive URL elicitation client."""
server_url = "http://localhost:8000/sse"
print("=" * 60)
print("URL Elicitation Client Example")
print("=" * 60)
print(f"\nConnecting to: {server_url}")
print("(Start server with: cd examples/snippets && uv run server elicitation sse)")
try:
async with sse_client(server_url) as (read, write):
async with ClientSession(
read,
write,
elicitation_callback=handle_elicitation,
) as session:
await session.initialize()
print("\nConnected! Type 'help' for available commands.\n")
await run_command_loop(session)
except ConnectionRefusedError:
print(f"\nError: Could not connect to {server_url}")
print("Make sure the elicitation server is running:")
print(" cd examples/snippets && uv run server elicitation sse")
except Exception as e:
print(f"\nError: {e}")
raise
def run() -> None:
"""Entry point for the client script."""
asyncio.run(main())
if __name__ == "__main__":
run()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/url_elicitation_client.py",
"license": "MIT License",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/shared/test_exceptions.py | """Tests for MCP exception classes."""
import pytest
from mcp.shared.exceptions import MCPError, UrlElicitationRequiredError
from mcp.types import URL_ELICITATION_REQUIRED, ElicitRequestURLParams, ErrorData
def test_url_elicitation_required_error_create_with_single_elicitation() -> None:
"""Test creating error with a single elicitation."""
elicitation = ElicitRequestURLParams(
mode="url",
message="Auth required",
url="https://example.com/auth",
elicitation_id="test-123",
)
error = UrlElicitationRequiredError([elicitation])
assert error.error.code == URL_ELICITATION_REQUIRED
assert error.error.message == "URL elicitation required"
assert len(error.elicitations) == 1
assert error.elicitations[0].elicitation_id == "test-123"
def test_url_elicitation_required_error_create_with_multiple_elicitations() -> None:
"""Test creating error with multiple elicitations uses plural message."""
elicitations = [
ElicitRequestURLParams(
mode="url",
message="Auth 1",
url="https://example.com/auth1",
elicitation_id="test-1",
),
ElicitRequestURLParams(
mode="url",
message="Auth 2",
url="https://example.com/auth2",
elicitation_id="test-2",
),
]
error = UrlElicitationRequiredError(elicitations)
assert error.error.message == "URL elicitations required" # Plural
assert len(error.elicitations) == 2
def test_url_elicitation_required_error_custom_message() -> None:
"""Test creating error with a custom message."""
elicitation = ElicitRequestURLParams(
mode="url",
message="Auth required",
url="https://example.com/auth",
elicitation_id="test-123",
)
error = UrlElicitationRequiredError([elicitation], message="Custom message")
assert error.error.message == "Custom message"
def test_url_elicitation_required_error_from_error_data() -> None:
"""Test reconstructing error from ErrorData."""
error_data = ErrorData(
code=URL_ELICITATION_REQUIRED,
message="URL elicitation required",
data={
"elicitations": [
{
"mode": "url",
"message": "Auth required",
"url": "https://example.com/auth",
"elicitationId": "test-123",
}
]
},
)
error = UrlElicitationRequiredError.from_error(error_data)
assert len(error.elicitations) == 1
assert error.elicitations[0].elicitation_id == "test-123"
assert error.elicitations[0].url == "https://example.com/auth"
def test_url_elicitation_required_error_from_error_data_wrong_code() -> None:
"""Test that from_error raises ValueError for wrong error code."""
error_data = ErrorData(
code=-32600, # Wrong code
message="Some other error",
data={},
)
with pytest.raises(ValueError, match="Expected error code"):
UrlElicitationRequiredError.from_error(error_data)
def test_url_elicitation_required_error_serialization_roundtrip() -> None:
"""Test that error can be serialized and reconstructed."""
original = UrlElicitationRequiredError(
[
ElicitRequestURLParams(
mode="url",
message="Auth required",
url="https://example.com/auth",
elicitation_id="test-123",
)
]
)
# Simulate serialization over wire
error_data = original.error
# Reconstruct
reconstructed = UrlElicitationRequiredError.from_error(error_data)
assert reconstructed.elicitations[0].elicitation_id == original.elicitations[0].elicitation_id
assert reconstructed.elicitations[0].url == original.elicitations[0].url
assert reconstructed.elicitations[0].message == original.elicitations[0].message
def test_url_elicitation_required_error_data_contains_elicitations() -> None:
"""Test that error data contains properly serialized elicitations."""
elicitation = ElicitRequestURLParams(
mode="url",
message="Please authenticate",
url="https://example.com/oauth",
elicitation_id="oauth-flow-1",
)
error = UrlElicitationRequiredError([elicitation])
assert error.error.data is not None
assert "elicitations" in error.error.data
elicit_data = error.error.data["elicitations"][0]
assert elicit_data["mode"] == "url"
assert elicit_data["message"] == "Please authenticate"
assert elicit_data["url"] == "https://example.com/oauth"
assert elicit_data["elicitationId"] == "oauth-flow-1"
def test_url_elicitation_required_error_inherits_from_mcp_error() -> None:
"""Test that UrlElicitationRequiredError inherits from MCPError."""
elicitation = ElicitRequestURLParams(
mode="url",
message="Auth required",
url="https://example.com/auth",
elicitation_id="test-123",
)
error = UrlElicitationRequiredError([elicitation])
assert isinstance(error, MCPError)
assert isinstance(error, Exception)
def test_url_elicitation_required_error_exception_message() -> None:
"""Test that exception message is set correctly."""
elicitation = ElicitRequestURLParams(
mode="url",
message="Auth required",
url="https://example.com/auth",
elicitation_id="test-123",
)
error = UrlElicitationRequiredError([elicitation])
# The exception's string representation should match the message
assert str(error) == "URL elicitation required"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_exceptions.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/shared/tool_name_validation.py | """Tool name validation utilities according to SEP-986.
Tool names SHOULD be between 1 and 128 characters in length (inclusive).
Tool names are case-sensitive.
Allowed characters: uppercase and lowercase ASCII letters (A-Z, a-z),
digits (0-9), underscore (_), dash (-), and dot (.).
Tool names SHOULD NOT contain spaces, commas, or other special characters.
See: https://modelcontextprotocol.io/specification/2025-11-25/server/tools#tool-names
"""
from __future__ import annotations
import logging
import re
from dataclasses import dataclass, field
logger = logging.getLogger(__name__)
# Regular expression for valid tool names according to SEP-986 specification
TOOL_NAME_REGEX = re.compile(r"^[A-Za-z0-9._-]{1,128}$")
# SEP reference URL for warning messages
SEP_986_URL = "https://modelcontextprotocol.io/specification/2025-11-25/server/tools#tool-names"
@dataclass
class ToolNameValidationResult:
"""Result of tool name validation.
Attributes:
is_valid: Whether the tool name conforms to SEP-986 requirements.
warnings: List of warning messages for non-conforming aspects.
"""
is_valid: bool
warnings: list[str] = field(default_factory=lambda: [])
def validate_tool_name(name: str) -> ToolNameValidationResult:
"""Validate a tool name according to the SEP-986 specification.
Args:
name: The tool name to validate.
Returns:
ToolNameValidationResult containing validation status and any warnings.
"""
warnings: list[str] = []
# Check for empty name
if not name:
return ToolNameValidationResult(
is_valid=False,
warnings=["Tool name cannot be empty"],
)
# Check length
if len(name) > 128:
return ToolNameValidationResult(
is_valid=False,
warnings=[f"Tool name exceeds maximum length of 128 characters (current: {len(name)})"],
)
# Check for problematic patterns (warnings, not validation failures)
if " " in name:
warnings.append("Tool name contains spaces, which may cause parsing issues")
if "," in name:
warnings.append("Tool name contains commas, which may cause parsing issues")
# Check for potentially confusing leading/trailing characters
if name.startswith("-") or name.endswith("-"):
warnings.append("Tool name starts or ends with a dash, which may cause parsing issues in some contexts")
if name.startswith(".") or name.endswith("."):
warnings.append("Tool name starts or ends with a dot, which may cause parsing issues in some contexts")
# Check for invalid characters
if not TOOL_NAME_REGEX.match(name):
# Find all invalid characters (unique, preserving order)
invalid_chars: list[str] = []
seen: set[str] = set()
for char in name:
if not re.match(r"[A-Za-z0-9._-]", char) and char not in seen:
invalid_chars.append(char)
seen.add(char)
warnings.append(f"Tool name contains invalid characters: {', '.join(repr(c) for c in invalid_chars)}")
warnings.append("Allowed characters are: A-Z, a-z, 0-9, underscore (_), dash (-), and dot (.)")
return ToolNameValidationResult(is_valid=False, warnings=warnings)
return ToolNameValidationResult(is_valid=True, warnings=warnings)
def issue_tool_name_warning(name: str, warnings: list[str]) -> None:
    """Emit WARNING-level log lines for a non-conforming tool name.

    Args:
        name: The tool name that triggered the warnings.
        warnings: Warning messages to log; nothing is logged when empty.
    """
    if not warnings:
        return
    logger.warning(f'Tool name validation warning for "{name}":')
    for message in warnings:
        logger.warning(f"  - {message}")
    logger.warning("Tool registration will proceed, but this may cause compatibility issues.")
    logger.warning("Consider updating the tool name to conform to the MCP tool naming standard.")
    logger.warning(f"See SEP-986 ({SEP_986_URL}) for more details.")
def validate_and_warn_tool_name(name: str) -> bool:
    """Validate a tool name and issue warnings for non-conforming names.

    This is the primary entry point for tool name validation: it runs the
    validator, logs any resulting warnings, and reports overall validity.

    Args:
        name: The tool name to validate.

    Returns:
        True if the name is valid, False otherwise.
    """
    outcome = validate_tool_name(name)
    issue_tool_name_warning(name, outcome.warnings)
    return outcome.is_valid
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/tool_name_validation.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/shared/test_tool_name_validation.py | """Tests for tool name validation utilities (SEP-986)."""
import logging
import pytest
from mcp.shared.tool_name_validation import (
issue_tool_name_warning,
validate_and_warn_tool_name,
validate_tool_name,
)
# Tests for validate_tool_name function - valid names
@pytest.mark.parametrize(
    "tool_name",
    [
        "getUser",
        "get_user_profile",
        "user-profile-update",
        "admin.tools.list",
        "DATA_EXPORT_v2.1",
        "a",
        "a" * 128,  # exactly at the 128-character limit, still valid
    ],
    ids=[
        "simple_alphanumeric",
        "with_underscores",
        "with_dashes",
        "with_dots",
        "mixed_characters",
        "single_character",
        "max_length_128",
    ],
)
def test_validate_tool_name_accepts_valid_names(tool_name: str) -> None:
    """Valid tool names should pass validation with no warnings."""
    result = validate_tool_name(tool_name)
    assert result.is_valid is True
    assert result.warnings == []
# Tests for validate_tool_name function - invalid names
def test_validate_tool_name_rejects_empty_name() -> None:
    """The empty string is rejected with a dedicated message."""
    outcome = validate_tool_name("")
    assert outcome.is_valid is False
    assert "Tool name cannot be empty" in outcome.warnings
def test_validate_tool_name_rejects_name_exceeding_max_length() -> None:
    """A 129-character name is one past the limit and must fail."""
    outcome = validate_tool_name("a" * 129)
    assert outcome.is_valid is False
    assert any("exceeds maximum length of 128 characters (current: 129)" in msg for msg in outcome.warnings)
@pytest.mark.parametrize(
    "tool_name,expected_char",
    [
        ("get user profile", "' '"),
        ("get,user,profile", "','"),
        ("user/profile/update", "'/'"),
        ("user@domain.com", "'@'"),
    ],
    ids=[
        "with_spaces",
        "with_commas",
        "with_slashes",
        "with_at_symbol",
    ],
)
def test_validate_tool_name_rejects_invalid_characters(tool_name: str, expected_char: str) -> None:
    """Names with invalid characters should be rejected."""
    # expected_char is the repr() form that the validator embeds in its warning.
    result = validate_tool_name(tool_name)
    assert result.is_valid is False
    assert any("invalid characters" in w and expected_char in w for w in result.warnings)
def test_validate_tool_name_rejects_multiple_invalid_chars() -> None:
    """All distinct invalid characters should be listed in one warning."""
    outcome = validate_tool_name("user name@domain,com")
    assert outcome.is_valid is False
    invalid_warning = next(msg for msg in outcome.warnings if "invalid characters" in msg)
    for rendered in ("' '", "'@'", "','"):
        assert rendered in invalid_warning
def test_validate_tool_name_rejects_unicode_characters() -> None:
    """Non-ASCII letters are outside the allowed character set."""
    outcome = validate_tool_name("user-\u00f1ame")  # n with tilde
    assert outcome.is_valid is False
# Tests for validate_tool_name function - warnings for problematic patterns
def test_validate_tool_name_warns_on_leading_dash() -> None:
    """A leading dash keeps the name valid but produces a warning."""
    outcome = validate_tool_name("-get-user")
    assert outcome.is_valid is True
    assert any("starts or ends with a dash" in msg for msg in outcome.warnings)
def test_validate_tool_name_warns_on_trailing_dash() -> None:
    """A trailing dash keeps the name valid but produces a warning."""
    outcome = validate_tool_name("get-user-")
    assert outcome.is_valid is True
    assert any("starts or ends with a dash" in msg for msg in outcome.warnings)
def test_validate_tool_name_warns_on_leading_dot() -> None:
    """A leading dot keeps the name valid but produces a warning."""
    outcome = validate_tool_name(".get.user")
    assert outcome.is_valid is True
    assert any("starts or ends with a dot" in msg for msg in outcome.warnings)
def test_validate_tool_name_warns_on_trailing_dot() -> None:
    """A trailing dot keeps the name valid but produces a warning."""
    outcome = validate_tool_name("get.user.")
    assert outcome.is_valid is True
    assert any("starts or ends with a dot" in msg for msg in outcome.warnings)
# Tests for issue_tool_name_warning function
def test_issue_tool_name_warning_logs_warnings(caplog: pytest.LogCaptureFixture) -> None:
    """Each warning, plus the boilerplate guidance, is logged at WARNING level."""
    messages = ["Warning 1", "Warning 2"]
    with caplog.at_level(logging.WARNING):
        issue_tool_name_warning("test-tool", messages)
    captured = caplog.text
    assert 'Tool name validation warning for "test-tool"' in captured
    assert "- Warning 1" in captured
    assert "- Warning 2" in captured
    assert "Tool registration will proceed" in captured
    assert "SEP-986" in captured
def test_issue_tool_name_warning_no_logging_for_empty_warnings(caplog: pytest.LogCaptureFixture) -> None:
    """An empty warning list must produce no log output at all."""
    with caplog.at_level(logging.WARNING):
        issue_tool_name_warning("test-tool", [])
    assert caplog.text == ""
# Tests for validate_and_warn_tool_name function
def test_validate_and_warn_tool_name_returns_true_for_valid_name() -> None:
    """A conforming name validates successfully."""
    outcome = validate_and_warn_tool_name("valid-tool-name")
    assert outcome is True
def test_validate_and_warn_tool_name_returns_false_for_invalid_name() -> None:
    """Empty, over-long, and bad-character names all fail."""
    for bad_name in ("", "a" * 129, "invalid name"):
        assert validate_and_warn_tool_name(bad_name) is False
def test_validate_and_warn_tool_name_logs_warnings_for_invalid_name(caplog: pytest.LogCaptureFixture) -> None:
    """An invalid name triggers WARNING-level log output."""
    with caplog.at_level(logging.WARNING):
        validate_and_warn_tool_name("invalid name")
    assert "Tool name validation warning" in caplog.text
def test_validate_and_warn_tool_name_no_warnings_for_clean_valid_name(caplog: pytest.LogCaptureFixture) -> None:
    """A clean, conforming name neither fails nor logs anything."""
    with caplog.at_level(logging.WARNING):
        outcome = validate_and_warn_tool_name("clean-tool-name")
    assert outcome is True
    assert caplog.text == ""
# Tests for edge cases
@pytest.mark.parametrize(
    "tool_name,is_valid,expected_warning_fragment",
    [
        ("...", True, "starts or ends with a dot"),
        ("---", True, "starts or ends with a dash"),
        ("///", False, "invalid characters"),
        ("user@name123", False, "invalid characters"),
    ],
    ids=[
        "only_dots",
        "only_dashes",
        "only_slashes",
        "mixed_valid_invalid",
    ],
)
def test_edge_cases(tool_name: str, is_valid: bool, expected_warning_fragment: str) -> None:
    """Various edge cases should be handled correctly."""
    # Dots/dashes are allowed characters, so all-dot/all-dash names only warn;
    # slashes and '@' are outside the allowed set and invalidate the name.
    result = validate_tool_name(tool_name)
    assert result.is_valid is is_valid
    assert any(expected_warning_fragment in w for w in result.warnings)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_tool_name_validation.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/client/test_scope_bug_1630.py | """Regression test for issue #1630: OAuth2 scope incorrectly set to resource_metadata URL.
This test verifies that when a 401 response contains both resource_metadata and scope
in the WWW-Authenticate header, the actual scope is used (not the resource_metadata URL).
"""
from unittest import mock
import httpx
import pytest
from pydantic import AnyUrl
from mcp.client.auth import OAuthClientProvider
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
class MockTokenStorage:
    """Minimal in-memory token storage implementation for tests."""

    def __init__(self) -> None:
        # Backing fields for the stored tokens and client registration info.
        self._token_cache: OAuthToken | None = None
        self._client_info_cache: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        """Return the stored tokens, if any."""
        return self._token_cache  # pragma: no cover

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Store tokens for later retrieval."""
        self._token_cache = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Return the stored client registration info, if any."""
        return self._client_info_cache  # pragma: no cover

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Store client registration info for later retrieval."""
        self._client_info_cache = client_info  # pragma: no cover
@pytest.mark.anyio
async def test_401_uses_www_auth_scope_not_resource_metadata_url():
    """Regression test for #1630: Ensure scope is extracted from WWW-Authenticate header,
    not the resource_metadata URL.

    When a 401 response contains:
        WWW-Authenticate: Bearer resource_metadata="https://...", scope="read write"
    The client should use "read write" as the scope, NOT the resource_metadata URL.
    """

    async def redirect_handler(url: str) -> None:
        pass  # pragma: no cover

    async def callback_handler() -> tuple[str, str | None]:
        return "test_auth_code", "test_state"  # pragma: no cover

    client_metadata = OAuthClientMetadata(
        redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        client_name="Test Client",
    )
    provider = OAuthClientProvider(
        server_url="https://api.example.com/mcp",
        client_metadata=client_metadata,
        storage=MockTokenStorage(),
        redirect_handler=redirect_handler,
        callback_handler=callback_handler,
    )
    # Start from a clean token state so the flow must go through discovery.
    provider.context.current_tokens = None
    provider.context.token_expiry_time = None
    # NOTE(review): sets a private attribute to skip initialization — verify
    # this stays in sync with OAuthClientProvider internals.
    provider._initialized = True

    # Pre-set client info to skip DCR
    provider.context.client_info = OAuthClientInformationFull(
        client_id="test_client",
        redirect_uris=[AnyUrl("http://localhost:3030/callback")],
    )

    test_request = httpx.Request("GET", "https://api.example.com/mcp")
    # async_auth_flow is an async generator driven below via asend().
    auth_flow = provider.async_auth_flow(test_request)

    # First request (no auth header yet)
    await auth_flow.__anext__()

    # 401 response with BOTH resource_metadata URL and scope in WWW-Authenticate
    # This is the key: the bug would use the URL as scope instead of "read write"
    resource_metadata_url = "https://api.example.com/.well-known/oauth-protected-resource"
    expected_scope = "read write"
    response_401 = httpx.Response(
        401,
        headers={"WWW-Authenticate": (f'Bearer resource_metadata="{resource_metadata_url}", scope="{expected_scope}"')},
        request=test_request,
    )

    # Send 401, expect PRM discovery request
    prm_request = await auth_flow.asend(response_401)
    assert ".well-known/oauth-protected-resource" in str(prm_request.url)

    # PRM response with scopes_supported (these should be overridden by WWW-Auth scope)
    prm_response = httpx.Response(
        200,
        content=(
            b'{"resource": "https://api.example.com/mcp", '
            b'"authorization_servers": ["https://auth.example.com"], '
            b'"scopes_supported": ["fallback:scope1", "fallback:scope2"]}'
        ),
        request=prm_request,
    )

    # Send PRM response, expect OAuth metadata discovery
    oauth_metadata_request = await auth_flow.asend(prm_response)
    assert ".well-known/oauth-authorization-server" in str(oauth_metadata_request.url)

    # OAuth metadata response
    oauth_metadata_response = httpx.Response(
        200,
        content=(
            b'{"issuer": "https://auth.example.com", '
            b'"authorization_endpoint": "https://auth.example.com/authorize", '
            b'"token_endpoint": "https://auth.example.com/token"}'
        ),
        request=oauth_metadata_request,
    )

    # Mock authorization to skip interactive flow
    provider._perform_authorization_code_grant = mock.AsyncMock(return_value=("test_auth_code", "test_code_verifier"))

    # Send OAuth metadata response, expect token request
    token_request = await auth_flow.asend(oauth_metadata_response)
    assert "token" in str(token_request.url)

    # NOW CHECK: The scope should be the WWW-Authenticate scope, NOT the URL
    # This is where the bug manifested - scope was set to resource_metadata_url
    actual_scope = provider.context.client_metadata.scope

    # This assertion would FAIL on main (scope would be the URL)
    # but PASS on the fix branch (scope is "read write")
    assert actual_scope == expected_scope, (
        f"Expected scope to be '{expected_scope}' from WWW-Authenticate header, "
        f"but got '{actual_scope}'. "
        f"If scope is '{resource_metadata_url}', the bug from #1630 is present."
    )

    # Verify it's definitely not the URL (explicit check for the bug)
    assert actual_scope != resource_metadata_url, (
        f"BUG #1630: Scope was incorrectly set to resource_metadata URL '{resource_metadata_url}' "
        f"instead of the actual scope '{expected_scope}'"
    )

    # Complete the flow to properly release the lock
    token_response = httpx.Response(
        200,
        content=b'{"access_token": "test_token", "token_type": "Bearer", "expires_in": 3600}',
        request=token_request,
    )
    final_request = await auth_flow.asend(token_response)
    assert final_request.headers["Authorization"] == "Bearer test_token"

    # Finish the flow
    final_response = httpx.Response(200, request=final_request)
    try:
        await auth_flow.asend(final_response)
    except StopAsyncIteration:
        pass
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_scope_bug_1630.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/client/auth/exceptions.py | class OAuthFlowError(Exception):
"""Base exception for OAuth flow errors."""
class OAuthTokenError(OAuthFlowError):
    """Raised when token operations fail (e.g. an invalid token endpoint response)."""
class OAuthRegistrationError(OAuthFlowError):
    """Raised when client registration fails (non-2xx status or invalid response body)."""
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/client/auth/exceptions.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:src/mcp/client/auth/utils.py | import re
from urllib.parse import urljoin, urlparse
from httpx import Request, Response
from pydantic import AnyUrl, ValidationError
from mcp.client.auth import OAuthRegistrationError, OAuthTokenError
from mcp.client.streamable_http import MCP_PROTOCOL_VERSION
from mcp.shared.auth import (
OAuthClientInformationFull,
OAuthClientMetadata,
OAuthMetadata,
OAuthToken,
ProtectedResourceMetadata,
)
from mcp.types import LATEST_PROTOCOL_VERSION
def extract_field_from_www_auth(response: Response, field_name: str) -> str | None:
    """Extract a named auth-param from the WWW-Authenticate header.

    Args:
        response: HTTP response whose WWW-Authenticate header is inspected.
        field_name: Name of the parameter to extract (e.g. "scope").

    Returns:
        Field value if found in WWW-Authenticate header, None otherwise
    """
    www_auth_header = response.headers.get("WWW-Authenticate")
    if not www_auth_header:
        return None

    # Pattern matches: field_name="value" or field_name=value (unquoted).
    # re.escape guards against regex metacharacters in field_name, and the
    # leading \b prevents matching a longer parameter that merely ends with
    # field_name (e.g. "sub_scope" when looking for "scope").
    pattern = rf'\b{re.escape(field_name)}=(?:"([^"]+)"|([^\s,]+))'
    match = re.search(pattern, www_auth_header)

    if match:
        # Return quoted value if present, otherwise unquoted value
        return match.group(1) or match.group(2)

    return None
def extract_scope_from_www_auth(response: Response) -> str | None:
    """Extract scope parameter from WWW-Authenticate header as per RFC 6750.

    Returns:
        Scope string if found in WWW-Authenticate header, None otherwise
    """
    scope_value = extract_field_from_www_auth(response, "scope")
    return scope_value
def extract_resource_metadata_from_www_auth(response: Response) -> str | None:
    """Extract protected resource metadata URL from WWW-Authenticate header as per RFC 9728.

    Returns:
        Resource metadata URL if found in WWW-Authenticate header, None otherwise
    """
    # Compare against None explicitly rather than relying on truthiness:
    # response objects in some HTTP libraries (e.g. requests) define __bool__
    # from the status code, which would silently skip valid 401 responses.
    if response is None or response.status_code != 401:
        return None  # pragma: no cover

    return extract_field_from_www_auth(response, "resource_metadata")
def build_protected_resource_metadata_discovery_urls(www_auth_url: str | None, server_url: str) -> list[str]:
"""Build ordered list of URLs to try for protected resource metadata discovery.
Per SEP-985, the client MUST:
1. Try resource_metadata from WWW-Authenticate header (if present)
2. Fall back to path-based well-known URI: /.well-known/oauth-protected-resource/{path}
3. Fall back to root-based well-known URI: /.well-known/oauth-protected-resource
Args:
www_auth_url: Optional resource_metadata URL extracted from the WWW-Authenticate header
server_url: Server URL
Returns:
Ordered list of URLs to try for discovery
"""
urls: list[str] = []
# Priority 1: WWW-Authenticate header with resource_metadata parameter
if www_auth_url:
urls.append(www_auth_url)
# Priority 2-3: Well-known URIs (RFC 9728)
parsed = urlparse(server_url)
base_url = f"{parsed.scheme}://{parsed.netloc}"
# Priority 2: Path-based well-known URI (if server has a path component)
if parsed.path and parsed.path != "/":
path_based_url = urljoin(base_url, f"/.well-known/oauth-protected-resource{parsed.path}")
urls.append(path_based_url)
# Priority 3: Root-based well-known URI
root_based_url = urljoin(base_url, "/.well-known/oauth-protected-resource")
urls.append(root_based_url)
return urls
def get_client_metadata_scopes(
    www_authenticate_scope: str | None,
    protected_resource_metadata: ProtectedResourceMetadata | None,
    authorization_server_metadata: OAuthMetadata | None = None,
) -> str | None:
    """Select scopes as outlined in the 'Scope Selection Strategy' in the MCP spec.

    Priority order:
      1. scope from the WWW-Authenticate header
      2. scopes_supported from the protected resource metadata
      3. scopes_supported from the authorization server metadata
      4. otherwise omit the scope parameter entirely
    """
    if www_authenticate_scope is not None:
        return www_authenticate_scope
    if protected_resource_metadata is not None and protected_resource_metadata.scopes_supported is not None:
        return " ".join(protected_resource_metadata.scopes_supported)
    if authorization_server_metadata is not None and authorization_server_metadata.scopes_supported is not None:
        return " ".join(authorization_server_metadata.scopes_supported)  # pragma: no cover
    return None
def build_oauth_authorization_server_metadata_discovery_urls(auth_server_url: str | None, server_url: str) -> list[str]:
"""Generate an ordered list of URLs for authorization server metadata discovery.
Args:
auth_server_url: OAuth Authorization Server Metadata URL if found, otherwise None
server_url: URL for the MCP server, used as a fallback if auth_server_url is None
"""
if not auth_server_url:
# Legacy path using the 2025-03-26 spec:
# link: https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization
parsed = urlparse(server_url)
return [f"{parsed.scheme}://{parsed.netloc}/.well-known/oauth-authorization-server"]
urls: list[str] = []
parsed = urlparse(auth_server_url)
base_url = f"{parsed.scheme}://{parsed.netloc}"
# RFC 8414: Path-aware OAuth discovery
if parsed.path and parsed.path != "/":
oauth_path = f"/.well-known/oauth-authorization-server{parsed.path.rstrip('/')}"
urls.append(urljoin(base_url, oauth_path))
# RFC 8414 section 5: Path-aware OIDC discovery
# See https://www.rfc-editor.org/rfc/rfc8414.html#section-5
oidc_path = f"/.well-known/openid-configuration{parsed.path.rstrip('/')}"
urls.append(urljoin(base_url, oidc_path))
# https://openid.net/specs/openid-connect-discovery-1_0.html
oidc_path = f"{parsed.path.rstrip('/')}/.well-known/openid-configuration"
urls.append(urljoin(base_url, oidc_path))
return urls
# OAuth root
urls.append(urljoin(base_url, "/.well-known/oauth-authorization-server"))
# OIDC 1.0 fallback (appends to full URL per OIDC spec)
# https://openid.net/specs/openid-connect-discovery-1_0.html
urls.append(urljoin(base_url, "/.well-known/openid-configuration"))
return urls
async def handle_protected_resource_response(
    response: Response,
) -> ProtectedResourceMetadata | None:
    """Handle protected resource metadata discovery response.

    Per SEP-985, supports fallback when discovery fails at one URL.

    Returns:
        ProtectedResourceMetadata if successfully discovered, None if we should try next URL
    """
    if response.status_code != 200:
        # Not found - the caller should try the next URL in the fallback chain.
        return None
    try:
        body = await response.aread()
        return ProtectedResourceMetadata.model_validate_json(body)
    except ValidationError:  # pragma: no cover
        # Invalid metadata - try next URL
        return None
async def handle_auth_metadata_response(response: Response) -> tuple[bool, OAuthMetadata | None]:
    """Parse an authorization server metadata discovery response.

    Returns:
        (keep_trying, metadata): metadata is set on a parseable 200 response;
        keep_trying is False only for non-4XX failures, which abort discovery.
    """
    status = response.status_code
    if status == 200:
        try:
            body = await response.aread()
            return True, OAuthMetadata.model_validate_json(body)
        except ValidationError:  # pragma: no cover
            return True, None
    if 400 <= status < 500:
        # 4XX: this location simply doesn't serve metadata; try the next one.
        return True, None
    # Non-4XX error (redirects, 5XX): stop trying.
    return False, None
def create_oauth_metadata_request(url: str) -> Request:
    """Build a GET request for *url* carrying the MCP protocol-version header."""
    return Request("GET", url, headers={MCP_PROTOCOL_VERSION: LATEST_PROTOCOL_VERSION})
def create_client_registration_request(
    auth_server_metadata: OAuthMetadata | None, client_metadata: OAuthClientMetadata, auth_base_url: str
) -> Request:
    """Build a client registration request."""
    # Prefer the endpoint advertised by the server; fall back to /register.
    if auth_server_metadata and auth_server_metadata.registration_endpoint:
        endpoint = str(auth_server_metadata.registration_endpoint)
    else:
        endpoint = urljoin(auth_base_url, "/register")
    payload = client_metadata.model_dump(by_alias=True, mode="json", exclude_none=True)
    return Request("POST", endpoint, json=payload, headers={"Content-Type": "application/json"})
async def handle_registration_response(response: Response) -> OAuthClientInformationFull:
    """Handle registration response.

    Args:
        response: HTTP response from the registration endpoint.

    Returns:
        Validated client information.

    Raises:
        OAuthRegistrationError: On a non-2xx status or an invalid response body.
    """
    if response.status_code not in (200, 201):
        # Read the body so response.text is populated for the error message.
        await response.aread()
        raise OAuthRegistrationError(f"Registration failed: {response.status_code} {response.text}")
    try:
        content = await response.aread()
        return OAuthClientInformationFull.model_validate_json(content)
    except ValidationError as e:  # pragma: no cover
        # Chain the pydantic error as the cause for easier debugging.
        raise OAuthRegistrationError(f"Invalid registration response: {e}") from e
def is_valid_client_metadata_url(url: str | None) -> bool:
"""Validate that a URL is suitable for use as a client_id (CIMD).
The URL must be HTTPS with a non-root pathname.
Args:
url: The URL to validate
Returns:
True if the URL is a valid HTTPS URL with a non-root pathname
"""
if not url:
return False
try:
parsed = urlparse(url)
return parsed.scheme == "https" and parsed.path not in ("", "/")
except Exception:
return False
def should_use_client_metadata_url(
    oauth_metadata: OAuthMetadata | None,
    client_metadata_url: str | None,
) -> bool:
    """Determine if URL-based client ID (CIMD) should be used instead of DCR.

    CIMD applies only when both conditions hold:
      1. The server advertises client_id_metadata_document_supported=True
      2. The client has a valid client_metadata_url configured

    Args:
        oauth_metadata: OAuth authorization server metadata
        client_metadata_url: URL-based client ID (already validated)

    Returns:
        True if CIMD should be used, False if DCR should be used
    """
    if not (client_metadata_url and oauth_metadata):
        return False
    return oauth_metadata.client_id_metadata_document_supported is True
def create_client_info_from_metadata_url(
    client_metadata_url: str, redirect_uris: list[AnyUrl] | None = None
) -> OAuthClientInformationFull:
    """Create client information using a URL-based client ID (CIMD).

    When using URL-based client IDs, the URL itself becomes the client_id
    and no client_secret is used (token_endpoint_auth_method="none").

    Args:
        client_metadata_url: The URL to use as the client_id
        redirect_uris: The redirect URIs from the client metadata (passed through for
            compatibility with OAuthClientInformationFull which inherits from OAuthClientMetadata)

    Returns:
        OAuthClientInformationFull with the URL as client_id
    """
    # "none" disables client-secret authentication at the token endpoint.
    return OAuthClientInformationFull(
        client_id=client_metadata_url,
        token_endpoint_auth_method="none",
        redirect_uris=redirect_uris,
    )
async def handle_token_response_scopes(
    response: Response,
) -> OAuthToken:
    """Parse and validate a token response.

    Parses token response JSON. Callers should check response.status_code before calling.

    Args:
        response: HTTP response from token endpoint (status already checked by caller)

    Returns:
        Validated OAuthToken model

    Raises:
        OAuthTokenError: If response JSON is invalid
    """
    try:
        content = await response.aread()
        return OAuthToken.model_validate_json(content)
    except ValidationError as e:  # pragma: no cover
        # Chain the pydantic error as the cause for easier debugging.
        raise OAuthTokenError(f"Invalid token response: {e}") from e
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/client/auth/utils.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/servers/everything-server/mcp_everything_server/server.py | #!/usr/bin/env python3
"""MCP Everything Server - Conformance Test Server
Server implementing all MCP features for conformance testing based on Conformance Server Specification.
"""
import asyncio
import base64
import json
import logging
import click
from mcp.server import ServerRequestContext
from mcp.server.mcpserver import Context, MCPServer
from mcp.server.mcpserver.prompts.base import UserMessage
from mcp.server.session import ServerSession
from mcp.server.streamable_http import EventCallback, EventMessage, EventStore
from mcp.types import (
AudioContent,
Completion,
CompletionArgument,
CompletionContext,
EmbeddedResource,
EmptyResult,
ImageContent,
JSONRPCMessage,
PromptReference,
ResourceTemplateReference,
SamplingMessage,
SetLevelRequestParams,
SubscribeRequestParams,
TextContent,
TextResourceContents,
UnsubscribeRequestParams,
)
from pydantic import BaseModel, Field
# Module-level logger for this conformance server.
logger = logging.getLogger(__name__)

# Type aliases for event store
StreamId = str  # identifies an SSE stream
EventId = str  # stringified value of a monotonically increasing counter
class InMemoryEventStore(EventStore):
    """Simple in-memory event store for SSE resumability testing."""

    def __init__(self) -> None:
        # Events kept in insertion order as (stream_id, event_id, message).
        self._events: list[tuple[StreamId, EventId, JSONRPCMessage | None]] = []
        self._event_id_counter = 0

    async def store_event(self, stream_id: StreamId, message: JSONRPCMessage | None) -> EventId:
        """Store an event and return its ID."""
        self._event_id_counter += 1
        new_id = str(self._event_id_counter)
        self._events.append((stream_id, new_id, message))
        return new_id

    async def replay_events_after(self, last_event_id: EventId, send_callback: EventCallback) -> StreamId | None:
        """Replay events after the specified ID."""
        # Locate the stream the last-seen event belonged to; unknown IDs
        # mean nothing can be replayed.
        target_stream_id = next(
            (sid for sid, eid, _ in self._events if eid == last_event_id),
            None,
        )
        if target_stream_id is None:
            return None

        threshold = int(last_event_id)
        for sid, eid, message in self._events:
            # Replay only newer events on the same stream, skipping priming
            # events (None message).
            if sid == target_stream_id and int(eid) > threshold and message is not None:
                await send_callback(EventMessage(message, eid))
        return target_stream_id
# Test data
# Base64 payloads returned by the image/audio content tools; mime types
# declared at the call sites are image/png and audio/wav respectively.
TEST_IMAGE_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
TEST_AUDIO_BASE64 = "UklGRiYAAABXQVZFZm10IBAAAAABAAEAQB8AAAB9AAACABAAZGF0YQIAAAA="

# Server state
resource_subscriptions: set[str] = set()
watched_resource_content = "Watched resource content"

# Create event store for SSE resumability (SEP-1699)
event_store = InMemoryEventStore()

mcp = MCPServer(
    name="mcp-conformance-test-server",
)
# Tools
@mcp.tool()
def test_simple_text() -> str:
    """Tests simple text content response."""
    return "This is a simple text response for testing."
@mcp.tool()
def test_image_content() -> list[ImageContent]:
    """Tests image content response."""
    # Returns the shared base64 PNG fixture with a matching mime type.
    return [ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png")]
@mcp.tool()
def test_audio_content() -> list[AudioContent]:
    """Tests audio content response."""
    # Returns the shared base64 WAV fixture with a matching mime type.
    return [AudioContent(type="audio", data=TEST_AUDIO_BASE64, mime_type="audio/wav")]
@mcp.tool()
def test_embedded_resource() -> list[EmbeddedResource]:
    """Tests embedded resource content response."""
    # Inline text resource wrapped in an EmbeddedResource content item.
    return [
        EmbeddedResource(
            type="resource",
            resource=TextResourceContents(
                uri="test://embedded-resource",
                mime_type="text/plain",
                text="This is an embedded resource content.",
            ),
        )
    ]
@mcp.tool()
def test_multiple_content_types() -> list[TextContent | ImageContent | EmbeddedResource]:
    """Tests response with multiple content types (text, image, resource)."""
    # One item of each supported content variant, in a fixed order.
    return [
        TextContent(type="text", text="Multiple content types test:"),
        ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png"),
        EmbeddedResource(
            type="resource",
            resource=TextResourceContents(
                uri="test://mixed-content-resource",
                mime_type="application/json",
                text='{"test": "data", "value": 123}',
            ),
        ),
    ]
@mcp.tool()
async def test_tool_with_logging(ctx: Context[ServerSession, None]) -> str:
    """Tests tool that emits log messages during execution."""
    # Short sleeps space the notifications out so clients can observe
    # interleaved log messages while the tool is still running.
    await ctx.info("Tool execution started")
    await asyncio.sleep(0.05)
    await ctx.info("Tool processing data")
    await asyncio.sleep(0.05)
    await ctx.info("Tool execution completed")
    return "Tool with logging executed successfully"
@mcp.tool()
async def test_tool_with_progress(ctx: Context[ServerSession, None]) -> str:
    """Tests tool that reports progress notifications (0%, 50%, 100%)."""
    await ctx.report_progress(progress=0, total=100, message="Completed step 0 of 100")
    await asyncio.sleep(0.05)
    await ctx.report_progress(progress=50, total=100, message="Completed step 50 of 100")
    await asyncio.sleep(0.05)
    await ctx.report_progress(progress=100, total=100, message="Completed step 100 of 100")
    # Return progress token as string
    # NOTE(review): assumes meta behaves like a mapping exposing .get with a
    # "progress_token" key — verify against the request metadata type.
    progress_token = (
        ctx.request_context.meta.get("progress_token") if ctx.request_context and ctx.request_context.meta else 0
    )
    return str(progress_token)
@mcp.tool()
async def test_sampling(prompt: str, ctx: Context[ServerSession, None]) -> str:
    """Tests server-initiated sampling (LLM completion request)."""
    try:
        # Request sampling from client
        result = await ctx.session.create_message(
            messages=[SamplingMessage(role="user", content=TextContent(type="text", text=prompt))],
            max_tokens=100,
        )
        # Since we're not passing tools param, result.content is single content
        if result.content.type == "text":
            model_response = result.content.text
        else:
            model_response = "No response"
        return f"LLM response: {model_response}"
    except Exception as e:
        # Clients without sampling support surface here as a readable string.
        return f"Sampling not supported or error: {str(e)}"
class UserResponse(BaseModel):
    """Elicitation schema with a single free-form response field."""

    response: str = Field(description="User's response")
@mcp.tool()
async def test_elicitation(message: str, ctx: Context[ServerSession, None]) -> str:
    """Tests server-initiated elicitation (user input request)."""
    try:
        # Request user input from client
        result = await ctx.elicit(message=message, schema=UserResponse)
        # Type-safe discriminated union narrowing using action field
        if result.action == "accept":
            content = result.data.model_dump_json()
        else:  # decline or cancel
            content = "{}"
        return f"User response: action={result.action}, content={content}"
    except Exception as e:
        # Clients without elicitation support surface here as a readable string.
        return f"Elicitation not supported or error: {str(e)}"
class SEP1034DefaultsSchema(BaseModel):
    """Schema for testing SEP-1034 elicitation with default values for all primitive types"""

    # One field per primitive type, each carrying a default: string, integer,
    # number, enum-constrained string, and boolean.
    name: str = Field(default="John Doe", description="User name")
    age: int = Field(default=30, description="User age")
    score: float = Field(default=95.5, description="User score")
    status: str = Field(
        default="active",
        description="User status",
        json_schema_extra={"enum": ["active", "inactive", "pending"]},
    )
    verified: bool = Field(default=True, description="Verification status")
@mcp.tool()
async def test_elicitation_sep1034_defaults(ctx: Context[ServerSession, None]) -> str:
    """Tests elicitation with default values for all primitive types (SEP-1034)"""
    try:
        # The schema carries a default for every primitive type (SEP-1034).
        outcome = await ctx.elicit(message="Please provide user information", schema=SEP1034DefaultsSchema)
        # Only an "accept" action carries user data; decline/cancel yield empty content.
        content = outcome.data.model_dump_json() if outcome.action == "accept" else "{}"
        return f"Elicitation result: action={outcome.action}, content={content}"
    except Exception as e:
        return f"Elicitation not supported or error: {str(e)}"
class EnumSchemasTestSchema(BaseModel):
    """Schema for testing enum schema variations (SEP-1330)"""

    # Plain single-select: bare {"enum": [...]} with no display titles.
    untitledSingle: str = Field(
        description="Simple enum without titles", json_schema_extra={"enum": ["active", "inactive", "pending"]}
    )
    # Single-select with display titles: oneOf of const/title pairs.
    titledSingle: str = Field(
        description="Enum with titled options (oneOf)",
        json_schema_extra={
            "oneOf": [
                {"const": "low", "title": "Low Priority"},
                {"const": "medium", "title": "Medium Priority"},
                {"const": "high", "title": "High Priority"},
            ]
        },
    )
    # Multi-select: array whose items carry the enum constraint.
    untitledMulti: list[str] = Field(
        description="Multi-select without titles",
        json_schema_extra={"items": {"type": "string", "enum": ["read", "write", "execute"]}},
    )
    # Multi-select with display titles: items.anyOf of const/title pairs.
    titledMulti: list[str] = Field(
        description="Multi-select with titled options",
        json_schema_extra={
            "items": {
                "anyOf": [
                    {"const": "feature", "title": "New Feature"},
                    {"const": "bug", "title": "Bug Fix"},
                    {"const": "docs", "title": "Documentation"},
                ]
            }
        },
    )
    # Pre-SEP-1330 style: a parallel enumNames array supplies the titles.
    legacyEnum: str = Field(
        description="Legacy enum with enumNames",
        json_schema_extra={
            "enum": ["small", "medium", "large"],
            "enumNames": ["Small Size", "Medium Size", "Large Size"],
        },
    )
@mcp.tool()
async def test_elicitation_sep1330_enums(ctx: Context[ServerSession, None]) -> str:
    """Tests elicitation with enum schema variations per SEP-1330"""
    try:
        outcome = await ctx.elicit(
            message="Please select values using different enum schema types", schema=EnumSchemasTestSchema
        )
        # Only an "accept" action carries user selections.
        content = outcome.data.model_dump_json() if outcome.action == "accept" else "{}"
        return f"Elicitation completed: action={outcome.action}, content={content}"
    except Exception as e:
        return f"Elicitation not supported or error: {str(e)}"
@mcp.tool()
def test_error_handling() -> str:
    """Tests error response handling"""
    # Always raises, so clients can verify that a tool exception is surfaced
    # as an error result rather than crashing the server.
    raise RuntimeError("This tool intentionally returns an error for testing")
@mcp.tool()
async def test_reconnection(ctx: Context[ServerSession, None]) -> str:
    """Tests SSE polling by closing stream mid-call (SEP-1699)"""
    await ctx.info("Before disconnect")
    # Force-close the SSE stream mid-call so the client has to reconnect;
    # the second info message should arrive on the re-established stream.
    await ctx.close_sse_stream()
    await asyncio.sleep(0.2)  # Wait for client to reconnect
    await ctx.info("After reconnect")
    return "Reconnection test completed"
# Resources
@mcp.resource("test://static-text")
def static_text_resource() -> str:
    """A static text resource for testing"""
    return "This is the content of the static text resource."


@mcp.resource("test://static-binary")
def static_binary_resource() -> bytes:
    """A static binary resource (image) for testing"""
    # Decode the module-level base64 PNG into raw bytes for binary transport.
    return base64.b64decode(TEST_IMAGE_BASE64)


@mcp.resource("test://template/{id}/data")
def template_resource(id: str) -> str:
    """A resource template with parameter substitution"""
    # Echo the substituted {id} back in a JSON payload so clients can verify templating.
    return json.dumps({"id": id, "templateTest": True, "data": f"Data for ID: {id}"})


@mcp.resource("test://watched-resource")
def watched_resource() -> str:
    """A resource that can be subscribed to for updates"""
    # Reads module-level state; presumably mutated elsewhere so subscribers
    # can observe updates — confirm against the rest of the file.
    return watched_resource_content
# Prompts
@mcp.prompt()
def test_simple_prompt() -> list[UserMessage]:
    """A simple prompt without arguments"""
    text = TextContent(type="text", text="This is a simple prompt for testing.")
    return [UserMessage(role="user", content=text)]
@mcp.prompt()
def test_prompt_with_arguments(arg1: str, arg2: str) -> list[UserMessage]:
    """A prompt with required arguments"""
    # Echo both arguments back so clients can verify substitution.
    body = f"Prompt with arguments: arg1='{arg1}', arg2='{arg2}'"
    return [UserMessage(role="user", content=TextContent(type="text", text=body))]
@mcp.prompt()
def test_prompt_with_embedded_resource(resourceUri: str) -> list[UserMessage]:
    """A prompt that includes an embedded resource"""
    # First message embeds an inline text resource at the caller-supplied URI;
    # the second is a plain-text instruction referring to it.
    return [
        UserMessage(
            role="user",
            content=EmbeddedResource(
                type="resource",
                resource=TextResourceContents(
                    uri=resourceUri,
                    mime_type="text/plain",
                    text="Embedded resource content for testing.",
                ),
            ),
        ),
        UserMessage(role="user", content=TextContent(type="text", text="Please process the embedded resource above.")),
    ]
@mcp.prompt()
def test_prompt_with_image() -> list[UserMessage]:
    """A prompt that includes image content"""
    # Pair the test PNG with a follow-up text instruction.
    image = ImageContent(type="image", data=TEST_IMAGE_BASE64, mime_type="image/png")
    caption = TextContent(type="text", text="Please analyze the image above.")
    return [
        UserMessage(role="user", content=image),
        UserMessage(role="user", content=caption),
    ]
# Custom request handlers
# TODO(felix): Add public APIs to MCPServer for subscribe_resource, unsubscribe_resource,
# and set_logging_level to avoid accessing protected _lowlevel_server attribute.
async def handle_set_logging_level(ctx: ServerRequestContext, params: SetLevelRequestParams) -> EmptyResult:
    """Handle logging level changes"""
    # NOTE(review): only logs the requested level — it does not reconfigure the
    # Python logger. Acceptable for a conformance server; confirm that is intended.
    logger.info(f"Log level set to: {params.level}")
    return EmptyResult()


async def handle_subscribe(ctx: ServerRequestContext, params: SubscribeRequestParams) -> EmptyResult:
    """Handle resource subscription"""
    # Track subscriptions by URI string so updates can be targeted later.
    resource_subscriptions.add(str(params.uri))
    logger.info(f"Subscribed to resource: {params.uri}")
    return EmptyResult()


async def handle_unsubscribe(ctx: ServerRequestContext, params: UnsubscribeRequestParams) -> EmptyResult:
    """Handle resource unsubscription"""
    # discard() (not remove()) makes unsubscribing an unknown URI a no-op.
    resource_subscriptions.discard(str(params.uri))
    logger.info(f"Unsubscribed from resource: {params.uri}")
    return EmptyResult()


# Wire the handlers into the low-level server; no public API exists yet (see TODO above).
mcp._lowlevel_server._add_request_handler("logging/setLevel", handle_set_logging_level)  # pyright: ignore[reportPrivateUsage]
mcp._lowlevel_server._add_request_handler("resources/subscribe", handle_subscribe)  # pyright: ignore[reportPrivateUsage]
mcp._lowlevel_server._add_request_handler("resources/unsubscribe", handle_unsubscribe)  # pyright: ignore[reportPrivateUsage]
@mcp.completion()
async def _handle_completion(
    ref: PromptReference | ResourceTemplateReference,
    argument: CompletionArgument,
    context: CompletionContext | None,
) -> Completion:
    """Handle completion requests"""
    # Minimal conformance implementation: advertise completion support but
    # suggest nothing. Real implementations would provide contextual suggestions.
    empty_completion = Completion(values=[], total=0, has_more=False)
    return empty_completion
# CLI
@click.command()
@click.option("--port", default=3001, help="Port to listen on for HTTP")
@click.option(
    "--log-level",
    default="INFO",
    help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
def main(port: int, log_level: str) -> int:
    """Run the MCP Everything Server."""
    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    logger.info(f"Starting MCP Everything Server on port {port}")
    logger.info(f"Endpoint will be: http://localhost:{port}/mcp")
    # Streamable HTTP with an event store; the short retry interval exercises
    # the client's SSE reconnect/polling paths (see test_reconnection).
    mcp.run(
        transport="streamable-http",
        port=port,
        event_store=event_store,
        retry_interval=100,  # 100ms retry interval for SSE polling
    )
    return 0


if __name__ == "__main__":
    main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/everything-server/mcp_everything_server/server.py",
"license": "MIT License",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/client/auth/extensions/client_credentials.py | """OAuth client credential extensions for MCP.
Provides OAuth providers for machine-to-machine authentication flows:
- ClientCredentialsOAuthProvider: For client_credentials with client_id + client_secret
- PrivateKeyJWTOAuthProvider: For client_credentials with private_key_jwt authentication
(typically using a pre-built JWT from workload identity federation)
- RFC7523OAuthClientProvider: For jwt-bearer grant (RFC 7523 Section 2.1)
"""
import time
import warnings
from collections.abc import Awaitable, Callable
from typing import Any, Literal
from uuid import uuid4
import httpx
import jwt
from pydantic import BaseModel, Field
from mcp.client.auth import OAuthClientProvider, OAuthFlowError, OAuthTokenError, TokenStorage
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata
class ClientCredentialsOAuthProvider(OAuthClientProvider):
    """OAuth provider for client_credentials grant with client_id + client_secret.

    This provider sets client_info directly, bypassing dynamic client registration.
    Use this when you already have client credentials (client_id and client_secret).

    Example:
    ```python
    provider = ClientCredentialsOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        client_secret="my-client-secret",
    )
    ```
    """

    def __init__(
        self,
        server_url: str,
        storage: TokenStorage,
        client_id: str,
        client_secret: str,
        token_endpoint_auth_method: Literal["client_secret_basic", "client_secret_post"] = "client_secret_basic",
        scopes: str | None = None,
    ) -> None:
        """Initialize client_credentials OAuth provider.

        Args:
            server_url: The MCP server URL.
            storage: Token storage implementation.
            client_id: The OAuth client ID.
            client_secret: The OAuth client secret.
            token_endpoint_auth_method: Authentication method for token endpoint.
                Either "client_secret_basic" (default) or "client_secret_post".
            scopes: Optional space-separated list of scopes to request.
        """
        # Build minimal client_metadata for the base class
        client_metadata = OAuthClientMetadata(
            redirect_uris=None,  # machine-to-machine flow: no browser redirect
            grant_types=["client_credentials"],
            token_endpoint_auth_method=token_endpoint_auth_method,
            scope=scopes,
        )
        # Positional args mirror the base signature: redirect_handler=None,
        # callback_handler=None, timeout=300.0.
        super().__init__(server_url, client_metadata, storage, None, None, 300.0)
        # Store client_info to be set during _initialize - no dynamic registration needed
        self._fixed_client_info = OAuthClientInformationFull(
            redirect_uris=None,
            client_id=client_id,
            client_secret=client_secret,
            grant_types=["client_credentials"],
            token_endpoint_auth_method=token_endpoint_auth_method,
            scope=scopes,
        )

    async def _initialize(self) -> None:
        """Load stored tokens and set pre-configured client_info."""
        self.context.current_tokens = await self.context.storage.get_tokens()
        self.context.client_info = self._fixed_client_info
        self._initialized = True

    async def _perform_authorization(self) -> httpx.Request:
        """Perform client_credentials authorization."""
        # No user interaction in this grant: go straight to the token endpoint.
        return await self._exchange_token_client_credentials()

    async def _exchange_token_client_credentials(self) -> httpx.Request:
        """Build token exchange request for client_credentials grant."""
        token_data: dict[str, Any] = {
            "grant_type": "client_credentials",
        }
        headers: dict[str, str] = {"Content-Type": "application/x-www-form-urlencoded"}
        # Use standard auth methods (client_secret_basic, client_secret_post, none)
        token_data, headers = self.context.prepare_token_auth(token_data, headers)
        # Attach the resource parameter only when the negotiated protocol version calls for it.
        if self.context.should_include_resource_param(self.context.protocol_version):
            token_data["resource"] = self.context.get_resource_url()
        if self.context.client_metadata.scope:
            token_data["scope"] = self.context.client_metadata.scope
        token_url = self._get_token_endpoint()
        return httpx.Request("POST", token_url, data=token_data, headers=headers)
def static_assertion_provider(token: str) -> Callable[[str], Awaitable[str]]:
    """Create an assertion provider that returns a static JWT token.

    Use this when you have a pre-built JWT (e.g., from workload identity federation)
    that doesn't need the audience parameter.

    Example:
    ```python
    provider = PrivateKeyJWTOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        assertion_provider=static_assertion_provider(my_prebuilt_jwt),
    )
    ```

    Args:
        token: The pre-built JWT assertion string.

    Returns:
        An async callback suitable for use as an assertion_provider.
    """

    async def _return_static(audience: str) -> str:
        # The audience is intentionally ignored: the JWT was built out-of-band.
        return token

    return _return_static
class SignedJWTParameters(BaseModel):
    """Parameters for creating SDK-signed JWT assertions.

    Use `create_assertion_provider()` to create an assertion provider callback
    for use with `PrivateKeyJWTOAuthProvider`.

    Example:
    ```python
    jwt_params = SignedJWTParameters(
        issuer="my-client-id",
        subject="my-client-id",
        signing_key=private_key_pem,
    )
    provider = PrivateKeyJWTOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        assertion_provider=jwt_params.create_assertion_provider(),
    )
    ```
    """

    issuer: str = Field(description="Issuer for JWT assertions (typically client_id).")
    subject: str = Field(description="Subject identifier for JWT assertions (typically client_id).")
    signing_key: str = Field(description="Private key for JWT signing (PEM format).")
    signing_algorithm: str = Field(default="RS256", description="Algorithm for signing JWT assertions.")
    lifetime_seconds: int = Field(default=300, description="Lifetime of generated JWT in seconds.")
    additional_claims: dict[str, Any] | None = Field(default=None, description="Additional claims.")

    def create_assertion_provider(self) -> Callable[[str], Awaitable[str]]:
        """Create an assertion provider callback for use with PrivateKeyJWTOAuthProvider.

        Returns:
            An async callback that takes the audience (authorization server issuer URL)
            and returns a signed JWT assertion.
        """

        async def provider(audience: str) -> str:
            # Issue a short-lived assertion; iat/exp are computed at call time so
            # each token request gets a fresh JWT.
            now = int(time.time())
            claims: dict[str, Any] = {
                "iss": self.issuer,
                "sub": self.subject,
                "aud": audience,
                "exp": now + self.lifetime_seconds,
                "iat": now,
                "jti": str(uuid4()),  # unique ID lets the server detect replays
            }
            # Caller-supplied claims may override the defaults above.
            if self.additional_claims:
                claims.update(self.additional_claims)
            return jwt.encode(claims, self.signing_key, algorithm=self.signing_algorithm)

        return provider
class PrivateKeyJWTOAuthProvider(OAuthClientProvider):
    """OAuth provider for client_credentials grant with private_key_jwt authentication.

    Uses RFC 7523 Section 2.2 for client authentication via JWT assertion.

    The JWT assertion's audience MUST be the authorization server's issuer identifier
    (per RFC 7523bis security updates). The `assertion_provider` callback receives
    this audience value and must return a JWT with that audience.

    **Option 1: Pre-built JWT via Workload Identity Federation**

    In production scenarios, the JWT assertion is typically obtained from a workload
    identity provider (e.g., GCP, AWS IAM, Azure AD):

    ```python
    async def get_workload_identity_token(audience: str) -> str:
        # Fetch JWT from your identity provider
        # The JWT's audience must match the provided audience parameter
        return await fetch_token_from_identity_provider(audience=audience)

    provider = PrivateKeyJWTOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        assertion_provider=get_workload_identity_token,
    )
    ```

    **Option 2: Static pre-built JWT**

    If you have a static JWT that doesn't need the audience parameter:

    ```python
    provider = PrivateKeyJWTOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        assertion_provider=static_assertion_provider(my_prebuilt_jwt),
    )
    ```

    **Option 3: SDK-signed JWT (for testing/simple setups)**

    For testing or simple deployments, use `SignedJWTParameters.create_assertion_provider()`:

    ```python
    jwt_params = SignedJWTParameters(
        issuer="my-client-id",
        subject="my-client-id",
        signing_key=private_key_pem,
    )
    provider = PrivateKeyJWTOAuthProvider(
        server_url="https://api.example.com",
        storage=my_token_storage,
        client_id="my-client-id",
        assertion_provider=jwt_params.create_assertion_provider(),
    )
    ```
    """

    def __init__(
        self,
        server_url: str,
        storage: TokenStorage,
        client_id: str,
        assertion_provider: Callable[[str], Awaitable[str]],
        scopes: str | None = None,
    ) -> None:
        """Initialize private_key_jwt OAuth provider.

        Args:
            server_url: The MCP server URL.
            storage: Token storage implementation.
            client_id: The OAuth client ID.
            assertion_provider: Async callback that takes the audience (authorization
                server's issuer identifier) and returns a JWT assertion. Use
                `SignedJWTParameters.create_assertion_provider()` for SDK-signed JWTs,
                `static_assertion_provider()` for pre-built JWTs, or provide your own
                callback for workload identity federation.
            scopes: Optional space-separated list of scopes to request.
        """
        # Build minimal client_metadata for the base class
        client_metadata = OAuthClientMetadata(
            redirect_uris=None,  # machine-to-machine flow: no browser redirect
            grant_types=["client_credentials"],
            token_endpoint_auth_method="private_key_jwt",
            scope=scopes,
        )
        # Positional args mirror the base signature: redirect_handler=None,
        # callback_handler=None, timeout=300.0.
        super().__init__(server_url, client_metadata, storage, None, None, 300.0)
        self._assertion_provider = assertion_provider
        # Store client_info to be set during _initialize - no dynamic registration needed
        self._fixed_client_info = OAuthClientInformationFull(
            redirect_uris=None,
            client_id=client_id,
            grant_types=["client_credentials"],
            token_endpoint_auth_method="private_key_jwt",
            scope=scopes,
        )

    async def _initialize(self) -> None:
        """Load stored tokens and set pre-configured client_info."""
        self.context.current_tokens = await self.context.storage.get_tokens()
        self.context.client_info = self._fixed_client_info
        self._initialized = True

    async def _perform_authorization(self) -> httpx.Request:
        """Perform client_credentials authorization with private_key_jwt."""
        return await self._exchange_token_client_credentials()

    async def _add_client_authentication_jwt(self, *, token_data: dict[str, Any]) -> None:
        """Add JWT assertion for client authentication to token endpoint parameters.

        Mutates ``token_data`` in place; raises OAuthFlowError if OAuth metadata
        (needed for the issuer/audience) has not been discovered yet.
        """
        if not self.context.oauth_metadata:
            raise OAuthFlowError("Missing OAuth metadata for private_key_jwt flow")  # pragma: no cover
        # Audience MUST be the issuer identifier of the authorization server
        # https://datatracker.ietf.org/doc/html/draft-ietf-oauth-rfc7523bis-01
        audience = str(self.context.oauth_metadata.issuer)
        assertion = await self._assertion_provider(audience)
        # RFC 7523 Section 2.2: client authentication via JWT
        token_data["client_assertion"] = assertion
        token_data["client_assertion_type"] = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"

    async def _exchange_token_client_credentials(self) -> httpx.Request:
        """Build token exchange request for client_credentials grant with private_key_jwt."""
        token_data: dict[str, Any] = {
            "grant_type": "client_credentials",
        }
        headers: dict[str, str] = {"Content-Type": "application/x-www-form-urlencoded"}
        # Add JWT client authentication (RFC 7523 Section 2.2)
        await self._add_client_authentication_jwt(token_data=token_data)
        # Attach the resource parameter only when the negotiated protocol version calls for it.
        if self.context.should_include_resource_param(self.context.protocol_version):
            token_data["resource"] = self.context.get_resource_url()
        if self.context.client_metadata.scope:
            token_data["scope"] = self.context.client_metadata.scope
        token_url = self._get_token_endpoint()
        return httpx.Request("POST", token_url, data=token_data, headers=headers)
class JWTParameters(BaseModel):
    """JWT parameters.

    Used by the deprecated RFC7523OAuthClientProvider. Either supply a pre-built
    `assertion`, or supply issuer/subject/signing key material so `to_assertion`
    can generate one.
    """

    assertion: str | None = Field(
        default=None,
        description="JWT assertion for JWT authentication. "
        "Will be used instead of generating a new assertion if provided.",
    )
    issuer: str | None = Field(default=None, description="Issuer for JWT assertions.")
    subject: str | None = Field(default=None, description="Subject identifier for JWT assertions.")
    audience: str | None = Field(default=None, description="Audience for JWT assertions.")
    claims: dict[str, Any] | None = Field(default=None, description="Additional claims for JWT assertions.")
    jwt_signing_algorithm: str | None = Field(default="RS256", description="Algorithm for signing JWT assertions.")
    jwt_signing_key: str | None = Field(default=None, description="Private key for JWT signing.")
    jwt_lifetime_seconds: int = Field(default=300, description="Lifetime of generated JWT in seconds.")

    def to_assertion(self, with_audience_fallback: str | None = None) -> str:
        """Return the JWT assertion, generating and signing one if none was supplied.

        Args:
            with_audience_fallback: Audience to use when `self.audience` is unset.

        Raises:
            OAuthFlowError: If generation is required but signing key, issuer,
                subject, or audience is missing.
        """
        if self.assertion is not None:
            # Prebuilt JWT (e.g. acquired out-of-band)
            assertion = self.assertion
        else:
            # Validate the minimum material needed to mint a JWT ourselves.
            if not self.jwt_signing_key:
                raise OAuthFlowError("Missing signing key for JWT bearer grant")  # pragma: no cover
            if not self.issuer:
                raise OAuthFlowError("Missing issuer for JWT bearer grant")  # pragma: no cover
            if not self.subject:
                raise OAuthFlowError("Missing subject for JWT bearer grant")  # pragma: no cover
            audience = self.audience if self.audience else with_audience_fallback
            if not audience:
                raise OAuthFlowError("Missing audience for JWT bearer grant")  # pragma: no cover
            now = int(time.time())
            claims: dict[str, Any] = {
                "iss": self.issuer,
                "sub": self.subject,
                "aud": audience,
                "exp": now + self.jwt_lifetime_seconds,
                "iat": now,
                "jti": str(uuid4()),  # unique ID lets the server detect replays
            }
            # Caller-supplied claims may override the defaults above.
            claims.update(self.claims or {})
            assertion = jwt.encode(
                claims,
                self.jwt_signing_key,
                algorithm=self.jwt_signing_algorithm or "RS256",
            )
        return assertion
class RFC7523OAuthClientProvider(OAuthClientProvider):
    """OAuth client provider for RFC 7523 jwt-bearer grant.

    .. deprecated::
        Use :class:`ClientCredentialsOAuthProvider` for client_credentials with
        client_id + client_secret, or :class:`PrivateKeyJWTOAuthProvider` for
        client_credentials with private_key_jwt authentication instead.

    This provider supports the jwt-bearer authorization grant (RFC 7523 Section 2.1)
    where the JWT itself is the authorization grant.
    """

    def __init__(
        self,
        server_url: str,
        client_metadata: OAuthClientMetadata,
        storage: TokenStorage,
        redirect_handler: Callable[[str], Awaitable[None]] | None = None,
        callback_handler: Callable[[], Awaitable[tuple[str, str | None]]] | None = None,
        timeout: float = 300.0,
        jwt_parameters: JWTParameters | None = None,
    ) -> None:
        # Warn at construction time so callers migrate to the replacement classes.
        warnings.warn(
            "RFC7523OAuthClientProvider is deprecated. Use ClientCredentialsOAuthProvider "
            "or PrivateKeyJWTOAuthProvider instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(server_url, client_metadata, storage, redirect_handler, callback_handler, timeout)
        self.jwt_parameters = jwt_parameters

    async def _exchange_token_authorization_code(
        self, auth_code: str, code_verifier: str, *, token_data: dict[str, Any] | None = None
    ) -> httpx.Request:  # pragma: no cover
        """Build token exchange request for authorization_code flow."""
        token_data = token_data or {}
        # _add_client_authentication_jwt is synchronous here (unlike the async
        # variant on PrivateKeyJWTOAuthProvider), so no await is needed.
        if self.context.client_metadata.token_endpoint_auth_method == "private_key_jwt":
            self._add_client_authentication_jwt(token_data=token_data)
        return await super()._exchange_token_authorization_code(auth_code, code_verifier, token_data=token_data)

    async def _perform_authorization(self) -> httpx.Request:  # pragma: no cover
        """Perform the authorization flow."""
        # jwt-bearer grant skips user interaction entirely; anything else
        # falls back to the base provider's flow.
        if "urn:ietf:params:oauth:grant-type:jwt-bearer" in self.context.client_metadata.grant_types:
            token_request = await self._exchange_token_jwt_bearer()
            return token_request
        else:
            return await super()._perform_authorization()

    def _add_client_authentication_jwt(self, *, token_data: dict[str, Any]):  # pragma: no cover
        """Add JWT assertion for client authentication to token endpoint parameters.

        Mutates ``token_data`` in place.
        """
        if not self.jwt_parameters:
            raise OAuthTokenError("Missing JWT parameters for private_key_jwt flow")
        if not self.context.oauth_metadata:
            raise OAuthTokenError("Missing OAuth metadata for private_key_jwt flow")
        # We need to set the audience to the issuer identifier of the authorization server
        # https://datatracker.ietf.org/doc/html/draft-ietf-oauth-rfc7523bis-01#name-updates-to-rfc-7523
        issuer = str(self.context.oauth_metadata.issuer)
        assertion = self.jwt_parameters.to_assertion(with_audience_fallback=issuer)
        # When using private_key_jwt, in a client_credentials flow, we use RFC 7523 Section 2.2
        token_data["client_assertion"] = assertion
        token_data["client_assertion_type"] = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
        # We need to set the audience to the resource server, the audience is different from the one in claims
        # it represents the resource server that will validate the token
        token_data["audience"] = self.context.get_resource_url()

    async def _exchange_token_jwt_bearer(self) -> httpx.Request:
        """Build token exchange request for JWT bearer grant."""
        if not self.context.client_info:
            raise OAuthFlowError("Missing client info")  # pragma: no cover
        if not self.jwt_parameters:
            raise OAuthFlowError("Missing JWT parameters")  # pragma: no cover
        if not self.context.oauth_metadata:
            raise OAuthTokenError("Missing OAuth metadata")  # pragma: no cover
        # We need to set the audience to the issuer identifier of the authorization server
        # https://datatracker.ietf.org/doc/html/draft-ietf-oauth-rfc7523bis-01#name-updates-to-rfc-7523
        issuer = str(self.context.oauth_metadata.issuer)
        assertion = self.jwt_parameters.to_assertion(with_audience_fallback=issuer)
        # RFC 7523 Section 2.1: the JWT itself is the authorization grant.
        token_data = {
            "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
            "assertion": assertion,
        }
        if self.context.should_include_resource_param(self.context.protocol_version):  # pragma: no branch
            token_data["resource"] = self.context.get_resource_url()
        if self.context.client_metadata.scope:  # pragma: no branch
            token_data["scope"] = self.context.client_metadata.scope
        token_url = self._get_token_endpoint()
        return httpx.Request(
            "POST", token_url, data=token_data, headers={"Content-Type": "application/x-www-form-urlencoded"}
        )
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/client/auth/extensions/client_credentials.py",
"license": "MIT License",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/client/auth/extensions/test_client_credentials.py | import urllib.parse
import warnings
import jwt
import pytest
from pydantic import AnyHttpUrl, AnyUrl
from mcp.client.auth.extensions.client_credentials import (
ClientCredentialsOAuthProvider,
JWTParameters,
PrivateKeyJWTOAuthProvider,
RFC7523OAuthClientProvider,
SignedJWTParameters,
static_assertion_provider,
)
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthMetadata, OAuthToken
class MockTokenStorage:
    """Mock token storage for testing.

    Minimal in-memory implementation of the TokenStorage protocol: values are
    held on the instance and never persisted.
    """

    def __init__(self):
        # Nothing stored until a test explicitly sets it.
        self._tokens: OAuthToken | None = None
        self._client_info: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        return self._tokens

    async def set_tokens(self, tokens: OAuthToken) -> None:  # pragma: no cover
        self._tokens = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:  # pragma: no cover
        return self._client_info

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:  # pragma: no cover
        self._client_info = client_info
@pytest.fixture
def mock_storage():
    # Fresh storage per test: no tokens or client info carry over between tests.
    return MockTokenStorage()


@pytest.fixture
def client_metadata():
    # Representative interactive-client metadata (redirect URI + scopes).
    return OAuthClientMetadata(
        client_name="Test Client",
        client_uri=AnyHttpUrl("https://example.com"),
        redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        scope="read write",
    )
@pytest.fixture
def rfc7523_oauth_provider(client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage):
    async def redirect_handler(url: str) -> None:  # pragma: no cover
        """Mock redirect handler."""
        pass

    async def callback_handler() -> tuple[str, str | None]:  # pragma: no cover
        """Mock callback handler."""
        return "test_auth_code", "test_state"

    # RFC7523OAuthClientProvider warns on construction (deprecated); suppress the
    # warning so tests that deliberately exercise it stay quiet.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        return RFC7523OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
class TestOAuthFlowClientCredentials:
    """Test OAuth flow behavior for client credentials flows."""

    @pytest.mark.anyio
    async def test_token_exchange_request_jwt_predefined(self, rfc7523_oauth_provider: RFC7523OAuthClientProvider):
        """Test token exchange request building with a predefined JWT assertion."""
        # Set up required context
        rfc7523_oauth_provider.context.client_info = OAuthClientInformationFull(
            grant_types=["urn:ietf:params:oauth:grant-type:jwt-bearer"],
            token_endpoint_auth_method="private_key_jwt",
            redirect_uris=None,
            scope="read write",
        )
        rfc7523_oauth_provider.context.oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://api.example.com"),
            authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://api.example.com/token"),
            registration_endpoint=AnyHttpUrl("https://api.example.com/register"),
        )
        rfc7523_oauth_provider.context.client_metadata = rfc7523_oauth_provider.context.client_info
        rfc7523_oauth_provider.context.protocol_version = "2025-06-18"
        rfc7523_oauth_provider.jwt_parameters = JWTParameters(
            # https://www.jwt.io
            assertion="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.KMUFsIDTnFmyG3nMiGM6H9FNFUROf3wh7SmqJp-QV30"
        )
        request = await rfc7523_oauth_provider._exchange_token_jwt_bearer()
        assert request.method == "POST"
        assert str(request.url) == "https://api.example.com/token"
        assert request.headers["Content-Type"] == "application/x-www-form-urlencoded"
        # Check form data
        content = urllib.parse.unquote_plus(request.content.decode())
        assert "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer" in content
        assert "scope=read write" in content
        assert "resource=https://api.example.com/v1/mcp" in content
        # The prebuilt assertion must be passed through verbatim.
        assert (
            "assertion=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.KMUFsIDTnFmyG3nMiGM6H9FNFUROf3wh7SmqJp-QV30"
            in content
        )

    @pytest.mark.anyio
    async def test_token_exchange_request_jwt(self, rfc7523_oauth_provider: RFC7523OAuthClientProvider):
        """Test token exchange request building with a generated JWT assertion."""
        # Set up required context
        rfc7523_oauth_provider.context.client_info = OAuthClientInformationFull(
            grant_types=["urn:ietf:params:oauth:grant-type:jwt-bearer"],
            token_endpoint_auth_method="private_key_jwt",
            redirect_uris=None,
            scope="read write",
        )
        rfc7523_oauth_provider.context.oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://api.example.com"),
            authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://api.example.com/token"),
            registration_endpoint=AnyHttpUrl("https://api.example.com/register"),
        )
        rfc7523_oauth_provider.context.client_metadata = rfc7523_oauth_provider.context.client_info
        rfc7523_oauth_provider.context.protocol_version = "2025-06-18"
        # No prebuilt assertion: to_assertion must sign one from this material.
        rfc7523_oauth_provider.jwt_parameters = JWTParameters(
            issuer="foo",
            subject="1234567890",
            claims={
                "name": "John Doe",
                "admin": True,
                "iat": 1516239022,
            },
            jwt_signing_algorithm="HS256",
            jwt_signing_key="a-string-secret-at-least-256-bits-long",
            jwt_lifetime_seconds=300,
        )
        request = await rfc7523_oauth_provider._exchange_token_jwt_bearer()
        assert request.method == "POST"
        assert str(request.url) == "https://api.example.com/token"
        assert request.headers["Content-Type"] == "application/x-www-form-urlencoded"
        # Check form data
        content = urllib.parse.unquote_plus(request.content.decode()).split("&")
        assert "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer" in content
        assert "scope=read write" in content
        assert "resource=https://api.example.com/v1/mcp" in content
        # Check assertion
        # Decode the generated JWT and verify the claims round-trip; the audience
        # falls back to the authorization server issuer.
        assertion = next(param for param in content if param.startswith("assertion="))[len("assertion=") :]
        claims = jwt.decode(
            assertion,
            key="a-string-secret-at-least-256-bits-long",
            algorithms=["HS256"],
            audience="https://api.example.com/",
            subject="1234567890",
            issuer="foo",
            verify=True,
        )
        assert claims["name"] == "John Doe"
        assert claims["admin"]
        assert claims["iat"] == 1516239022
class TestClientCredentialsOAuthProvider:
"""Test ClientCredentialsOAuthProvider."""
@pytest.mark.anyio
async def test_init_sets_client_info(self, mock_storage: MockTokenStorage):
    """Test that _initialize sets client_info."""
    provider = ClientCredentialsOAuthProvider(
        server_url="https://api.example.com",
        storage=mock_storage,
        client_id="test-client-id",
        client_secret="test-client-secret",
    )
    # client_info is set during _initialize
    await provider._initialize()
    assert provider.context.client_info is not None
    assert provider.context.client_info.client_id == "test-client-id"
    assert provider.context.client_info.client_secret == "test-client-secret"
    assert provider.context.client_info.grant_types == ["client_credentials"]
    # client_secret_basic is the documented default auth method.
    assert provider.context.client_info.token_endpoint_auth_method == "client_secret_basic"

@pytest.mark.anyio
async def test_init_with_scopes(self, mock_storage: MockTokenStorage):
    """Test that constructor accepts scopes."""
    provider = ClientCredentialsOAuthProvider(
        server_url="https://api.example.com",
        storage=mock_storage,
        client_id="test-client-id",
        client_secret="test-client-secret",
        scopes="read write",
    )
    await provider._initialize()
    assert provider.context.client_info is not None
    # Scopes propagate verbatim into the fixed client_info.
    assert provider.context.client_info.scope == "read write"

@pytest.mark.anyio
async def test_init_with_client_secret_post(self, mock_storage: MockTokenStorage):
    """Test that constructor accepts client_secret_post auth method."""
    provider = ClientCredentialsOAuthProvider(
        server_url="https://api.example.com",
        storage=mock_storage,
        client_id="test-client-id",
        client_secret="test-client-secret",
        token_endpoint_auth_method="client_secret_post",
    )
    await provider._initialize()
    assert provider.context.client_info is not None
    assert provider.context.client_info.token_endpoint_auth_method == "client_secret_post"

@pytest.mark.anyio
async def test_exchange_token_client_credentials(self, mock_storage: MockTokenStorage):
    """Test token exchange request building."""
    provider = ClientCredentialsOAuthProvider(
        server_url="https://api.example.com/v1/mcp",
        storage=mock_storage,
        client_id="test-client-id",
        client_secret="test-client-secret",
        scopes="read write",
    )
    # Minimal discovered metadata so _get_token_endpoint resolves.
    provider.context.oauth_metadata = OAuthMetadata(
        issuer=AnyHttpUrl("https://api.example.com"),
        authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
        token_endpoint=AnyHttpUrl("https://api.example.com/token"),
    )
    provider.context.protocol_version = "2025-06-18"
    request = await provider._perform_authorization()
    assert request.method == "POST"
    assert str(request.url) == "https://api.example.com/token"
    content = urllib.parse.unquote_plus(request.content.decode())
    assert "grant_type=client_credentials" in content
    assert "scope=read write" in content
    assert "resource=https://api.example.com/v1/mcp" in content
@pytest.mark.anyio
async def test_exchange_token_client_secret_post_includes_client_id(self, mock_storage: MockTokenStorage):
"""Test that client_secret_post includes both client_id and client_secret in body (RFC 6749 Β§2.3.1)."""
provider = ClientCredentialsOAuthProvider(
server_url="https://api.example.com/v1/mcp",
storage=mock_storage,
client_id="test-client-id",
client_secret="test-client-secret",
token_endpoint_auth_method="client_secret_post",
scopes="read write",
)
await provider._initialize()
provider.context.oauth_metadata = OAuthMetadata(
issuer=AnyHttpUrl("https://api.example.com"),
authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
token_endpoint=AnyHttpUrl("https://api.example.com/token"),
)
provider.context.protocol_version = "2025-06-18"
request = await provider._perform_authorization()
content = urllib.parse.unquote_plus(request.content.decode())
assert "grant_type=client_credentials" in content
assert "client_id=test-client-id" in content
assert "client_secret=test-client-secret" in content
# Should NOT have Basic auth header
assert "Authorization" not in request.headers
@pytest.mark.anyio
async def test_exchange_token_client_secret_post_without_client_id(self, mock_storage: MockTokenStorage):
"""Test client_secret_post skips body credentials when client_id is None."""
provider = ClientCredentialsOAuthProvider(
server_url="https://api.example.com/v1/mcp",
storage=mock_storage,
client_id="placeholder",
client_secret="test-client-secret",
token_endpoint_auth_method="client_secret_post",
scopes="read write",
)
await provider._initialize()
provider.context.oauth_metadata = OAuthMetadata(
issuer=AnyHttpUrl("https://api.example.com"),
authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
token_endpoint=AnyHttpUrl("https://api.example.com/token"),
)
provider.context.protocol_version = "2025-06-18"
# Override client_info to have client_id=None (edge case)
provider.context.client_info = OAuthClientInformationFull(
redirect_uris=None,
client_id=None,
client_secret="test-client-secret",
grant_types=["client_credentials"],
token_endpoint_auth_method="client_secret_post",
scope="read write",
)
request = await provider._perform_authorization()
content = urllib.parse.unquote_plus(request.content.decode())
assert "grant_type=client_credentials" in content
# Neither client_id nor client_secret should be in body since client_id is None
# (RFC 6749 Β§2.3.1 requires both for client_secret_post)
assert "client_id=" not in content
assert "client_secret=" not in content
assert "Authorization" not in request.headers
@pytest.mark.anyio
async def test_exchange_token_without_scopes(self, mock_storage: MockTokenStorage):
"""Test token exchange without scopes."""
provider = ClientCredentialsOAuthProvider(
server_url="https://api.example.com/v1/mcp",
storage=mock_storage,
client_id="test-client-id",
client_secret="test-client-secret",
)
provider.context.oauth_metadata = OAuthMetadata(
issuer=AnyHttpUrl("https://api.example.com"),
authorization_endpoint=AnyHttpUrl("https://api.example.com/authorize"),
token_endpoint=AnyHttpUrl("https://api.example.com/token"),
)
provider.context.protocol_version = "2024-11-05" # Old version - no resource param
request = await provider._perform_authorization()
content = urllib.parse.unquote_plus(request.content.decode())
assert "grant_type=client_credentials" in content
assert "scope=" not in content
assert "resource=" not in content
class TestPrivateKeyJWTOAuthProvider:
"""Test PrivateKeyJWTOAuthProvider."""
@pytest.mark.anyio
async def test_init_sets_client_info(self, mock_storage: MockTokenStorage):
"""Test that _initialize sets client_info."""
async def mock_assertion_provider(audience: str) -> str: # pragma: no cover
return "mock-jwt"
provider = PrivateKeyJWTOAuthProvider(
server_url="https://api.example.com",
storage=mock_storage,
client_id="test-client-id",
assertion_provider=mock_assertion_provider,
)
# client_info is set during _initialize
await provider._initialize()
assert provider.context.client_info is not None
assert provider.context.client_info.client_id == "test-client-id"
assert provider.context.client_info.grant_types == ["client_credentials"]
assert provider.context.client_info.token_endpoint_auth_method == "private_key_jwt"
@pytest.mark.anyio
async def test_exchange_token_client_credentials(self, mock_storage: MockTokenStorage):
"""Test token exchange request building with assertion provider."""
async def mock_assertion_provider(audience: str) -> str:
return f"jwt-for-{audience}"
provider = PrivateKeyJWTOAuthProvider(
server_url="https://api.example.com/v1/mcp",
storage=mock_storage,
client_id="test-client-id",
assertion_provider=mock_assertion_provider,
scopes="read write",
)
provider.context.oauth_metadata = OAuthMetadata(
issuer=AnyHttpUrl("https://auth.example.com"),
authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
)
provider.context.protocol_version = "2025-06-18"
request = await provider._perform_authorization()
assert request.method == "POST"
assert str(request.url) == "https://auth.example.com/token"
content = urllib.parse.unquote_plus(request.content.decode())
assert "grant_type=client_credentials" in content
assert "client_assertion=jwt-for-https://auth.example.com/" in content
assert "client_assertion_type=urn:ietf:params:oauth:client-assertion-type:jwt-bearer" in content
assert "scope=read write" in content
@pytest.mark.anyio
async def test_exchange_token_without_scopes(self, mock_storage: MockTokenStorage):
"""Test token exchange without scopes."""
async def mock_assertion_provider(audience: str) -> str:
return f"jwt-for-{audience}"
provider = PrivateKeyJWTOAuthProvider(
server_url="https://api.example.com/v1/mcp",
storage=mock_storage,
client_id="test-client-id",
assertion_provider=mock_assertion_provider,
)
provider.context.oauth_metadata = OAuthMetadata(
issuer=AnyHttpUrl("https://auth.example.com"),
authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
)
provider.context.protocol_version = "2024-11-05" # Old version - no resource param
request = await provider._perform_authorization()
content = urllib.parse.unquote_plus(request.content.decode())
assert "grant_type=client_credentials" in content
assert "scope=" not in content
assert "resource=" not in content
class TestSignedJWTParameters:
"""Test SignedJWTParameters."""
@pytest.mark.anyio
async def test_create_assertion_provider(self):
"""Test that create_assertion_provider creates valid JWTs."""
params = SignedJWTParameters(
issuer="test-issuer",
subject="test-subject",
signing_key="a-string-secret-at-least-256-bits-long",
signing_algorithm="HS256",
lifetime_seconds=300,
)
provider = params.create_assertion_provider()
assertion = await provider("https://auth.example.com")
claims = jwt.decode(
assertion,
key="a-string-secret-at-least-256-bits-long",
algorithms=["HS256"],
audience="https://auth.example.com",
)
assert claims["iss"] == "test-issuer"
assert claims["sub"] == "test-subject"
assert claims["aud"] == "https://auth.example.com"
assert "exp" in claims
assert "iat" in claims
assert "jti" in claims
@pytest.mark.anyio
async def test_create_assertion_provider_with_additional_claims(self):
"""Test that additional_claims are included in the JWT."""
params = SignedJWTParameters(
issuer="test-issuer",
subject="test-subject",
signing_key="a-string-secret-at-least-256-bits-long",
signing_algorithm="HS256",
additional_claims={"custom": "value"},
)
provider = params.create_assertion_provider()
assertion = await provider("https://auth.example.com")
claims = jwt.decode(
assertion,
key="a-string-secret-at-least-256-bits-long",
algorithms=["HS256"],
audience="https://auth.example.com",
)
assert claims["custom"] == "value"
class TestStaticAssertionProvider:
"""Test static_assertion_provider helper."""
@pytest.mark.anyio
async def test_returns_static_token(self):
"""Test that static_assertion_provider returns the same token regardless of audience."""
token = "my-static-jwt-token"
provider = static_assertion_provider(token)
result1 = await provider("https://auth1.example.com")
result2 = await provider("https://auth2.example.com")
assert result1 == token
assert result2 == token
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/auth/extensions/test_client_credentials.py",
"license": "MIT License",
"lines": 415,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/test_helpers.py | """Common test utilities for MCP server tests."""
import socket
import time
def wait_for_server(port: int, timeout: float = 20.0) -> None:
"""Wait for server to be ready to accept connections.
Polls the server port until it accepts connections or timeout is reached.
This eliminates race conditions without arbitrary sleeps.
Args:
port: The port number to check
timeout: Maximum time to wait in seconds (default 5.0)
Raises:
TimeoutError: If server doesn't start within the timeout period
"""
start_time = time.time()
while time.time() - start_time < timeout:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Server is ready
return
except (ConnectionRefusedError, OSError):
# Server not ready yet, retry quickly
time.sleep(0.01)
raise TimeoutError(f"Server on port {port} did not start within {timeout} seconds") # pragma: no cover
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/test_helpers.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/snippets/servers/direct_call_tool_result.py | """Example showing direct CallToolResult return for advanced control."""
from typing import Annotated
from pydantic import BaseModel
from mcp.server.mcpserver import MCPServer
from mcp.types import CallToolResult, TextContent
mcp = MCPServer("CallToolResult Example")
class ValidationModel(BaseModel):
"""Model for validating structured output."""
status: str
data: dict[str, int]
@mcp.tool()
def advanced_tool() -> CallToolResult:
"""Return CallToolResult directly for full control including _meta field."""
return CallToolResult(
content=[TextContent(type="text", text="Response visible to the model")],
_meta={"hidden": "data for client applications only"},
)
@mcp.tool()
def validated_tool() -> Annotated[CallToolResult, ValidationModel]:
"""Return CallToolResult with structured output validation."""
return CallToolResult(
content=[TextContent(type="text", text="Validated response")],
structured_content={"status": "success", "data": {"result": 42}},
_meta={"internal": "metadata"},
)
@mcp.tool()
def empty_result_tool() -> CallToolResult:
"""For empty results, return CallToolResult with empty content."""
return CallToolResult(content=[])
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/direct_call_tool_result.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/lowlevel/direct_call_tool_result.py | """Run from the repository root:
uv run examples/snippets/servers/lowlevel/direct_call_tool_result.py
"""
import asyncio
import mcp.server.stdio
from mcp import types
from mcp.server import Server, ServerRequestContext
async def handle_list_tools(
ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
"""List available tools."""
return types.ListToolsResult(
tools=[
types.Tool(
name="advanced_tool",
description="Tool with full control including _meta field",
input_schema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
]
)
async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
"""Handle tool calls by returning CallToolResult directly."""
if params.name == "advanced_tool":
message = (params.arguments or {}).get("message", "")
return types.CallToolResult(
content=[types.TextContent(type="text", text=f"Processed: {message}")],
structured_content={"result": "success", "message": message},
_meta={"hidden": "data for client applications only"},
)
raise ValueError(f"Unknown tool: {params.name}")
server = Server(
"example-server",
on_list_tools=handle_list_tools,
on_call_tool=handle_call_tool,
)
async def run():
"""Run the server."""
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
server.create_initialization_options(),
)
if __name__ == "__main__":
asyncio.run(run())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/lowlevel/direct_call_tool_result.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:tests/server/test_session_race_condition.py | """Test for race condition fix in initialization flow.
This test verifies that requests can be processed immediately after
responding to InitializeRequest, without waiting for InitializedNotification.
This is critical for HTTP transport where requests can arrive in any order.
"""
import anyio
import pytest
from mcp import types
from mcp.server.models import InitializationOptions
from mcp.server.session import ServerSession
from mcp.shared.message import SessionMessage
from mcp.shared.session import RequestResponder
from mcp.types import ServerCapabilities, Tool
@pytest.mark.anyio
async def test_request_immediately_after_initialize_response():
"""Test that requests are accepted immediately after initialize response.
This reproduces the race condition in stateful HTTP mode where:
1. Client sends InitializeRequest
2. Server responds with InitializeResult
3. Client immediately sends tools/list (before server receives InitializedNotification)
4. Without fix: Server rejects with "Received request before initialization was complete"
5. With fix: Server accepts and processes the request
This test simulates the HTTP transport behavior where InitializedNotification
may arrive in a separate POST request after other requests.
"""
server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10)
client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage | Exception](10)
tools_list_success = False
error_received = None
async def run_server():
nonlocal tools_list_success
async with ServerSession(
client_to_server_receive,
server_to_client_send,
InitializationOptions(
server_name="test-server",
server_version="1.0.0",
capabilities=ServerCapabilities(
tools=types.ToolsCapability(list_changed=False),
),
),
) as server_session:
async for message in server_session.incoming_messages: # pragma: no branch
if isinstance(message, Exception): # pragma: no cover
raise message
# Handle tools/list request
if isinstance(message, RequestResponder):
if isinstance(message.request, types.ListToolsRequest): # pragma: no branch
tools_list_success = True
# Respond with a tool list
with message:
await message.respond(
types.ListToolsResult(
tools=[
Tool(
name="example_tool",
description="An example tool",
input_schema={"type": "object", "properties": {}},
)
]
)
)
# Handle InitializedNotification
if isinstance(message, types.ClientNotification):
if isinstance(message, types.InitializedNotification): # pragma: no branch
# Done - exit gracefully
return
async def mock_client():
nonlocal error_received
# Step 1: Send InitializeRequest
await client_to_server_send.send(
SessionMessage(
types.JSONRPCRequest(
jsonrpc="2.0",
id=1,
method="initialize",
params=types.InitializeRequestParams(
protocol_version=types.LATEST_PROTOCOL_VERSION,
capabilities=types.ClientCapabilities(),
client_info=types.Implementation(name="test-client", version="1.0.0"),
).model_dump(by_alias=True, mode="json", exclude_none=True),
)
)
)
# Step 2: Wait for InitializeResult
init_msg = await server_to_client_receive.receive()
assert isinstance(init_msg.message, types.JSONRPCResponse)
# Step 3: Immediately send tools/list BEFORE InitializedNotification
# This is the race condition scenario
await client_to_server_send.send(SessionMessage(types.JSONRPCRequest(jsonrpc="2.0", id=2, method="tools/list")))
# Step 4: Check the response
tools_msg = await server_to_client_receive.receive()
if isinstance(tools_msg.message, types.JSONRPCError): # pragma: no cover
error_received = tools_msg.message.error.message
# Step 5: Send InitializedNotification
await client_to_server_send.send(
SessionMessage(types.JSONRPCNotification(jsonrpc="2.0", method="notifications/initialized"))
)
async with (
client_to_server_send,
client_to_server_receive,
server_to_client_send,
server_to_client_receive,
anyio.create_task_group() as tg,
):
tg.start_soon(run_server)
tg.start_soon(mock_client)
# With the PR fix: tools_list_success should be True, error_received should be None
# Without the fix: error_received would contain "Received request before initialization was complete"
assert tools_list_success, f"tools/list should have succeeded. Error received: {error_received}"
assert error_received is None, f"Expected no error, but got: {error_received}"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_session_race_condition.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/test_lowlevel_exception_handling.py | from unittest.mock import AsyncMock, Mock
import pytest
from mcp import types
from mcp.server.lowlevel.server import Server
from mcp.server.session import ServerSession
from mcp.shared.session import RequestResponder
@pytest.mark.anyio
async def test_exception_handling_with_raise_exceptions_true():
"""Test that exceptions are re-raised when raise_exceptions=True"""
server = Server("test-server")
session = Mock(spec=ServerSession)
session.send_log_message = AsyncMock()
test_exception = RuntimeError("Test error")
with pytest.raises(RuntimeError, match="Test error"):
await server._handle_message(test_exception, session, {}, raise_exceptions=True)
session.send_log_message.assert_called_once()
@pytest.mark.anyio
@pytest.mark.parametrize(
"exception_class,message",
[
(ValueError, "Test validation error"),
(RuntimeError, "Test runtime error"),
(KeyError, "Test key error"),
(Exception, "Basic error"),
],
)
async def test_exception_handling_with_raise_exceptions_false(exception_class: type[Exception], message: str):
"""Test that exceptions are logged when raise_exceptions=False"""
server = Server("test-server")
session = Mock(spec=ServerSession)
session.send_log_message = AsyncMock()
test_exception = exception_class(message)
await server._handle_message(test_exception, session, {}, raise_exceptions=False)
# Should send log message
session.send_log_message.assert_called_once()
call_args = session.send_log_message.call_args
assert call_args.kwargs["level"] == "error"
assert call_args.kwargs["data"] == "Internal Server Error"
assert call_args.kwargs["logger"] == "mcp.server.exception_handler"
@pytest.mark.anyio
async def test_normal_message_handling_not_affected():
"""Test that normal messages still work correctly"""
server = Server("test-server")
session = Mock(spec=ServerSession)
# Create a mock RequestResponder
responder = Mock(spec=RequestResponder)
responder.request = types.PingRequest(method="ping")
responder.__enter__ = Mock(return_value=responder)
responder.__exit__ = Mock(return_value=None)
# Mock the _handle_request method to avoid complex setup
server._handle_request = AsyncMock()
# Should handle normally without any exception handling
await server._handle_message(responder, session, {}, raise_exceptions=False)
# Verify _handle_request was called
server._handle_request.assert_called_once()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_lowlevel_exception_handling.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/issues/test_1338_icons_and_metadata.py | """Test icon and metadata support (SEP-973)."""
import pytest
from mcp.server.mcpserver import MCPServer
from mcp.types import Icon
pytestmark = pytest.mark.anyio
async def test_icons_and_website_url():
"""Test that icons and websiteUrl are properly returned in API calls."""
# Create test icon
test_icon = Icon(
src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==",
mime_type="image/png",
sizes=["1x1"],
)
# Create server with website URL and icon
mcp = MCPServer("TestServer", website_url="https://example.com", icons=[test_icon])
# Create tool with icon
@mcp.tool(icons=[test_icon])
def test_tool(message: str) -> str: # pragma: no cover
"""A test tool with an icon."""
return message
# Create resource with icon
@mcp.resource("test://resource", icons=[test_icon])
def test_resource() -> str: # pragma: no cover
"""A test resource with an icon."""
return "test content"
# Create prompt with icon
@mcp.prompt("test_prompt", icons=[test_icon])
def test_prompt(text: str) -> str: # pragma: no cover
"""A test prompt with an icon."""
return text
# Create resource template with icon
@mcp.resource("test://weather/{city}", icons=[test_icon])
def test_resource_template(city: str) -> str: # pragma: no cover
"""Get weather for a city."""
return f"Weather for {city}"
# Test server metadata includes websiteUrl and icons
assert mcp.name == "TestServer"
assert mcp.website_url == "https://example.com"
assert mcp.icons is not None
assert len(mcp.icons) == 1
assert mcp.icons[0].src == test_icon.src
assert mcp.icons[0].mime_type == test_icon.mime_type
assert mcp.icons[0].sizes == test_icon.sizes
# Test tool includes icon
tools = await mcp.list_tools()
assert len(tools) == 1
tool = tools[0]
assert tool.name == "test_tool"
assert tool.icons is not None
assert len(tool.icons) == 1
assert tool.icons[0].src == test_icon.src
# Test resource includes icon
resources = await mcp.list_resources()
assert len(resources) == 1
resource = resources[0]
assert str(resource.uri) == "test://resource"
assert resource.icons is not None
assert len(resource.icons) == 1
assert resource.icons[0].src == test_icon.src
# Test prompt includes icon
prompts = await mcp.list_prompts()
assert len(prompts) == 1
prompt = prompts[0]
assert prompt.name == "test_prompt"
assert prompt.icons is not None
assert len(prompt.icons) == 1
assert prompt.icons[0].src == test_icon.src
# Test resource template includes icon
templates = await mcp.list_resource_templates()
assert len(templates) == 1
template = templates[0]
assert template.name == "test_resource_template"
assert template.uri_template == "test://weather/{city}"
assert template.icons is not None
assert len(template.icons) == 1
assert template.icons[0].src == test_icon.src
async def test_multiple_icons():
"""Test that multiple icons can be added to tools, resources, and prompts."""
# Create multiple test icons
icon1 = Icon(src="data:image/png;base64,icon1", mime_type="image/png", sizes=["16x16"])
icon2 = Icon(src="data:image/png;base64,icon2", mime_type="image/png", sizes=["32x32"])
icon3 = Icon(src="data:image/png;base64,icon3", mime_type="image/png", sizes=["64x64"])
mcp = MCPServer("MultiIconServer")
# Create tool with multiple icons
@mcp.tool(icons=[icon1, icon2, icon3])
def multi_icon_tool() -> str: # pragma: no cover
"""A tool with multiple icons."""
return "success"
# Test tool has all icons
tools = await mcp.list_tools()
assert len(tools) == 1
tool = tools[0]
assert tool.icons is not None
assert len(tool.icons) == 3
assert tool.icons[0].sizes == ["16x16"]
assert tool.icons[1].sizes == ["32x32"]
assert tool.icons[2].sizes == ["64x64"]
async def test_no_icons_or_website():
"""Test that server works without icons or websiteUrl."""
mcp = MCPServer("BasicServer")
@mcp.tool()
def basic_tool() -> str: # pragma: no cover
"""A basic tool without icons."""
return "success"
# Test server metadata has no websiteUrl or icons
assert mcp.name == "BasicServer"
assert mcp.website_url is None
assert mcp.icons is None
# Test tool has no icons
tools = await mcp.list_tools()
assert len(tools) == 1
tool = tools[0]
assert tool.name == "basic_tool"
assert tool.icons is None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/issues/test_1338_icons_and_metadata.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/servers/simple-pagination/mcp_simple_pagination/server.py | """Simple MCP server demonstrating pagination for tools, resources, and prompts.
This example shows how to implement pagination with the low-level server API
to handle large lists of items that need to be split across multiple pages.
"""
from typing import TypeVar
import anyio
import click
from mcp import types
from mcp.server import Server, ServerRequestContext
from starlette.requests import Request
T = TypeVar("T")
# Sample data - in real scenarios, this might come from a database
SAMPLE_TOOLS = [
types.Tool(
name=f"tool_{i}",
title=f"Tool {i}",
description=f"This is sample tool number {i}",
input_schema={"type": "object", "properties": {"input": {"type": "string"}}},
)
for i in range(1, 26) # 25 tools total
]
SAMPLE_RESOURCES = [
types.Resource(
uri=f"file:///path/to/resource_{i}.txt",
name=f"resource_{i}",
description=f"This is sample resource number {i}",
)
for i in range(1, 31) # 30 resources total
]
SAMPLE_PROMPTS = [
types.Prompt(
name=f"prompt_{i}",
description=f"This is sample prompt number {i}",
arguments=[
types.PromptArgument(name="arg1", description="First argument", required=True),
],
)
for i in range(1, 21) # 20 prompts total
]
def _paginate(cursor: str | None, items: list[T], page_size: int) -> tuple[list[T], str | None]:
"""Helper to paginate a list of items given a cursor."""
if cursor is not None:
try:
start_idx = int(cursor)
except (ValueError, TypeError):
return [], None
else:
start_idx = 0
page = items[start_idx : start_idx + page_size]
next_cursor = str(start_idx + page_size) if start_idx + page_size < len(items) else None
return page, next_cursor
# Paginated list_tools - returns 5 tools per page
async def handle_list_tools(
ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
cursor = params.cursor if params is not None else None
page, next_cursor = _paginate(cursor, SAMPLE_TOOLS, page_size=5)
return types.ListToolsResult(tools=page, next_cursor=next_cursor)
# Paginated list_resources - returns 10 resources per page
async def handle_list_resources(
ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListResourcesResult:
cursor = params.cursor if params is not None else None
page, next_cursor = _paginate(cursor, SAMPLE_RESOURCES, page_size=10)
return types.ListResourcesResult(resources=page, next_cursor=next_cursor)
# Paginated list_prompts - returns 7 prompts per page
async def handle_list_prompts(
ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListPromptsResult:
cursor = params.cursor if params is not None else None
page, next_cursor = _paginate(cursor, SAMPLE_PROMPTS, page_size=7)
return types.ListPromptsResult(prompts=page, next_cursor=next_cursor)
async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
# Find the tool in our sample data
tool = next((t for t in SAMPLE_TOOLS if t.name == params.name), None)
if not tool:
raise ValueError(f"Unknown tool: {params.name}")
return types.CallToolResult(
content=[
types.TextContent(
type="text",
text=f"Called tool '{params.name}' with arguments: {params.arguments}",
)
]
)
async def handle_read_resource(
ctx: ServerRequestContext, params: types.ReadResourceRequestParams
) -> types.ReadResourceResult:
resource = next((r for r in SAMPLE_RESOURCES if r.uri == str(params.uri)), None)
if not resource:
raise ValueError(f"Unknown resource: {params.uri}")
return types.ReadResourceResult(
contents=[
types.TextResourceContents(
uri=str(params.uri),
text=f"Content of {resource.name}: This is sample content for the resource.",
mime_type="text/plain",
)
]
)
async def handle_get_prompt(ctx: ServerRequestContext, params: types.GetPromptRequestParams) -> types.GetPromptResult:
prompt = next((p for p in SAMPLE_PROMPTS if p.name == params.name), None)
if not prompt:
raise ValueError(f"Unknown prompt: {params.name}")
message_text = f"This is the prompt '{params.name}'"
if params.arguments:
message_text += f" with arguments: {params.arguments}"
return types.GetPromptResult(
description=prompt.description,
messages=[
types.PromptMessage(
role="user",
content=types.TextContent(type="text", text=message_text),
)
],
)
@click.command()
@click.option("--port", default=8000, help="Port to listen on for SSE")
@click.option(
    "--transport",
    type=click.Choice(["stdio", "sse"]),
    default="stdio",
    help="Transport type",
)
def main(port: int, transport: str) -> int:
    """Run the pagination demo server over stdio (default) or SSE on --port."""
    # Wire every demo handler into one low-level Server instance.
    app = Server(
        "mcp-simple-pagination",
        on_list_tools=handle_list_tools,
        on_list_resources=handle_list_resources,
        on_list_prompts=handle_list_prompts,
        on_call_tool=handle_call_tool,
        on_read_resource=handle_read_resource,
        on_get_prompt=handle_get_prompt,
    )
    if transport == "sse":
        # SSE transport: GET /sse opens the event stream, clients POST to /messages/.
        from mcp.server.sse import SseServerTransport
        from starlette.applications import Starlette
        from starlette.responses import Response
        from starlette.routing import Mount, Route

        sse = SseServerTransport("/messages/")

        async def handle_sse(request: Request):
            # request._send is private Starlette API; the transport needs the raw ASGI send callable.
            async with sse.connect_sse(request.scope, request.receive, request._send) as streams:  # type: ignore[reportPrivateUsage]
                await app.run(streams[0], streams[1], app.create_initialization_options())
            return Response()

        starlette_app = Starlette(
            debug=True,
            routes=[
                Route("/sse", endpoint=handle_sse, methods=["GET"]),
                Mount("/messages/", app=sse.handle_post_message),
            ],
        )
        import uvicorn

        # Blocks until the HTTP server is shut down.
        uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    else:
        # stdio transport: serve over this process's stdin/stdout.
        from mcp.server.stdio import stdio_server

        async def arun():
            async with stdio_server() as streams:
                await app.run(streams[0], streams[1], app.create_initialization_options())

        anyio.run(arun)
    return 0
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-pagination/mcp_simple_pagination/server.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/snippets/clients/pagination_client.py | """Example of consuming paginated MCP endpoints from a client."""
import asyncio
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
from mcp.types import PaginatedRequestParams, Resource
async def list_all_resources() -> None:
    """Fetch all resources using pagination."""
    # Spawn the sample server as a subprocess and talk to it over stdio.
    async with stdio_client(StdioServerParameters(command="uv", args=["run", "mcp-simple-pagination"])) as (
        read,
        write,
    ):
        async with ClientSession(read, write) as session:
            await session.initialize()

            all_resources: list[Resource] = []
            cursor: str | None = None  # None requests the first page

            while True:
                # Fetch a page of resources
                result = await session.list_resources(params=PaginatedRequestParams(cursor=cursor))
                all_resources.extend(result.resources)
                print(f"Fetched {len(result.resources)} resources")

                # Check if there are more pages
                if result.next_cursor:
                    cursor = result.next_cursor
                else:
                    break

            print(f"Total resources: {len(all_resources)}")


if __name__ == "__main__":
    asyncio.run(list_all_resources())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/pagination_client.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/pagination_example.py | """Example of implementing pagination with the low-level MCP server."""
from mcp import types
from mcp.server import Server, ServerRequestContext
# Sample data to paginate
ITEMS = [f"Item {i}" for i in range(1, 101)] # 100 items
async def handle_list_resources(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListResourcesResult:
    """Return one 10-item page of ITEMS; the cursor is the integer offset of the next page."""
    PAGE_SIZE = 10

    # A missing params object or a missing cursor both mean "start from the beginning".
    offset = 0
    if params is not None and params.cursor is not None:
        offset = int(params.cursor)
    upper = offset + PAGE_SIZE

    # Build Resource entries for just this slice of the data.
    page: list[types.Resource] = []
    for item in ITEMS[offset:upper]:
        page.append(
            types.Resource(uri=f"resource://items/{item}", name=item, description=f"Description for {item}")
        )

    # Advertise a next cursor only while items remain past this page.
    more = upper < len(ITEMS)
    return types.ListResourcesResult(resources=page, next_cursor=str(upper) if more else None)
server = Server("paginated-server", on_list_resources=handle_list_resources)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/pagination_example.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:tests/server/lowlevel/test_server_listing.py | """Basic tests for list_prompts, list_resources, and list_tools handlers without pagination."""
import pytest
from mcp import Client
from mcp.server import Server, ServerRequestContext
from mcp.types import (
ListPromptsResult,
ListResourcesResult,
ListToolsResult,
PaginatedRequestParams,
Prompt,
Resource,
Tool,
)
@pytest.mark.anyio
async def test_list_prompts_basic() -> None:
    """Test basic prompt listing without pagination."""
    test_prompts = [
        Prompt(name="prompt1", description="First prompt"),
        Prompt(name="prompt2", description="Second prompt"),
    ]

    async def handle_list_prompts(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListPromptsResult:
        return ListPromptsResult(prompts=test_prompts)

    # Client(server) wires client and server together in-memory; no transport involved.
    server = Server("test", on_list_prompts=handle_list_prompts)
    async with Client(server) as client:
        result = await client.list_prompts()
        assert result.prompts == test_prompts


@pytest.mark.anyio
async def test_list_resources_basic() -> None:
    """Test basic resource listing without pagination."""
    test_resources = [
        Resource(uri="file:///test1.txt", name="Test 1"),
        Resource(uri="file:///test2.txt", name="Test 2"),
    ]

    async def handle_list_resources(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListResourcesResult:
        return ListResourcesResult(resources=test_resources)

    server = Server("test", on_list_resources=handle_list_resources)
    async with Client(server) as client:
        result = await client.list_resources()
        assert result.resources == test_resources


@pytest.mark.anyio
async def test_list_tools_basic() -> None:
    """Test basic tool listing without pagination."""
    # Tools carry JSON Schema input definitions; they round-trip unchanged.
    test_tools = [
        Tool(
            name="tool1",
            description="First tool",
            input_schema={
                "type": "object",
                "properties": {
                    "message": {"type": "string"},
                },
                "required": ["message"],
            },
        ),
        Tool(
            name="tool2",
            description="Second tool",
            input_schema={
                "type": "object",
                "properties": {
                    "count": {"type": "number"},
                    "enabled": {"type": "boolean"},
                },
                "required": ["count"],
            },
        ),
    ]

    async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        return ListToolsResult(tools=test_tools)

    server = Server("test", on_list_tools=handle_list_tools)
    async with Client(server) as client:
        result = await client.list_tools()
        assert result.tools == test_tools


@pytest.mark.anyio
async def test_list_prompts_empty() -> None:
    """Test listing with empty results."""

    async def handle_list_prompts(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListPromptsResult:
        return ListPromptsResult(prompts=[])

    server = Server("test", on_list_prompts=handle_list_prompts)
    async with Client(server) as client:
        result = await client.list_prompts()
        assert result.prompts == []


@pytest.mark.anyio
async def test_list_resources_empty() -> None:
    """Test listing with empty results."""

    async def handle_list_resources(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListResourcesResult:
        return ListResourcesResult(resources=[])

    server = Server("test", on_list_resources=handle_list_resources)
    async with Client(server) as client:
        result = await client.list_resources()
        assert result.resources == []


@pytest.mark.anyio
async def test_list_tools_empty() -> None:
    """Test listing with empty results."""

    async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        return ListToolsResult(tools=[])

    server = Server("test", on_list_tools=handle_list_tools)
    async with Client(server) as client:
        result = await client.list_tools()
        assert result.tools == []
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/lowlevel/test_server_listing.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/lowlevel/test_server_pagination.py | import pytest
from mcp import Client
from mcp.server import Server, ServerRequestContext
from mcp.types import (
ListPromptsResult,
ListResourcesResult,
ListToolsResult,
PaginatedRequestParams,
)
@pytest.mark.anyio
async def test_list_prompts_pagination() -> None:
    """The cursor a client sends must arrive unchanged in the handler's params."""
    test_cursor = "test-cursor-123"
    # Captured via nonlocal so the outer test can inspect what the handler received.
    received_params: PaginatedRequestParams | None = None

    async def handle_list_prompts(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListPromptsResult:
        nonlocal received_params
        received_params = params
        return ListPromptsResult(prompts=[], next_cursor="next")

    server = Server("test", on_list_prompts=handle_list_prompts)
    async with Client(server) as client:
        # No cursor provided
        await client.list_prompts()
        assert received_params is not None
        assert received_params.cursor is None

        # Cursor provided
        await client.list_prompts(cursor=test_cursor)
        assert received_params is not None
        assert received_params.cursor == test_cursor


@pytest.mark.anyio
async def test_list_resources_pagination() -> None:
    """Same cursor round-trip check as above, for resources/list."""
    test_cursor = "resource-cursor-456"
    received_params: PaginatedRequestParams | None = None

    async def handle_list_resources(
        ctx: ServerRequestContext, params: PaginatedRequestParams | None
    ) -> ListResourcesResult:
        nonlocal received_params
        received_params = params
        return ListResourcesResult(resources=[], next_cursor="next")

    server = Server("test", on_list_resources=handle_list_resources)
    async with Client(server) as client:
        # No cursor provided
        await client.list_resources()
        assert received_params is not None
        assert received_params.cursor is None

        # Cursor provided
        await client.list_resources(cursor=test_cursor)
        assert received_params is not None
        assert received_params.cursor == test_cursor


@pytest.mark.anyio
async def test_list_tools_pagination() -> None:
    """Same cursor round-trip check as above, for tools/list."""
    test_cursor = "tools-cursor-789"
    received_params: PaginatedRequestParams | None = None

    async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        nonlocal received_params
        received_params = params
        return ListToolsResult(tools=[], next_cursor="next")

    server = Server("test", on_list_tools=handle_list_tools)
    async with Client(server) as client:
        # No cursor provided
        await client.list_tools()
        assert received_params is not None
        assert received_params.cursor is None

        # Cursor provided
        await client.list_tools(cursor=test_cursor)
        assert received_params is not None
        assert received_params.cursor == test_cursor
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/lowlevel/test_server_pagination.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/auth/test_protected_resource.py | """Integration tests for MCP Oauth Protected Resource."""
from urllib.parse import urlparse
import httpx
import pytest
from inline_snapshot import snapshot
from pydantic import AnyHttpUrl
from starlette.applications import Starlette
from mcp.server.auth.routes import build_resource_metadata_url, create_protected_resource_routes
@pytest.fixture
def test_app():
    """Fixture to create protected resource routes for testing."""
    # Create the protected resource routes
    protected_resource_routes = create_protected_resource_routes(
        resource_url=AnyHttpUrl("https://example.com/resource"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com/authorization")],
        scopes_supported=["read", "write"],
        resource_name="Example Resource",
        resource_documentation=AnyHttpUrl("https://docs.example.com/resource"),
    )

    app = Starlette(routes=protected_resource_routes)
    return app


@pytest.fixture
async def test_client(test_app: Starlette):
    """Fixture to create an HTTP client for the protected resource app."""
    # ASGITransport drives the Starlette app in-process; no real network I/O.
    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=test_app), base_url="https://mcptest.com") as client:
        yield client


@pytest.mark.anyio
async def test_metadata_endpoint_with_path(test_client: httpx.AsyncClient):
    """Test the OAuth 2.0 Protected Resource metadata endpoint for path-based resource."""
    # For resource with path "/resource", metadata should be accessible at the path-aware location
    response = await test_client.get("/.well-known/oauth-protected-resource/resource")

    assert response.json() == snapshot(
        {
            "resource": "https://example.com/resource",
            "authorization_servers": ["https://auth.example.com/authorization"],
            "scopes_supported": ["read", "write"],
            "resource_name": "Example Resource",
            "resource_documentation": "https://docs.example.com/resource",
            "bearer_methods_supported": ["header"],
        }
    )


@pytest.mark.anyio
async def test_metadata_endpoint_root_path_returns_404(test_client: httpx.AsyncClient):
    """Test that root path returns 404 for path-based resource."""
    # Root path should return 404 for path-based resources
    response = await test_client.get("/.well-known/oauth-protected-resource")
    assert response.status_code == 404


@pytest.fixture
def root_resource_app():
    """Fixture to create protected resource routes for root-level resource."""
    # Create routes for a resource without path component
    protected_resource_routes = create_protected_resource_routes(
        resource_url=AnyHttpUrl("https://example.com"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
        scopes_supported=["read"],
        resource_name="Root Resource",
    )

    app = Starlette(routes=protected_resource_routes)
    return app


@pytest.fixture
async def root_resource_client(root_resource_app: Starlette):
    """Fixture to create an HTTP client for the root resource app."""
    async with httpx.AsyncClient(
        transport=httpx.ASGITransport(app=root_resource_app), base_url="https://mcptest.com"
    ) as client:
        yield client


@pytest.mark.anyio
async def test_metadata_endpoint_without_path(root_resource_client: httpx.AsyncClient):
    """Test metadata endpoint for root-level resource."""
    # For root resource, metadata should be at standard location
    response = await root_resource_client.get("/.well-known/oauth-protected-resource")
    assert response.status_code == 200

    # NOTE: the expected values show AnyHttpUrl's trailing-slash normalization of bare hosts.
    assert response.json() == snapshot(
        {
            "resource": "https://example.com/",
            "authorization_servers": ["https://auth.example.com/"],
            "scopes_supported": ["read"],
            "resource_name": "Root Resource",
            "bearer_methods_supported": ["header"],
        }
    )
# Tests for URL construction utility function
def test_metadata_url_construction_url_without_path():
    """Test URL construction for resource without path component."""
    resource_url = AnyHttpUrl("https://example.com")
    result = build_resource_metadata_url(resource_url)
    assert str(result) == "https://example.com/.well-known/oauth-protected-resource"


def test_metadata_url_construction_url_with_path_component():
    """Test URL construction for resource with path component."""
    resource_url = AnyHttpUrl("https://example.com/mcp")
    result = build_resource_metadata_url(resource_url)
    # The well-known segment is inserted before the resource's own path.
    assert str(result) == "https://example.com/.well-known/oauth-protected-resource/mcp"


def test_metadata_url_construction_url_with_trailing_slash_only():
    """Test URL construction for resource with trailing slash only."""
    resource_url = AnyHttpUrl("https://example.com/")
    result = build_resource_metadata_url(resource_url)
    # Trailing slash should be treated as empty path
    assert str(result) == "https://example.com/.well-known/oauth-protected-resource"


@pytest.mark.parametrize(
    "resource_url,expected_url",
    [
        ("https://example.com", "https://example.com/.well-known/oauth-protected-resource"),
        ("https://example.com/", "https://example.com/.well-known/oauth-protected-resource"),
        ("https://example.com/mcp", "https://example.com/.well-known/oauth-protected-resource/mcp"),
        ("http://localhost:8001/mcp", "http://localhost:8001/.well-known/oauth-protected-resource/mcp"),
    ],
)
def test_metadata_url_construction_various_resource_configurations(resource_url: str, expected_url: str):
    """Test URL construction with various resource configurations."""
    result = build_resource_metadata_url(AnyHttpUrl(resource_url))
    assert str(result) == expected_url


# Tests for consistency between URL generation and route registration
def test_route_consistency_route_path_matches_metadata_url():
    """Test that route path matches the generated metadata URL."""
    resource_url = AnyHttpUrl("https://example.com/mcp")

    # Generate metadata URL
    metadata_url = build_resource_metadata_url(resource_url)

    # Create routes
    routes = create_protected_resource_routes(
        resource_url=resource_url,
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )

    # Extract path from metadata URL
    metadata_path = urlparse(str(metadata_url)).path

    # Verify consistency
    assert len(routes) == 1
    assert routes[0].path == metadata_path


@pytest.mark.parametrize(
    "resource_url,expected_path",
    [
        ("https://example.com", "/.well-known/oauth-protected-resource"),
        ("https://example.com/", "/.well-known/oauth-protected-resource"),
        ("https://example.com/mcp", "/.well-known/oauth-protected-resource/mcp"),
    ],
)
def test_route_consistency_consistent_paths_for_various_resources(resource_url: str, expected_path: str):
    """Test that URL generation and route creation are consistent."""
    resource_url_obj = AnyHttpUrl(resource_url)

    # Test URL generation
    metadata_url = build_resource_metadata_url(resource_url_obj)
    url_path = urlparse(str(metadata_url)).path

    # Test route creation
    routes = create_protected_resource_routes(
        resource_url=resource_url_obj,
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )
    route_path = routes[0].path

    # Both should match expected path
    assert url_path == expected_path
    assert route_path == expected_path
    assert url_path == route_path
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/auth/test_protected_resource.py",
"license": "MIT License",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/client/test_notification_response.py | """Tests for StreamableHTTP client transport with non-SDK servers.
These tests verify client behavior when interacting with servers
that don't follow SDK conventions.
"""
import json
import httpx
import pytest
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Route
from mcp import ClientSession, MCPError, types
from mcp.client.streamable_http import streamable_http_client
from mcp.shared.session import RequestResponder
from mcp.types import RootsListChangedNotification
pytestmark = pytest.mark.anyio
INIT_RESPONSE = {
"serverInfo": {"name": "test-non-sdk-server", "version": "1.0.0"},
"protocolVersion": "2024-11-05",
"capabilities": {},
}
def _init_json_response(data: dict[str, object]) -> JSONResponse:
    """Build a minimal successful JSON-RPC initialize response echoing the request id."""
    return JSONResponse({"jsonrpc": "2.0", "id": data["id"], "result": INIT_RESPONSE})


def _create_non_sdk_server_app() -> Starlette:
    """Create a minimal server that doesn't follow SDK conventions."""

    async def handle_mcp_request(request: Request) -> Response:
        body = await request.body()
        data = json.loads(body)

        if data.get("method") == "initialize":
            return _init_json_response(data)

        # For notifications, return 204 No Content (non-SDK behavior)
        if "id" not in data:
            return Response(status_code=204, headers={"Content-Type": "application/json"})

        return JSONResponse(  # pragma: no cover
            {"jsonrpc": "2.0", "id": data.get("id"), "error": {"code": -32601, "message": "Method not found"}}
        )

    return Starlette(debug=True, routes=[Route("/mcp", handle_mcp_request, methods=["POST"])])


def _create_unexpected_content_type_app() -> Starlette:
    """Create a server that returns an unexpected content type for requests."""

    async def handle_mcp_request(request: Request) -> Response:
        body = await request.body()
        data = json.loads(body)

        if data.get("method") == "initialize":
            return _init_json_response(data)

        if "id" not in data:
            return Response(status_code=202)

        # Return text/plain for all other requests β an unexpected content type.
        return Response(content="this is plain text, not json or sse", status_code=200, media_type="text/plain")

    return Starlette(debug=True, routes=[Route("/mcp", handle_mcp_request, methods=["POST"])])


async def test_non_compliant_notification_response() -> None:
    """Verify the client ignores unexpected responses to notifications.

    The spec states notifications should get either 202 + no response body, or 4xx + optional error body
    (https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#sending-messages-to-the-server),
    but some servers wrongly return other 2xx codes (e.g. 204). For now we simply ignore unexpected responses
    (aligning behaviour w/ the TS SDK).
    """
    returned_exception = None

    # The message handler only fires for exceptions here; capture them for the final check.
    async def message_handler(  # pragma: no cover
        message: RequestResponder[types.ServerRequest, types.ClientResult] | types.ServerNotification | Exception,
    ) -> None:
        nonlocal returned_exception
        if isinstance(message, Exception):
            returned_exception = message

    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=_create_non_sdk_server_app())) as client:
        async with streamable_http_client("http://localhost/mcp", http_client=client) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream, message_handler=message_handler) as session:
                await session.initialize()
                # The test server returns a 204 instead of the expected 202
                await session.send_notification(RootsListChangedNotification(method="notifications/roots/list_changed"))

    if returned_exception:  # pragma: no cover
        pytest.fail(f"Server encountered an exception: {returned_exception}")


async def test_unexpected_content_type_sends_jsonrpc_error() -> None:
    """Verify unexpected content types unblock the pending request with an MCPError.

    When a server returns a content type that is neither application/json nor text/event-stream,
    the client should send a JSONRPCError so the pending request resolves immediately
    instead of hanging until timeout.
    """
    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=_create_unexpected_content_type_app())) as client:
        async with streamable_http_client("http://localhost/mcp", http_client=client) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:  # pragma: no branch
                await session.initialize()
                with pytest.raises(MCPError, match="Unexpected content type: text/plain"):  # pragma: no branch
                    await session.list_tools()


def _create_http_error_app(error_status: int, *, error_on_notifications: bool = False) -> Starlette:
    """Create a server that returns an HTTP error for non-init requests."""

    async def handle_mcp_request(request: Request) -> Response:
        body = await request.body()
        data = json.loads(body)

        if data.get("method") == "initialize":
            return _init_json_response(data)

        if "id" not in data:
            # Notifications either get the configured error or the spec-compliant 202.
            if error_on_notifications:
                return Response(status_code=error_status)
            return Response(status_code=202)

        return Response(status_code=error_status)

    return Starlette(debug=True, routes=[Route("/mcp", handle_mcp_request, methods=["POST"])])


async def test_http_error_status_sends_jsonrpc_error() -> None:
    """Verify HTTP 5xx errors unblock the pending request with an MCPError.

    When a server returns a non-2xx status code (e.g. 500), the client should
    send a JSONRPCError so the pending request resolves immediately instead of
    raising an unhandled httpx.HTTPStatusError that causes the caller to hang.
    """
    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=_create_http_error_app(500))) as client:
        async with streamable_http_client("http://localhost/mcp", http_client=client) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:  # pragma: no branch
                await session.initialize()
                with pytest.raises(MCPError, match="Server returned an error response"):  # pragma: no branch
                    await session.list_tools()


async def test_http_error_on_notification_does_not_hang() -> None:
    """Verify HTTP errors on notifications are silently ignored.

    When a notification gets an HTTP error, there is no pending request to
    unblock, so the client should just return without sending a JSONRPCError.
    """
    app = _create_http_error_app(500, error_on_notifications=True)
    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=app)) as client:
        async with streamable_http_client("http://localhost/mcp", http_client=client) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:  # pragma: no branch
                await session.initialize()
                # Should not raise or hang β the error is silently ignored for notifications
                await session.send_notification(RootsListChangedNotification(method="notifications/roots/list_changed"))


def _create_invalid_json_response_app() -> Starlette:
    """Create a server that returns invalid JSON for requests."""

    async def handle_mcp_request(request: Request) -> Response:
        body = await request.body()
        data = json.loads(body)

        if data.get("method") == "initialize":
            return _init_json_response(data)

        if "id" not in data:
            return Response(status_code=202)

        # Return application/json content type but with invalid JSON body.
        return Response(content="not valid json{{{", status_code=200, media_type="application/json")

    return Starlette(debug=True, routes=[Route("/mcp", handle_mcp_request, methods=["POST"])])


async def test_invalid_json_response_sends_jsonrpc_error() -> None:
    """Verify invalid JSON responses unblock the pending request with an MCPError.

    When a server returns application/json with an unparseable body, the client
    should send a JSONRPCError so the pending request resolves immediately
    instead of hanging until timeout.
    """
    async with httpx.AsyncClient(transport=httpx.ASGITransport(app=_create_invalid_json_response_app())) as client:
        async with streamable_http_client("http://localhost/mcp", http_client=client) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:  # pragma: no branch
                await session.initialize()
                with pytest.raises(MCPError, match="Failed to parse JSON response"):  # pragma: no branch
                    await session.list_tools()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_notification_response.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/auth/test_provider.py | """Tests for mcp.server.auth.provider module."""
from mcp.server.auth.provider import construct_redirect_uri
def test_construct_redirect_uri_no_existing_params():
    """Test construct_redirect_uri with no existing query parameters."""
    base_uri = "http://localhost:8000/callback"
    result = construct_redirect_uri(base_uri, code="auth_code", state="test_state")
    assert "http://localhost:8000/callback?code=auth_code&state=test_state" == result


def test_construct_redirect_uri_with_existing_params():
    """Test construct_redirect_uri with existing query parameters (regression test for #1279)."""
    base_uri = "http://localhost:8000/callback?session_id=1234"
    result = construct_redirect_uri(base_uri, code="auth_code", state="test_state")
    # Should preserve existing params and add new ones
    assert "session_id=1234" in result
    assert "code=auth_code" in result
    assert "state=test_state" in result
    assert result.startswith("http://localhost:8000/callback?")


def test_construct_redirect_uri_multiple_existing_params():
    """Test construct_redirect_uri with multiple existing query parameters."""
    base_uri = "http://localhost:8000/callback?session_id=1234&user=test"
    result = construct_redirect_uri(base_uri, code="auth_code")
    assert "session_id=1234" in result
    assert "user=test" in result
    assert "code=auth_code" in result


def test_construct_redirect_uri_with_none_values():
    """Test construct_redirect_uri filters out None values."""
    base_uri = "http://localhost:8000/callback"
    result = construct_redirect_uri(base_uri, code="auth_code", state=None)
    assert result == "http://localhost:8000/callback?code=auth_code"
    assert "state" not in result


def test_construct_redirect_uri_empty_params():
    """Test construct_redirect_uri with no additional parameters."""
    base_uri = "http://localhost:8000/callback?existing=param"
    result = construct_redirect_uri(base_uri)
    assert result == "http://localhost:8000/callback?existing=param"


def test_construct_redirect_uri_duplicate_param_names():
    """Test construct_redirect_uri when adding param that already exists."""
    base_uri = "http://localhost:8000/callback?code=existing"
    result = construct_redirect_uri(base_uri, code="new_code")
    # Should contain both values (this is expected behavior of parse_qs/urlencode)
    assert "code=existing" in result
    assert "code=new_code" in result


def test_construct_redirect_uri_multivalued_existing_params():
    """Test construct_redirect_uri with existing multi-valued parameters."""
    base_uri = "http://localhost:8000/callback?scope=read&scope=write"
    result = construct_redirect_uri(base_uri, code="auth_code")
    assert "scope=read" in result
    assert "scope=write" in result
    assert "code=auth_code" in result


def test_construct_redirect_uri_encoded_values():
    """Test construct_redirect_uri handles URL encoding properly."""
    base_uri = "http://localhost:8000/callback"
    result = construct_redirect_uri(base_uri, state="test state with spaces")
    # urlencode uses + for spaces by default
    assert "state=test+state+with+spaces" in result
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/auth/test_provider.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_http_basic_mounting.py | """Basic example showing how to mount StreamableHTTP server in Starlette.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_basic_mounting:app --reload
"""
import contextlib
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.mcpserver import MCPServer
# Create MCP server
mcp = MCPServer("My App")
@mcp.tool()
def hello() -> str:
    """A simple hello tool"""
    return "Hello from MCP!"


# Create a lifespan context manager to run the session manager
@contextlib.asynccontextmanager
async def lifespan(app: Starlette):
    # The session manager must be running for the mounted transport to accept connections.
    async with mcp.session_manager.run():
        yield


# Mount the StreamableHTTP server to the existing ASGI server
# Transport-specific options are passed to streamable_http_app()
app = Starlette(
    routes=[
        Mount("/", app=mcp.streamable_http_app(json_response=True)),
    ],
    lifespan=lifespan,
)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_http_basic_mounting.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_http_host_mounting.py | """Example showing how to mount StreamableHTTP server using Host-based routing.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_host_mounting:app --reload
"""
import contextlib
from starlette.applications import Starlette
from starlette.routing import Host
from mcp.server.mcpserver import MCPServer
# Create MCP server
mcp = MCPServer("MCP Host App")
@mcp.tool()
def domain_info() -> str:
    """Get domain-specific information"""
    return "This is served from mcp.acme.corp"


# Create a lifespan context manager to run the session manager
@contextlib.asynccontextmanager
async def lifespan(app: Starlette):
    # The session manager must be running for the mounted transport to accept connections.
    async with mcp.session_manager.run():
        yield


# Mount using Host-based routing
# Transport-specific options are passed to streamable_http_app()
app = Starlette(
    routes=[
        Host("mcp.acme.corp", app=mcp.streamable_http_app(json_response=True)),
    ],
    lifespan=lifespan,
)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_http_host_mounting.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_http_multiple_servers.py | """Example showing how to mount multiple StreamableHTTP servers with path configuration.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_multiple_servers:app --reload
"""
import contextlib
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.mcpserver import MCPServer
# Create multiple MCP servers
api_mcp = MCPServer("API Server")
chat_mcp = MCPServer("Chat Server")
@api_mcp.tool()
def api_status() -> str:
    """Get API status"""
    status = "API is running"
    return status
@chat_mcp.tool()
def send_message(message: str) -> str:
    """Send a chat message"""
    return "Message sent: " + message
# Create a combined lifespan to manage both session managers
@contextlib.asynccontextmanager
async def lifespan(app: Starlette):
    """Run both servers' session managers for the lifetime of the ASGI app.

    AsyncExitStack nests the two async context managers so both are torn
    down (in reverse order) when the app shuts down.
    """
    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(api_mcp.session_manager.run())
        await stack.enter_async_context(chat_mcp.session_manager.run())
        yield
# Mount the servers with transport-specific options passed to streamable_http_app()
# streamable_http_path="/" means endpoints will be at /api and /chat instead of /api/mcp and /chat/mcp
app = Starlette(
routes=[
Mount("/api", app=api_mcp.streamable_http_app(json_response=True, streamable_http_path="/")),
Mount("/chat", app=chat_mcp.streamable_http_app(json_response=True, streamable_http_path="/")),
],
lifespan=lifespan,
)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_http_multiple_servers.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_http_path_config.py | """Example showing path configuration when mounting MCPServer.
Run from the repository root:
uvicorn examples.snippets.servers.streamable_http_path_config:app --reload
"""
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.mcpserver import MCPServer
# Create a simple MCPServer server
mcp_at_root = MCPServer("My Server")
@mcp_at_root.tool()
def process_data(data: str) -> str:
    """Process some data"""
    return "Processed: " + data
# Mount at /process with streamable_http_path="/" so the endpoint is /process (not /process/mcp)
# Transport-specific options like json_response are passed to streamable_http_app()
app = Starlette(
routes=[
Mount(
"/process",
app=mcp_at_root.streamable_http_app(json_response=True, streamable_http_path="/"),
),
]
)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_http_path_config.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:tests/server/test_cancel_handling.py | """Test that cancelled requests don't cause double responses."""
import anyio
import pytest
from mcp import Client
from mcp.server import Server, ServerRequestContext
from mcp.shared.exceptions import MCPError
from mcp.types import (
CallToolRequest,
CallToolRequestParams,
CallToolResult,
CancelledNotification,
CancelledNotificationParams,
ListToolsResult,
PaginatedRequestParams,
TextContent,
Tool,
)
@pytest.mark.anyio
async def test_server_remains_functional_after_cancel():
    """Verify server can handle new requests after a cancellation."""
    # Track tool calls
    call_count = 0
    ev_first_call = anyio.Event()  # set once the first (slow) call has started
    first_request_id = None  # captured in the handler so the client can cancel it

    async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        return ListToolsResult(
            tools=[
                Tool(
                    name="test_tool",
                    description="Tool for testing",
                    input_schema={},
                )
            ]
        )

    async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult:
        nonlocal call_count, first_request_id
        if params.name == "test_tool":
            call_count += 1
            if call_count == 1:
                # First invocation: record its request id, signal the test,
                # then stall so the client has time to cancel it.
                first_request_id = ctx.request_id
                ev_first_call.set()
                await anyio.sleep(5)  # First call is slow
            return CallToolResult(content=[TextContent(type="text", text=f"Call number: {call_count}")])
        raise ValueError(f"Unknown tool: {params.name}")  # pragma: no cover

    server = Server("test-server", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool)
    async with Client(server) as client:
        # First request (will be cancelled)
        async def first_request():
            try:
                await client.session.send_request(
                    CallToolRequest(params=CallToolRequestParams(name="test_tool", arguments={})),
                    CallToolResult,
                )
                pytest.fail("First request should have been cancelled")  # pragma: no cover
            except MCPError:
                pass  # Expected

        # Start first request
        async with anyio.create_task_group() as tg:
            tg.start_soon(first_request)
            # Wait for it to start
            await ev_first_call.wait()
            # Cancel it
            assert first_request_id is not None
            await client.session.send_notification(
                CancelledNotification(
                    params=CancelledNotificationParams(request_id=first_request_id, reason="Testing server recovery"),
                )
            )
            # Second request (should work normally)
            result = await client.call_tool("test_tool", {})
            # Verify second request completed successfully
            assert len(result.content) == 1
            # Type narrowing for pyright
            content = result.content[0]
            assert content.type == "text"
            assert isinstance(content, TextContent)
            assert content.text == "Call number: 2"
            assert call_count == 2
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_cancel_handling.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/shared/test_auth.py | """Tests for OAuth 2.0 shared code."""
from mcp.shared.auth import OAuthMetadata
def test_oauth():
    """Should not throw when parsing OAuth metadata."""
    metadata = {
        "issuer": "https://example.com",
        "authorization_endpoint": "https://example.com/oauth2/authorize",
        "token_endpoint": "https://example.com/oauth2/token",
        "scopes_supported": ["read", "write"],
        "response_types_supported": ["code", "token"],
        "token_endpoint_auth_methods_supported": ["client_secret_basic", "client_secret_post"],
    }
    # Validation raising would fail the test; no explicit assert is needed.
    OAuthMetadata.model_validate(metadata)
def test_oidc():
    """Should not throw when parsing OIDC metadata."""
    metadata = {
        "issuer": "https://example.com",
        "authorization_endpoint": "https://example.com/oauth2/authorize",
        "token_endpoint": "https://example.com/oauth2/token",
        "end_session_endpoint": "https://example.com/logout",
        "id_token_signing_alg_values_supported": ["RS256"],
        "jwks_uri": "https://example.com/.well-known/jwks.json",
        "response_types_supported": ["code", "token"],
        "revocation_endpoint": "https://example.com/oauth2/revoke",
        "scopes_supported": ["openid", "read", "write"],
        "subject_types_supported": ["public"],
        "token_endpoint_auth_methods_supported": ["client_secret_basic", "client_secret_post"],
        "userinfo_endpoint": "https://example.com/oauth2/userInfo",
    }
    # OIDC discovery documents are a superset of OAuth metadata; parsing must accept them.
    OAuthMetadata.model_validate(metadata)
def test_oauth_with_jarm():
    """Should not throw when parsing OAuth metadata that includes JARM response modes."""
    jarm_modes = [
        "query",
        "fragment",
        "form_post",
        "query.jwt",
        "fragment.jwt",
        "form_post.jwt",
        "jwt",
    ]
    metadata = {
        "issuer": "https://example.com",
        "authorization_endpoint": "https://example.com/oauth2/authorize",
        "token_endpoint": "https://example.com/oauth2/token",
        "scopes_supported": ["read", "write"],
        "response_types_supported": ["code", "token"],
        "response_modes_supported": jarm_modes,
        "token_endpoint_auth_methods_supported": ["client_secret_basic", "client_secret_post"],
    }
    OAuthMetadata.model_validate(metadata)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_auth.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/snippets/clients/oauth_client.py | """Before running, specify running MCP RS server URL.
To spin up RS server locally, see
examples/servers/simple-auth/README.md
cd to the `examples/snippets` directory and run:
uv run oauth-client
"""
import asyncio
from urllib.parse import parse_qs, urlparse
import httpx
from pydantic import AnyUrl
from mcp import ClientSession
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.client.streamable_http import streamable_http_client
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
class InMemoryTokenStorage(TokenStorage):
    """Demo In-memory token storage implementation.

    Tokens and client registration info live only for the process lifetime;
    a real client would persist them (keychain, file, database).
    """

    def __init__(self):
        # Nothing stored until the OAuth flow completes.
        self.tokens: OAuthToken | None = None
        self.client_info: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        """Get stored tokens."""
        return self.tokens

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Store tokens."""
        self.tokens = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Get stored client information."""
        return self.client_info

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Store client information."""
        self.client_info = client_info
async def handle_redirect(auth_url: str) -> None:
    """Ask the user to open the authorization URL in their browser."""
    print("Visit: " + auth_url)
async def handle_callback() -> tuple[str, str | None]:
    """Prompt for the pasted OAuth callback URL and extract (code, state)."""
    callback_url = input("Paste callback URL: ")
    query = urlparse(callback_url).query
    params = parse_qs(query)
    code = params["code"][0]  # "code" is required; KeyError here means a malformed callback
    state = params.get("state", [None])[0]  # "state" is optional
    return code, state
async def main():
    """Run the OAuth client example."""
    # The provider drives discovery, dynamic client registration, and the
    # authorization-code flow, using the console-based redirect/callback handlers.
    oauth_auth = OAuthClientProvider(
        server_url="http://localhost:8001",
        client_metadata=OAuthClientMetadata(
            client_name="Example MCP Client",
            redirect_uris=[AnyUrl("http://localhost:3000/callback")],
            grant_types=["authorization_code", "refresh_token"],
            response_types=["code"],
            scope="user",
        ),
        storage=InMemoryTokenStorage(),
        redirect_handler=handle_redirect,
        callback_handler=handle_callback,
    )
    # Attach the OAuth provider as httpx auth so every request is authenticated.
    async with httpx.AsyncClient(auth=oauth_auth, follow_redirects=True) as custom_client:
        async with streamable_http_client("http://localhost:8001/mcp", http_client=custom_client) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                tools = await session.list_tools()
                print(f"Available tools: {[tool.name for tool in tools.tools]}")
                resources = await session.list_resources()
                print(f"Available resources: {[r.uri for r in resources.resources]}")
def run():
    """Synchronous entry point for the oauth-client script."""
    asyncio.run(main())
if __name__ == "__main__":
run()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/oauth_client.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/oauth_server.py | """Run from the repository root:
uv run examples/snippets/servers/oauth_server.py
"""
from pydantic import AnyHttpUrl
from mcp.server.auth.provider import AccessToken, TokenVerifier
from mcp.server.auth.settings import AuthSettings
from mcp.server.mcpserver import MCPServer
class SimpleTokenVerifier(TokenVerifier):
    """Simple token verifier for demonstration."""

    async def verify_token(self, token: str) -> AccessToken | None:
        """Validate *token* and return its AccessToken, or None if invalid.

        Demo stub: a real implementation would validate the token against the
        authorization server (introspection endpoint, JWT signature checks, ...).
        """
        # Explicitly reject every token rather than relying on the implicit
        # None from a bare `pass` — makes the demo's behavior obvious.
        return None
# Create MCPServer instance as a Resource Server
mcp = MCPServer(
"Weather Service",
# Token verifier for authentication
token_verifier=SimpleTokenVerifier(),
# Auth settings for RFC 9728 Protected Resource Metadata
auth=AuthSettings(
issuer_url=AnyHttpUrl("https://auth.example.com"), # Authorization Server URL
resource_server_url=AnyHttpUrl("http://localhost:3001"), # This server's URL
required_scopes=["user"],
),
)
@mcp.tool()
async def get_weather(city: str = "London") -> dict[str, str]:
    """Get weather data for a city"""
    # Canned demo response; only "city" varies with the input.
    report = {"city": city}
    report["temperature"] = "22"
    report["condition"] = "Partly cloudy"
    report["humidity"] = "65%"
    return report
if __name__ == "__main__":
mcp.run(transport="streamable-http", json_response=True)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/oauth_server.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/clients/parsing_tool_results.py | """examples/snippets/clients/parsing_tool_results.py"""
import asyncio
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
async def parse_tool_results():
    """Demonstrates how to parse different types of content in CallToolResult.

    Spawns a stdio MCP server and walks through the main content variants a
    tool can return: plain text, structured JSON, embedded resources, images,
    and error results.
    """
    server_params = StdioServerParameters(command="python", args=["path/to/mcp_server.py"])
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Example 1: Parsing text content
            result = await session.call_tool("get_data", {"format": "text"})
            for content in result.content:
                if isinstance(content, types.TextContent):
                    print(f"Text: {content.text}")
            # Example 2: Parsing structured content from JSON tools
            result = await session.call_tool("get_user", {"id": "123"})
            if hasattr(result, "structured_content") and result.structured_content:
                # Access structured data directly
                user_data = result.structured_content
                print(f"User: {user_data.get('name')}, Age: {user_data.get('age')}")
            # Example 3: Parsing embedded resources
            result = await session.call_tool("read_config", {})
            for content in result.content:
                if isinstance(content, types.EmbeddedResource):
                    # A resource may carry text or binary contents; branch on type.
                    resource = content.resource
                    if isinstance(resource, types.TextResourceContents):
                        print(f"Config from {resource.uri}: {resource.text}")
                    else:
                        print(f"Binary data from {resource.uri}")
            # Example 4: Parsing image content
            result = await session.call_tool("generate_chart", {"data": [1, 2, 3]})
            for content in result.content:
                if isinstance(content, types.ImageContent):
                    print(f"Image ({content.mime_type}): {len(content.data)} bytes")
            # Example 5: Handling errors
            result = await session.call_tool("failing_tool", {})
            if result.is_error:
                # Error details arrive as ordinary text content items.
                print("Tool execution failed!")
                for content in result.content:
                    if isinstance(content, types.TextContent):
                        print(f"Error: {content.text}")
async def main():
    """Entry point: run the tool-result parsing demo."""
    await parse_tool_results()
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/parsing_tool_results.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/cli/test_utils.py | import subprocess
import sys
from pathlib import Path
from typing import Any
import pytest
from mcp.cli.cli import _build_uv_command, _get_npx_command, _parse_file_path # type: ignore[reportPrivateUsage]
@pytest.mark.parametrize(
    "spec, expected_obj",
    [
        ("server.py", None),
        ("foo.py:srv_obj", "srv_obj"),
    ],
)
def test_parse_file_path_accepts_valid_specs(tmp_path: Path, spec: str, expected_obj: str | None):
    """Should accept valid file specs."""
    target = tmp_path / spec.split(":")[0]
    target.write_text("x = 1")
    # Re-attach the object suffix only when the spec carried one.
    argument = f"{target}:{expected_obj}" if ":" in spec else str(target)
    path, obj = _parse_file_path(argument)
    assert path == target.resolve()
    assert obj == expected_obj
def test_parse_file_path_missing(tmp_path: Path):
    """Should system exit if a file is missing."""
    missing = tmp_path / "missing.py"
    with pytest.raises(SystemExit):
        _parse_file_path(str(missing))
def test_parse_file_exit_on_dir(tmp_path: Path):
    """Should system exit if a directory is passed"""
    directory = tmp_path / "dir"
    directory.mkdir()
    with pytest.raises(SystemExit):
        _parse_file_path(str(directory))
def test_build_uv_command_minimal():
    """Should emit core command when no extras specified."""
    expected = ["uv", "run", "--with", "mcp", "mcp", "run", "foo.py"]
    assert _build_uv_command("foo.py") == expected
def test_build_uv_command_adds_editable_and_packages():
    """Should include --with-editable and every --with pkg in correct order."""
    editable_path = Path("/pkg")
    cmd = _build_uv_command(
        "foo.py",
        with_editable=editable_path,
        with_packages=["package1", "package2"],
    )
    # Build the expectation in the segments the command is assembled from.
    expected = ["uv", "run", "--with", "mcp"]
    expected += ["--with-editable", str(editable_path)]  # str() to match what the function does
    expected += ["--with", "package1", "--with", "package2"]
    expected += ["mcp", "run", "foo.py"]
    assert cmd == expected
def test_get_npx_unix_like(monkeypatch: pytest.MonkeyPatch):
    """Should return "npx" on unix-like systems."""
    monkeypatch.setattr(sys, "platform", "linux")
    result = _get_npx_command()
    assert result == "npx"
def test_get_npx_windows(monkeypatch: pytest.MonkeyPatch):
    """Should return one of the npx candidates on Windows."""
    candidates = ["npx.cmd", "npx.exe", "npx"]

    def stub_run(cmd: list[str], **kw: Any) -> subprocess.CompletedProcess[bytes]:
        # Succeed only for recognised npx executables.
        if cmd[0] not in candidates:  # pragma: no cover
            raise subprocess.CalledProcessError(1, cmd[0])
        return subprocess.CompletedProcess(cmd, 0)

    monkeypatch.setattr(sys, "platform", "win32")
    monkeypatch.setattr(subprocess, "run", stub_run)
    assert _get_npx_command() in candidates
def test_get_npx_returns_none_when_npx_missing(monkeypatch: pytest.MonkeyPatch):
    """Should give None if every candidate fails."""
    monkeypatch.setattr(sys, "platform", "win32", raising=False)

    def failing_run(*args: Any, **kwargs: Any) -> subprocess.CompletedProcess[bytes]:
        # Every candidate invocation fails, simulating npx being absent.
        raise subprocess.CalledProcessError(1, args[0])

    monkeypatch.setattr(subprocess, "run", failing_run)
    assert _get_npx_command() is None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/cli/test_utils.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/snippets/clients/streamable_basic.py | """Run from the repository root:
uv run examples/snippets/clients/streamable_basic.py
"""
import asyncio
from mcp import ClientSession
from mcp.client.streamable_http import streamable_http_client
async def main():
    """Connect to a local streamable HTTP MCP server and list its tools."""
    server_url = "http://localhost:8000/mcp"
    async with streamable_http_client(server_url) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            # Complete the MCP handshake before issuing requests.
            await session.initialize()
            tools = await session.list_tools()
            tool_names = [tool.name for tool in tools.tools]
            print(f"Available tools: {tool_names}")
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/streamable_basic.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_config.py | """Run from the repository root:
uv run examples/snippets/servers/streamable_config.py
"""
from mcp.server.mcpserver import MCPServer
mcp = MCPServer("StatelessServer")
# Add a simple tool to demonstrate the server
@mcp.tool()
def greet(name: str = "World") -> str:
    """Greet someone by name."""
    return "Hello, " + name + "!"
# Run server with streamable_http transport
# Transport-specific options (stateless_http, json_response) are passed to run()
if __name__ == "__main__":
# Stateless server with JSON responses (recommended)
mcp.run(transport="streamable-http", stateless_http=True, json_response=True)
# Other configuration options:
# Stateless server with SSE streaming responses
# mcp.run(transport="streamable-http", stateless_http=True)
# Stateful server with session persistence
# mcp.run(transport="streamable-http")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_config.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/streamable_starlette_mount.py | """Run from the repository root:
uvicorn examples.snippets.servers.streamable_starlette_mount:app --reload
"""
import contextlib
from starlette.applications import Starlette
from starlette.routing import Mount
from mcp.server.mcpserver import MCPServer
# Create the Echo server
echo_mcp = MCPServer(name="EchoServer")
@echo_mcp.tool()
def echo(message: str) -> str:
    """A simple echo tool"""
    return "Echo: " + message
# Create the Math server
math_mcp = MCPServer(name="MathServer")
@math_mcp.tool()
def add_two(n: int) -> int:
    """Tool to add two to the input"""
    result = n + 2
    return result
# Create a combined lifespan to manage both session managers
@contextlib.asynccontextmanager
async def lifespan(app: Starlette):
    """Run both servers' session managers for the lifetime of the ASGI app.

    AsyncExitStack nests the two async context managers so both are torn
    down (in reverse order) when the app shuts down.
    """
    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(echo_mcp.session_manager.run())
        await stack.enter_async_context(math_mcp.session_manager.run())
        yield
# Create the Starlette app and mount the MCP servers
app = Starlette(
routes=[
Mount("/echo", echo_mcp.streamable_http_app(stateless_http=True, json_response=True)),
Mount("/math", math_mcp.streamable_http_app(stateless_http=True, json_response=True)),
],
lifespan=lifespan,
)
# Note: Clients connect to http://localhost:8000/echo/mcp and http://localhost:8000/math/mcp
# To mount at the root of each path (e.g., /echo instead of /echo/mcp):
# echo_mcp.streamable_http_app(streamable_http_path="/", stateless_http=True, json_response=True)
# math_mcp.streamable_http_app(streamable_http_path="/", stateless_http=True, json_response=True)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/streamable_starlette_mount.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/lowlevel/basic.py | """Run from the repository root:
uv run examples/snippets/servers/lowlevel/basic.py
"""
import asyncio
import mcp.server.stdio
from mcp import types
from mcp.server import Server, ServerRequestContext
async def handle_list_prompts(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListPromptsResult:
    """List available prompts."""
    # This demo server exposes a single prompt with one required argument.
    example_prompt = types.Prompt(
        name="example-prompt",
        description="An example prompt template",
        arguments=[types.PromptArgument(name="arg1", description="Example argument", required=True)],
    )
    return types.ListPromptsResult(prompts=[example_prompt])
async def handle_get_prompt(ctx: ServerRequestContext, params: types.GetPromptRequestParams) -> types.GetPromptResult:
    """Get a specific prompt by name."""
    # Guard clause: only one prompt is known to this server.
    if params.name != "example-prompt":
        raise ValueError(f"Unknown prompt: {params.name}")
    arguments = params.arguments or {}
    arg1_value = arguments.get("arg1", "default")
    message = types.PromptMessage(
        role="user",
        content=types.TextContent(type="text", text=f"Example prompt text with argument: {arg1_value}"),
    )
    return types.GetPromptResult(description="Example prompt", messages=[message])
server = Server(
"example-server",
on_list_prompts=handle_list_prompts,
on_get_prompt=handle_get_prompt,
)
async def run():
    """Run the basic low-level server."""
    # stdio_server() wires the process's stdin/stdout as the MCP transport.
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            server.create_initialization_options(),
        )
if __name__ == "__main__":
asyncio.run(run())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/lowlevel/basic.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/lowlevel/lifespan.py | """Run from the repository root:
uv run examples/snippets/servers/lowlevel/lifespan.py
"""
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import TypedDict
import mcp.server.stdio
from mcp import types
from mcp.server import Server, ServerRequestContext
# Mock database class for example
class Database:
    """Mock database class for example."""

    @classmethod
    async def connect(cls) -> "Database":
        """Connect to database."""
        print("Database connected")
        instance = cls()
        return instance

    async def disconnect(self) -> None:
        """Disconnect from database."""
        print("Database disconnected")

    async def query(self, query_str: str) -> list[dict[str, str]]:
        """Execute a query."""
        # Simulate database query: echo the query string in a single canned row.
        row = {"id": "1", "name": "Example", "query": query_str}
        return [row]
class AppContext(TypedDict):
    """Typed lifespan state shared with request handlers."""

    # Connected database handle, created on startup and closed on shutdown.
    db: Database
@asynccontextmanager
async def server_lifespan(_server: Server[AppContext]) -> AsyncIterator[AppContext]:
    """Manage server startup and shutdown lifecycle."""
    database = await Database.connect()
    try:
        # Handlers receive this mapping as ctx.lifespan_context.
        yield AppContext(db=database)
    finally:
        # Always close the connection, even if the server exits with an error.
        await database.disconnect()
async def handle_list_tools(
    ctx: ServerRequestContext[AppContext], params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """List available tools."""
    query_schema = {
        "type": "object",
        "properties": {"query": {"type": "string", "description": "SQL query to execute"}},
        "required": ["query"],
    }
    query_tool = types.Tool(
        name="query_db",
        description="Query the database",
        input_schema=query_schema,
    )
    return types.ListToolsResult(tools=[query_tool])
async def handle_call_tool(
    ctx: ServerRequestContext[AppContext], params: types.CallToolRequestParams
) -> types.CallToolResult:
    """Handle database query tool call."""
    if params.name != "query_db":
        raise ValueError(f"Unknown tool: {params.name}")
    arguments = params.arguments or {}
    # The database handle is provided by the server lifespan.
    db = ctx.lifespan_context["db"]
    results = await db.query(arguments["query"])
    text_block = types.TextContent(type="text", text=f"Query results: {results}")
    return types.CallToolResult(content=[text_block])
server = Server(
"example-server",
lifespan=server_lifespan,
on_list_tools=handle_list_tools,
on_call_tool=handle_call_tool,
)
async def run():
    """Run the server with lifespan management."""
    # stdio_server() wires the process's stdin/stdout as the MCP transport.
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            server.create_initialization_options(),
        )
if __name__ == "__main__":
import asyncio
asyncio.run(run())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/lowlevel/lifespan.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/lowlevel/structured_output.py | """Run from the repository root:
uv run examples/snippets/servers/lowlevel/structured_output.py
"""
import asyncio
import json
import mcp.server.stdio
from mcp import types
from mcp.server import Server, ServerRequestContext
async def handle_list_tools(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """List available tools with structured output schemas."""
    input_schema = {
        "type": "object",
        "properties": {"city": {"type": "string", "description": "City name"}},
        "required": ["city"],
    }
    # output_schema lets clients validate the tool's structured_content.
    output_schema = {
        "type": "object",
        "properties": {
            "temperature": {"type": "number", "description": "Temperature in Celsius"},
            "condition": {"type": "string", "description": "Weather condition"},
            "humidity": {"type": "number", "description": "Humidity percentage"},
            "city": {"type": "string", "description": "City name"},
        },
        "required": ["temperature", "condition", "humidity", "city"],
    }
    weather_tool = types.Tool(
        name="get_weather",
        description="Get current weather for a city",
        input_schema=input_schema,
        output_schema=output_schema,
    )
    return types.ListToolsResult(tools=[weather_tool])
async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
    """Handle tool calls with structured output."""
    # Guard clause: only get_weather is supported.
    if params.name != "get_weather":
        raise ValueError(f"Unknown tool: {params.name}")
    arguments = params.arguments or {}
    city = arguments["city"]
    # Canned demo data; only "city" reflects the input.
    weather_data = {
        "temperature": 22.5,
        "condition": "partly cloudy",
        "humidity": 65,
        "city": city,
    }
    # Return both a human-readable text rendering and the structured payload.
    text_block = types.TextContent(type="text", text=json.dumps(weather_data, indent=2))
    return types.CallToolResult(content=[text_block], structured_content=weather_data)
server = Server(
"example-server",
on_list_tools=handle_list_tools,
on_call_tool=handle_call_tool,
)
async def run():
    """Run the structured output server."""
    # stdio_server() wires the process's stdin/stdout as the MCP transport.
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            server.create_initialization_options(),
        )
if __name__ == "__main__":
asyncio.run(run())
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/lowlevel/structured_output.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/clients/display_utilities.py | """cd to the `examples/snippets` directory and run:
uv run display-utilities-client
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.shared.metadata_utils import get_display_name
# Create server parameters for stdio connection
server_params = StdioServerParameters(
command="uv", # Using uv to run the server
args=["run", "server", "mcpserver_quickstart", "stdio"],
env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
async def display_tools(session: ClientSession):
    """Display available tools with human-readable names"""
    tools_response = await session.list_tools()
    for tool in tools_response.tools:
        # get_display_name() returns the title if available, otherwise the name
        print(f"Tool: {get_display_name(tool)}")
        if tool.description:
            print(f"  {tool.description}")
async def display_resources(session: ClientSession):
    """Display available resources with human-readable names"""
    resources_response = await session.list_resources()
    for res in resources_response.resources:
        # get_display_name() prefers the title, falling back to the name.
        print(f"Resource: {get_display_name(res)} ({res.uri})")

    templates_response = await session.list_resource_templates()
    for tmpl in templates_response.resource_templates:
        print(f"Resource Template: {get_display_name(tmpl)}")
async def run():
    """Run the display utilities example."""
    # Spawns the example server over stdio using module-level server_params.
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # Initialize the connection
            await session.initialize()
            print("=== Available Tools ===")
            await display_tools(session)
            print("\n=== Available Resources ===")
            await display_resources(session)
def main():
    """Entry point for the display utilities client."""
    # Bridge the async example into a synchronous console script.
    asyncio.run(run())
if __name__ == "__main__":
main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/display_utilities.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/direct_execution.py | """Example showing direct execution of an MCP server.
This is the simplest way to run an MCP server directly.
cd to the `examples/snippets` directory and run:
uv run direct-execution-server
or
python servers/direct_execution.py
"""
from mcp.server.mcpserver import MCPServer
mcp = MCPServer("My App")


@mcp.tool()
def hello(name: str = "World") -> str:
    """Return a friendly greeting addressed to *name*."""
    greeting = f"Hello, {name}!"
    return greeting
def main():
    """Launch the server on its default (stdio) transport."""
    mcp.run()


if __name__ == "__main__":
    main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/direct_execution.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:examples/snippets/clients/completion_client.py | """cd to the `examples/snippets` directory and run:
uv run completion-client
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import PromptReference, ResourceTemplateReference
# Create server parameters for stdio connection
server_params = StdioServerParameters(
    command="uv",  # Using uv to run the server
    args=["run", "server", "completion", "stdio"],  # Server with completion support
    # Forward UV_INDEX so the child process can resolve packages from a custom index.
    env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
async def run():
    """Run the completion client example.

    Connects to the completion-capable demo server over stdio, then exercises
    argument completion for both a resource template and a prompt.
    """
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # Initialize the connection
            await session.initialize()

            # List available resource templates
            templates = await session.list_resource_templates()
            print("Available resource templates:")
            for template in templates.resource_templates:
                print(f"  - {template.uri_template}")

            # List available prompts
            prompts = await session.list_prompts()
            print("\nAvailable prompts:")
            for prompt in prompts.prompts:
                print(f"  - {prompt.name}")

            # Complete resource template arguments
            if templates.resource_templates:
                template = templates.resource_templates[0]
                print(f"\nCompleting arguments for resource template: {template.uri_template}")

                # Complete without context
                result = await session.complete(
                    ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template),
                    argument={"name": "owner", "value": "model"},
                )
                print(f"Completions for 'owner' starting with 'model': {result.completion.values}")

                # Complete with context - repo suggestions based on owner
                result = await session.complete(
                    ref=ResourceTemplateReference(type="ref/resource", uri=template.uri_template),
                    argument={"name": "repo", "value": ""},
                    context_arguments={"owner": "modelcontextprotocol"},
                )
                print(f"Completions for 'repo' with owner='modelcontextprotocol': {result.completion.values}")

            # Complete prompt arguments
            if prompts.prompts:
                prompt_name = prompts.prompts[0].name
                print(f"\nCompleting arguments for prompt: {prompt_name}")
                result = await session.complete(
                    ref=PromptReference(type="ref/prompt", name=prompt_name),
                    argument={"name": "style", "value": ""},
                )
                print(f"Completions for 'style' argument: {result.completion.values}")
def main():
    """Synchronously run the async completion demo."""
    asyncio.run(run())


if __name__ == "__main__":
    main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/completion_client.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/snippets/clients/stdio_client.py | """cd to the `examples/snippets/clients` directory and run:
uv run client
"""
import asyncio
import os
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.context import ClientRequestContext
from mcp.client.stdio import stdio_client
# Create server parameters for stdio connection
server_params = StdioServerParameters(
    command="uv",  # Using uv to run the server
    args=["run", "server", "mcpserver_quickstart", "stdio"],  # We're already in snippets dir
    # Forward UV_INDEX so the child process can resolve packages from a custom index.
    env={"UV_INDEX": os.environ.get("UV_INDEX", "")},
)
# Optional: a sampling callback the server can invoke for LLM completions
async def handle_sampling_message(
    context: ClientRequestContext, params: types.CreateMessageRequestParams
) -> types.CreateMessageResult:
    """Answer every sampling request with a canned assistant message."""
    print(f"Sampling request: {params.messages}")
    canned = types.TextContent(type="text", text="Hello, world! from model")
    return types.CreateMessageResult(
        role="assistant",
        content=canned,
        model="gpt-3.5-turbo",
        stop_reason="endTurn",
    )
async def run():
    """Exercise prompts, resources, and tools against the quickstart server."""
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write, sampling_callback=handle_sampling_message) as session:
            # Initialize the connection
            await session.initialize()

            # List available prompts
            prompts = await session.list_prompts()
            print(f"Available prompts: {[p.name for p in prompts.prompts]}")

            # Get a prompt (greet_user prompt from mcpserver_quickstart)
            if prompts.prompts:
                prompt = await session.get_prompt("greet_user", arguments={"name": "Alice", "style": "friendly"})
                print(f"Prompt result: {prompt.messages[0].content}")

            # List available resources
            resources = await session.list_resources()
            print(f"Available resources: {[r.uri for r in resources.resources]}")

            # List available tools
            tools = await session.list_tools()
            print(f"Available tools: {[t.name for t in tools.tools]}")

            # Read a resource (greeting resource from mcpserver_quickstart)
            resource_content = await session.read_resource("greeting://World")
            content_block = resource_content.contents[0]
            if isinstance(content_block, types.TextContent):
                print(f"Resource content: {content_block.text}")

            # Call a tool (add tool from mcpserver_quickstart)
            result = await session.call_tool("add", arguments={"a": 5, "b": 3})
            result_unstructured = result.content[0]
            if isinstance(result_unstructured, types.TextContent):
                print(f"Tool result: {result_unstructured.text}")
            # Tools with a typed return also expose a machine-readable form.
            result_structured = result.structured_content
            print(f"Structured tool result: {result_structured}")
def main():
    """Synchronous wrapper that runs the async client."""
    asyncio.run(run())


if __name__ == "__main__":
    main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/clients/stdio_client.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/images.py | """Example showing image handling with MCPServer."""
from PIL import Image as PILImage
from mcp.server.mcpserver import Image, MCPServer
mcp = MCPServer("Image Example")


@mcp.tool()
def create_thumbnail(image_path: str) -> Image:
    """Create a thumbnail (at most 100x100) from an image file.

    Args:
        image_path: Path to the source image on disk.

    Returns:
        An MCP Image whose payload is PNG-encoded thumbnail data.
    """
    import io  # local import keeps this snippet self-contained

    img = PILImage.open(image_path)
    img.thumbnail((100, 100))
    # Bug fix: PIL's Image.tobytes() returns raw, headerless pixel data, which
    # is NOT valid PNG. Encode through Image.save() so the bytes actually match
    # the declared format.
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    return Image(data=buffer.getvalue(), format="png")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/images.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/lifespan_example.py | """Example showing lifespan support for startup/shutdown with strong typing."""
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass
from mcp.server.mcpserver import Context, MCPServer
# Mock database class for example
class Database:
    """Stand-in database used to demonstrate lifespan wiring."""

    @classmethod
    async def connect(cls) -> "Database":
        """Open a (pretend) connection and return the handle."""
        return cls()

    async def disconnect(self) -> None:
        """Close the (pretend) connection; nothing to release here."""

    def query(self) -> str:
        """Run a canned query and return its fixed result."""
        return "Query result"
@dataclass
class AppContext:
    """Application context with typed dependencies."""

    # Live database handle opened by the lifespan on startup.
    db: Database
@asynccontextmanager
async def app_lifespan(server: MCPServer) -> AsyncIterator[AppContext]:
    """Open shared resources on startup and release them on shutdown."""
    database = await Database.connect()  # startup
    try:
        yield AppContext(db=database)
    finally:
        await database.disconnect()  # shutdown; runs even if the server errors


# Pass lifespan to server
mcp = MCPServer("My App", lifespan=app_lifespan)
# Access type-safe lifespan context in tools
@mcp.tool()
def query_db(ctx: Context[AppContext]) -> str:
    """Run a query against the database opened by the lifespan."""
    database = ctx.request_context.lifespan_context.db
    return database.query()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/lifespan_example.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/structured_output.py | """Example showing structured output with tools."""
from typing import TypedDict
from pydantic import BaseModel, Field
from mcp.server.mcpserver import MCPServer
mcp = MCPServer("Structured Output Example")


# Using Pydantic models for rich structured data
class WeatherData(BaseModel):
    """Weather information structure."""

    temperature: float = Field(description="Temperature in Celsius")
    humidity: float = Field(description="Humidity percentage")
    # Fields without Field(...) metadata still appear in the generated schema.
    condition: str
    wind_speed: float
@mcp.tool()
def get_weather(city: str) -> WeatherData:
    """Get weather for a city - returns structured data."""
    # Canned values stand in for a real weather API call.
    simulated = {
        "temperature": 22.5,
        "humidity": 45.0,
        "condition": "sunny",
        "wind_speed": 5.2,
    }
    return WeatherData(**simulated)
# Using TypedDict for simpler structures
class LocationInfo(TypedDict):
    # Coordinates plus a human-readable place name.
    latitude: float
    longitude: float
    name: str
@mcp.tool()
def get_location(address: str) -> LocationInfo:
    """Get location coordinates"""
    # Hard-coded response; a real implementation would geocode `address`.
    london: LocationInfo = {"latitude": 51.5074, "longitude": -0.1278, "name": "London, UK"}
    return london
# Using dict[str, Any] for flexible schemas
@mcp.tool()
def get_statistics(data_type: str) -> dict[str, float]:
    """Get various statistics"""
    stats = dict(mean=42.5, median=40.0, std_dev=5.2)
    return stats
# Ordinary classes with type hints work for structured output
class UserProfile:
name: str
age: int
email: str | None = None
def __init__(self, name: str, age: int, email: str | None = None):
self.name = name
self.age = age
self.email = email
@mcp.tool()
def get_user(user_id: str) -> UserProfile:
    """Get user profile - returns structured data"""
    profile = UserProfile(name="Alice", age=30, email="alice@example.com")
    return profile
# Classes WITHOUT type hints cannot be used for structured output
class UntypedConfig:
    """No annotations anywhere, so no schema can be derived from this class."""

    def __init__(self, setting1, setting2):  # type: ignore[reportMissingParameterType]
        self.setting1 = setting1
        self.setting2 = setting2
@mcp.tool()
def get_config() -> UntypedConfig:
    """This returns unstructured output - no schema generated"""
    config = UntypedConfig("value1", "value2")
    return config
# Lists and other types are wrapped automatically
@mcp.tool()
def list_cities() -> list[str]:
    """Get a list of cities"""
    cities = ["London", "Paris", "Tokyo"]
    return cities


# Returns: {"result": ["London", "Paris", "Tokyo"]}
@mcp.tool()
def get_temperature(city: str) -> float:
    """Get temperature as a simple float"""
    reading = 22.5
    return reading


# Returns: {"result": 22.5}
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/structured_output.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:tests/issues/test_1027_win_unreachable_cleanup.py | """Regression test for issue #1027: Ensure cleanup procedures run properly during shutdown
Issue #1027 reported that cleanup code after "yield" in lifespan was unreachable when
processes were terminated. This has been fixed by implementing the MCP spec-compliant
stdio shutdown sequence that closes stdin first, allowing graceful exit.
These tests verify the fix continues to work correctly across all platforms.
"""
import sys
import tempfile
import textwrap
from pathlib import Path
import anyio
import pytest
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import _create_platform_compatible_process, stdio_client
from tests.shared.test_win32_utils import escape_path_for_python
@pytest.mark.anyio
async def test_lifespan_cleanup_executed():
    """Regression test ensuring MCP server cleanup code runs during shutdown.

    This test verifies that the fix for issue #1027 works correctly by:
    1. Starting an MCP server that writes a marker file on startup
    2. Shutting down the server normally via stdio_client
    3. Verifying the cleanup code (after yield) executed and wrote its marker file

    The fix implements proper stdin closure before termination, giving servers
    time to run their cleanup handlers.
    """
    # Create marker files to track server lifecycle.
    # NamedTemporaryFile is used only to reserve unique paths; the files are
    # deleted immediately below so their creation can be observed.
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
        startup_marker = f.name
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
        cleanup_marker = f.name

    # Remove the files so we can detect when they're created
    Path(startup_marker).unlink()
    Path(cleanup_marker).unlink()

    # Create a minimal MCP server using MCPServer that tracks lifecycle
    server_code = textwrap.dedent(f"""
        import asyncio
        import sys
        from pathlib import Path
        from contextlib import asynccontextmanager
        from mcp.server.mcpserver import MCPServer

        STARTUP_MARKER = {escape_path_for_python(startup_marker)}
        CLEANUP_MARKER = {escape_path_for_python(cleanup_marker)}

        @asynccontextmanager
        async def lifespan(server):
            # Write startup marker
            Path(STARTUP_MARKER).write_text("started")
            try:
                yield {{"started": True}}
            finally:
                # This cleanup code now runs properly during shutdown
                Path(CLEANUP_MARKER).write_text("cleaned up")

        mcp = MCPServer("test-server", lifespan=lifespan)

        @mcp.tool()
        def echo(text: str) -> str:
            return text

        if __name__ == "__main__":
            mcp.run()
    """)

    # Write the server script to a temporary file
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".py") as f:
        server_script = f.name
        f.write(server_code)

    try:
        # Launch the MCP server
        params = StdioServerParameters(command=sys.executable, args=[server_script])

        async with stdio_client(params) as (read, write):
            async with ClientSession(read, write) as session:
                # Initialize the session
                result = await session.initialize()
                assert result.protocol_version in ["2024-11-05", "2025-06-18", "2025-11-25"]

                # Verify startup marker was created
                assert Path(startup_marker).exists(), "Server startup marker not created"
                assert Path(startup_marker).read_text() == "started"

                # Make a test request to ensure server is working
                response = await session.call_tool("echo", {"text": "hello"})
                assert response.content[0].type == "text"
                assert getattr(response.content[0], "text") == "hello"

            # Session will be closed when exiting the context manager

        # Give server a moment to complete cleanup (poll rather than sleep a fixed time)
        with anyio.move_on_after(5.0):
            while not Path(cleanup_marker).exists():  # pragma: lax no cover
                await anyio.sleep(0.1)

        # Verify cleanup marker was created - this works now that stdio_client
        # properly closes stdin before termination, allowing graceful shutdown
        assert Path(cleanup_marker).exists(), "Server cleanup marker not created - regression in issue #1027 fix"
        assert Path(cleanup_marker).read_text() == "cleaned up"

    finally:
        # Clean up files
        for path in [server_script, startup_marker, cleanup_marker]:
            try:  # pragma: lax no cover
                Path(path).unlink()
            except FileNotFoundError:  # pragma: lax no cover
                pass
@pytest.mark.anyio
@pytest.mark.filterwarnings("ignore::ResourceWarning" if sys.platform == "win32" else "default")
async def test_stdin_close_triggers_cleanup():
    """Regression test verifying the stdin-based graceful shutdown mechanism.

    This test ensures the core fix for issue #1027 continues to work by:
    1. Manually managing a server process
    2. Closing stdin to trigger graceful shutdown
    3. Verifying cleanup handlers run before the process exits

    This mimics the behavior now implemented in stdio_client's shutdown sequence.

    Note on Windows ResourceWarning:
    On Windows, we may see ResourceWarning about unclosed file descriptors.
    This is expected behavior because:
    - We're manually managing the process lifecycle
    - Windows file handle cleanup works differently than Unix
    - The warning doesn't indicate a real issue - cleanup still works
    We filter this warning on Windows only to avoid test noise.
    """
    # Create marker files to track server lifecycle.
    # NamedTemporaryFile is used only to reserve unique paths; deleted below.
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
        startup_marker = f.name
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
        cleanup_marker = f.name

    # Remove the files so we can detect when they're created
    Path(startup_marker).unlink()
    Path(cleanup_marker).unlink()

    # Create an MCP server that handles stdin closure gracefully
    server_code = textwrap.dedent(f"""
        import asyncio
        import sys
        from pathlib import Path
        from contextlib import asynccontextmanager
        from mcp.server.mcpserver import MCPServer

        STARTUP_MARKER = {escape_path_for_python(startup_marker)}
        CLEANUP_MARKER = {escape_path_for_python(cleanup_marker)}

        @asynccontextmanager
        async def lifespan(server):
            # Write startup marker
            Path(STARTUP_MARKER).write_text("started")
            try:
                yield {{"started": True}}
            finally:
                # This cleanup code runs when stdin closes, enabling graceful shutdown
                Path(CLEANUP_MARKER).write_text("cleaned up")

        mcp = MCPServer("test-server", lifespan=lifespan)

        @mcp.tool()
        def echo(text: str) -> str:
            return text

        if __name__ == "__main__":
            # The server should exit gracefully when stdin closes
            try:
                mcp.run()
            except Exception:
                # Server might get EOF or other errors when stdin closes
                pass
    """)

    # Write the server script to a temporary file
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".py") as f:
        server_script = f.name
        f.write(server_code)

    try:
        # This test manually manages the process to verify stdin-based shutdown
        # Start the server process
        process = await _create_platform_compatible_process(
            command=sys.executable, args=[server_script], env=None, errlog=sys.stderr, cwd=None
        )

        # Wait for server to start
        with anyio.move_on_after(10.0):
            while not Path(startup_marker).exists():
                await anyio.sleep(0.1)

        # Check if process is still running
        if hasattr(process, "returncode") and process.returncode is not None:  # pragma: lax no cover
            pytest.fail(f"Server process exited with code {process.returncode}")

        assert Path(startup_marker).exists(), "Server startup marker not created"

        # Close stdin to signal shutdown
        if process.stdin:  # pragma: no branch
            await process.stdin.aclose()

        # Wait for process to exit gracefully
        try:
            with anyio.fail_after(5.0):  # Increased from 2.0 to 5.0
                await process.wait()
        except TimeoutError:  # pragma: lax no cover
            # If it doesn't exit after stdin close, terminate it
            process.terminate()
            await process.wait()

        # Check if cleanup ran
        with anyio.move_on_after(5.0):
            while not Path(cleanup_marker).exists():  # pragma: lax no cover
                await anyio.sleep(0.1)

        # Verify the cleanup ran - stdin closure enables graceful shutdown
        assert Path(cleanup_marker).exists(), "Server cleanup marker not created - stdin-based shutdown failed"
        assert Path(cleanup_marker).read_text() == "cleaned up"

    finally:
        # Clean up files
        for path in [server_script, startup_marker, cleanup_marker]:
            try:  # pragma: lax no cover
                Path(path).unlink()
            except FileNotFoundError:  # pragma: lax no cover
                pass
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/issues/test_1027_win_unreachable_cleanup.py",
"license": "MIT License",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/shared/test_win32_utils.py | """Windows-specific test utilities."""
def escape_path_for_python(path: str) -> str:
    """Return *path* as a quoted Python literal safe to embed in generated code.

    Backslashes are rewritten to forward slashes (valid on every platform),
    then the result is repr()-quoted so no further escaping is required.
    """
    normalized = path.replace("\\", "/")
    return repr(normalized)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_win32_utils.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/issues/test_552_windows_hang.py | """Test for issue #552: stdio_client hangs on Windows."""
import sys
from textwrap import dedent
import anyio
import pytest
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
@pytest.mark.skipif(sys.platform != "win32", reason="Windows-specific test")  # pragma: no cover
@pytest.mark.anyio
async def test_windows_stdio_client_with_session():
    """Test the exact scenario from issue #552: Using ClientSession with stdio_client.

    This reproduces the original bug report where stdio_client hangs on Windows 11
    when used with ClientSession.
    """
    # Create a minimal MCP server that responds to initialization.
    # It is a raw stdin/stdout JSON-RPC responder; no SDK needed server-side.
    server_script = dedent("""
        import json
        import sys

        # Read initialization request
        line = sys.stdin.readline()

        # Send initialization response
        response = {
            "jsonrpc": "2.0",
            "id": 1,
            "result": {
                "protocolVersion": "1.0",
                "capabilities": {},
                "serverInfo": {"name": "test-server", "version": "1.0"}
            }
        }
        print(json.dumps(response))
        sys.stdout.flush()

        # Exit after a short delay
        import time
        time.sleep(0.1)
        sys.exit(0)
    """).strip()

    params = StdioServerParameters(
        command=sys.executable,
        args=["-c", server_script],
    )

    # This is the exact pattern from the bug report; fail_after guards against a hang.
    with anyio.fail_after(10):
        try:
            async with stdio_client(params) as (read, write):
                async with ClientSession(read, write) as session:
                    await session.initialize()
                    # Should exit ClientSession without hanging
                # Should exit stdio_client without hanging
        except Exception:
            # Connection errors are expected when process exits
            pass
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/issues/test_552_windows_hang.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/os/posix/utilities.py | """POSIX-specific functionality for stdio client operations."""
import logging
import os
import signal
import anyio
from anyio.abc import Process
logger = logging.getLogger(__name__)
async def terminate_posix_process_tree(process: Process, timeout_seconds: float = 2.0) -> None:
    """Terminate a process and all its children on POSIX systems.

    Uses os.killpg() for atomic process group termination, escalating from
    SIGTERM to SIGKILL when the group does not exit within the timeout.
    Falls back to terminating just the single process if group operations
    fail (e.g. permissions, or the process is not a group leader).

    Args:
        process: The process to terminate
        timeout_seconds: Timeout in seconds before force killing (default: 2.0)
    """
    pid = getattr(process, "pid", None) or getattr(getattr(process, "popen", None), "pid", None)
    if not pid:
        # No PID means there's no process to terminate - it either never started,
        # already exited, or we have an invalid process object
        return

    try:
        pgid = os.getpgid(pid)
        os.killpg(pgid, signal.SIGTERM)

        with anyio.move_on_after(timeout_seconds):
            while True:
                try:
                    # Check if process group still exists (signal 0 = check only)
                    os.killpg(pgid, 0)
                    await anyio.sleep(0.1)
                except ProcessLookupError:
                    return

        # Grace period expired: force kill the whole group.
        try:
            os.killpg(pgid, signal.SIGKILL)
        except ProcessLookupError:
            pass
    except (ProcessLookupError, PermissionError, OSError) as e:
        # Lazy %-style args: the message is only formatted if the record is emitted.
        logger.warning("Process group termination failed for PID %s: %s, falling back to simple terminate", pid, e)
        try:
            process.terminate()
            with anyio.fail_after(timeout_seconds):
                await process.wait()
        except Exception:
            logger.warning("Process termination failed for PID %s, attempting force kill", pid)
            try:
                process.kill()
            except Exception:
                logger.exception("Failed to kill process %s", pid)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/os/posix/utilities.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/snippets/servers/basic_prompt.py | from mcp.server.mcpserver import MCPServer
from mcp.server.mcpserver.prompts import base
mcp = MCPServer(name="Prompt Example")


@mcp.prompt(title="Code Review")
def review_code(code: str) -> str:
    """Build a single-message prompt asking for a review of *code*."""
    request = f"Please review this code:\n\n{code}"
    return request
@mcp.prompt(title="Debug Assistant")
def debug_error(error: str) -> list[base.Message]:
    """Seed a debugging conversation with the reported error."""
    conversation: list[base.Message] = [
        base.UserMessage("I'm seeing this error:"),
        base.UserMessage(error),
        base.AssistantMessage("I'll help debug that. What have you tried so far?"),
    ]
    return conversation
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/basic_prompt.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/basic_resource.py | from mcp.server.mcpserver import MCPServer
mcp = MCPServer(name="Resource Example")


@mcp.resource("file://documents/{name}")
def read_document(name: str) -> str:
    """Return the contents of the document called *name*."""
    # A real implementation would read the file from disk.
    return "Content of {}".format(name)
@mcp.resource("config://settings")
def get_settings() -> str:
    """Get application settings.

    Returns:
        A JSON document describing the application's settings.
    """
    return """{
    "theme": "dark",
    "language": "en",
    "debug": false
}"""
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/basic_resource.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:examples/snippets/servers/basic_tool.py | from mcp.server.mcpserver import MCPServer
mcp = MCPServer(name="Tool Example")


@mcp.tool()
def sum(a: int, b: int) -> int:
    """Add two numbers together."""
    # NOTE(review): the name shadows the builtin `sum` in this module; it is
    # kept because it is the tool's public name exposed to clients.
    return a + b
@mcp.tool()
def get_weather(city: str, unit: str = "celsius") -> str:
    """Get weather for a city."""
    # A real implementation would call a weather API here.
    unit_letter = unit[0].upper()
    return f"Weather in {city}: 22degrees{unit_letter}"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/basic_tool.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/completion.py | from mcp.server.mcpserver import MCPServer
from mcp.types import (
Completion,
CompletionArgument,
CompletionContext,
PromptReference,
ResourceTemplateReference,
)
mcp = MCPServer(name="Example")


@mcp.resource("github://repos/{owner}/{repo}")
def github_repo(owner: str, repo: str) -> str:
    """GitHub repository resource."""
    return "Repository: " + owner + "/" + repo
@mcp.prompt(description="Code review prompt")
def review_code(language: str, code: str) -> str:
    """Generate a code review."""
    header = f"Review this {language} code:"
    return header + "\n" + code
@mcp.completion()
async def handle_completion(
    ref: PromptReference | ResourceTemplateReference,
    argument: CompletionArgument,
    context: CompletionContext | None,
) -> Completion | None:
    """Provide completions for prompts and resources."""
    if isinstance(ref, PromptReference):
        # Offer programming-language suggestions for the review prompt.
        if ref.name == "review_code" and argument.name == "language":
            known = ["python", "javascript", "typescript", "go", "rust"]
            matches = [lang for lang in known if lang.startswith(argument.value)]
            return Completion(values=matches, has_more=False)

    if isinstance(ref, ResourceTemplateReference):
        # Offer repo-name suggestions, but only once the owner is known.
        if ref.uri == "github://repos/{owner}/{repo}" and argument.name == "repo":
            if context and context.arguments and context.arguments.get("owner") == "modelcontextprotocol":
                return Completion(values=["python-sdk", "typescript-sdk", "specification"], has_more=False)

    return None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/completion.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/elicitation.py | """Elicitation examples demonstrating form and URL mode elicitation.
Form mode elicitation collects structured, non-sensitive data through a schema.
URL mode elicitation directs users to external URLs for sensitive operations
like OAuth flows, credential collection, or payment processing.
"""
import uuid
from pydantic import BaseModel, Field
from mcp.server.mcpserver import Context, MCPServer
from mcp.server.session import ServerSession
from mcp.shared.exceptions import UrlElicitationRequiredError
from mcp.types import ElicitRequestURLParams
mcp = MCPServer(name="Elicitation Example")


class BookingPreferences(BaseModel):
    """Schema for collecting user preferences."""

    # Whether the user wants to try a different date at all.
    checkAlternative: bool = Field(description="Would you like to check another date?")
    alternativeDate: str = Field(
        default="2024-12-26",
        description="Alternative date (YYYY-MM-DD)",
    )
@mcp.tool()
async def book_table(date: str, time: str, party_size: int, ctx: Context[ServerSession, None]) -> str:
    """Book a table with date availability check.

    This demonstrates form mode elicitation for collecting non-sensitive user input.
    """
    if date != "2024-12-25":
        # Date available - book immediately.
        return f"[SUCCESS] Booked for {date} at {time}"

    # Date unavailable - ask user for alternative
    result = await ctx.elicit(
        message=(f"No tables available for {party_size} on {date}. Would you like to try another date?"),
        schema=BookingPreferences,
    )

    if result.action == "accept" and result.data:
        if result.data.checkAlternative:
            return f"[SUCCESS] Booked for {result.data.alternativeDate}"
        return "[CANCELLED] No booking made"
    return "[CANCELLED] Booking cancelled"
@mcp.tool()
async def secure_payment(amount: float, ctx: Context[ServerSession, None]) -> str:
    """Process a secure payment requiring URL confirmation.

    This demonstrates URL mode elicitation using ctx.elicit_url() for
    operations that require out-of-band user interaction.
    """
    elicitation_id = str(uuid.uuid4())

    result = await ctx.elicit_url(
        message=f"Please confirm payment of ${amount:.2f}",
        url=f"https://payments.example.com/confirm?amount={amount}&id={elicitation_id}",
        elicitation_id=elicitation_id,
    )

    # The confirmation itself happens out-of-band; a real app would verify the
    # payment status from its backend.
    outcomes = {
        "accept": f"Payment of ${amount:.2f} initiated - check your browser to complete",
        "decline": "Payment declined by user",
    }
    return outcomes.get(result.action, "Payment cancelled")
@mcp.tool()
async def connect_service(service_name: str, ctx: Context[ServerSession, None]) -> str:
    """Connect to a third-party service requiring OAuth authorization.

    This demonstrates the "throw error" pattern using UrlElicitationRequiredError.
    Use this pattern when the tool cannot proceed without user authorization.
    """
    elicitation_id = str(uuid.uuid4())

    # Signal that the client must complete a URL elicitation before this
    # request can proceed; the MCP framework converts this exception into a
    # -32042 error response.
    params = ElicitRequestURLParams(
        mode="url",
        message=f"Authorization required to connect to {service_name}",
        url=f"https://{service_name}.example.com/oauth/authorize?elicit={elicitation_id}",
        elicitation_id=elicitation_id,
    )
    raise UrlElicitationRequiredError([params])
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/elicitation.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/snippets/servers/notifications.py | from mcp.server.mcpserver import Context, MCPServer
from mcp.server.session import ServerSession
mcp = MCPServer(name="Notifications Example")


@mcp.tool()
async def process_data(data: str, ctx: Context[ServerSession, None]) -> str:
    """Process data with logging.

    Emits one log notification per severity level, announces a resource-list
    change to the client, and echoes the input back.
    """
    # Different log levels
    await ctx.debug(f"Debug: Processing '{data}'")
    await ctx.info("Info: Starting processing")
    await ctx.warning("Warning: This is experimental")
    await ctx.error("Error: (This is just a demo)")

    # Notify about resource changes
    await ctx.session.send_resource_list_changed()

    return f"Processed: {data}"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/notifications.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/sampling.py | from mcp.server.mcpserver import Context, MCPServer
from mcp.server.session import ServerSession
from mcp.types import SamplingMessage, TextContent
mcp = MCPServer(name="Sampling Example")
@mcp.tool()
async def generate_poem(topic: str, ctx: Context[ServerSession, None]) -> str:
    """Generate a poem using LLM sampling."""
    request_messages = [
        SamplingMessage(
            role="user",
            content=TextContent(type="text", text=f"Write a short poem about {topic}"),
        )
    ]
    result = await ctx.session.create_message(messages=request_messages, max_tokens=100)
    # Since we're not passing tools param, result.content is single content
    content = result.content
    return content.text if content.type == "text" else str(content)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/sampling.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/snippets/servers/tool_progress.py | from mcp.server.mcpserver import Context, MCPServer
from mcp.server.session import ServerSession
mcp = MCPServer(name="Progress Example")
@mcp.tool()
async def long_running_task(task_name: str, ctx: Context[ServerSession, None], steps: int = 5) -> str:
    """Execute a task with progress updates."""
    await ctx.info(f"Starting: {task_name}")
    # Iterate 1-based so the step number and reported fraction line up.
    for step in range(1, steps + 1):
        await ctx.report_progress(
            progress=step / steps,
            total=1.0,
            message=f"Step {step}/{steps}",
        )
        await ctx.debug(f"Completed step {step}")
    return f"Task '{task_name}' completed"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/snippets/servers/tool_progress.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:scripts/update_readme_snippets.py | #!/usr/bin/env python3
"""Update README.md with live code snippets from example files.
This script finds specially marked code blocks in README.md and updates them
with the actual code from the referenced files.
Usage:
python scripts/update_readme_snippets.py
python scripts/update_readme_snippets.py --check # Check mode for CI
"""
import argparse
import re
import sys
from pathlib import Path
def get_github_url(file_path: str) -> str:
    """Generate a GitHub URL for the file.

    Args:
        file_path: Path to the file relative to repo root

    Returns:
        GitHub URL pointing at the file on the ``main`` branch
    """
    return f"https://github.com/modelcontextprotocol/python-sdk/blob/main/{file_path}"
def process_snippet_block(match: re.Match[str], check_mode: bool = False) -> str:
    """Process a single snippet-source block.

    Reads the referenced source file and rebuilds the fenced code block,
    preserving the original indentation captured by the regex.

    Args:
        match: The regex match object (group 1 = indent, group 2 = file path,
            group 3 = existing block body)
        check_mode: If True, return original if no changes needed

    Returns:
        The updated block content (or the original text when the file is
        missing, unchanged, or an error occurs — errors never propagate)
    """
    full_match = match.group(0)
    indent = match.group(1)
    file_path = match.group(2)
    try:
        # Read the entire file
        file = Path(file_path)
        if not file.exists():
            # Missing file: warn and leave the block untouched.
            print(f"Warning: File not found: {file_path}")
            return full_match
        code = file.read_text().rstrip()
        github_url = get_github_url(file_path)
        # Build the replacement block; every embedded line gets the captured indent.
        indented_code = code.replace("\n", f"\n{indent}")
        replacement = f"""{indent}<!-- snippet-source {file_path} -->
{indent}```python
{indent}{indented_code}
{indent}```
{indent}_Full example: [{file_path}]({github_url})_
{indent}<!-- /snippet-source -->"""
        # In check mode, only check if code has changed
        if check_mode:
            # Extract existing code from the match
            existing_content = match.group(3)
            if existing_content is not None:
                existing_lines = existing_content.strip().split("\n")
                # Find code between ```python and ``` (first fence only)
                code_lines = []
                in_code = False
                for line in existing_lines:
                    if line.strip() == "```python":
                        in_code = True
                    elif line.strip() == "```":
                        break
                    elif in_code:
                        code_lines.append(line)
                existing_code = "\n".join(code_lines).strip()
                # Compare with the indented version we would generate
                expected_code = code.replace("\n", f"\n{indent}").strip()
                if existing_code == expected_code:
                    return full_match
        return replacement
    except Exception as e:
        # Best-effort tool: report and keep the original block on any failure.
        print(f"Error processing {file_path}: {e}")
        return full_match
def update_readme_snippets(readme_path: Path = Path("README.md"), check_mode: bool = False) -> bool:
    """Update code snippets in README.md with live code from source files.

    Args:
        readme_path: Path to the README file
        check_mode: If True, only check if updates are needed without modifying

    Returns:
        True if file is up to date or was updated, False if check failed
        (missing README, or stale snippets while in check mode)
    """
    if not readme_path.exists():
        print(f"Error: README file not found: {readme_path}")
        return False
    content = readme_path.read_text()
    original_content = content
    # Pattern to match snippet-source blocks
    # Matches: <!-- snippet-source path/to/file.py -->
    #          ... any content ...
    #          <!-- /snippet-source -->
    # \1 backreference ensures the closing marker has the same indentation
    # as the opening one; DOTALL lets (.*?) span multiple lines.
    pattern = r"^(\s*)<!-- snippet-source ([^\s]+) -->\n" r"(.*?)" r"^\1<!-- /snippet-source -->"
    # Process all snippet-source blocks
    updated_content = re.sub(
        pattern, lambda m: process_snippet_block(m, check_mode), content, flags=re.MULTILINE | re.DOTALL
    )
    if check_mode:
        # CI mode: never write; fail if any snippet would change.
        if updated_content != original_content:
            print(
                f"Error: {readme_path} has outdated code snippets. "
                "Run 'python scripts/update_readme_snippets.py' to update."
            )
            return False
        else:
            print(f"✓ {readme_path} code snippets are up to date")
            return True
    else:
        # Write mode: only touch the file when something actually changed.
        if updated_content != original_content:
            readme_path.write_text(updated_content)
            print(f"✓ Updated {readme_path}")
        else:
            print(f"✓ {readme_path} already up to date")
        return True
def main():
    """Command-line entry point: parse flags and run the snippet updater."""
    arg_parser = argparse.ArgumentParser(description="Update README code snippets from source files")
    arg_parser.add_argument(
        "--check", action="store_true", help="Check mode - verify snippets are up to date without modifying"
    )
    # TODO(v2): Drop the `--readme` argument when v2 is released, and set to `README.md`.
    arg_parser.add_argument("--readme", default="README.v2.md", help="Path to README file (default: README.v2.md)")
    opts = arg_parser.parse_args()
    # Non-zero exit makes CI fail when --check finds stale snippets.
    if not update_readme_snippets(Path(opts.readme), check_mode=opts.check):
        sys.exit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "scripts/update_readme_snippets.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/client/test_output_schema_validation.py | import logging
from typing import Any
import pytest
from mcp import Client
from mcp.server import Server, ServerRequestContext
from mcp.types import (
CallToolRequestParams,
CallToolResult,
ListToolsResult,
PaginatedRequestParams,
TextContent,
Tool,
)
def _make_server(
    tools: list[Tool],
    structured_content: dict[str, Any],
) -> Server:
    """Create a low-level server that returns the given structured_content for any tool call."""

    async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        # Always advertise exactly the tool list supplied by the caller.
        return ListToolsResult(tools=tools)

    async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult:
        # Every call returns the same canned payload, regardless of tool name/args.
        return CallToolResult(
            content=[TextContent(type="text", text="result")],
            structured_content=structured_content,
        )

    return Server("test-server", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool)
@pytest.mark.anyio
async def test_tool_structured_output_client_side_validation_basemodel():
    """Test that client validates structured content against schema for BaseModel outputs"""
    output_schema = {
        "type": "object",
        "properties": {"name": {"type": "string", "title": "Name"}, "age": {"type": "integer", "title": "Age"}},
        "required": ["name", "age"],
        "title": "UserOutput",
    }
    # Server advertises the schema but returns a non-conforming payload;
    # the client must reject it rather than pass it through.
    server = _make_server(
        tools=[
            Tool(
                name="get_user",
                description="Get user data",
                input_schema={"type": "object"},
                output_schema=output_schema,
            )
        ],
        structured_content={"name": "John", "age": "invalid"},  # Invalid: age should be int
    )
    async with Client(server) as client:
        with pytest.raises(RuntimeError) as exc_info:
            await client.call_tool("get_user", {})
        assert "Invalid structured content returned by tool get_user" in str(exc_info.value)
@pytest.mark.anyio
async def test_tool_structured_output_client_side_validation_primitive():
    """Test that client validates structured content for primitive outputs"""
    # Primitive results are wrapped in a {"result": ...} object by convention.
    output_schema = {
        "type": "object",
        "properties": {"result": {"type": "integer", "title": "Result"}},
        "required": ["result"],
        "title": "calculate_Output",
    }
    server = _make_server(
        tools=[
            Tool(
                name="calculate",
                description="Calculate something",
                input_schema={"type": "object"},
                output_schema=output_schema,
            )
        ],
        structured_content={"result": "not_a_number"},  # Invalid: should be int
    )
    async with Client(server) as client:
        with pytest.raises(RuntimeError) as exc_info:
            await client.call_tool("calculate", {})
        assert "Invalid structured content returned by tool calculate" in str(exc_info.value)
@pytest.mark.anyio
async def test_tool_structured_output_client_side_validation_dict_typed():
    """Test that client validates dict[str, T] structured content"""
    # additionalProperties constrains every value in the mapping to be an integer.
    output_schema = {"type": "object", "additionalProperties": {"type": "integer"}, "title": "get_scores_Output"}
    server = _make_server(
        tools=[
            Tool(
                name="get_scores",
                description="Get scores",
                input_schema={"type": "object"},
                output_schema=output_schema,
            )
        ],
        structured_content={"alice": "100", "bob": "85"},  # Invalid: values should be int
    )
    async with Client(server) as client:
        with pytest.raises(RuntimeError) as exc_info:
            await client.call_tool("get_scores", {})
        assert "Invalid structured content returned by tool get_scores" in str(exc_info.value)
@pytest.mark.anyio
async def test_tool_structured_output_client_side_validation_missing_required():
    """Test that client validates missing required fields"""
    output_schema = {
        "type": "object",
        "properties": {"name": {"type": "string"}, "age": {"type": "integer"}, "email": {"type": "string"}},
        "required": ["name", "age", "email"],
        "title": "PersonOutput",
    }
    # Payload omits a required property; client-side validation must fail.
    server = _make_server(
        tools=[
            Tool(
                name="get_person",
                description="Get person data",
                input_schema={"type": "object"},
                output_schema=output_schema,
            )
        ],
        structured_content={"name": "John", "age": 30},  # Missing required 'email'
    )
    async with Client(server) as client:
        with pytest.raises(RuntimeError) as exc_info:
            await client.call_tool("get_person", {})
        assert "Invalid structured content returned by tool get_person" in str(exc_info.value)
@pytest.mark.anyio
async def test_tool_not_listed_warning(caplog: pytest.LogCaptureFixture):
    """Test that client logs warning when tool is not in list_tools but has output_schema"""

    async def on_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
        # Advertise no tools at all, so any call targets an unlisted tool.
        return ListToolsResult(tools=[])

    async def on_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult:
        return CallToolResult(
            content=[TextContent(type="text", text="result")],
            structured_content={"result": 42},
        )

    server = Server("test-server", on_list_tools=on_list_tools, on_call_tool=on_call_tool)
    caplog.set_level(logging.WARNING)
    async with Client(server) as client:
        result = await client.call_tool("mystery_tool", {})
        # Unknown tool: no schema to validate against, so the call succeeds
        # with the raw structured content but a warning is logged.
        assert result.structured_content == {"result": 42}
        assert result.is_error is False
        assert "Tool mystery_tool not listed" in caplog.text
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_output_schema_validation.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/servers/simple-auth/mcp_simple_auth/simple_auth_provider.py | """Simple OAuth provider for MCP servers.
This module contains a basic OAuth implementation using hardcoded user credentials
for demonstration purposes. No external authentication provider is required.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import secrets
import time
from typing import Any
from pydantic import AnyHttpUrl
from pydantic_settings import BaseSettings, SettingsConfigDict
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import HTMLResponse, RedirectResponse, Response
from mcp.server.auth.provider import (
AccessToken,
AuthorizationCode,
AuthorizationParams,
OAuthAuthorizationServerProvider,
RefreshToken,
construct_redirect_uri,
)
from mcp.shared.auth import OAuthClientInformationFull, OAuthToken
class SimpleAuthSettings(BaseSettings):
    """Simple OAuth settings for demo purposes.

    Fields may be overridden via environment variables with the ``MCP_``
    prefix (e.g. ``MCP_DEMO_USERNAME``).
    """

    model_config = SettingsConfigDict(env_prefix="MCP_")
    # Demo user credentials — hardcoded for the demo; never do this in production
    demo_username: str = "demo_user"
    demo_password: str = "demo_password"
    # MCP OAuth scope granted to every issued token
    mcp_scope: str = "user"
class SimpleOAuthProvider(OAuthAuthorizationServerProvider[AuthorizationCode, RefreshToken, AccessToken]):
    """Simple OAuth provider for demo purposes.

    This provider handles the OAuth flow by:
    1. Providing a simple login form for demo credentials
    2. Issuing MCP tokens after successful authentication
    3. Maintaining token state for introspection

    All state is kept in process-local dictionaries; nothing survives a restart.
    """

    def __init__(self, settings: SimpleAuthSettings, auth_callback_url: str, server_url: str):
        self.settings = settings
        self.auth_callback_url = auth_callback_url
        self.server_url = server_url
        # In-memory stores keyed by client_id / code / token respectively.
        self.clients: dict[str, OAuthClientInformationFull] = {}
        self.auth_codes: dict[str, AuthorizationCode] = {}
        self.tokens: dict[str, AccessToken] = {}
        # Maps the OAuth `state` value to the pending authorization parameters.
        self.state_mapping: dict[str, dict[str, str | None]] = {}
        # Store authenticated user information
        self.user_data: dict[str, dict[str, Any]] = {}

    async def get_client(self, client_id: str) -> OAuthClientInformationFull | None:
        """Get OAuth client information."""
        return self.clients.get(client_id)

    async def register_client(self, client_info: OAuthClientInformationFull):
        """Register a new OAuth client."""
        if not client_info.client_id:
            raise ValueError("No client_id provided")
        self.clients[client_info.client_id] = client_info

    async def authorize(self, client: OAuthClientInformationFull, params: AuthorizationParams) -> str:
        """Generate an authorization URL for simple login flow."""
        state = params.state or secrets.token_hex(16)
        # Store state mapping for callback; values are stringified because the
        # mapping is declared as dict[str, str | None].
        self.state_mapping[state] = {
            "redirect_uri": str(params.redirect_uri),
            "code_challenge": params.code_challenge,
            "redirect_uri_provided_explicitly": str(params.redirect_uri_provided_explicitly),
            "client_id": client.client_id,
            "resource": params.resource,  # RFC 8707
        }
        # Build simple login URL that points to login page
        auth_url = f"{self.auth_callback_url}?state={state}&client_id={client.client_id}"
        return auth_url

    async def get_login_page(self, state: str) -> HTMLResponse:
        """Generate login page HTML for the given state.

        Raises:
            HTTPException: 400 when the state parameter is missing.
        """
        if not state:
            raise HTTPException(400, "Missing state parameter")
        # Create simple login form HTML; the hidden `state` field threads the
        # OAuth state through the form POST back to /login/callback.
        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>MCP Demo Authentication</title>
            <style>
                body {{ font-family: Arial, sans-serif; max-width: 500px; margin: 0 auto; padding: 20px; }}
                .form-group {{ margin-bottom: 15px; }}
                input {{ width: 100%; padding: 8px; margin-top: 5px; }}
                button {{ background-color: #4CAF50; color: white; padding: 10px 15px; border: none; cursor: pointer; }}
            </style>
        </head>
        <body>
            <h2>MCP Demo Authentication</h2>
            <p>This is a simplified authentication demo. Use the demo credentials below:</p>
            <p><strong>Username:</strong> demo_user<br>
            <strong>Password:</strong> demo_password</p>
            <form action="{self.server_url.rstrip("/")}/login/callback" method="post">
                <input type="hidden" name="state" value="{state}">
                <div class="form-group">
                    <label>Username:</label>
                    <input type="text" name="username" value="demo_user" required>
                </div>
                <div class="form-group">
                    <label>Password:</label>
                    <input type="password" name="password" value="demo_password" required>
                </div>
                <button type="submit">Sign In</button>
            </form>
        </body>
        </html>
        """
        return HTMLResponse(content=html_content)

    async def handle_login_callback(self, request: Request) -> Response:
        """Handle login form submission callback.

        Raises:
            HTTPException: 400 on missing or non-string form fields.
        """
        form = await request.form()
        username = form.get("username")
        password = form.get("password")
        state = form.get("state")
        if not username or not password or not state:
            raise HTTPException(400, "Missing username, password, or state parameter")
        # Ensure we have strings, not UploadFile objects
        if not isinstance(username, str) or not isinstance(password, str) or not isinstance(state, str):
            raise HTTPException(400, "Invalid parameter types")
        redirect_uri = await self.handle_simple_callback(username, password, state)
        return RedirectResponse(url=redirect_uri, status_code=302)

    async def handle_simple_callback(self, username: str, password: str, state: str) -> str:
        """Handle simple authentication callback and return redirect URI.

        Validates the demo credentials, mints a short-lived authorization code,
        and consumes the stored state entry.

        Raises:
            HTTPException: 400 for an unknown state, 401 for bad credentials.
        """
        state_data = self.state_mapping.get(state)
        if not state_data:
            raise HTTPException(400, "Invalid state parameter")
        redirect_uri = state_data["redirect_uri"]
        code_challenge = state_data["code_challenge"]
        redirect_uri_provided_explicitly = state_data["redirect_uri_provided_explicitly"] == "True"
        client_id = state_data["client_id"]
        resource = state_data.get("resource")  # RFC 8707
        # These are required values from our own state mapping
        assert redirect_uri is not None
        assert code_challenge is not None
        assert client_id is not None
        # Validate demo credentials
        if username != self.settings.demo_username or password != self.settings.demo_password:
            raise HTTPException(401, "Invalid credentials")
        # Create MCP authorization code (expires in 5 minutes)
        new_code = f"mcp_{secrets.token_hex(16)}"
        auth_code = AuthorizationCode(
            code=new_code,
            client_id=client_id,
            redirect_uri=AnyHttpUrl(redirect_uri),
            redirect_uri_provided_explicitly=redirect_uri_provided_explicitly,
            expires_at=time.time() + 300,
            scopes=[self.settings.mcp_scope],
            code_challenge=code_challenge,
            resource=resource,  # RFC 8707
        )
        self.auth_codes[new_code] = auth_code
        # Store user data
        self.user_data[username] = {
            "username": username,
            "user_id": f"user_{secrets.token_hex(8)}",
            "authenticated_at": time.time(),
        }
        # State is single-use: drop it once the code has been issued.
        del self.state_mapping[state]
        return construct_redirect_uri(redirect_uri, code=new_code, state=state)

    async def load_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: str
    ) -> AuthorizationCode | None:
        """Load an authorization code."""
        return self.auth_codes.get(authorization_code)

    async def exchange_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: AuthorizationCode
    ) -> OAuthToken:
        """Exchange authorization code for tokens.

        Raises:
            ValueError: if the code is unknown or the client has no client_id.
        """
        if authorization_code.code not in self.auth_codes:
            raise ValueError("Invalid authorization code")
        if not client.client_id:
            raise ValueError("No client_id provided")
        # Generate MCP access token (1 hour lifetime)
        mcp_token = f"mcp_{secrets.token_hex(32)}"
        # Store MCP token
        self.tokens[mcp_token] = AccessToken(
            token=mcp_token,
            client_id=client.client_id,
            scopes=authorization_code.scopes,
            expires_at=int(time.time()) + 3600,
            resource=authorization_code.resource,  # RFC 8707
        )
        # Store user data mapping for this token
        self.user_data[mcp_token] = {
            "username": self.settings.demo_username,
            "user_id": f"user_{secrets.token_hex(8)}",
            "authenticated_at": time.time(),
        }
        # Authorization codes are single-use.
        del self.auth_codes[authorization_code.code]
        return OAuthToken(
            access_token=mcp_token,
            token_type="Bearer",
            expires_in=3600,
            scope=" ".join(authorization_code.scopes),
        )

    async def load_access_token(self, token: str) -> AccessToken | None:
        """Load and validate an access token.

        Expired tokens are removed from the store and treated as unknown.
        """
        access_token = self.tokens.get(token)
        if not access_token:
            return None
        # Check if expired
        if access_token.expires_at and access_token.expires_at < time.time():
            del self.tokens[token]
            return None
        return access_token

    async def load_refresh_token(self, client: OAuthClientInformationFull, refresh_token: str) -> RefreshToken | None:
        """Load a refresh token - not supported in this example."""
        return None

    async def exchange_refresh_token(
        self,
        client: OAuthClientInformationFull,
        refresh_token: RefreshToken,
        scopes: list[str],
    ) -> OAuthToken:
        """Exchange refresh token - not supported in this example."""
        raise NotImplementedError("Refresh tokens not supported")

    # TODO(Marcelo): The type hint is wrong. We need to fix, and test to check if it works.
    async def revoke_token(self, token: str, token_type_hint: str | None = None) -> None:  # type: ignore
        """Revoke a token.

        Silently ignores unknown tokens, as recommended by RFC 7009.
        """
        if token in self.tokens:
            del self.tokens[token]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-auth/mcp_simple_auth/simple_auth_provider.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/shared/auth_utils.py | """Utilities for OAuth 2.0 Resource Indicators (RFC 8707) and PKCE (RFC 7636)."""
import time
from urllib.parse import urlparse, urlsplit, urlunsplit
from pydantic import AnyUrl, HttpUrl
def resource_url_from_server_url(url: str | HttpUrl | AnyUrl) -> str:
    """Convert server URL to canonical resource URL per RFC 8707.

    RFC 8707 section 2 states that resource URIs "MUST NOT include a fragment
    component". Scheme and host are lowercased to yield the canonical form;
    path, query, and port are preserved as given.

    Args:
        url: Server URL to convert

    Returns:
        Canonical resource URL string
    """
    parts = urlsplit(str(url))
    canonical_parts = parts._replace(
        scheme=parts.scheme.lower(),
        netloc=parts.netloc.lower(),
        fragment="",
    )
    return urlunsplit(canonical_parts)
def check_resource_allowed(requested_resource: str, configured_resource: str) -> bool:
    """Check if a requested resource URL matches a configured resource URL.

    A match requires the same origin (scheme, host, and port, compared
    case-insensitively) and that the requested path sits at or below the
    configured path. This hierarchical rule lets a token issued for a parent
    resource be used for its child resources.

    Args:
        requested_resource: The resource URL being requested
        configured_resource: The resource URL that has been configured

    Returns:
        True if the requested resource matches the configured resource
    """
    requested = urlparse(requested_resource)
    configured = urlparse(configured_resource)

    # Origins must match exactly (scheme + netloc, case-insensitive).
    if requested.scheme.lower() != configured.scheme.lower():
        return False
    if requested.netloc.lower() != configured.netloc.lower():
        return False

    # Normalize both paths to end with "/" so that "/foo" equals "/foo/"
    # and "/api123/" cannot prefix-match "/api/".
    def _slashed(path: str) -> str:
        return path if path.endswith("/") else path + "/"

    return _slashed(requested.path).startswith(_slashed(configured.path))
def calculate_token_expiry(expires_in: int | str | None) -> float | None:
"""Calculate token expiry timestamp from expires_in seconds.
Args:
expires_in: Seconds until token expiration (may be string from some servers)
Returns:
Unix timestamp when token expires, or None if no expiry specified
"""
if expires_in is None:
return None # pragma: no cover
# Defensive: handle servers that return expires_in as string
return time.time() + int(expires_in)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/auth_utils.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:tests/shared/test_auth_utils.py | """Tests for OAuth 2.0 Resource Indicators utilities."""
from pydantic import HttpUrl
from mcp.shared.auth_utils import check_resource_allowed, resource_url_from_server_url
# Tests for resource_url_from_server_url function
def test_resource_url_from_server_url_removes_fragment():
    """Fragment should be removed per RFC 8707."""
    # RFC 8707 §2: resource URIs MUST NOT include a fragment component.
    assert resource_url_from_server_url("https://example.com/path#fragment") == "https://example.com/path"
    assert resource_url_from_server_url("https://example.com/#fragment") == "https://example.com/"
def test_resource_url_from_server_url_preserves_path():
    """Path should be preserved."""
    assert (
        resource_url_from_server_url("https://example.com/path/to/resource") == "https://example.com/path/to/resource"
    )
    assert resource_url_from_server_url("https://example.com/") == "https://example.com/"
    # A URL without a path keeps its empty path — no slash is added.
    assert resource_url_from_server_url("https://example.com") == "https://example.com"
def test_resource_url_from_server_url_preserves_query():
    """Query parameters should be preserved."""
    # Only the fragment is stripped; the query string is part of the resource.
    assert resource_url_from_server_url("https://example.com/path?foo=bar") == "https://example.com/path?foo=bar"
    assert resource_url_from_server_url("https://example.com/?key=value") == "https://example.com/?key=value"
def test_resource_url_from_server_url_preserves_port():
    """Non-default ports should be preserved."""
    # Canonicalization does not strip or normalize explicit ports.
    assert resource_url_from_server_url("https://example.com:8443/path") == "https://example.com:8443/path"
    assert resource_url_from_server_url("http://example.com:8080/") == "http://example.com:8080/"
def test_resource_url_from_server_url_lowercase_scheme_and_host():
    """Scheme and host should be lowercase for canonical form."""
    # Path case is preserved; only scheme and netloc are lowered.
    assert resource_url_from_server_url("HTTPS://EXAMPLE.COM/path") == "https://example.com/path"
    assert resource_url_from_server_url("Http://Example.Com:8080/") == "http://example.com:8080/"
def test_resource_url_from_server_url_handles_pydantic_urls():
    """Should handle Pydantic URL types."""
    # The helper accepts str | HttpUrl | AnyUrl and stringifies internally.
    url = HttpUrl("https://example.com/path")
    assert resource_url_from_server_url(url) == "https://example.com/path"
# Tests for check_resource_allowed function
def test_check_resource_allowed_identical_urls():
    """Identical URLs should match."""
    assert check_resource_allowed("https://example.com/path", "https://example.com/path") is True
    assert check_resource_allowed("https://example.com/", "https://example.com/") is True
    # Empty paths also count as identical.
    assert check_resource_allowed("https://example.com", "https://example.com") is True
def test_check_resource_allowed_different_schemes():
    """Different schemes should not match."""
    # http vs https is a different origin in both directions.
    assert check_resource_allowed("https://example.com/path", "http://example.com/path") is False
    assert check_resource_allowed("http://example.com/", "https://example.com/") is False
def test_check_resource_allowed_different_domains():
    """Different domains should not match."""
    # Subdomains are distinct hosts — no suffix matching on the domain.
    assert check_resource_allowed("https://example.com/path", "https://example.org/path") is False
    assert check_resource_allowed("https://sub.example.com/", "https://example.com/") is False
def test_check_resource_allowed_different_ports():
    """Different ports should not match."""
    # An explicit port is never treated as equivalent to the default port.
    assert check_resource_allowed("https://example.com:8443/path", "https://example.com/path") is False
    assert check_resource_allowed("https://example.com:8080/", "https://example.com:8443/") is False
def test_check_resource_allowed_hierarchical_matching():
    """Child paths should match parent paths."""
    # Parent resource allows child resources
    assert check_resource_allowed("https://example.com/api/v1/users", "https://example.com/api") is True
    assert check_resource_allowed("https://example.com/api/v1", "https://example.com/api") is True
    assert check_resource_allowed("https://example.com/mcp/server", "https://example.com/mcp") is True
    # Exact match
    assert check_resource_allowed("https://example.com/api", "https://example.com/api") is True
    # Parent cannot use child's token — matching is one-directional.
    assert check_resource_allowed("https://example.com/api", "https://example.com/api/v1") is False
    assert check_resource_allowed("https://example.com/", "https://example.com/api") is False
def test_check_resource_allowed_path_boundary_matching():
    """Path matching should respect boundaries."""
    # Should not match partial path segments — "/api" must not cover "/apiextra".
    assert check_resource_allowed("https://example.com/apiextra", "https://example.com/api") is False
    assert check_resource_allowed("https://example.com/api123", "https://example.com/api") is False
    # Should match with trailing slash
    assert check_resource_allowed("https://example.com/api/", "https://example.com/api") is True
    assert check_resource_allowed("https://example.com/api/v1", "https://example.com/api/") is True
def test_check_resource_allowed_trailing_slash_handling():
    """Trailing slashes should be handled correctly."""
    # "/api" and "/api/" are normalized to be equivalent in either position.
    assert check_resource_allowed("https://example.com/api/", "https://example.com/api") is True
    assert check_resource_allowed("https://example.com/api", "https://example.com/api/") is True
    assert check_resource_allowed("https://example.com/api/v1", "https://example.com/api") is True
    assert check_resource_allowed("https://example.com/api/v1", "https://example.com/api/") is True
def test_check_resource_allowed_case_insensitive_origin():
    """Origin comparison should be case-insensitive."""
    # Scheme and host compare case-insensitively; paths remain case-sensitive.
    assert check_resource_allowed("https://EXAMPLE.COM/path", "https://example.com/path") is True
    assert check_resource_allowed("HTTPS://example.com/path", "https://example.com/path") is True
    assert check_resource_allowed("https://Example.Com:8080/api", "https://example.com:8080/api") is True
def test_check_resource_allowed_empty_paths():
    """Empty paths should be handled correctly."""
    # An empty configured path acts as the root and covers every path.
    assert check_resource_allowed("https://example.com", "https://example.com") is True
    assert check_resource_allowed("https://example.com/", "https://example.com") is True
    assert check_resource_allowed("https://example.com/api", "https://example.com") is True
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_auth_utils.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/servers/simple-auth/mcp_simple_auth/auth_server.py | """Authorization Server for MCP Split Demo.
This server handles OAuth flows, client registration, and token issuance.
Can be replaced with enterprise authorization servers like Auth0, Entra ID, etc.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import asyncio
import logging
import time
import click
from pydantic import AnyHttpUrl, BaseModel
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Route
from uvicorn import Config, Server
from mcp.server.auth.routes import cors_middleware, create_auth_routes
from mcp.server.auth.settings import AuthSettings, ClientRegistrationOptions
from .simple_auth_provider import SimpleAuthSettings, SimpleOAuthProvider
logger = logging.getLogger(__name__)
class AuthServerSettings(BaseModel):
    """Settings for the Authorization Server."""

    # Server settings
    # Interface and port this auth server binds to
    host: str = "localhost"
    port: int = 9000
    # Public base URL of this server (used as the OAuth issuer)
    server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:9000")
    # Full URL of the login-form endpoint served by this process
    auth_callback_path: str = "http://localhost:9000/login/callback"
class SimpleAuthProvider(SimpleOAuthProvider):
    """Authorization Server provider with simple demo authentication.

    This provider:
    1. Issues MCP tokens after simple credential authentication
    2. Stores token state for introspection by Resource Servers

    Construction is inherited from SimpleOAuthProvider
    (auth_settings, auth_callback_path, server_url): the previous
    ``__init__`` override only delegated to ``super().__init__`` with
    identical arguments (useless-super-delegation), so it was removed.
    """
def create_authorization_server(server_settings: AuthServerSettings, auth_settings: SimpleAuthSettings) -> Starlette:
    """Create the Authorization Server application.

    Wires together:
    - the demo OAuth provider (login page, callback, token state),
    - the standard MCP auth routes from create_auth_routes,
    - a token introspection endpoint (RFC 7662) for Resource Servers.

    Args:
        server_settings: Host/port/URL configuration for this server.
        auth_settings: Demo credential settings, including the scope to issue.

    Returns:
        A Starlette ASGI application with all auth routes mounted.
    """
    oauth_provider = SimpleAuthProvider(
        auth_settings, server_settings.auth_callback_path, str(server_settings.server_url)
    )
    mcp_auth_settings = AuthSettings(
        issuer_url=server_settings.server_url,
        client_registration_options=ClientRegistrationOptions(
            enabled=True,
            valid_scopes=[auth_settings.mcp_scope],
            default_scopes=[auth_settings.mcp_scope],
        ),
        required_scopes=[auth_settings.mcp_scope],
        # Standalone AS: no separate resource server URL to advertise.
        resource_server_url=None,
    )
    # Create OAuth routes (metadata, registration, authorize, token, revocation)
    routes = create_auth_routes(
        provider=oauth_provider,
        issuer_url=mcp_auth_settings.issuer_url,
        service_documentation_url=mcp_auth_settings.service_documentation_url,
        client_registration_options=mcp_auth_settings.client_registration_options,
        revocation_options=mcp_auth_settings.revocation_options,
    )
    # Add login page route (GET)
    async def login_page_handler(request: Request) -> Response:
        """Show login form."""
        # `state` ties the login form back to the pending authorization request.
        state = request.query_params.get("state")
        if not state:
            raise HTTPException(400, "Missing state parameter")
        return await oauth_provider.get_login_page(state)
    routes.append(Route("/login", endpoint=login_page_handler, methods=["GET"]))
    # Add login callback route (POST)
    async def login_callback_handler(request: Request) -> Response:
        """Handle simple authentication callback."""
        return await oauth_provider.handle_login_callback(request)
    routes.append(Route("/login/callback", endpoint=login_callback_handler, methods=["POST"]))
    # Add token introspection endpoint (RFC 7662) for Resource Servers
    async def introspect_handler(request: Request) -> Response:
        """Token introspection endpoint for Resource Servers.

        Resource Servers call this endpoint to validate tokens without
        needing direct access to token storage.
        """
        form = await request.form()
        token = form.get("token")
        if not token or not isinstance(token, str):
            return JSONResponse({"active": False}, status_code=400)
        # Look up token in provider
        access_token = await oauth_provider.load_access_token(token)
        if not access_token:
            # Unknown/expired tokens are reported as inactive with HTTP 200.
            return JSONResponse({"active": False})
        return JSONResponse(
            {
                "active": True,
                "client_id": access_token.client_id,
                "scope": " ".join(access_token.scopes),
                "exp": access_token.expires_at,
                "iat": int(time.time()),
                "token_type": "Bearer",
                "aud": access_token.resource,  # RFC 8707 audience claim
            }
        )
    # Wrapped with CORS middleware so cross-origin callers can reach /introspect.
    routes.append(
        Route(
            "/introspect",
            endpoint=cors_middleware(introspect_handler, ["POST", "OPTIONS"]),
            methods=["POST", "OPTIONS"],
        )
    )
    return Starlette(routes=routes)
async def run_server(server_settings: AuthServerSettings, auth_settings: SimpleAuthSettings):
    """Build the Authorization Server app and serve it with uvicorn."""
    asgi_app = create_authorization_server(server_settings, auth_settings)
    uvicorn_config = Config(
        asgi_app,
        host=server_settings.host,
        port=server_settings.port,
        log_level="info",
    )
    uvicorn_server = Server(uvicorn_config)
    logger.info(f"π MCP Authorization Server running on {server_settings.server_url}")
    await uvicorn_server.serve()
@click.command()
@click.option("--port", default=9000, help="Port to listen on")
def main(port: int) -> int:
    """Run the MCP Authorization Server.

    This server handles OAuth flows and can be used by multiple Resource Servers.

    Uses simple hardcoded credentials for demo purposes.
    """
    logging.basicConfig(level=logging.INFO)
    # Load simple auth settings (defaults declared in simple_auth_provider)
    auth_settings = SimpleAuthSettings()
    # Create server settings
    host = "localhost"
    server_url = f"http://{host}:{port}"
    server_settings = AuthServerSettings(
        host=host,
        port=port,
        server_url=AnyHttpUrl(server_url),
        # Full URL of the demo login form served by this same app.
        auth_callback_path=f"{server_url}/login",
    )
    asyncio.run(run_server(server_settings, auth_settings))
    return 0
if __name__ == "__main__":
    # click parses argv itself, so no explicit arguments are passed here.
    main()  # type: ignore[call-arg]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-auth/mcp_simple_auth/auth_server.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/servers/simple-auth/mcp_simple_auth/legacy_as_server.py | """Legacy Combined Authorization Server + Resource Server for MCP.
This server implements the old spec where MCP servers could act as both AS and RS.
Used for backwards compatibility testing with the new split AS/RS architecture.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import datetime
import logging
from typing import Any, Literal
import click
from pydantic import AnyHttpUrl, BaseModel
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response
from mcp.server.auth.settings import AuthSettings, ClientRegistrationOptions
from mcp.server.mcpserver.server import MCPServer
from .simple_auth_provider import SimpleAuthSettings, SimpleOAuthProvider
logger = logging.getLogger(__name__)
class ServerSettings(BaseModel):
    """Settings for the simple auth MCP server."""

    # Server settings
    host: str = "localhost"
    port: int = 8000
    # Public base URL of this combined AS+RS server (the OAuth issuer).
    server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:8000")
    # Despite the name, this is a full URL — main() sets it to f"{server_url}/login".
    auth_callback_path: str = "http://localhost:8000/login/callback"
class LegacySimpleOAuthProvider(SimpleOAuthProvider):
    """Simple OAuth provider for legacy MCP server.

    Construction is inherited from SimpleOAuthProvider
    (auth_settings, auth_callback_path, server_url): the previous
    ``__init__`` override only delegated to ``super().__init__`` with
    identical arguments (useless-super-delegation), so it was removed.
    """
def create_simple_mcp_server(server_settings: ServerSettings, auth_settings: SimpleAuthSettings) -> MCPServer:
    """Create a simple MCPServer server with simple authentication.

    Legacy mode: this one server acts as both Authorization Server (login,
    token issuance) and Resource Server (the protected `get_time` tool).

    Args:
        server_settings: Host/port/URL configuration.
        auth_settings: Demo credential settings, including the scope to require.

    Returns:
        A configured MCPServer with auth routes and one protected tool.
    """
    oauth_provider = LegacySimpleOAuthProvider(
        auth_settings, server_settings.auth_callback_path, str(server_settings.server_url)
    )
    mcp_auth_settings = AuthSettings(
        issuer_url=server_settings.server_url,
        client_registration_options=ClientRegistrationOptions(
            enabled=True,
            valid_scopes=[auth_settings.mcp_scope],
            default_scopes=[auth_settings.mcp_scope],
        ),
        required_scopes=[auth_settings.mcp_scope],
        # No resource_server_url parameter in legacy mode
        resource_server_url=None,
    )
    app = MCPServer(
        name="Simple Auth MCP Server",
        instructions="A simple MCP server with simple credential authentication",
        auth_server_provider=oauth_provider,
        debug=True,
        auth=mcp_auth_settings,
    )
    # Store server settings for later use in run()
    app._server_settings = server_settings  # type: ignore[attr-defined]
    @app.custom_route("/login", methods=["GET"])
    async def login_page_handler(request: Request) -> Response:
        """Show login form."""
        # `state` ties the login form back to the pending authorization request.
        state = request.query_params.get("state")
        if not state:
            raise HTTPException(400, "Missing state parameter")
        return await oauth_provider.get_login_page(state)
    @app.custom_route("/login/callback", methods=["POST"])
    async def login_callback_handler(request: Request) -> Response:
        """Handle simple authentication callback."""
        return await oauth_provider.handle_login_callback(request)
    @app.tool()
    async def get_time() -> dict[str, Any]:
        """Get the current server time.

        This tool demonstrates that system information can be protected
        by OAuth authentication. User must be authenticated to access it.
        """
        now = datetime.datetime.now()
        return {
            "current_time": now.isoformat(),
            "timezone": "UTC",  # Simplified for demo
            "timestamp": now.timestamp(),
            "formatted": now.strftime("%Y-%m-%d %H:%M:%S"),
        }
    return app
@click.command()
@click.option("--port", default=8000, help="Port to listen on")
@click.option(
    "--transport",
    default="streamable-http",
    type=click.Choice(["sse", "streamable-http"]),
    help="Transport protocol to use ('sse' or 'streamable-http')",
)
def main(port: int, transport: Literal["sse", "streamable-http"]) -> int:
    """Run the simple auth MCP server."""
    logging.basicConfig(level=logging.INFO)
    # Demo credential settings (defaults declared in simple_auth_provider)
    auth_settings = SimpleAuthSettings()
    # Create server settings
    host = "localhost"
    server_url = f"http://{host}:{port}"
    server_settings = ServerSettings(
        host=host,
        port=port,
        server_url=AnyHttpUrl(server_url),
        # Full URL of the demo login form served by this same app.
        auth_callback_path=f"{server_url}/login",
    )
    mcp_server = create_simple_mcp_server(server_settings, auth_settings)
    logger.info(f"π MCP Legacy Server running on {server_url}")
    mcp_server.run(transport=transport, host=host, port=port)
    return 0
if __name__ == "__main__":
    # click parses argv itself, so no explicit arguments are passed here.
    main()  # type: ignore[call-arg]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-auth/mcp_simple_auth/legacy_as_server.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/servers/simple-auth/mcp_simple_auth/token_verifier.py | """Example token verifier implementation using OAuth 2.0 Token Introspection (RFC 7662)."""
import logging
from typing import Any
from mcp.server.auth.provider import AccessToken, TokenVerifier
from mcp.shared.auth_utils import check_resource_allowed, resource_url_from_server_url
logger = logging.getLogger(__name__)
class IntrospectionTokenVerifier(TokenVerifier):
    """Example token verifier that uses OAuth 2.0 Token Introspection (RFC 7662).

    This is a simple example implementation for demonstration purposes.

    Production implementations should consider:
    - Connection pooling and reuse
    - More sophisticated error handling
    - Rate limiting and retry logic
    - Comprehensive configuration options
    """

    def __init__(
        self,
        introspection_endpoint: str,
        server_url: str,
        validate_resource: bool = False,
    ):
        # AS endpoint that answers RFC 7662 introspection POSTs.
        self.introspection_endpoint = introspection_endpoint
        # This Resource Server's own URL; its canonical resource form is derived below.
        self.server_url = server_url
        # When True, tokens must carry an `aud` claim matching this server (RFC 8707).
        self.validate_resource = validate_resource
        self.resource_url = resource_url_from_server_url(server_url)

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify token via introspection endpoint.

        Returns an AccessToken on success, or None for any failure
        (unsafe endpoint, non-200 response, inactive token, failed
        resource validation, or a network/parse error).
        """
        # Imported lazily so httpx is only required when verification is used.
        import httpx

        # Validate URL to prevent SSRF attacks
        if not self.introspection_endpoint.startswith(("https://", "http://localhost", "http://127.0.0.1")):
            logger.warning(f"Rejecting introspection endpoint with unsafe scheme: {self.introspection_endpoint}")
            return None

        # Configure secure HTTP client
        timeout = httpx.Timeout(10.0, connect=5.0)
        limits = httpx.Limits(max_connections=10, max_keepalive_connections=5)

        async with httpx.AsyncClient(
            timeout=timeout,
            limits=limits,
            verify=True,  # Enforce SSL verification
        ) as client:
            try:
                response = await client.post(
                    self.introspection_endpoint,
                    data={"token": token},
                    headers={"Content-Type": "application/x-www-form-urlencoded"},
                )

                if response.status_code != 200:
                    logger.debug(f"Token introspection returned status {response.status_code}")
                    return None

                data = response.json()
                if not data.get("active", False):
                    return None

                # RFC 8707 resource validation (only when --oauth-strict is set)
                if self.validate_resource and not self._validate_resource(data):
                    logger.warning(f"Token resource validation failed. Expected: {self.resource_url}")
                    return None

                return AccessToken(
                    token=token,
                    client_id=data.get("client_id", "unknown"),
                    scopes=data.get("scope", "").split() if data.get("scope") else [],
                    expires_at=data.get("exp"),
                    resource=data.get("aud"),  # Include resource in token
                )
            except Exception as e:
                # Best-effort: any network/parse failure is treated as "not verified".
                logger.warning(f"Token introspection failed: {e}")
                return None

    def _validate_resource(self, token_data: dict[str, Any]) -> bool:
        """Validate token was issued for this resource server.

        Accepts the token if any entry of its `aud` claim (string or list)
        matches this server's resource URL hierarchically.
        """
        if not self.server_url or not self.resource_url:
            return False  # Fail if strict validation requested but URLs missing
        # Check 'aud' claim first (standard JWT audience)
        aud: list[str] | str | None = token_data.get("aud")
        if isinstance(aud, list):
            for audience in aud:
                if self._is_valid_resource(audience):
                    return True
            return False
        elif aud:
            return self._is_valid_resource(aud)
        # No resource binding - invalid per RFC 8707
        return False

    def _is_valid_resource(self, resource: str) -> bool:
        """Check if resource matches this server using hierarchical matching."""
        if not self.resource_url:
            return False
        return check_resource_allowed(requested_resource=self.resource_url, configured_resource=resource)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-auth/mcp_simple_auth/token_verifier.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/transport_security.py | """DNS rebinding protection for MCP server transports."""
import logging
from pydantic import BaseModel, Field
from starlette.requests import Request
from starlette.responses import Response
logger = logging.getLogger(__name__)
# TODO(Marcelo): We should flatten these settings. To be fair, I don't think we should even have this middleware.
class TransportSecuritySettings(BaseModel):
    """Settings for MCP transport security features.

    These settings help protect against DNS rebinding attacks by validating incoming request headers.

    Entries in the allow-lists may end with ``:*`` to accept any port for
    that host/origin (see TransportSecurityMiddleware).
    """

    enable_dns_rebinding_protection: bool = True
    """Enable DNS rebinding protection (recommended for production)."""

    allowed_hosts: list[str] = Field(default_factory=list)
    """List of allowed Host header values.
    Only applies when `enable_dns_rebinding_protection` is `True`.
    """

    allowed_origins: list[str] = Field(default_factory=list)
    """List of allowed Origin header values.
    Only applies when `enable_dns_rebinding_protection` is `True`.
    """
# TODO(Marcelo): This should be a proper ASGI middleware. I'm sad to see this.
class TransportSecurityMiddleware:
    """Middleware to enforce DNS rebinding protection for MCP transport endpoints.

    Host and Origin headers are checked against the configured allow-lists;
    entries ending in ``:*`` match any port for that base value.
    """

    def __init__(self, settings: TransportSecuritySettings | None = None):
        # If not specified, disable DNS rebinding protection by default for backwards compatibility
        self.settings = settings or TransportSecuritySettings(enable_dns_rebinding_protection=False)

    @staticmethod
    def _matches_allowed(value: str, allowed: list[str]) -> bool:  # pragma: no cover
        """Return True if `value` is in `allowed`, either exactly or via a
        trailing ``:*`` wildcard-port pattern (e.g. ``localhost:*``).

        Shared by Host and Origin validation, which previously duplicated
        this matching logic.
        """
        # Check exact match first
        if value in allowed:
            return True
        # Check wildcard port patterns: "base:*" matches "base:<any port>"
        for pattern in allowed:
            if pattern.endswith(":*") and value.startswith(pattern[:-2] + ":"):
                return True
        return False

    def _validate_host(self, host: str | None) -> bool:  # pragma: no cover
        """Validate the Host header against allowed values."""
        if not host:
            logger.warning("Missing Host header in request")
            return False
        if self._matches_allowed(host, self.settings.allowed_hosts):
            return True
        logger.warning(f"Invalid Host header: {host}")
        return False

    def _validate_origin(self, origin: str | None) -> bool:  # pragma: no cover
        """Validate the Origin header against allowed values."""
        # Origin can be absent for same-origin requests
        if not origin:
            return True
        if self._matches_allowed(origin, self.settings.allowed_origins):
            return True
        logger.warning(f"Invalid Origin header: {origin}")
        return False

    def _validate_content_type(self, content_type: str | None) -> bool:
        """Validate the Content-Type header for POST requests (must be JSON)."""
        return content_type is not None and content_type.lower().startswith("application/json")

    async def validate_request(self, request: Request, is_post: bool = False) -> Response | None:
        """Validate request headers for DNS rebinding protection.

        Returns None if validation passes, or an error Response if validation fails.
        """
        # Always validate Content-Type for POST requests
        if is_post:  # pragma: no branch
            content_type = request.headers.get("content-type")
            if not self._validate_content_type(content_type):
                return Response("Invalid Content-Type header", status_code=400)
        # Skip remaining validation if DNS rebinding protection is disabled
        if not self.settings.enable_dns_rebinding_protection:
            return None
        # Validate Host header  # pragma: no cover
        host = request.headers.get("host")  # pragma: no cover
        if not self._validate_host(host):  # pragma: no cover
            return Response("Invalid Host header", status_code=421)  # pragma: no cover
        # Validate Origin header  # pragma: no cover
        origin = request.headers.get("origin")  # pragma: no cover
        if not self._validate_origin(origin):  # pragma: no cover
            return Response("Invalid Origin header", status_code=403)  # pragma: no cover
        return None  # pragma: no cover
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/transport_security.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/server/test_sse_security.py | """Tests for SSE server DNS rebinding protection."""
import logging
import multiprocessing
import socket
import httpx
import pytest
import uvicorn
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount, Route
from mcp.server import Server
from mcp.server.sse import SseServerTransport
from mcp.server.transport_security import TransportSecuritySettings
from mcp.types import Tool
from tests.test_helpers import wait_for_server
logger = logging.getLogger(__name__)
SERVER_NAME = "test_sse_security_server"
@pytest.fixture
def server_port() -> int:
    """Reserve a free ephemeral port by binding to port 0."""
    with socket.socket() as sock:
        sock.bind(("127.0.0.1", 0))
        _, port = sock.getsockname()
        return port
@pytest.fixture
def server_url(server_port: int) -> str:  # pragma: no cover
    """Base URL of the test server on the reserved port."""
    return "http://127.0.0.1:" + str(server_port)
class SecurityTestServer(Server):  # pragma: no cover
    """Minimal MCP server (no tools) used only as a transport-security test target."""

    def __init__(self):
        super().__init__(SERVER_NAME)

    async def on_list_tools(self) -> list[Tool]:
        return []
def run_server_with_settings(port: int, security_settings: TransportSecuritySettings | None = None):  # pragma: no cover
    """Run the SSE server with specified security settings.

    Blocks serving on 127.0.0.1:`port`; intended to be the target of a
    child process (see start_server_process).
    """
    app = SecurityTestServer()
    sse_transport = SseServerTransport("/messages/", security_settings)
    async def handle_sse(request: Request):
        try:
            # request._send is the raw ASGI send callable needed by connect_sse.
            async with sse_transport.connect_sse(request.scope, request.receive, request._send) as streams:
                if streams:
                    await app.run(streams[0], streams[1], app.create_initialization_options())
        except ValueError as e:
            # Validation error was already handled inside connect_sse
            logger.debug(f"SSE connection failed validation: {e}")
        return Response()
    routes = [
        Route("/sse", endpoint=handle_sse),
        Mount("/messages/", app=sse_transport.handle_post_message),
    ]
    starlette_app = Starlette(routes=routes)
    uvicorn.run(starlette_app, host="127.0.0.1", port=port, log_level="error")
def start_server_process(port: int, security_settings: TransportSecuritySettings | None = None):
    """Launch the test server in a child process and block until it accepts connections."""
    proc = multiprocessing.Process(target=run_server_with_settings, args=(port, security_settings))
    proc.start()
    # Don't return until the child is actually listening.
    wait_for_server(port)
    return proc
@pytest.mark.anyio
async def test_sse_security_default_settings(server_port: int):
    """Test SSE with default security settings (protection disabled)."""
    # With no explicit settings, DNS rebinding protection defaults to off,
    # so even hostile Host/Origin headers must be accepted.
    server_proc = start_server_process(server_port)
    try:
        attack_headers = {"Host": "evil.com", "Origin": "http://evil.com"}
        async with httpx.AsyncClient(timeout=5.0) as http:
            async with http.stream(
                "GET", f"http://127.0.0.1:{server_port}/sse", headers=attack_headers
            ) as resp:
                assert resp.status_code == 200
    finally:
        server_proc.terminate()
        server_proc.join()
@pytest.mark.anyio
async def test_sse_security_invalid_host_header(server_port: int):
    """Test SSE with invalid Host header."""
    # Enable protection and allow only example.com, so the request's real host is rejected
    security_settings = TransportSecuritySettings(enable_dns_rebinding_protection=True, allowed_hosts=["example.com"])
    process = start_server_process(server_port, security_settings)
    try:
        # Test with invalid host header
        headers = {"Host": "evil.com"}
        async with httpx.AsyncClient() as client:
            response = await client.get(f"http://127.0.0.1:{server_port}/sse", headers=headers)
            # 421 Misdirected Request is what the security middleware returns
            assert response.status_code == 421
            assert response.text == "Invalid Host header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_sse_security_invalid_origin_header(server_port: int):
    """Test SSE with invalid Origin header."""
    # Configure security to allow the host but restrict origins
    security_settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True, allowed_hosts=["127.0.0.1:*"], allowed_origins=["http://localhost:*"]
    )
    process = start_server_process(server_port, security_settings)
    try:
        # Test with invalid origin header
        headers = {"Origin": "http://evil.com"}
        async with httpx.AsyncClient() as client:
            response = await client.get(f"http://127.0.0.1:{server_port}/sse", headers=headers)
            # Origin failures are reported as 403 Forbidden
            assert response.status_code == 403
            assert response.text == "Invalid Origin header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_sse_security_post_invalid_content_type(server_port: int):
    """Test POST endpoint with invalid Content-Type header."""
    # Configure security to allow the host
    security_settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True, allowed_hosts=["127.0.0.1:*"], allowed_origins=["http://127.0.0.1:*"]
    )
    process = start_server_process(server_port, security_settings)
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            # Test POST with invalid content type
            # Session id only needs to be well-formed; the content-type check runs first.
            fake_session_id = "12345678123456781234567812345678"
            response = await client.post(
                f"http://127.0.0.1:{server_port}/messages/?session_id={fake_session_id}",
                headers={"Content-Type": "text/plain"},
                content="test",
            )
            assert response.status_code == 400
            assert response.text == "Invalid Content-Type header"
            # Test POST with missing content type
            response = await client.post(
                f"http://127.0.0.1:{server_port}/messages/?session_id={fake_session_id}", content="test"
            )
            assert response.status_code == 400
            assert response.text == "Invalid Content-Type header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_sse_security_disabled(server_port: int):
    """Test SSE with security disabled."""
    # Explicitly turn protection off and confirm a hostile Host header is ignored.
    disabled_settings = TransportSecuritySettings(enable_dns_rebinding_protection=False)
    server_proc = start_server_process(server_port, disabled_settings)
    try:
        async with httpx.AsyncClient(timeout=5.0) as http:
            # Stream the SSE endpoint (a plain GET would block until timeout).
            async with http.stream(
                "GET",
                f"http://127.0.0.1:{server_port}/sse",
                headers={"Host": "evil.com"},
            ) as resp:
                assert resp.status_code == 200
    finally:
        server_proc.terminate()
        server_proc.join()
@pytest.mark.anyio
async def test_sse_security_custom_allowed_hosts(server_port: int):
    """Test SSE with custom allowed hosts."""
    settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True,
        allowed_hosts=["localhost", "127.0.0.1", "custom.host"],
        allowed_origins=["http://localhost", "http://127.0.0.1", "http://custom.host"],
    )
    process = start_server_process(server_port, settings)
    try:
        # Test with custom allowed host
        headers = {"Host": "custom.host"}
        async with httpx.AsyncClient(timeout=5.0) as client:
            # For SSE endpoints, we need to use stream to avoid timeout
            async with client.stream("GET", f"http://127.0.0.1:{server_port}/sse", headers=headers) as response:
                # Should connect successfully with custom host
                assert response.status_code == 200
        # Test with non-allowed host
        headers = {"Host": "evil.com"}
        async with httpx.AsyncClient() as client:
            response = await client.get(f"http://127.0.0.1:{server_port}/sse", headers=headers)
            # Rejected with 421 Misdirected Request by the middleware
            assert response.status_code == 421
            assert response.text == "Invalid Host header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_sse_security_wildcard_ports(server_port: int):
    """Test SSE with wildcard port patterns."""
    # "host:*" entries should match that host on ANY port
    settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True,
        allowed_hosts=["localhost:*", "127.0.0.1:*"],
        allowed_origins=["http://localhost:*", "http://127.0.0.1:*"],
    )
    process = start_server_process(server_port, settings)
    try:
        # Test with various port numbers
        for test_port in [8080, 3000, 9999]:
            headers = {"Host": f"localhost:{test_port}"}
            async with httpx.AsyncClient(timeout=5.0) as client:
                # For SSE endpoints, we need to use stream to avoid timeout
                async with client.stream("GET", f"http://127.0.0.1:{server_port}/sse", headers=headers) as response:
                    # Should connect successfully with any port
                    assert response.status_code == 200
            headers = {"Origin": f"http://localhost:{test_port}"}
            async with httpx.AsyncClient(timeout=5.0) as client:
                # For SSE endpoints, we need to use stream to avoid timeout
                async with client.stream("GET", f"http://127.0.0.1:{server_port}/sse", headers=headers) as response:
                    # Should connect successfully with any port
                    assert response.status_code == 200
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_sse_security_post_valid_content_type(server_port: int):
    """Test POST endpoint with valid Content-Type headers."""
    # Configure security to allow the host
    security_settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True, allowed_hosts=["127.0.0.1:*"], allowed_origins=["http://127.0.0.1:*"]
    )
    process = start_server_process(server_port, security_settings)
    try:
        async with httpx.AsyncClient() as client:
            # Test with various valid content types
            valid_content_types = [
                "application/json",
                "application/json; charset=utf-8",
                "application/json;charset=utf-8",
                "APPLICATION/JSON",  # Case insensitive
            ]
            for content_type in valid_content_types:
                # Use a valid UUID format (even though session won't exist)
                fake_session_id = "12345678123456781234567812345678"
                response = await client.post(
                    f"http://127.0.0.1:{server_port}/messages/?session_id={fake_session_id}",
                    headers={"Content-Type": content_type},
                    json={"test": "data"},
                )
                # Will get 404 because session doesn't exist, but that's OK
                # We're testing that it passes the content-type check
                assert response.status_code == 404
                assert response.text == "Could not find session"
    finally:
        process.terminate()
        process.join()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_sse_security.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/test_streamable_http_security.py | """Tests for StreamableHTTP server DNS rebinding protection."""
import multiprocessing
import socket
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
import httpx
import pytest
import uvicorn
from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.types import Receive, Scope, Send
from mcp.server import Server
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from mcp.server.transport_security import TransportSecuritySettings
from mcp.types import Tool
from tests.test_helpers import wait_for_server
SERVER_NAME = "test_streamable_http_security_server"
@pytest.fixture
def server_port() -> int:
    """Reserve a free ephemeral port by binding to port 0."""
    with socket.socket() as sock:
        sock.bind(("127.0.0.1", 0))
        _, port = sock.getsockname()
        return port
@pytest.fixture
def server_url(server_port: int) -> str:  # pragma: no cover
    """Base URL of the test server on the reserved port."""
    return "http://127.0.0.1:" + str(server_port)
class SecurityTestServer(Server):  # pragma: no cover
    """Minimal MCP server (no tools) used only as a transport-security test target."""

    def __init__(self):
        super().__init__(SERVER_NAME)

    async def on_list_tools(self) -> list[Tool]:
        return []
def run_server_with_settings(port: int, security_settings: TransportSecuritySettings | None = None):  # pragma: no cover
    """Run the StreamableHTTP server with specified security settings.

    Blocks serving on 127.0.0.1:`port`; intended to be the target of a
    child process (see start_server_process).
    """
    app = SecurityTestServer()
    # Create session manager with security settings
    session_manager = StreamableHTTPSessionManager(
        app=app,
        json_response=False,
        stateless=False,
        security_settings=security_settings,
    )
    # Create the ASGI handler
    async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None:
        await session_manager.handle_request(scope, receive, send)
    # Create Starlette app with lifespan
    @asynccontextmanager
    async def lifespan(app: Starlette) -> AsyncGenerator[None, None]:
        # The session manager's task group must run for the app's whole lifetime.
        async with session_manager.run():
            yield
    routes = [
        Mount("/", app=handle_streamable_http),
    ]
    starlette_app = Starlette(routes=routes, lifespan=lifespan)
    uvicorn.run(starlette_app, host="127.0.0.1", port=port, log_level="error")
def start_server_process(port: int, security_settings: TransportSecuritySettings | None = None):
    """Launch the test server in a child process and block until it accepts connections."""
    proc = multiprocessing.Process(target=run_server_with_settings, args=(port, security_settings))
    proc.start()
    # Don't return until the child is actually listening.
    wait_for_server(port)
    return proc
@pytest.mark.anyio
async def test_streamable_http_security_default_settings(server_port: int):
    """Test StreamableHTTP with default security settings (protection enabled)."""
    # NOTE(review): with security_settings=None the transport middleware defaults
    # to protection *disabled* (see transport_security.py), yet the docstring says
    # "enabled" — verify which default StreamableHTTPSessionManager applies.
    process = start_server_process(server_port)
    try:
        # Test with valid localhost headers
        async with httpx.AsyncClient(timeout=5.0) as client:
            # POST request to initialize session
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}},
                headers={
                    "Accept": "application/json, text/event-stream",
                    "Content-Type": "application/json",
                },
            )
            assert response.status_code == 200
            # A successful initialize must allocate a session
            assert "mcp-session-id" in response.headers
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_invalid_host_header(server_port: int):
    """Test StreamableHTTP with invalid Host header."""
    # Protection on with an empty allow-list: every Host value is rejected
    security_settings = TransportSecuritySettings(enable_dns_rebinding_protection=True)
    process = start_server_process(server_port, security_settings)
    try:
        # Test with invalid host header
        headers = {
            "Host": "evil.com",
            "Accept": "application/json, text/event-stream",
            "Content-Type": "application/json",
        }
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}},
                headers=headers,
            )
            # 421 Misdirected Request is what the security middleware returns
            assert response.status_code == 421
            assert response.text == "Invalid Host header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_invalid_origin_header(server_port: int):
    """Test StreamableHTTP with invalid Origin header."""
    # Host is allowed (any port) but no origins are, so the Origin check fails
    security_settings = TransportSecuritySettings(enable_dns_rebinding_protection=True, allowed_hosts=["127.0.0.1:*"])
    process = start_server_process(server_port, security_settings)
    try:
        # Test with invalid origin header
        headers = {
            "Origin": "http://evil.com",
            "Accept": "application/json, text/event-stream",
            "Content-Type": "application/json",
        }
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}},
                headers=headers,
            )
            # Origin failures are reported as 403 Forbidden
            assert response.status_code == 403
            assert response.text == "Invalid Origin header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_invalid_content_type(server_port: int):
    """Test StreamableHTTP POST with invalid Content-Type header."""
    # Content-Type is validated even with default (no) security settings
    process = start_server_process(server_port)
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            # Test POST with invalid content type
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                headers={
                    "Content-Type": "text/plain",
                    "Accept": "application/json, text/event-stream",
                },
                content="test",
            )
            assert response.status_code == 400
            assert response.text == "Invalid Content-Type header"
            # Test POST with missing content type
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                headers={"Accept": "application/json, text/event-stream"},
                content="test",
            )
            assert response.status_code == 400
            assert response.text == "Invalid Content-Type header"
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_disabled(server_port: int):
    """Test StreamableHTTP with security disabled."""
    settings = TransportSecuritySettings(enable_dns_rebinding_protection=False)
    process = start_server_process(server_port, settings)
    try:
        # With DNS-rebinding protection switched off, even a hostile Host
        # header must be accepted and the request handled normally.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}},
                headers={
                    "Host": "evil.com",
                    "Accept": "application/json, text/event-stream",
                    "Content-Type": "application/json",
                },
            )
        assert response.status_code == 200
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_custom_allowed_hosts(server_port: int):
    """Test StreamableHTTP with custom allowed hosts."""
    settings = TransportSecuritySettings(
        enable_dns_rebinding_protection=True,
        allowed_hosts=["localhost", "127.0.0.1", "custom.host"],
        allowed_origins=["http://localhost", "http://127.0.0.1", "http://custom.host"],
    )
    process = start_server_process(server_port, settings)
    try:
        # A Host value present in the custom allow-list must be accepted even
        # though it differs from the address we actually connect to.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.post(
                f"http://127.0.0.1:{server_port}/",
                json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}},
                headers={
                    "Host": "custom.host",
                    "Accept": "application/json, text/event-stream",
                    "Content-Type": "application/json",
                },
            )
        assert response.status_code == 200
    finally:
        process.terminate()
        process.join()
@pytest.mark.anyio
async def test_streamable_http_security_get_request(server_port: int):
    """Test StreamableHTTP GET request with security."""
    security_settings = TransportSecuritySettings(enable_dns_rebinding_protection=True, allowed_hosts=["127.0.0.1"])
    process = start_server_process(server_port, security_settings)
    try:
        base_url = f"http://127.0.0.1:{server_port}/"
        async with httpx.AsyncClient(timeout=5.0) as client:
            # GET with a spoofed Host header is rejected outright.
            response = await client.get(
                base_url,
                headers={"Host": "evil.com", "Accept": "text/event-stream"},
            )
            assert response.status_code == 421
            assert response.text == "Invalid Host header"
            # GET with a valid Host passes the security layer but then fails
            # session validation, because StreamableHTTP GETs require a
            # session ID. That proves security checks ran first.
            response = await client.get(
                base_url,
                headers={"Host": "127.0.0.1", "Accept": "text/event-stream"},
            )
            assert response.status_code == 400
            body = response.json()
            assert "Missing session ID" in body["error"]["message"]
    finally:
        process.terminate()
        process.join()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_streamable_http_security.py",
"license": "MIT License",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/shared/metadata_utils.py | """Utility functions for working with metadata in MCP types.
These utilities are primarily intended for client-side usage to properly display
human-readable names in user interfaces in a spec-compliant way.
"""
from mcp.types import Implementation, Prompt, Resource, ResourceTemplate, Tool
def get_display_name(obj: Tool | Resource | Prompt | ResourceTemplate | Implementation) -> str:
    """Return the human-readable display name for an MCP object.

    Client-side helper: servers may provide an optional ``title`` that should
    be preferred over the programmatic ``name`` when rendering UIs.

    Precedence:
        * tools: ``title`` > ``annotations.title`` > ``name``
        * all other objects: ``title`` > ``name``

    Example:
        ```python
        # In a client displaying available tools
        tools = await session.list_tools()
        for tool in tools.tools:
            display_name = get_display_name(tool)
            print(f"Available tool: {display_name}")
        ```

    Args:
        obj: An MCP object with a ``name`` and an optional ``title`` field.

    Returns:
        The display name to use for UI presentation.
    """
    # A top-level `title` wins for every object type when present.
    title = getattr(obj, "title", None)
    if title is not None:
        return title
    # Tools additionally fall back to the annotations-based title.
    if isinstance(obj, Tool) and obj.annotations:
        annotations_title = getattr(obj.annotations, "title", None)
        if annotations_title is not None:
            return annotations_title
    return obj.name
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/metadata_utils.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:src/mcp/server/elicitation.py | """Elicitation utilities for MCP servers."""
from __future__ import annotations
import types
from collections.abc import Sequence
from typing import Generic, Literal, TypeVar, Union, get_args, get_origin
from pydantic import BaseModel
from mcp.server.session import ServerSession
from mcp.types import RequestId
ElicitSchemaModelT = TypeVar("ElicitSchemaModelT", bound=BaseModel)
class AcceptedElicitation(BaseModel, Generic[ElicitSchemaModelT]):
    """Result when user accepts the elicitation."""
    # Literal discriminator distinguishing the variants of ElicitationResult.
    action: Literal["accept"] = "accept"
    # The user's response, parsed/validated against the requested schema model.
    data: ElicitSchemaModelT
class DeclinedElicitation(BaseModel):
    """Result when user declines the elicitation."""
    # Literal discriminator distinguishing the variants of the result unions.
    action: Literal["decline"] = "decline"
class CancelledElicitation(BaseModel):
    """Result when user cancels the elicitation."""
    # Literal discriminator distinguishing the variants of the result unions.
    action: Literal["cancel"] = "cancel"
ElicitationResult = AcceptedElicitation[ElicitSchemaModelT] | DeclinedElicitation | CancelledElicitation
class AcceptedUrlElicitation(BaseModel):
    """Result when user accepts a URL mode elicitation."""
    # Accept carries no data in URL mode: the interaction happens out-of-band
    # at the URL, so consent is the only information conveyed here.
    action: Literal["accept"] = "accept"
UrlElicitationResult = AcceptedUrlElicitation | DeclinedElicitation | CancelledElicitation
# Primitive types allowed in elicitation schemas
_ELICITATION_PRIMITIVE_TYPES = (str, int, float, bool)
def _validate_elicitation_schema(schema: type[BaseModel]) -> None:
    """Validate that a Pydantic model only contains primitive field types.

    Raises:
        TypeError: if any field is not a primitive, a sequence of strings,
            or an Optional combination of those.
    """
    for field_name, field_info in schema.model_fields.items():
        annotation = field_info.annotation
        # Unannotated / None-typed fields are tolerated; everything else must
        # be a primitive, a string sequence, or an Optional of those.
        allowed = (
            annotation is None
            or annotation is types.NoneType
            or _is_primitive_field(annotation)
            or _is_string_sequence(annotation)
        )
        if not allowed:
            raise TypeError(
                f"Elicitation schema field '{field_name}' must be a primitive type "
                f"{_ELICITATION_PRIMITIVE_TYPES}, a sequence of strings (list[str], etc.), "
                f"or Optional of these types. Nested models and complex types are not allowed."
            )
def _is_string_sequence(annotation: type) -> bool:
"""Check if annotation is a sequence of strings (list[str], Sequence[str], etc)."""
origin = get_origin(annotation)
# Check if it's a sequence-like type with str elements
if origin:
try:
if issubclass(origin, Sequence):
args = get_args(annotation)
# Should have single str type arg
return len(args) == 1 and args[0] is str
except TypeError: # pragma: no cover
# origin is not a class, so it can't be a subclass of Sequence
pass
return False
def _is_primitive_field(annotation: type) -> bool:
    """Check if a field is a primitive type allowed in elicitation schemas."""
    # Direct primitives (str, int, float, bool) are allowed as-is.
    if annotation in _ELICITATION_PRIMITIVE_TYPES:
        return True
    # Beyond that, only Union / Optional combinations are considered.
    origin = get_origin(annotation)
    if origin is not Union and origin is not types.UnionType:
        return False
    # Every union member must be None, a primitive, or a string sequence.
    for member in get_args(annotation):
        if member is types.NoneType or member in _ELICITATION_PRIMITIVE_TYPES:
            continue
        if not _is_string_sequence(member):
            return False
    return True
async def elicit_with_validation(
session: ServerSession,
message: str,
schema: type[ElicitSchemaModelT],
related_request_id: RequestId | None = None,
) -> ElicitationResult[ElicitSchemaModelT]:
"""Elicit information from the client/user with schema validation (form mode).
This method can be used to interactively ask for additional information from the
client within a tool's execution. The client might display the message to the
user and collect a response according to the provided schema. If the client
is an agent, it might decide how to handle the elicitation -- either by asking
the user or automatically generating a response.
For sensitive data like credentials or OAuth flows, use elicit_url() instead.
"""
# Validate that schema only contains primitive types and fail loudly if not
_validate_elicitation_schema(schema)
json_schema = schema.model_json_schema()
result = await session.elicit_form(
message=message,
requested_schema=json_schema,
related_request_id=related_request_id,
)
if result.action == "accept" and result.content is not None:
# Validate and parse the content using the schema
validated_data = schema.model_validate(result.content)
return AcceptedElicitation(data=validated_data)
elif result.action == "decline":
return DeclinedElicitation()
elif result.action == "cancel": # pragma: no cover
return CancelledElicitation()
else: # pragma: no cover
# This should never happen, but handle it just in case
raise ValueError(f"Unexpected elicitation action: {result.action}")
async def elicit_url(
session: ServerSession,
message: str,
url: str,
elicitation_id: str,
related_request_id: RequestId | None = None,
) -> UrlElicitationResult:
"""Elicit information from the user via out-of-band URL navigation (URL mode).
This method directs the user to an external URL where sensitive interactions can
occur without passing data through the MCP client. Use this for:
- Collecting sensitive credentials (API keys, passwords)
- OAuth authorization flows with third-party services
- Payment and subscription flows
- Any interaction where data should not pass through the LLM context
The response indicates whether the user consented to navigate to the URL.
The actual interaction happens out-of-band. When the elicitation completes,
the server should send an ElicitCompleteNotification to notify the client.
Args:
session: The server session
message: Human-readable explanation of why the interaction is needed
url: The URL the user should navigate to
elicitation_id: Unique identifier for tracking this elicitation
related_request_id: Optional ID of the request that triggered this elicitation
Returns:
UrlElicitationResult indicating accept, decline, or cancel
"""
result = await session.elicit_url(
message=message,
url=url,
elicitation_id=elicitation_id,
related_request_id=related_request_id,
)
if result.action == "accept":
return AcceptedUrlElicitation()
elif result.action == "decline":
return DeclinedElicitation()
elif result.action == "cancel":
return CancelledElicitation()
else: # pragma: no cover
# This should never happen, but handle it just in case
raise ValueError(f"Unexpected elicitation action: {result.action}")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/elicitation.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/server/test_completion_with_context.py | """Tests for completion handler with context functionality."""
import pytest
from mcp import Client
from mcp.server import Server, ServerRequestContext
from mcp.types import (
CompleteRequestParams,
CompleteResult,
Completion,
PromptReference,
ResourceTemplateReference,
)
@pytest.mark.anyio
async def test_completion_handler_receives_context():
    """Test that the completion handler receives context correctly."""
    # Collect every params object the handler sees.
    captured: list[CompleteRequestParams] = []

    async def handle_completion(ctx: ServerRequestContext, params: CompleteRequestParams) -> CompleteResult:
        captured.append(params)
        return CompleteResult(completion=Completion(values=["test-completion"], total=1, has_more=False))

    server = Server("test-server", on_completion=handle_completion)
    async with Client(server) as client:
        result = await client.complete(
            ref=ResourceTemplateReference(type="ref/resource", uri="test://resource/{param}"),
            argument={"name": "param", "value": "test"},
            context_arguments={"previous": "value"},
        )
        # The handler must have observed the context supplied by the client.
        assert captured
        received = captured[-1]
        assert received.context is not None
        assert received.context.arguments == {"previous": "value"}
        assert result.completion.values == ["test-completion"]
@pytest.mark.anyio
async def test_completion_backward_compatibility():
    """Test that completion works without context (backward compatibility)."""
    # Record the context value the handler receives for each call.
    seen_contexts: list[object] = []

    async def handle_completion(ctx: ServerRequestContext, params: CompleteRequestParams) -> CompleteResult:
        seen_contexts.append(params.context)
        return CompleteResult(completion=Completion(values=["no-context-completion"], total=1, has_more=False))

    server = Server("test-server", on_completion=handle_completion)
    async with Client(server) as client:
        result = await client.complete(
            ref=PromptReference(type="ref/prompt", name="test-prompt"),
            argument={"name": "arg", "value": "val"},
        )
        # With no context_arguments supplied, the handler must see None.
        assert seen_contexts == [None]
        assert result.completion.values == ["no-context-completion"]
@pytest.mark.anyio
async def test_dependent_completion_scenario():
    """Test a real-world scenario with dependent completions."""

    async def handle_completion(ctx: ServerRequestContext, params: CompleteRequestParams) -> CompleteResult:
        # Simulate a database/table completion scenario: table suggestions
        # depend on which database was chosen earlier.
        assert isinstance(params.ref, ResourceTemplateReference)
        assert params.ref.uri == "db://{database}/{table}"
        if params.argument.name == "database":
            return CompleteResult(
                completion=Completion(values=["users_db", "products_db", "analytics_db"], total=3, has_more=False)
            )
        assert params.argument.name == "table"
        assert params.context and params.context.arguments
        db = params.context.arguments.get("database")
        if db == "users_db":
            tables = ["users", "sessions", "permissions"]
        else:
            assert db == "products_db"
            tables = ["products", "categories", "inventory"]
        return CompleteResult(completion=Completion(values=tables, total=3, has_more=False))

    server = Server("test-server", on_completion=handle_completion)
    async with Client(server) as client:
        template_ref = ResourceTemplateReference(type="ref/resource", uri="db://{database}/{table}")
        # Step 1: complete the database argument (no context required).
        db_result = await client.complete(ref=template_ref, argument={"name": "database", "value": ""})
        assert "users_db" in db_result.completion.values
        assert "products_db" in db_result.completion.values
        # Step 2: table completion varies with the database given as context.
        for database, expected in (
            ("users_db", ["users", "sessions", "permissions"]),
            ("products_db", ["products", "categories", "inventory"]),
        ):
            table_result = await client.complete(
                ref=template_ref,
                argument={"name": "table", "value": ""},
                context_arguments={"database": database},
            )
            assert table_result.completion.values == expected
@pytest.mark.anyio
async def test_completion_error_on_missing_context():
    """Test that server can raise error when required context is missing."""

    async def handle_completion(ctx: ServerRequestContext, params: CompleteRequestParams) -> CompleteResult:
        assert isinstance(params.ref, ResourceTemplateReference)
        assert params.ref.uri == "db://{database}/{table}"
        assert params.argument.name == "table"
        # Require the `database` context argument before offering tables.
        context_args = params.context.arguments if params.context else None
        if not context_args or "database" not in context_args:
            raise ValueError("Please select a database first to see available tables")
        assert context_args.get("database") == "test_db"
        return CompleteResult(completion=Completion(values=["users", "orders", "products"], total=3, has_more=False))

    server = Server("test-server", on_completion=handle_completion)
    async with Client(server) as client:
        template_ref = ResourceTemplateReference(type="ref/resource", uri="db://{database}/{table}")
        # Without context the server-side error must propagate to the client.
        with pytest.raises(Exception) as exc_info:
            await client.complete(ref=template_ref, argument={"name": "table", "value": ""})
        assert "Please select a database first" in str(exc_info.value)
        # With the required context supplied, completion succeeds normally.
        result_with_context = await client.complete(
            ref=template_ref,
            argument={"name": "table", "value": ""},
            context_arguments={"database": "test_db"},
        )
        assert result_with_context.completion.values == ["users", "orders", "products"]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_completion_with_context.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/issues/test_malformed_input.py | # Claude Debug
"""Test for HackerOne vulnerability report #3156202 - malformed input DOS."""
import anyio
import pytest
from mcp.server.models import InitializationOptions
from mcp.server.session import ServerSession
from mcp.shared.message import SessionMessage
from mcp.types import INVALID_PARAMS, JSONRPCError, JSONRPCMessage, JSONRPCRequest, ServerCapabilities
@pytest.mark.anyio
async def test_malformed_initialize_request_does_not_crash_server():
    """Test that malformed initialize requests return proper error responses
    instead of crashing the server (HackerOne #3156202).
    """
    # Create in-memory streams for testing. `read_*` feeds the session input;
    # `write_*` captures whatever the session sends back.
    read_send_stream, read_receive_stream = anyio.create_memory_object_stream[SessionMessage | Exception](10)
    write_send_stream, write_receive_stream = anyio.create_memory_object_stream[SessionMessage](10)
    try:
        # Create a malformed initialize request (missing required params field)
        malformed_request = JSONRPCRequest(
            jsonrpc="2.0",
            id="f20fe86132ed4cd197f89a7134de5685",
            method="initialize",
            # params=None # Missing required params field
        )
        # Wrap in session message
        request_message = SessionMessage(message=malformed_request)
        # Start a server session
        async with ServerSession(
            read_stream=read_receive_stream,
            write_stream=write_send_stream,
            init_options=InitializationOptions(
                server_name="test_server",
                server_version="1.0.0",
                capabilities=ServerCapabilities(),
            ),
        ):
            # Send the malformed request
            await read_send_stream.send(request_message)
            # Give the session time to process the request
            # NOTE(review): fixed sleep assumes 0.1s suffices for the session
            # task to respond — confirm this holds on slow CI machines.
            await anyio.sleep(0.1)
            # Check that we received an error response instead of a crash
            try:
                response_message = write_receive_stream.receive_nowait()
                response = response_message.message
                # Verify it's a proper JSON-RPC error response
                assert isinstance(response, JSONRPCError)
                assert response.jsonrpc == "2.0"
                assert response.id == "f20fe86132ed4cd197f89a7134de5685"
                assert response.error.code == INVALID_PARAMS
                assert "Invalid request parameters" in response.error.message
                # Verify the session is still alive and can handle more requests
                # Send another malformed request to confirm server stability
                another_malformed_request = JSONRPCRequest(
                    jsonrpc="2.0",
                    id="test_id_2",
                    method="tools/call",
                    # params=None # Missing required params
                )
                another_request_message = SessionMessage(message=another_malformed_request)
                await read_send_stream.send(another_request_message)
                await anyio.sleep(0.1)
                # Should get another error response, not a crash
                second_response_message = write_receive_stream.receive_nowait()
                second_response = second_response_message.message
                assert isinstance(second_response, JSONRPCError)
                assert second_response.id == "test_id_2"
                assert second_response.error.code == INVALID_PARAMS
            except anyio.WouldBlock:  # pragma: no cover
                # receive_nowait found nothing: the session produced no reply.
                pytest.fail("No response received - server likely crashed")
    finally:  # pragma: lax no cover
        # Close all streams to ensure proper cleanup
        await read_send_stream.aclose()
        await write_send_stream.aclose()
        await read_receive_stream.aclose()
        await write_receive_stream.aclose()
@pytest.mark.anyio
async def test_multiple_concurrent_malformed_requests():
    """Test that multiple concurrent malformed requests don't crash the server."""
    # Create in-memory streams for testing; buffer size 100 comfortably holds
    # the 10 requests plus their responses.
    read_send_stream, read_receive_stream = anyio.create_memory_object_stream[SessionMessage | Exception](100)
    write_send_stream, write_receive_stream = anyio.create_memory_object_stream[SessionMessage](100)
    try:
        # Start a server session
        async with ServerSession(
            read_stream=read_receive_stream,
            write_stream=write_send_stream,
            init_options=InitializationOptions(
                server_name="test_server",
                server_version="1.0.0",
                capabilities=ServerCapabilities(),
            ),
        ):
            # Send multiple malformed requests concurrently
            malformed_requests: list[SessionMessage] = []
            for i in range(10):
                malformed_request = JSONRPCRequest(
                    jsonrpc="2.0",
                    id=f"malformed_{i}",
                    method="initialize",
                    # params=None # Missing required params
                )
                request_message = SessionMessage(message=malformed_request)
                malformed_requests.append(request_message)
            # Send all requests
            for request in malformed_requests:
                await read_send_stream.send(request)
            # Give time to process
            # NOTE(review): fixed 0.2s sleep assumed sufficient for all 10
            # responses — confirm on slow CI.
            await anyio.sleep(0.2)
            # Verify we get error responses for all requests
            error_responses: list[JSONRPCMessage] = []
            try:
                while True:
                    response_message = write_receive_stream.receive_nowait()
                    error_responses.append(response_message.message)
            except anyio.WouldBlock:
                pass  # No more messages
            # Should have received 10 error responses
            # (order is assumed to match send order, i.e. FIFO processing)
            assert len(error_responses) == 10
            for i, response in enumerate(error_responses):
                assert isinstance(response, JSONRPCError)
                assert response.id == f"malformed_{i}"
                assert response.error.code == INVALID_PARAMS
    finally:  # pragma: lax no cover
        # Close all streams to ensure proper cleanup
        await read_send_stream.aclose()
        await write_send_stream.aclose()
        await read_receive_stream.aclose()
        await write_receive_stream.aclose()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/issues/test_malformed_input.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/clients/simple-auth-client/mcp_simple_auth_client/main.py | #!/usr/bin/env python3
"""Simple MCP client example with OAuth authentication support.
This client connects to an MCP server using streamable HTTP transport with OAuth.
"""
from __future__ import annotations as _annotations
import asyncio
import os
import socketserver
import threading
import time
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.parse import parse_qs, urlparse
import httpx
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamable_http_client
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
from mcp.shared.message import SessionMessage
class InMemoryTokenStorage(TokenStorage):
    """Simple in-memory token storage implementation.

    Tokens and client registration info live only for the lifetime of this
    object; nothing is persisted to disk.
    """
    def __init__(self):
        # Current OAuth tokens, or None before the first successful auth.
        self._tokens: OAuthToken | None = None
        # Dynamic client registration info, or None if not yet registered.
        self._client_info: OAuthClientInformationFull | None = None
    async def get_tokens(self) -> OAuthToken | None:
        return self._tokens
    async def set_tokens(self, tokens: OAuthToken) -> None:
        self._tokens = tokens
    async def get_client_info(self) -> OAuthClientInformationFull | None:
        return self._client_info
    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        self._client_info = client_info
class CallbackHandler(BaseHTTPRequestHandler):
    """Simple HTTP handler to capture OAuth callback.

    The authorization server redirects the browser here after the user
    grants or denies access; the handler records the outcome into a shared
    dict so the main flow can pick it up.
    """
    def __init__(
        self,
        request: Any,
        client_address: tuple[str, int],
        server: socketserver.BaseServer,
        callback_data: dict[str, Any],
    ):
        """Initialize with callback data storage."""
        # Must be set before super().__init__, which handles the request
        # immediately (and may therefore call do_GET) during construction.
        self.callback_data = callback_data
        super().__init__(request, client_address, server)
    def do_GET(self):
        """Handle GET request from OAuth redirect."""
        parsed = urlparse(self.path)
        query_params = parse_qs(parsed.query)
        if "code" in query_params:
            # Success: record authorization code and (optional) state.
            self.callback_data["authorization_code"] = query_params["code"][0]
            self.callback_data["state"] = query_params.get("state", [None])[0]
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(b"""
            <html>
            <body>
                <h1>Authorization Successful!</h1>
                <p>You can close this window and return to the terminal.</p>
                <script>setTimeout(() => window.close(), 2000);</script>
            </body>
            </html>
            """)
        elif "error" in query_params:
            # The authorization server reported an error (e.g. access_denied).
            self.callback_data["error"] = query_params["error"][0]
            self.send_response(400)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(
                f"""
            <html>
            <body>
                <h1>Authorization Failed</h1>
                <p>Error: {query_params["error"][0]}</p>
                <p>You can close this window and return to the terminal.</p>
            </body>
            </html>
            """.encode()
            )
        else:
            # Any other path/query: not the OAuth redirect we expect.
            self.send_response(404)
            self.end_headers()
    def log_message(self, format: str, *args: Any):
        """Suppress default logging."""
class CallbackServer:
    """Simple server to handle OAuth callbacks.

    Runs a throwaway HTTP server on localhost in a daemon thread and exposes
    the captured authorization code / state to the main flow.
    """
    def __init__(self, port: int = 3000):
        self.port = port
        self.server = None
        self.thread = None
        # Shared mutable dict filled in by the request handler.
        self.callback_data = {"authorization_code": None, "state": None, "error": None}
    def _create_handler_with_data(self):
        """Create a handler class with access to callback data."""
        shared_data = self.callback_data
        class DataCallbackHandler(CallbackHandler):
            def __init__(
                self,
                request: BaseHTTPRequestHandler,
                client_address: tuple[str, int],
                server: socketserver.BaseServer,
            ):
                # Inject the shared dict captured from the enclosing scope.
                super().__init__(request, client_address, server, shared_data)
        return DataCallbackHandler
    def start(self):
        """Start the callback server in a background thread."""
        self.server = HTTPServer(("localhost", self.port), self._create_handler_with_data())
        self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
        self.thread.start()
        print(f"π₯οΈ Started callback server on http://localhost:{self.port}")
    def stop(self):
        """Stop the callback server."""
        if self.server:
            self.server.shutdown()
            self.server.server_close()
        if self.thread:
            self.thread.join(timeout=1)
    def wait_for_callback(self, timeout: int = 300):
        """Wait for OAuth callback with timeout."""
        deadline = time.time() + timeout
        # Poll the shared dict until the handler stores a result.
        while time.time() < deadline:
            if self.callback_data["authorization_code"]:
                return self.callback_data["authorization_code"]
            if self.callback_data["error"]:
                raise Exception(f"OAuth error: {self.callback_data['error']}")
            time.sleep(0.1)
        raise Exception("Timeout waiting for OAuth callback")
    def get_state(self):
        """Get the received state parameter."""
        return self.callback_data["state"]
class SimpleAuthClient:
"""Simple MCP client with auth support."""
def __init__(
self,
server_url: str,
transport_type: str = "streamable-http",
client_metadata_url: str | None = None,
):
self.server_url = server_url
self.transport_type = transport_type
self.client_metadata_url = client_metadata_url
self.session: ClientSession | None = None
async def connect(self):
"""Connect to the MCP server."""
print(f"π Attempting to connect to {self.server_url}...")
try:
callback_server = CallbackServer(port=3030)
callback_server.start()
async def callback_handler() -> tuple[str, str | None]:
"""Wait for OAuth callback and return auth code and state."""
print("β³ Waiting for authorization callback...")
try:
auth_code = callback_server.wait_for_callback(timeout=300)
return auth_code, callback_server.get_state()
finally:
callback_server.stop()
client_metadata_dict = {
"client_name": "Simple Auth Client",
"redirect_uris": ["http://localhost:3030/callback"],
"grant_types": ["authorization_code", "refresh_token"],
"response_types": ["code"],
}
async def _default_redirect_handler(authorization_url: str) -> None:
"""Default redirect handler that opens the URL in a browser."""
print(f"Opening browser for authorization: {authorization_url}")
webbrowser.open(authorization_url)
# Create OAuth authentication handler using the new interface
# Use client_metadata_url to enable CIMD when the server supports it
oauth_auth = OAuthClientProvider(
server_url=self.server_url.replace("/mcp", ""),
client_metadata=OAuthClientMetadata.model_validate(client_metadata_dict),
storage=InMemoryTokenStorage(),
redirect_handler=_default_redirect_handler,
callback_handler=callback_handler,
client_metadata_url=self.client_metadata_url,
)
# Create transport with auth handler based on transport type
if self.transport_type == "sse":
print("π‘ Opening SSE transport connection with auth...")
async with sse_client(
url=self.server_url,
auth=oauth_auth,
timeout=60.0,
) as (read_stream, write_stream):
await self._run_session(read_stream, write_stream)
else:
print("π‘ Opening StreamableHTTP transport connection with auth...")
async with httpx.AsyncClient(auth=oauth_auth, follow_redirects=True) as custom_client:
async with streamable_http_client(url=self.server_url, http_client=custom_client) as (
read_stream,
write_stream,
):
await self._run_session(read_stream, write_stream)
except Exception as e:
print(f"β Failed to connect: {e}")
import traceback
traceback.print_exc()
async def _run_session(
self,
read_stream: MemoryObjectReceiveStream[SessionMessage | Exception],
write_stream: MemoryObjectSendStream[SessionMessage],
):
"""Run the MCP session with the given streams."""
print("π€ Initializing MCP session...")
async with ClientSession(read_stream, write_stream) as session:
self.session = session
print("β‘ Starting session initialization...")
await session.initialize()
print("β¨ Session initialization complete!")
print(f"\nβ
Connected to MCP server at {self.server_url}")
# Run interactive loop
await self.interactive_loop()
async def list_tools(self):
"""List available tools from the server."""
if not self.session:
print("β Not connected to server")
return
try:
result = await self.session.list_tools()
if hasattr(result, "tools") and result.tools:
print("\nπ Available tools:")
for i, tool in enumerate(result.tools, 1):
print(f"{i}. {tool.name}")
if tool.description:
print(f" Description: {tool.description}")
print()
else:
print("No tools available")
except Exception as e:
print(f"β Failed to list tools: {e}")
async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None = None):
"""Call a specific tool."""
if not self.session:
print("β Not connected to server")
return
try:
result = await self.session.call_tool(tool_name, arguments or {})
print(f"\nπ§ Tool '{tool_name}' result:")
if hasattr(result, "content"):
for content in result.content:
if content.type == "text":
print(content.text)
else:
print(content)
else:
print(result)
except Exception as e:
print(f"β Failed to call tool '{tool_name}': {e}")
async def interactive_loop(self):
    """Read commands from stdin and dispatch them until the user quits."""
    print("\nπ― Interactive MCP Client")
    print("Commands:")
    print(" list - List available tools")
    print(" call <tool_name> [args] - Call a tool")
    print(" quit - Exit the client")
    print()
    while True:
        # The try spans the whole command handling so Ctrl-C during a
        # tool call exits cleanly too, not only during input().
        try:
            line = input("mcp> ").strip()
            if not line:
                continue
            if line == "quit":
                break
            if line == "list":
                await self.list_tools()
            elif line.startswith("call "):
                pieces = line.split(maxsplit=2)
                name = pieces[1] if len(pieces) > 1 else ""
                if not name:
                    print("β Please specify a tool name")
                    continue
                # Optional third piece is a JSON object of arguments.
                args: dict[str, Any] = {}
                if len(pieces) > 2:
                    import json

                    try:
                        args = json.loads(pieces[2])
                    except json.JSONDecodeError:
                        print("β Invalid arguments format (expected JSON)")
                        continue
                await self.call_tool(name, args)
            else:
                print("β Unknown command. Try 'list', 'call <tool_name>', or 'quit'")
        except KeyboardInterrupt:
            print("\n\nπ Goodbye!")
            break
        except EOFError:
            break
async def main():
    """Main entry point: build the server URL from env vars and connect.

    Environment variables:
        MCP_SERVER_PORT       local server port (default 8000)
        MCP_TRANSPORT_TYPE    "streamable-http" (default) or "sse"
        MCP_CLIENT_METADATA_URL  optional URL-based client registration
    """
    # Fix: this value is a *port*, not a URL; it was previously stored in a
    # local misleadingly named `server_url` and then overwritten below.
    server_port = os.getenv("MCP_SERVER_PORT", 8000)
    transport_type = os.getenv("MCP_TRANSPORT_TYPE", "streamable-http")
    client_metadata_url = os.getenv("MCP_CLIENT_METADATA_URL")
    # Most MCP streamable HTTP servers use /mcp as the endpoint; SSE uses /sse.
    server_url = (
        f"http://localhost:{server_port}/mcp"
        if transport_type == "streamable-http"
        else f"http://localhost:{server_port}/sse"
    )
    print("π Simple MCP Auth Client")
    print(f"Connecting to: {server_url}")
    print(f"Transport type: {transport_type}")
    if client_metadata_url:
        print(f"Client metadata URL: {client_metadata_url}")
    # Start connection flow - OAuth will be handled automatically
    client = SimpleAuthClient(server_url, transport_type, client_metadata_url)
    await client.connect()
def cli():
    """Synchronous console entry point (used by the uv script)."""
    asyncio.run(main())


if __name__ == "__main__":
    cli()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/clients/simple-auth-client/mcp_simple_auth_client/main.py",
"license": "MIT License",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/client/test_auth.py | """Tests for refactored OAuth client authentication implementation."""
import base64
import time
from unittest import mock
from urllib.parse import parse_qs, quote, unquote, urlparse
import httpx
import pytest
from inline_snapshot import Is, snapshot
from pydantic import AnyHttpUrl, AnyUrl
from mcp.client.auth import OAuthClientProvider, PKCEParameters
from mcp.client.auth.exceptions import OAuthFlowError
from mcp.client.auth.utils import (
build_oauth_authorization_server_metadata_discovery_urls,
build_protected_resource_metadata_discovery_urls,
create_client_info_from_metadata_url,
create_client_registration_request,
create_oauth_metadata_request,
extract_field_from_www_auth,
extract_resource_metadata_from_www_auth,
extract_scope_from_www_auth,
get_client_metadata_scopes,
handle_registration_response,
is_valid_client_metadata_url,
should_use_client_metadata_url,
)
from mcp.server.auth.routes import build_metadata
from mcp.server.auth.settings import ClientRegistrationOptions, RevocationOptions
from mcp.shared.auth import (
OAuthClientInformationFull,
OAuthClientMetadata,
OAuthMetadata,
OAuthToken,
ProtectedResourceMetadata,
)
class MockTokenStorage:
    """In-memory TokenStorage double: hands back whatever was last stored."""

    def __init__(self):
        # Nothing is stored until a test calls one of the setters.
        self._tokens: OAuthToken | None = None
        self._client_info: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        """Return the last-stored tokens, if any."""
        return self._tokens  # pragma: no cover

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Remember *tokens* for later retrieval."""
        self._tokens = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Return the last-stored client registration, if any."""
        return self._client_info  # pragma: no cover

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Remember *client_info* for later retrieval."""
        self._client_info = client_info
@pytest.fixture
def mock_storage():
    """Provide a fresh in-memory token storage for each test."""
    return MockTokenStorage()
@pytest.fixture
def client_metadata():
    """Client registration metadata shared by the provider fixtures."""
    callback_uri = AnyUrl("http://localhost:3030/callback")
    return OAuthClientMetadata(
        client_name="Test Client",
        client_uri=AnyHttpUrl("https://example.com"),
        redirect_uris=[callback_uri],
        scope="read write",
    )
@pytest.fixture
def valid_tokens():
    """A bearer token set that also carries a refresh token."""
    return OAuthToken(
        access_token="test_access_token",
        token_type="Bearer",
        expires_in=3600,
        refresh_token="test_refresh_token",
        scope="read write",
    )
@pytest.fixture
def oauth_provider(client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage):
    """An OAuthClientProvider wired to no-op redirect/callback handlers."""

    async def redirect_handler(url: str) -> None:
        """Mock redirect handler."""
        pass  # pragma: no cover

    async def callback_handler() -> tuple[str, str | None]:
        """Mock callback handler."""
        return "test_auth_code", "test_state"  # pragma: no cover

    return OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp",
        client_metadata=client_metadata,
        storage=mock_storage,
        redirect_handler=redirect_handler,
        callback_handler=callback_handler,
    )
@pytest.fixture
def prm_metadata_response():
    """PRM metadata response with scopes."""
    body = (
        b'{"resource": "https://api.example.com/v1/mcp", '
        b'"authorization_servers": ["https://auth.example.com"], '
        b'"scopes_supported": ["resource:read", "resource:write"]}'
    )
    return httpx.Response(200, content=body)
@pytest.fixture
def prm_metadata_without_scopes_response():
    """PRM metadata response without scopes."""
    body = (
        b'{"resource": "https://api.example.com/v1/mcp", '
        b'"authorization_servers": ["https://auth.example.com"], '
        b'"scopes_supported": null}'
    )
    return httpx.Response(200, content=body)
@pytest.fixture
def init_response_with_www_auth_scope():
    """Initial 401 whose WWW-Authenticate challenge carries a scope."""
    challenge = 'Bearer scope="special:scope from:www-authenticate"'
    probe = httpx.Request("GET", "https://api.example.com/test")
    return httpx.Response(401, headers={"WWW-Authenticate": challenge}, request=probe)
@pytest.fixture
def init_response_without_www_auth_scope():
    """Initial 401 response with no WWW-Authenticate scope at all."""
    probe = httpx.Request("GET", "https://api.example.com/test")
    return httpx.Response(401, headers={}, request=probe)
class TestPKCEParameters:
    """Tests for PKCE code-verifier / code-challenge generation."""

    def test_pkce_generation(self):
        """Generated values satisfy the RFC 7636 length and alphabet rules."""
        params = PKCEParameters.generate()
        # Verifier is max-length; challenge length sits in the legal range.
        assert len(params.code_verifier) == 128
        assert 43 <= len(params.code_challenge) <= 128
        # Verifier uses only unreserved characters.
        unreserved = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
        for ch in params.code_verifier:
            assert ch in unreserved
        # Challenge is base64url without padding.
        assert "=" not in params.code_challenge

    def test_pkce_uniqueness(self):
        """Two generations never yield the same verifier or challenge."""
        first = PKCEParameters.generate()
        second = PKCEParameters.generate()
        assert first.code_verifier != second.code_verifier
        assert first.code_challenge != second.code_challenge
class TestOAuthContext:
    """Test OAuth context functionality."""

    @pytest.mark.anyio
    async def test_oauth_provider_initialization(
        self, oauth_provider: OAuthClientProvider, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """The provider's context reflects exactly what the fixture passed in."""
        ctx = oauth_provider.context
        assert ctx is not None
        assert ctx.server_url == "https://api.example.com/v1/mcp"
        assert ctx.client_metadata == client_metadata
        assert ctx.storage == mock_storage
        assert ctx.timeout == 300.0

    def test_context_url_parsing(self, oauth_provider: OAuthClientProvider):
        """get_authorization_base_url() strips path and query down to the origin."""
        base = oauth_provider.context.get_authorization_base_url
        cases = [
            ("https://api.example.com/v1/mcp", "https://api.example.com"),  # with path
            ("https://api.example.com", "https://api.example.com"),  # no path
            ("https://api.example.com:8080/path/to/mcp", "https://api.example.com:8080"),  # with port
            ("https://api.example.com/path?param=value", "https://api.example.com"),  # query params
        ]
        for url, expected in cases:
            assert base(url) == expected

    @pytest.mark.anyio
    async def test_token_validity_checking(self, oauth_provider: OAuthClientProvider, valid_tokens: OAuthToken):
        """is_token_valid() / can_refresh_token() across the token lifecycle."""
        ctx = oauth_provider.context

        # Without tokens nothing is valid or refreshable.
        assert not ctx.is_token_valid()
        assert not ctx.can_refresh_token()

        # Install fresh tokens plus client info: both checks pass.
        ctx.current_tokens = valid_tokens
        ctx.token_expiry_time = time.time() + 1800  # expires in 30 minutes
        ctx.client_info = OAuthClientInformationFull(
            client_id="test_client_id",
            client_secret="test_client_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )
        assert ctx.is_token_valid()
        assert ctx.can_refresh_token()

        # An expired access token is invalid but still refreshable.
        ctx.token_expiry_time = time.time() - 100
        assert not ctx.is_token_valid()
        assert ctx.can_refresh_token()

        # Losing the refresh token kills refreshability...
        ctx.current_tokens.refresh_token = None
        assert not ctx.can_refresh_token()

        # ...and so does losing the client info.
        ctx.current_tokens.refresh_token = "test_refresh_token"
        ctx.client_info = None
        assert not ctx.can_refresh_token()

    def test_clear_tokens(self, oauth_provider: OAuthClientProvider, valid_tokens: OAuthToken):
        """clear_tokens() wipes both the tokens and their expiry timestamp."""
        ctx = oauth_provider.context
        ctx.current_tokens = valid_tokens
        ctx.token_expiry_time = time.time() + 1800
        ctx.clear_tokens()
        assert ctx.current_tokens is None
        assert ctx.token_expiry_time is None
class TestOAuthFlow:
    """Test OAuth flow methods."""

    @pytest.mark.anyio
    async def test_build_protected_resource_discovery_urls(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test protected resource metadata discovery URL building with fallback."""

        async def redirect_handler(url: str) -> None:
            pass  # pragma: no cover

        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state"  # pragma: no cover

        provider = OAuthClientProvider(
            server_url="https://api.example.com",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )

        # Test without WWW-Authenticate (fallback): only the well-known URL
        # derived from the server URL is offered.
        init_response = httpx.Response(
            status_code=401, headers={}, request=httpx.Request("GET", "https://request-api.example.com")
        )
        urls = build_protected_resource_metadata_discovery_urls(
            extract_resource_metadata_from_www_auth(init_response), provider.context.server_url
        )
        assert len(urls) == 1
        assert urls[0] == "https://api.example.com/.well-known/oauth-protected-resource"

        # With a WWW-Authenticate resource_metadata hint, that URL is tried
        # first and the derived well-known URL remains as the fallback.
        init_response.headers["WWW-Authenticate"] = (
            'Bearer resource_metadata="https://prm.example.com/.well-known/oauth-protected-resource/path"'
        )
        urls = build_protected_resource_metadata_discovery_urls(
            extract_resource_metadata_from_www_auth(init_response), provider.context.server_url
        )
        assert len(urls) == 2
        assert urls[0] == "https://prm.example.com/.well-known/oauth-protected-resource/path"
        assert urls[1] == "https://api.example.com/.well-known/oauth-protected-resource"

    # Fix: the original decorated this *synchronous* test with
    # @pytest.mark.anyio; the anyio plugin only acts on coroutine tests, so
    # the marker was dead weight and has been removed.
    def test_create_oauth_metadata_request(self, oauth_provider: OAuthClientProvider):
        """Test OAuth metadata discovery request building."""
        request = create_oauth_metadata_request("https://example.com")
        # Ensure correct method and headers, and that the URL is unmodified
        assert request.method == "GET"
        assert str(request.url) == "https://example.com"
        assert "mcp-protocol-version" in request.headers
class TestOAuthFallback:
    """Test OAuth discovery fallback behavior for legacy (act as AS not RS) servers.

    NOTE: several tests below drive ``async_auth_flow`` as a raw async
    generator via ``__anext__``/``asend``; the statement order mirrors the
    exact request/response choreography and must not be rearranged.
    """

    @pytest.mark.anyio
    async def test_oauth_discovery_legacy_fallback_when_no_prm(self):
        """Test that when PRM discovery fails, only root OAuth URL is tried (March 2025 spec)."""
        # When auth_server_url is None (PRM failed), we use server_url and only try root
        discovery_urls = build_oauth_authorization_server_metadata_discovery_urls(None, "https://mcp.linear.app/sse")
        # Should only try the root URL (legacy behavior)
        assert discovery_urls == [
            "https://mcp.linear.app/.well-known/oauth-authorization-server",
        ]

    @pytest.mark.anyio
    async def test_oauth_discovery_path_aware_when_auth_server_has_path(self):
        """Test that when auth server URL has a path, only path-based URLs are tried."""
        discovery_urls = build_oauth_authorization_server_metadata_discovery_urls(
            "https://auth.example.com/tenant1", "https://api.example.com/mcp"
        )
        # Should try path-based URLs only (no root URLs)
        assert discovery_urls == [
            "https://auth.example.com/.well-known/oauth-authorization-server/tenant1",
            "https://auth.example.com/.well-known/openid-configuration/tenant1",
            "https://auth.example.com/tenant1/.well-known/openid-configuration",
        ]

    @pytest.mark.anyio
    async def test_oauth_discovery_root_when_auth_server_has_no_path(self):
        """Test that when auth server URL has no path, only root URLs are tried."""
        discovery_urls = build_oauth_authorization_server_metadata_discovery_urls(
            "https://auth.example.com", "https://api.example.com/mcp"
        )
        # Should try root URLs only
        assert discovery_urls == [
            "https://auth.example.com/.well-known/oauth-authorization-server",
            "https://auth.example.com/.well-known/openid-configuration",
        ]

    @pytest.mark.anyio
    async def test_oauth_discovery_root_when_auth_server_has_only_slash(self):
        """Test that when auth server URL has only trailing slash, treated as root."""
        discovery_urls = build_oauth_authorization_server_metadata_discovery_urls(
            "https://auth.example.com/", "https://api.example.com/mcp"
        )
        # Should try root URLs only
        assert discovery_urls == [
            "https://auth.example.com/.well-known/oauth-authorization-server",
            "https://auth.example.com/.well-known/openid-configuration",
        ]

    @pytest.mark.anyio
    async def test_oauth_discovery_fallback_order(self, oauth_provider: OAuthClientProvider):
        """Test fallback URL construction order when auth server URL has a path."""
        # Simulate PRM discovery returning an auth server URL with a path
        oauth_provider.context.auth_server_url = oauth_provider.context.server_url
        discovery_urls = build_oauth_authorization_server_metadata_discovery_urls(
            oauth_provider.context.auth_server_url, oauth_provider.context.server_url
        )
        assert discovery_urls == [
            "https://api.example.com/.well-known/oauth-authorization-server/v1/mcp",
            "https://api.example.com/.well-known/openid-configuration/v1/mcp",
            "https://api.example.com/v1/mcp/.well-known/openid-configuration",
        ]

    @pytest.mark.anyio
    async def test_oauth_discovery_fallback_conditions(self, oauth_provider: OAuthClientProvider):
        """Test the conditions during which an AS metadata discovery fallback will be attempted."""
        # Ensure no tokens are stored
        oauth_provider.context.current_tokens = None
        oauth_provider.context.token_expiry_time = None
        oauth_provider._initialized = True
        # Mock client info to skip DCR
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id="existing_client",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )
        # Create a test request
        test_request = httpx.Request("GET", "https://api.example.com/v1/mcp")
        # Mock the auth flow
        auth_flow = oauth_provider.async_auth_flow(test_request)
        # First request should be the original request without auth header
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers
        # Send a 401 response to trigger the OAuth flow
        response = httpx.Response(
            401,
            headers={
                "WWW-Authenticate": 'Bearer resource_metadata="https://api.example.com/.well-known/oauth-protected-resource"'
            },
            request=test_request,
        )
        # Next request should be to discover protected resource metadata
        discovery_request = await auth_flow.asend(response)
        assert str(discovery_request.url) == "https://api.example.com/.well-known/oauth-protected-resource"
        assert discovery_request.method == "GET"
        # Send a successful discovery response with minimal protected resource metadata
        # Note: auth server URL has a path (/v1/mcp), so only path-based URLs will be tried
        discovery_response = httpx.Response(
            200,
            content=b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com/v1/mcp"]}',
            request=discovery_request,
        )
        # Next request should be to discover OAuth metadata at path-aware OAuth URL
        oauth_metadata_request_1 = await auth_flow.asend(discovery_response)
        assert (
            str(oauth_metadata_request_1.url)
            == "https://auth.example.com/.well-known/oauth-authorization-server/v1/mcp"
        )
        assert oauth_metadata_request_1.method == "GET"
        # Send a 404 response
        oauth_metadata_response_1 = httpx.Response(
            404,
            content=b"Not Found",
            request=oauth_metadata_request_1,
        )
        # Next request should be path-aware OIDC URL (not root URL since auth server has path)
        oauth_metadata_request_2 = await auth_flow.asend(oauth_metadata_response_1)
        assert str(oauth_metadata_request_2.url) == "https://auth.example.com/.well-known/openid-configuration/v1/mcp"
        assert oauth_metadata_request_2.method == "GET"
        # Send a 400 response
        oauth_metadata_response_2 = httpx.Response(
            400,
            content=b"Bad Request",
            request=oauth_metadata_request_2,
        )
        # Next request should be OIDC path-appended URL
        oauth_metadata_request_3 = await auth_flow.asend(oauth_metadata_response_2)
        assert str(oauth_metadata_request_3.url) == "https://auth.example.com/v1/mcp/.well-known/openid-configuration"
        assert oauth_metadata_request_3.method == "GET"
        # Send a 500 response
        oauth_metadata_response_3 = httpx.Response(
            500,
            content=b"Internal Server Error",
            request=oauth_metadata_request_3,
        )
        # Mock the authorization process to minimize unnecessary state in this test
        oauth_provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )
        # All path-based URLs failed, flow continues with default endpoints
        # Next request should be token exchange using MCP server base URL (fallback when OAuth metadata not found)
        token_request = await auth_flow.asend(oauth_metadata_response_3)
        assert str(token_request.url) == "https://api.example.com/token"
        assert token_request.method == "POST"
        # Send a successful token response
        token_response = httpx.Response(
            200,
            content=(
                b'{"access_token": "new_access_token", "token_type": "Bearer", "expires_in": 3600, '
                b'"refresh_token": "new_refresh_token"}'
            ),
            request=token_request,
        )
        # After OAuth flow completes, the original request is retried with auth header
        final_request = await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer new_access_token"
        assert final_request.method == "GET"
        assert str(final_request.url) == "https://api.example.com/v1/mcp"
        # Send final success response to properly close the generator
        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass  # Expected - generator should complete

    @pytest.mark.anyio
    async def test_handle_metadata_response_success(self, oauth_provider: OAuthClientProvider):
        """Test successful metadata response handling."""
        # Create minimal valid OAuth metadata
        content = b"""{
            "issuer": "https://auth.example.com",
            "authorization_endpoint": "https://auth.example.com/authorize",
            "token_endpoint": "https://auth.example.com/token"
        }"""
        response = httpx.Response(200, content=content)
        # Should set metadata
        await oauth_provider._handle_oauth_metadata_response(response)
        assert oauth_provider.context.oauth_metadata is not None
        assert str(oauth_provider.context.oauth_metadata.issuer) == "https://auth.example.com/"

    @pytest.mark.anyio
    async def test_prioritize_www_auth_scope_over_prm(
        self,
        oauth_provider: OAuthClientProvider,
        prm_metadata_response: httpx.Response,
        init_response_with_www_auth_scope: httpx.Response,
    ):
        """Test that WWW-Authenticate scope is prioritized over PRM scopes."""
        # First, process PRM metadata to set protected_resource_metadata with scopes
        await oauth_provider._handle_protected_resource_response(prm_metadata_response)
        # Process the scope selection with WWW-Authenticate header
        scopes = get_client_metadata_scopes(
            extract_scope_from_www_auth(init_response_with_www_auth_scope),
            oauth_provider.context.protected_resource_metadata,
        )
        # Verify that WWW-Authenticate scope is used (not PRM scopes)
        assert scopes == "special:scope from:www-authenticate"

    @pytest.mark.anyio
    async def test_prioritize_prm_scopes_when_no_www_auth_scope(
        self,
        oauth_provider: OAuthClientProvider,
        prm_metadata_response: httpx.Response,
        init_response_without_www_auth_scope: httpx.Response,
    ):
        """Test that PRM scopes are prioritized when WWW-Authenticate header has no scopes."""
        # Process the PRM metadata to set protected_resource_metadata with scopes
        await oauth_provider._handle_protected_resource_response(prm_metadata_response)
        # Process the scope selection without WWW-Authenticate scope
        scopes = get_client_metadata_scopes(
            extract_scope_from_www_auth(init_response_without_www_auth_scope),
            oauth_provider.context.protected_resource_metadata,
        )
        # Verify that PRM scopes are used
        assert scopes == "resource:read resource:write"

    @pytest.mark.anyio
    async def test_omit_scope_when_no_prm_scopes_or_www_auth(
        self,
        oauth_provider: OAuthClientProvider,
        prm_metadata_without_scopes_response: httpx.Response,
        init_response_without_www_auth_scope: httpx.Response,
    ):
        """Test that scope is omitted when PRM has no scopes and WWW-Authenticate doesn't specify scope."""
        # Process the PRM metadata without scopes
        await oauth_provider._handle_protected_resource_response(prm_metadata_without_scopes_response)
        # Process the scope selection without WWW-Authenticate scope
        scopes = get_client_metadata_scopes(
            extract_scope_from_www_auth(init_response_without_www_auth_scope),
            oauth_provider.context.protected_resource_metadata,
        )
        # Verify that scope is omitted
        assert scopes is None

    @pytest.mark.anyio
    async def test_token_exchange_request_authorization_code(self, oauth_provider: OAuthClientProvider):
        """Test token exchange request building."""
        # Set up required context
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id="test_client",
            client_secret="test_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
            token_endpoint_auth_method="client_secret_post",
        )
        request = await oauth_provider._exchange_token_authorization_code("test_auth_code", "test_verifier")
        assert request.method == "POST"
        assert str(request.url) == "https://api.example.com/token"
        assert request.headers["Content-Type"] == "application/x-www-form-urlencoded"
        # Check form data
        content = request.content.decode()
        assert "grant_type=authorization_code" in content
        assert "code=test_auth_code" in content
        assert "code_verifier=test_verifier" in content
        assert "client_id=test_client" in content
        assert "client_secret=test_secret" in content

    @pytest.mark.anyio
    async def test_refresh_token_request(self, oauth_provider: OAuthClientProvider, valid_tokens: OAuthToken):
        """Test refresh token request building."""
        # Set up required context
        oauth_provider.context.current_tokens = valid_tokens
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id="test_client",
            client_secret="test_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
            token_endpoint_auth_method="client_secret_post",
        )
        request = await oauth_provider._refresh_token()
        assert request.method == "POST"
        assert str(request.url) == "https://api.example.com/token"
        assert request.headers["Content-Type"] == "application/x-www-form-urlencoded"
        # Check form data
        content = request.content.decode()
        assert "grant_type=refresh_token" in content
        assert "refresh_token=test_refresh_token" in content
        assert "client_id=test_client" in content
        assert "client_secret=test_secret" in content

    @pytest.mark.anyio
    async def test_basic_auth_token_exchange(self, oauth_provider: OAuthClientProvider):
        """Test token exchange with client_secret_basic authentication."""
        # Set up OAuth metadata to support basic auth
        oauth_provider.context.oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            token_endpoint_auth_methods_supported=["client_secret_basic", "client_secret_post"],
        )
        client_id_raw = "test@client"  # Include special character to test URL encoding
        client_secret_raw = "test:secret"  # Include colon to test URL encoding
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id=client_id_raw,
            client_secret=client_secret_raw,
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
            token_endpoint_auth_method="client_secret_basic",
        )
        request = await oauth_provider._exchange_token_authorization_code("test_auth_code", "test_verifier")
        # Should use basic auth (registered method)
        assert "Authorization" in request.headers
        assert request.headers["Authorization"].startswith("Basic ")
        # Decode and verify credentials are properly URL-encoded
        encoded_creds = request.headers["Authorization"][6:]  # Remove "Basic " prefix
        decoded = base64.b64decode(encoded_creds).decode()
        client_id, client_secret = decoded.split(":", 1)
        # Check URL encoding was applied
        assert client_id == "test%40client"  # @ should be encoded as %40
        assert client_secret == "test%3Asecret"  # : should be encoded as %3A
        # Verify decoded values match original
        assert unquote(client_id) == client_id_raw
        assert unquote(client_secret) == client_secret_raw
        # client_secret should NOT be in body for basic auth
        content = request.content.decode()
        assert "client_secret=" not in content
        assert "client_id=test%40client" in content  # client_id still in body

    @pytest.mark.anyio
    async def test_basic_auth_refresh_token(self, oauth_provider: OAuthClientProvider, valid_tokens: OAuthToken):
        """Test token refresh with client_secret_basic authentication."""
        oauth_provider.context.current_tokens = valid_tokens
        # Set up OAuth metadata to only support basic auth
        oauth_provider.context.oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            token_endpoint_auth_methods_supported=["client_secret_basic"],
        )
        client_id = "test_client"
        client_secret = "test_secret"
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id=client_id,
            client_secret=client_secret,
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
            token_endpoint_auth_method="client_secret_basic",
        )
        request = await oauth_provider._refresh_token()
        assert "Authorization" in request.headers
        assert request.headers["Authorization"].startswith("Basic ")
        encoded_creds = request.headers["Authorization"][6:]
        decoded = base64.b64decode(encoded_creds).decode()
        assert decoded == f"{client_id}:{client_secret}"
        # client_secret should NOT be in body
        content = request.content.decode()
        assert "client_secret=" not in content

    @pytest.mark.anyio
    async def test_none_auth_method(self, oauth_provider: OAuthClientProvider):
        """Test 'none' authentication method (public client)."""
        oauth_provider.context.oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            token_endpoint_auth_methods_supported=["none"],
        )
        client_id = "public_client"
        oauth_provider.context.client_info = OAuthClientInformationFull(
            client_id=client_id,
            client_secret=None,  # No secret for public client
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
            token_endpoint_auth_method="none",
        )
        request = await oauth_provider._exchange_token_authorization_code("test_auth_code", "test_verifier")
        # Should NOT have Authorization header
        assert "Authorization" not in request.headers
        # Should NOT have client_secret in body
        content = request.content.decode()
        assert "client_secret=" not in content
        assert "client_id=public_client" in content
class TestProtectedResourceMetadata:
    """Test protected resource handling."""

    @pytest.mark.anyio
    async def test_resource_param_included_with_recent_protocol_version(self, oauth_provider: OAuthClientProvider):
        """The resource parameter is sent when the protocol version is >= 2025-06-18."""
        ctx = oauth_provider.context
        ctx.protocol_version = "2025-06-18"
        ctx.client_info = OAuthClientInformationFull(
            client_id="test_client",
            client_secret="test_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )

        # Authorization-code exchange carries the URL-encoded resource.
        request = await oauth_provider._exchange_token_authorization_code("test_code", "test_verifier")
        body = request.content.decode()
        assert "resource=" in body
        expected_resource = quote(ctx.get_resource_url(), safe="")
        assert f"resource={expected_resource}" in body

        # The refresh-token exchange carries it as well.
        ctx.current_tokens = OAuthToken(
            access_token="test_access",
            token_type="Bearer",
            refresh_token="test_refresh",
        )
        refresh_request = await oauth_provider._refresh_token()
        assert "resource=" in refresh_request.content.decode()

    @pytest.mark.anyio
    async def test_resource_param_excluded_with_old_protocol_version(self, oauth_provider: OAuthClientProvider):
        """The resource parameter is omitted for protocol versions before 2025-06-18."""
        ctx = oauth_provider.context
        ctx.protocol_version = "2025-03-26"
        ctx.client_info = OAuthClientInformationFull(
            client_id="test_client",
            client_secret="test_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )

        # Neither the code exchange...
        request = await oauth_provider._exchange_token_authorization_code("test_code", "test_verifier")
        assert "resource=" not in request.content.decode()

        # ...nor the refresh exchange mentions a resource.
        ctx.current_tokens = OAuthToken(
            access_token="test_access",
            token_type="Bearer",
            refresh_token="test_refresh",
        )
        refresh_request = await oauth_provider._refresh_token()
        assert "resource=" not in refresh_request.content.decode()

    @pytest.mark.anyio
    async def test_resource_param_included_with_protected_resource_metadata(self, oauth_provider: OAuthClientProvider):
        """Resource parameter is always sent once protected resource metadata exists."""
        ctx = oauth_provider.context
        ctx.protocol_version = "2025-03-26"  # old version on purpose
        ctx.protected_resource_metadata = ProtectedResourceMetadata(
            resource=AnyHttpUrl("https://api.example.com/v1/mcp"),
            authorization_servers=[AnyHttpUrl("https://api.example.com")],
        )
        ctx.client_info = OAuthClientInformationFull(
            client_id="test_client",
            client_secret="test_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )

        request = await oauth_provider._exchange_token_authorization_code("test_code", "test_verifier")
        assert "resource=" in request.content.decode()
@pytest.mark.anyio
async def test_validate_resource_rejects_mismatched_resource(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """Client must reject PRM resource that doesn't match server URL."""
    provider = OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp",
        client_metadata=client_metadata,
        storage=mock_storage,
    )
    provider._initialized = True
    # PRM pointing at a completely different origin than the server URL.
    mismatched = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://evil.example.com/mcp"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )
    with pytest.raises(OAuthFlowError, match="does not match expected"):
        await provider._validate_resource_match(mismatched)
@pytest.mark.anyio
async def test_validate_resource_accepts_matching_resource(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """Client must accept PRM resource that matches server URL."""
    oauth_client = OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp",
        client_metadata=client_metadata,
        storage=mock_storage,
    )
    oauth_client._initialized = True

    # PRM resource is identical to the configured server URL.
    matching_prm = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://api.example.com/v1/mcp"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )

    # Must complete without raising.
    await oauth_client._validate_resource_match(matching_prm)
@pytest.mark.anyio
async def test_validate_resource_custom_callback(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """Custom callback overrides default validation."""
    recorded_calls: list[tuple[str, str | None]] = []

    async def custom_validate(server_url: str, prm_resource: str | None) -> None:
        recorded_calls.append((server_url, prm_resource))

    oauth_client = OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp",
        client_metadata=client_metadata,
        storage=mock_storage,
        validate_resource_url=custom_validate,
    )
    oauth_client._initialized = True

    # Default validation would reject this cross-origin resource;
    # the custom callback accepts it and merely records the arguments.
    cross_origin_prm = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://evil.example.com/mcp"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )
    await oauth_client._validate_resource_match(cross_origin_prm)

    assert recorded_calls == snapshot([("https://api.example.com/v1/mcp", "https://evil.example.com/mcp")])
@pytest.mark.anyio
async def test_validate_resource_accepts_root_url_with_trailing_slash(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """Root URLs with trailing slash normalization should match."""
    oauth_client = OAuthClientProvider(
        server_url="https://api.example.com",
        client_metadata=client_metadata,
        storage=mock_storage,
    )
    oauth_client._initialized = True

    # PRM resource differs only by the trailing slash pydantic appends.
    root_prm = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://api.example.com/"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )

    # Must not raise: both sides normalize to the same root URL.
    await oauth_client._validate_resource_match(root_prm)
@pytest.mark.anyio
async def test_validate_resource_accepts_server_url_with_trailing_slash(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """Server URL with trailing slash should match PRM resource."""
    oauth_client = OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp/",
        client_metadata=client_metadata,
        storage=mock_storage,
    )
    oauth_client._initialized = True

    # Resource lacks the trailing slash the server URL carries.
    slashless_prm = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://api.example.com/v1/mcp"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )

    # Must not raise: both normalize to the same URL with trailing slash.
    await oauth_client._validate_resource_match(slashless_prm)
@pytest.mark.anyio
async def test_get_resource_url_uses_canonical_when_prm_mismatches(
    client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
) -> None:
    """get_resource_url falls back to canonical URL when PRM resource doesn't match."""
    oauth_client = OAuthClientProvider(
        server_url="https://api.example.com/v1/mcp",
        client_metadata=client_metadata,
        storage=mock_storage,
    )
    oauth_client._initialized = True

    # PRM advertises a resource that is NOT a parent of the server URL.
    oauth_client.context.protected_resource_metadata = ProtectedResourceMetadata(
        resource=AnyHttpUrl("https://other.example.com/mcp"),
        authorization_servers=[AnyHttpUrl("https://auth.example.com")],
    )

    # The canonical server URL wins over the mismatched PRM resource.
    assert oauth_client.context.get_resource_url() == snapshot("https://api.example.com/v1/mcp")
class TestRegistrationResponse:
    """Test client registration response handling."""

    @pytest.mark.anyio
    async def test_handle_registration_response_reads_before_accessing_text(self):
        """Test that response.aread() is called before accessing response.text."""

        # Track if aread() was called.
        class MockResponse(httpx.Response):
            # Deliberately does NOT call httpx.Response.__init__: only the
            # attributes the handler touches are set up by hand.
            def __init__(self):
                self.status_code = 400
                self._aread_called = False
                self._text = "Registration failed with error"

            async def aread(self):
                self._aread_called = True
                return b"test content"

            @property
            def text(self):
                # Trips if the handler reads .text without awaiting aread() first.
                if not self._aread_called:
                    raise RuntimeError("Response.text accessed before response.aread()")  # pragma: no cover
                return self._text

        mock_response = MockResponse()
        # This should call aread() before accessing text
        with pytest.raises(Exception) as exc_info:
            await handle_registration_response(mock_response)
        # Verify aread() was called
        assert mock_response._aread_called
        # Verify the error message includes the response text
        assert "Registration failed: 400" in str(exc_info.value)
class TestCreateClientRegistrationRequest:
    """Test client registration request creation.

    Covers the three endpoint-selection paths of create_client_registration_request:
    endpoint taken from metadata, fallback when no metadata, and fallback when
    metadata lacks a registration_endpoint.
    """

    @staticmethod
    def _client_metadata() -> OAuthClientMetadata:
        """Minimal client metadata shared by every test in this class.

        Uses AnyUrl for redirect_uris, consistent with how the rest of this
        file constructs redirect URIs.
        """
        return OAuthClientMetadata(redirect_uris=[AnyUrl("http://localhost:3000/callback")])

    def test_uses_registration_endpoint_from_metadata(self):
        """Test that registration URL comes from metadata when available."""
        oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            registration_endpoint=AnyHttpUrl("https://auth.example.com/register"),
        )
        request = create_client_registration_request(
            oauth_metadata, self._client_metadata(), "https://auth.example.com"
        )
        assert str(request.url) == "https://auth.example.com/register"
        assert request.method == "POST"

    def test_falls_back_to_default_register_endpoint_when_no_metadata(self):
        """Test that registration uses fallback URL when auth_server_metadata is None."""
        request = create_client_registration_request(None, self._client_metadata(), "https://auth.example.com")
        assert str(request.url) == "https://auth.example.com/register"
        assert request.method == "POST"

    def test_falls_back_when_metadata_has_no_registration_endpoint(self):
        """Test fallback when metadata exists but lacks registration_endpoint."""
        oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            # No registration_endpoint
        )
        request = create_client_registration_request(
            oauth_metadata, self._client_metadata(), "https://auth.example.com"
        )
        assert str(request.url) == "https://auth.example.com/register"
        assert request.method == "POST"
class TestAuthFlow:
    """Test the auth flow in httpx.

    Each test drives OAuthClientProvider.async_auth_flow() by hand: requests
    are pulled out of the async generator and hand-built httpx.Response objects
    are fed back in with asend(), so no real network traffic occurs.
    """

    @pytest.mark.anyio
    async def test_auth_flow_with_valid_tokens(
        self, oauth_provider: OAuthClientProvider, mock_storage: MockTokenStorage, valid_tokens: OAuthToken
    ):
        """Test auth flow when tokens are already valid."""
        # Pre-store valid tokens
        await mock_storage.set_tokens(valid_tokens)
        oauth_provider.context.current_tokens = valid_tokens
        oauth_provider.context.token_expiry_time = time.time() + 1800
        oauth_provider._initialized = True

        # Create a test request
        test_request = httpx.Request("GET", "https://api.example.com/test")

        # Mock the auth flow
        auth_flow = oauth_provider.async_auth_flow(test_request)

        # Should get the request with auth header added
        request = await auth_flow.__anext__()
        assert request.headers["Authorization"] == "Bearer test_access_token"

        # Send a successful response
        response = httpx.Response(200)
        try:
            await auth_flow.asend(response)
        except StopAsyncIteration:
            pass  # Expected

    @pytest.mark.anyio
    async def test_auth_flow_with_no_tokens(self, oauth_provider: OAuthClientProvider, mock_storage: MockTokenStorage):
        """Test auth flow when no tokens are available, triggering the full OAuth flow."""
        # Ensure no tokens are stored
        oauth_provider.context.current_tokens = None
        oauth_provider.context.token_expiry_time = None
        oauth_provider._initialized = True

        # Create a test request
        test_request = httpx.Request("GET", "https://api.example.com/mcp")

        # Mock the auth flow
        auth_flow = oauth_provider.async_auth_flow(test_request)

        # First request should be the original request without auth header
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers

        # Send a 401 response to trigger the OAuth flow
        response = httpx.Response(
            401,
            headers={
                "WWW-Authenticate": 'Bearer resource_metadata="https://api.example.com/.well-known/oauth-protected-resource"'
            },
            request=test_request,
        )

        # Next request should be to discover protected resource metadata
        discovery_request = await auth_flow.asend(response)
        assert discovery_request.method == "GET"
        assert str(discovery_request.url) == "https://api.example.com/.well-known/oauth-protected-resource"

        # Send a successful discovery response with minimal protected resource metadata
        discovery_response = httpx.Response(
            200,
            content=b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com"]}',
            request=discovery_request,
        )

        # Next request should be to discover OAuth metadata
        oauth_metadata_request = await auth_flow.asend(discovery_response)
        assert oauth_metadata_request.method == "GET"
        assert str(oauth_metadata_request.url).startswith("https://auth.example.com/")
        assert "mcp-protocol-version" in oauth_metadata_request.headers

        # Send a successful OAuth metadata response
        oauth_metadata_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://auth.example.com", '
                b'"authorization_endpoint": "https://auth.example.com/authorize", '
                b'"token_endpoint": "https://auth.example.com/token", '
                b'"registration_endpoint": "https://auth.example.com/register"}'
            ),
            request=oauth_metadata_request,
        )

        # Next request should be to register client
        registration_request = await auth_flow.asend(oauth_metadata_response)
        assert registration_request.method == "POST"
        assert str(registration_request.url) == "https://auth.example.com/register"

        # Send a successful registration response
        registration_response = httpx.Response(
            201,
            content=b'{"client_id": "test_client_id", "client_secret": "test_client_secret", "redirect_uris": ["http://localhost:3030/callback"]}',
            request=registration_request,
        )

        # Mock the authorization process (skips the browser-based redirect/callback)
        oauth_provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )

        # Next request should be to exchange token
        token_request = await auth_flow.asend(registration_response)
        assert token_request.method == "POST"
        assert str(token_request.url) == "https://auth.example.com/token"
        assert "code=test_auth_code" in token_request.content.decode()

        # Send a successful token response
        token_response = httpx.Response(
            200,
            content=(
                b'{"access_token": "new_access_token", "token_type": "Bearer", "expires_in": 3600, '
                b'"refresh_token": "new_refresh_token"}'
            ),
            request=token_request,
        )

        # Final request should be the original request with auth header
        final_request = await au_flow.asend(token_response) if False else await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer new_access_token"
        assert final_request.method == "GET"
        assert str(final_request.url) == "https://api.example.com/mcp"

        # Send final success response to properly close the generator
        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass  # Expected - generator should complete

        # Verify tokens were stored
        assert oauth_provider.context.current_tokens is not None
        assert oauth_provider.context.current_tokens.access_token == "new_access_token"
        assert oauth_provider.context.token_expiry_time is not None

    @pytest.mark.anyio
    async def test_auth_flow_no_unnecessary_retry_after_oauth(
        self, oauth_provider: OAuthClientProvider, mock_storage: MockTokenStorage, valid_tokens: OAuthToken
    ):
        """Test that requests are not retried unnecessarily - the core bug that caused 2x performance degradation."""
        # Pre-store valid tokens so no OAuth flow is needed
        await mock_storage.set_tokens(valid_tokens)
        oauth_provider.context.current_tokens = valid_tokens
        oauth_provider.context.token_expiry_time = time.time() + 1800
        oauth_provider._initialized = True

        test_request = httpx.Request("GET", "https://api.example.com/mcp")
        auth_flow = oauth_provider.async_auth_flow(test_request)

        # Count how many times the request is yielded
        request_yields = 0

        # First request - should have auth header already
        request = await auth_flow.__anext__()
        request_yields += 1
        assert request.headers["Authorization"] == "Bearer test_access_token"

        # Send a successful 200 response
        response = httpx.Response(200, request=request)

        # In the buggy version, this would yield the request AGAIN unconditionally
        # In the fixed version, this should end the generator
        try:
            await auth_flow.asend(response)  # extra request
            request_yields += 1  # pragma: no cover
            # If we reach here, the bug is present
            pytest.fail(
                f"Unnecessary retry detected! Request was yielded {request_yields} times. "
                f"This indicates the retry logic bug that caused 2x performance degradation. "
                f"The request should only be yielded once for successful responses."
            )  # pragma: no cover
        except StopAsyncIteration:
            # This is the expected behavior - no unnecessary retry
            pass

        # Verify exactly one request was yielded (no double-sending)
        assert request_yields == 1, f"Expected 1 request yield, got {request_yields}"

    @pytest.mark.anyio
    async def test_token_exchange_accepts_201_status(
        self, oauth_provider: OAuthClientProvider, mock_storage: MockTokenStorage
    ):
        """Test that token exchange accepts both 200 and 201 status codes."""
        # Ensure no tokens are stored
        oauth_provider.context.current_tokens = None
        oauth_provider.context.token_expiry_time = None
        oauth_provider._initialized = True

        # Create a test request
        test_request = httpx.Request("GET", "https://api.example.com/mcp")

        # Mock the auth flow
        auth_flow = oauth_provider.async_auth_flow(test_request)

        # First request should be the original request without auth header
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers

        # Send a 401 response to trigger the OAuth flow
        response = httpx.Response(
            401,
            headers={
                "WWW-Authenticate": 'Bearer resource_metadata="https://api.example.com/.well-known/oauth-protected-resource"'
            },
            request=test_request,
        )

        # Next request should be to discover protected resource metadata
        discovery_request = await auth_flow.asend(response)
        assert discovery_request.method == "GET"
        assert str(discovery_request.url) == "https://api.example.com/.well-known/oauth-protected-resource"

        # Send a successful discovery response with minimal protected resource metadata
        discovery_response = httpx.Response(
            200,
            content=b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com"]}',
            request=discovery_request,
        )

        # Next request should be to discover OAuth metadata
        oauth_metadata_request = await auth_flow.asend(discovery_response)
        assert oauth_metadata_request.method == "GET"
        assert str(oauth_metadata_request.url).startswith("https://auth.example.com/")
        assert "mcp-protocol-version" in oauth_metadata_request.headers

        # Send a successful OAuth metadata response
        oauth_metadata_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://auth.example.com", '
                b'"authorization_endpoint": "https://auth.example.com/authorize", '
                b'"token_endpoint": "https://auth.example.com/token", '
                b'"registration_endpoint": "https://auth.example.com/register"}'
            ),
            request=oauth_metadata_request,
        )

        # Next request should be to register client
        registration_request = await auth_flow.asend(oauth_metadata_response)
        assert registration_request.method == "POST"
        assert str(registration_request.url) == "https://auth.example.com/register"

        # Send a successful registration response with 201 status
        registration_response = httpx.Response(
            201,
            content=b'{"client_id": "test_client_id", "client_secret": "test_client_secret", "redirect_uris": ["http://localhost:3030/callback"]}',
            request=registration_request,
        )

        # Mock the authorization process
        oauth_provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )

        # Next request should be to exchange token
        token_request = await auth_flow.asend(registration_response)
        assert token_request.method == "POST"
        assert str(token_request.url) == "https://auth.example.com/token"
        assert "code=test_auth_code" in token_request.content.decode()

        # Send a successful token response with 201 status code (test both 200 and 201 are accepted)
        token_response = httpx.Response(
            201,
            content=(
                b'{"access_token": "new_access_token", "token_type": "Bearer", "expires_in": 3600, '
                b'"refresh_token": "new_refresh_token"}'
            ),
            request=token_request,
        )

        # Final request should be the original request with auth header
        final_request = await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer new_access_token"
        assert final_request.method == "GET"
        assert str(final_request.url) == "https://api.example.com/mcp"

        # Send final success response to properly close the generator
        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass  # Expected - generator should complete

        # Verify tokens were stored
        assert oauth_provider.context.current_tokens is not None
        assert oauth_provider.context.current_tokens.access_token == "new_access_token"
        assert oauth_provider.context.token_expiry_time is not None

    @pytest.mark.anyio
    async def test_403_insufficient_scope_updates_scope_from_header(
        self,
        oauth_provider: OAuthClientProvider,
        mock_storage: MockTokenStorage,
        valid_tokens: OAuthToken,
    ):
        """Test that 403 response correctly updates scope from WWW-Authenticate header."""
        # Pre-store valid tokens and client info
        client_info = OAuthClientInformationFull(
            client_id="test_client_id",
            client_secret="test_client_secret",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )
        await mock_storage.set_tokens(valid_tokens)
        await mock_storage.set_client_info(client_info)
        oauth_provider.context.current_tokens = valid_tokens
        oauth_provider.context.token_expiry_time = time.time() + 1800
        oauth_provider.context.client_info = client_info
        oauth_provider._initialized = True

        # Original scope
        assert oauth_provider.context.client_metadata.scope == "read write"

        redirect_captured = False
        captured_state = None

        async def capture_redirect(url: str) -> None:
            nonlocal redirect_captured, captured_state
            redirect_captured = True
            # Verify the new scope is included in authorization URL
            # (accept both raw and percent-encoded forms of ':' and ' ')
            assert "scope=admin%3Awrite+admin%3Adelete" in url or "scope=admin:write admin:delete" in url.replace(
                "%3A", ":"
            ).replace("+", " ")
            # Extract state from redirect URL
            parsed = urlparse(url)
            params = parse_qs(parsed.query)
            captured_state = params.get("state", [None])[0]

        oauth_provider.context.redirect_handler = capture_redirect

        # Mock callback
        async def mock_callback() -> tuple[str, str | None]:
            return "auth_code", captured_state

        oauth_provider.context.callback_handler = mock_callback

        test_request = httpx.Request("GET", "https://api.example.com/mcp")
        auth_flow = oauth_provider.async_auth_flow(test_request)

        # First request
        request = await auth_flow.__anext__()

        # Send 403 with new scope requirement
        response_403 = httpx.Response(
            403,
            headers={"WWW-Authenticate": 'Bearer error="insufficient_scope", scope="admin:write admin:delete"'},
            request=request,
        )

        # Trigger step-up - should get token exchange request
        token_exchange_request = await auth_flow.asend(response_403)

        # Verify scope was updated
        assert oauth_provider.context.client_metadata.scope == "admin:write admin:delete"
        assert redirect_captured

        # Complete the flow with successful token response
        token_response = httpx.Response(
            200,
            json={
                "access_token": "new_token_with_new_scope",
                "token_type": "Bearer",
                "expires_in": 3600,
                "scope": "admin:write admin:delete",
            },
            request=token_exchange_request,
        )

        # Should get final retry request
        final_request = await auth_flow.asend(token_response)

        # Send success response - flow should complete
        success_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(success_response)
            pytest.fail("Should have stopped after successful response")  # pragma: no cover
        except StopAsyncIteration:
            pass  # Expected
@pytest.mark.parametrize(
    (
        "issuer_url",
        "service_documentation_url",
        "authorization_endpoint",
        "token_endpoint",
        "registration_endpoint",
        "revocation_endpoint",
    ),
    (
        # Pydantic's AnyUrl incorrectly adds trailing slash to base URLs
        # This is being fixed in https://github.com/pydantic/pydantic-core/pull/1719 (Pydantic 2.12+)
        pytest.param(
            "https://auth.example.com",
            "https://auth.example.com/docs",
            "https://auth.example.com/authorize",
            "https://auth.example.com/token",
            "https://auth.example.com/register",
            "https://auth.example.com/revoke",
            id="simple-url",
            marks=pytest.mark.xfail(
                reason="Pydantic AnyUrl adds trailing slash to base URLs - fixed in Pydantic 2.12+"
            ),
        ),
        pytest.param(
            "https://auth.example.com/",
            "https://auth.example.com/docs",
            "https://auth.example.com/authorize",
            "https://auth.example.com/token",
            "https://auth.example.com/register",
            "https://auth.example.com/revoke",
            id="with-trailing-slash",
        ),
        pytest.param(
            "https://auth.example.com/v1/mcp",
            "https://auth.example.com/v1/mcp/docs",
            "https://auth.example.com/v1/mcp/authorize",
            "https://auth.example.com/v1/mcp/token",
            "https://auth.example.com/v1/mcp/register",
            "https://auth.example.com/v1/mcp/revoke",
            id="with-path-param",
        ),
    ),
)
def test_build_metadata(
    issuer_url: str,
    service_documentation_url: str,
    authorization_endpoint: str,
    token_endpoint: str,
    registration_endpoint: str,
    revocation_endpoint: str,
) -> None:
    """All server endpoints must be derived from the issuer URL (path preserved)."""
    metadata = build_metadata(
        issuer_url=AnyHttpUrl(issuer_url),
        service_documentation_url=AnyHttpUrl(service_documentation_url),
        client_registration_options=ClientRegistrationOptions(enabled=True, valid_scopes=["read", "write", "admin"]),
        revocation_options=RevocationOptions(enabled=True),
    )
    # Is(...) defers comparison to the parametrized values inside the snapshot.
    assert metadata.model_dump(exclude_defaults=True, mode="json") == snapshot(
        {
            "issuer": Is(issuer_url),
            "authorization_endpoint": Is(authorization_endpoint),
            "token_endpoint": Is(token_endpoint),
            "registration_endpoint": Is(registration_endpoint),
            "scopes_supported": ["read", "write", "admin"],
            "grant_types_supported": ["authorization_code", "refresh_token"],
            "token_endpoint_auth_methods_supported": ["client_secret_post", "client_secret_basic"],
            "service_documentation": Is(service_documentation_url),
            "revocation_endpoint": Is(revocation_endpoint),
            "revocation_endpoint_auth_methods_supported": ["client_secret_post", "client_secret_basic"],
            "code_challenge_methods_supported": ["S256"],
        }
    )
class TestLegacyServerFallback:
    """Test backward compatibility with legacy servers that don't support PRM (issue #1495).

    These tests drive async_auth_flow() manually with crafted responses; no
    network traffic occurs.
    """

    @pytest.mark.anyio
    async def test_legacy_server_no_prm_falls_back_to_root_oauth_discovery(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that when PRM discovery fails completely, we fall back to root OAuth discovery (March 2025 spec)."""

        async def redirect_handler(url: str) -> None:
            pass  # pragma: no cover

        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state"  # pragma: no cover

        # Simulate a legacy server like Linear
        provider = OAuthClientProvider(
            server_url="https://mcp.linear.app/sse",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
        provider.context.current_tokens = None
        provider.context.token_expiry_time = None
        provider._initialized = True

        # Mock client info to skip DCR
        provider.context.client_info = OAuthClientInformationFull(
            client_id="existing_client",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )

        test_request = httpx.Request("GET", "https://mcp.linear.app/sse")
        auth_flow = provider.async_auth_flow(test_request)

        # First request
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers

        # Send 401 without WWW-Authenticate header (typical legacy server)
        response = httpx.Response(401, headers={}, request=test_request)

        # Should try path-based PRM first
        prm_request_1 = await auth_flow.asend(response)
        assert str(prm_request_1.url) == "https://mcp.linear.app/.well-known/oauth-protected-resource/sse"

        # PRM returns 404
        prm_response_1 = httpx.Response(404, request=prm_request_1)

        # Should try root-based PRM
        prm_request_2 = await auth_flow.asend(prm_response_1)
        assert str(prm_request_2.url) == "https://mcp.linear.app/.well-known/oauth-protected-resource"

        # PRM returns 404 again - all PRM URLs failed
        prm_response_2 = httpx.Response(404, request=prm_request_2)

        # Should fall back to root OAuth discovery (March 2025 spec behavior)
        oauth_metadata_request = await auth_flow.asend(prm_response_2)
        assert str(oauth_metadata_request.url) == "https://mcp.linear.app/.well-known/oauth-authorization-server"
        assert oauth_metadata_request.method == "GET"

        # Send successful OAuth metadata response
        oauth_metadata_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://mcp.linear.app", '
                b'"authorization_endpoint": "https://mcp.linear.app/authorize", '
                b'"token_endpoint": "https://mcp.linear.app/token"}'
            ),
            request=oauth_metadata_request,
        )

        # Mock authorization (skips the browser-based redirect/callback)
        provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )

        # Next should be token exchange
        token_request = await auth_flow.asend(oauth_metadata_response)
        assert str(token_request.url) == "https://mcp.linear.app/token"

        # Send successful token response
        token_response = httpx.Response(
            200,
            content=b'{"access_token": "linear_token", "token_type": "Bearer", "expires_in": 3600}',
            request=token_request,
        )

        # Final request with auth header
        final_request = await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer linear_token"
        assert str(final_request.url) == "https://mcp.linear.app/sse"

        # Complete flow
        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass

    @pytest.mark.anyio
    async def test_legacy_server_with_different_prm_and_root_urls(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test PRM fallback with different WWW-Authenticate and root URLs."""

        async def redirect_handler(url: str) -> None:
            pass  # pragma: no cover

        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state"  # pragma: no cover

        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
        provider.context.current_tokens = None
        provider.context.token_expiry_time = None
        provider._initialized = True
        provider.context.client_info = OAuthClientInformationFull(
            client_id="existing_client",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )

        test_request = httpx.Request("GET", "https://api.example.com/v1/mcp")
        auth_flow = provider.async_auth_flow(test_request)
        await auth_flow.__anext__()

        # 401 with custom WWW-Authenticate PRM URL
        response = httpx.Response(
            401,
            headers={
                "WWW-Authenticate": 'Bearer resource_metadata="https://custom.prm.com/.well-known/oauth-protected-resource"'
            },
            request=test_request,
        )

        # Try custom PRM URL first (header-provided URL has priority)
        prm_request_1 = await auth_flow.asend(response)
        assert str(prm_request_1.url) == "https://custom.prm.com/.well-known/oauth-protected-resource"

        # Returns 500
        prm_response_1 = httpx.Response(500, request=prm_request_1)

        # Try path-based fallback
        prm_request_2 = await auth_flow.asend(prm_response_1)
        assert str(prm_request_2.url) == "https://api.example.com/.well-known/oauth-protected-resource/v1/mcp"

        # Returns 404
        prm_response_2 = httpx.Response(404, request=prm_request_2)

        # Try root fallback
        prm_request_3 = await auth_flow.asend(prm_response_2)
        assert str(prm_request_3.url) == "https://api.example.com/.well-known/oauth-protected-resource"

        # Also returns 404 - all PRM URLs failed
        prm_response_3 = httpx.Response(404, request=prm_request_3)

        # Should fall back to root OAuth discovery
        oauth_metadata_request = await auth_flow.asend(prm_response_3)
        assert str(oauth_metadata_request.url) == "https://api.example.com/.well-known/oauth-authorization-server"

        # Complete the flow
        oauth_metadata_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://api.example.com", '
                b'"authorization_endpoint": "https://api.example.com/authorize", '
                b'"token_endpoint": "https://api.example.com/token"}'
            ),
            request=oauth_metadata_request,
        )
        provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )
        token_request = await auth_flow.asend(oauth_metadata_response)
        assert str(token_request.url) == "https://api.example.com/token"

        token_response = httpx.Response(
            200,
            content=b'{"access_token": "test_token", "token_type": "Bearer", "expires_in": 3600}',
            request=token_request,
        )
        final_request = await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer test_token"

        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass
class TestSEP985Discovery:
    """Test SEP-985 protected resource metadata discovery with fallback.

    Discovery URL priority (as asserted below): the URL from a
    WWW-Authenticate ``resource_metadata`` parameter comes first when present,
    then the path-based well-known URI derived from the server URL, then the
    root-based well-known URI.
    """
    @pytest.mark.anyio
    async def test_path_based_fallback_when_no_www_authenticate(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that client falls back to path-based well-known URI when WWW-Authenticate is absent."""
        # Required by the provider constructor, but never invoked here: this
        # test only inspects discovery URL construction.
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
        # Test with 401 response without WWW-Authenticate header
        init_response = httpx.Response(
            status_code=401, headers={}, request=httpx.Request("GET", "https://api.example.com/v1/mcp")
        )
        # Build discovery URLs
        discovery_urls = build_protected_resource_metadata_discovery_urls(
            extract_resource_metadata_from_www_auth(init_response), provider.context.server_url
        )
        # Should have path-based URL first, then root-based URL
        assert len(discovery_urls) == 2
        assert discovery_urls[0] == "https://api.example.com/.well-known/oauth-protected-resource/v1/mcp"
        assert discovery_urls[1] == "https://api.example.com/.well-known/oauth-protected-resource"
    @pytest.mark.anyio
    async def test_root_based_fallback_after_path_based_404(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that client falls back to root-based URI when path-based returns 404."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
        # Ensure no tokens are stored
        provider.context.current_tokens = None
        provider.context.token_expiry_time = None
        provider._initialized = True
        # Mock client info to skip DCR
        provider.context.client_info = OAuthClientInformationFull(
            client_id="existing_client",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )
        # Create a test request
        test_request = httpx.Request("GET", "https://api.example.com/v1/mcp")
        # Mock the auth flow
        auth_flow = provider.async_auth_flow(test_request)
        # First request should be the original request without auth header
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers
        # Send a 401 response without WWW-Authenticate header
        response = httpx.Response(401, headers={}, request=test_request)
        # Next request should be to discover protected resource metadata (path-based)
        discovery_request_1 = await auth_flow.asend(response)
        assert str(discovery_request_1.url) == "https://api.example.com/.well-known/oauth-protected-resource/v1/mcp"
        assert discovery_request_1.method == "GET"
        # Send 404 response for path-based discovery
        discovery_response_1 = httpx.Response(404, request=discovery_request_1)
        # Next request should be to root-based well-known URI
        discovery_request_2 = await auth_flow.asend(discovery_response_1)
        assert str(discovery_request_2.url) == "https://api.example.com/.well-known/oauth-protected-resource"
        assert discovery_request_2.method == "GET"
        # Send successful discovery response
        discovery_response_2 = httpx.Response(
            200,
            content=(
                b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com"]}'
            ),
            request=discovery_request_2,
        )
        # Mock the rest of the OAuth flow
        provider._perform_authorization = mock.AsyncMock(return_value=("test_auth_code", "test_code_verifier"))
        # Next should be OAuth metadata discovery
        oauth_metadata_request = await auth_flow.asend(discovery_response_2)
        assert oauth_metadata_request.method == "GET"
        # Complete the flow
        oauth_metadata_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://auth.example.com", '
                b'"authorization_endpoint": "https://auth.example.com/authorize", '
                b'"token_endpoint": "https://auth.example.com/token"}'
            ),
            request=oauth_metadata_request,
        )
        token_request = await auth_flow.asend(oauth_metadata_response)
        token_response = httpx.Response(
            200,
            content=(
                b'{"access_token": "new_access_token", "token_type": "Bearer", "expires_in": 3600, '
                b'"refresh_token": "new_refresh_token"}'
            ),
            request=token_request,
        )
        final_request = await auth_flow.asend(token_response)
        final_response = httpx.Response(200, request=final_request)
        # Generator must be exhausted after the final response.
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass
    @pytest.mark.anyio
    async def test_www_authenticate_takes_priority_over_well_known(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that WWW-Authenticate header resource_metadata takes priority over well-known URIs."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
        )
        # Test with 401 response with WWW-Authenticate header
        init_response = httpx.Response(
            status_code=401,
            headers={
                "WWW-Authenticate": 'Bearer resource_metadata="https://custom.example.com/.well-known/oauth-protected-resource"'
            },
            request=httpx.Request("GET", "https://api.example.com/v1/mcp"),
        )
        # Build discovery URLs
        discovery_urls = build_protected_resource_metadata_discovery_urls(
            extract_resource_metadata_from_www_auth(init_response), provider.context.server_url
        )
        # Should have WWW-Authenticate URL first, then fallback URLs
        assert len(discovery_urls) == 3
        assert discovery_urls[0] == "https://custom.example.com/.well-known/oauth-protected-resource"
        assert discovery_urls[1] == "https://api.example.com/.well-known/oauth-protected-resource/v1/mcp"
        assert discovery_urls[2] == "https://api.example.com/.well-known/oauth-protected-resource"
class TestWWWAuthenticate:
    """Test WWW-Authenticate header parsing functionality.

    Both tests exercise ``extract_field_from_www_auth`` directly; no OAuth
    provider is constructed, so the ``client_metadata``/``mock_storage``
    fixtures previously requested here were unused and are no longer requested.
    """
    @pytest.mark.parametrize(
        "www_auth_header,field_name,expected_value",
        [
            # Quoted values
            ('Bearer scope="read write"', "scope", "read write"),
            (
                'Bearer resource_metadata="https://api.example.com/.well-known/oauth-protected-resource"',
                "resource_metadata",
                "https://api.example.com/.well-known/oauth-protected-resource",
            ),
            ('Bearer error="insufficient_scope"', "error", "insufficient_scope"),
            # Unquoted values
            ("Bearer scope=read", "scope", "read"),
            (
                "Bearer resource_metadata=https://api.example.com/.well-known/oauth-protected-resource",
                "resource_metadata",
                "https://api.example.com/.well-known/oauth-protected-resource",
            ),
            ("Bearer error=invalid_token", "error", "invalid_token"),
            # Multiple parameters with quoted value
            (
                'Bearer realm="api", scope="admin:write resource:read", error="insufficient_scope"',
                "scope",
                "admin:write resource:read",
            ),
            (
                'Bearer realm="api", resource_metadata="https://api.example.com/.well-known/oauth-protected-resource", '
                'error="insufficient_scope"',
                "resource_metadata",
                "https://api.example.com/.well-known/oauth-protected-resource",
            ),
            # Multiple parameters with unquoted value
            ('Bearer realm="api", scope=basic', "scope", "basic"),
            # Values with special characters
            (
                'Bearer scope="resource:read resource:write user_profile"',
                "scope",
                "resource:read resource:write user_profile",
            ),
            (
                'Bearer resource_metadata="https://api.example.com/auth/metadata?version=1"',
                "resource_metadata",
                "https://api.example.com/auth/metadata?version=1",
            ),
        ],
    )
    def test_extract_field_from_www_auth_valid_cases(
        self,
        www_auth_header: str,
        field_name: str,
        expected_value: str,
    ):
        """Test extraction of various fields from valid WWW-Authenticate headers."""
        init_response = httpx.Response(
            status_code=401,
            headers={"WWW-Authenticate": www_auth_header},
            request=httpx.Request("GET", "https://api.example.com/test"),
        )
        result = extract_field_from_www_auth(init_response, field_name)
        assert result == expected_value
    @pytest.mark.parametrize(
        "www_auth_header,field_name,description",
        [
            # No header
            (None, "scope", "no WWW-Authenticate header"),
            # Empty header
            ("", "scope", "empty WWW-Authenticate header"),
            # Header without requested field
            ('Bearer realm="api", error="insufficient_scope"', "scope", "no scope parameter"),
            ('Bearer realm="api", scope="read write"', "resource_metadata", "no resource_metadata parameter"),
            # Malformed field (empty value)
            ("Bearer scope=", "scope", "malformed scope parameter"),
            ("Bearer resource_metadata=", "resource_metadata", "malformed resource_metadata parameter"),
        ],
    )
    def test_extract_field_from_www_auth_invalid_cases(
        self,
        www_auth_header: str | None,
        field_name: str,
        description: str,
    ):
        """Test extraction returns None for invalid cases."""
        # Omit the header entirely for the "no header" case.
        headers = {"WWW-Authenticate": www_auth_header} if www_auth_header is not None else {}
        init_response = httpx.Response(
            status_code=401, headers=headers, request=httpx.Request("GET", "https://api.example.com/test")
        )
        result = extract_field_from_www_auth(init_response, field_name)
        assert result is None, f"Should return None for {description}"
class TestCIMD:
    """Test Client ID Metadata Document (CIMD) support.

    With CIMD, the HTTPS URL of a client metadata document is used directly as
    the ``client_id`` (with ``token_endpoint_auth_method="none"``), letting the
    client skip Dynamic Client Registration when the server advertises
    ``client_id_metadata_document_supported``.
    """
    @pytest.mark.parametrize(
        "url,expected",
        [
            # Valid CIMD URLs
            ("https://example.com/client", True),
            ("https://example.com/client-metadata.json", True),
            ("https://example.com/path/to/client", True),
            ("https://example.com:8443/client", True),
            # Invalid URLs - HTTP (not HTTPS)
            ("http://example.com/client", False),
            # Invalid URLs - root path
            ("https://example.com", False),
            ("https://example.com/", False),
            # Invalid URLs - None or empty
            (None, False),
            ("", False),
            # Invalid URLs - malformed (triggers urlparse exception)
            ("http://[::1/foo/", False),
        ],
    )
    def test_is_valid_client_metadata_url(self, url: str | None, expected: bool):
        """Test CIMD URL validation."""
        assert is_valid_client_metadata_url(url) == expected
    def test_should_use_client_metadata_url_when_server_supports(self):
        """Test that CIMD is used when server supports it and URL is provided."""
        oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            client_id_metadata_document_supported=True,
        )
        assert should_use_client_metadata_url(oauth_metadata, "https://example.com/client") is True
    def test_should_not_use_client_metadata_url_when_server_does_not_support(self):
        """Test that CIMD is not used when server doesn't support it."""
        oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            client_id_metadata_document_supported=False,
        )
        assert should_use_client_metadata_url(oauth_metadata, "https://example.com/client") is False
    def test_should_not_use_client_metadata_url_when_not_provided(self):
        """Test that CIMD is not used when no URL is provided."""
        oauth_metadata = OAuthMetadata(
            issuer=AnyHttpUrl("https://auth.example.com"),
            authorization_endpoint=AnyHttpUrl("https://auth.example.com/authorize"),
            token_endpoint=AnyHttpUrl("https://auth.example.com/token"),
            client_id_metadata_document_supported=True,
        )
        assert should_use_client_metadata_url(oauth_metadata, None) is False
    def test_should_not_use_client_metadata_url_when_no_metadata(self):
        """Test that CIMD is not used when OAuth metadata is None."""
        assert should_use_client_metadata_url(None, "https://example.com/client") is False
    def test_create_client_info_from_metadata_url(self):
        """Test creating client info from CIMD URL."""
        client_info = create_client_info_from_metadata_url(
            "https://example.com/client",
            redirect_uris=[AnyUrl("http://localhost:3030/callback")],
        )
        # The CIMD URL itself is the client_id; no client secret is issued.
        assert client_info.client_id == "https://example.com/client"
        assert client_info.token_endpoint_auth_method == "none"
        assert client_info.redirect_uris == [AnyUrl("http://localhost:3030/callback")]
        assert client_info.client_secret is None
    def test_oauth_provider_with_valid_client_metadata_url(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test OAuthClientProvider initialization with valid client_metadata_url."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
            client_metadata_url="https://example.com/client",
        )
        assert provider.context.client_metadata_url == "https://example.com/client"
    def test_oauth_provider_with_invalid_client_metadata_url_raises_error(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test OAuthClientProvider raises error for invalid client_metadata_url."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        with pytest.raises(ValueError) as exc_info:
            OAuthClientProvider(
                server_url="https://api.example.com/v1/mcp",
                client_metadata=client_metadata,
                storage=mock_storage,
                redirect_handler=redirect_handler,
                callback_handler=callback_handler,
                client_metadata_url="http://example.com/client", # HTTP instead of HTTPS
            )
        assert "HTTPS URL with a non-root pathname" in str(exc_info.value)
    @pytest.mark.anyio
    async def test_auth_flow_uses_cimd_when_server_supports(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that auth flow uses CIMD URL as client_id when server supports it."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
            client_metadata_url="https://example.com/client",
        )
        # Start with no cached tokens so the 401 drives the full flow.
        provider.context.current_tokens = None
        provider.context.token_expiry_time = None
        provider._initialized = True
        test_request = httpx.Request("GET", "https://api.example.com/v1/mcp")
        auth_flow = provider.async_auth_flow(test_request)
        # First request
        request = await auth_flow.__anext__()
        assert "Authorization" not in request.headers
        # Send 401 response
        response = httpx.Response(401, headers={}, request=test_request)
        # PRM discovery
        prm_request = await auth_flow.asend(response)
        prm_response = httpx.Response(
            200,
            content=b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com"]}',
            request=prm_request,
        )
        # OAuth metadata discovery
        oauth_request = await auth_flow.asend(prm_response)
        oauth_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://auth.example.com", '
                b'"authorization_endpoint": "https://auth.example.com/authorize", '
                b'"token_endpoint": "https://auth.example.com/token", '
                b'"client_id_metadata_document_supported": true}'
            ),
            request=oauth_request,
        )
        # Mock authorization
        provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )
        # Should skip DCR and go directly to token exchange
        token_request = await auth_flow.asend(oauth_response)
        assert token_request.method == "POST"
        assert str(token_request.url) == "https://auth.example.com/token"
        # Verify client_id is the CIMD URL (URL-encoded in the form body)
        content = token_request.content.decode()
        assert "client_id=https%3A%2F%2Fexample.com%2Fclient" in content
        # Verify client info was set correctly
        assert provider.context.client_info is not None
        assert provider.context.client_info.client_id == "https://example.com/client"
        assert provider.context.client_info.token_endpoint_auth_method == "none"
        # Complete the flow
        token_response = httpx.Response(
            200,
            content=b'{"access_token": "test_token", "token_type": "Bearer", "expires_in": 3600}',
            request=token_request,
        )
        final_request = await auth_flow.asend(token_response)
        assert final_request.headers["Authorization"] == "Bearer test_token"
        final_response = httpx.Response(200, request=final_request)
        # Generator must be exhausted after the final response.
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass
    @pytest.mark.anyio
    async def test_auth_flow_falls_back_to_dcr_when_no_cimd_support(
        self, client_metadata: OAuthClientMetadata, mock_storage: MockTokenStorage
    ):
        """Test that auth flow falls back to DCR when server doesn't support CIMD."""
        async def redirect_handler(url: str) -> None:
            pass # pragma: no cover
        async def callback_handler() -> tuple[str, str | None]:
            return "test_auth_code", "test_state" # pragma: no cover
        provider = OAuthClientProvider(
            server_url="https://api.example.com/v1/mcp",
            client_metadata=client_metadata,
            storage=mock_storage,
            redirect_handler=redirect_handler,
            callback_handler=callback_handler,
            client_metadata_url="https://example.com/client",
        )
        provider.context.current_tokens = None
        provider.context.token_expiry_time = None
        provider._initialized = True
        test_request = httpx.Request("GET", "https://api.example.com/v1/mcp")
        auth_flow = provider.async_auth_flow(test_request)
        # First request
        await auth_flow.__anext__()
        # Send 401 response
        response = httpx.Response(401, headers={}, request=test_request)
        # PRM discovery
        prm_request = await auth_flow.asend(response)
        prm_response = httpx.Response(
            200,
            content=b'{"resource": "https://api.example.com/v1/mcp", "authorization_servers": ["https://auth.example.com"]}',
            request=prm_request,
        )
        # OAuth metadata discovery - server does NOT support CIMD
        oauth_request = await auth_flow.asend(prm_response)
        oauth_response = httpx.Response(
            200,
            content=(
                b'{"issuer": "https://auth.example.com", '
                b'"authorization_endpoint": "https://auth.example.com/authorize", '
                b'"token_endpoint": "https://auth.example.com/token", '
                b'"registration_endpoint": "https://auth.example.com/register"}'
            ),
            request=oauth_request,
        )
        # Should proceed to DCR instead of skipping it
        registration_request = await auth_flow.asend(oauth_response)
        assert registration_request.method == "POST"
        assert str(registration_request.url) == "https://auth.example.com/register"
        # Complete the flow to avoid generator cleanup issues
        registration_response = httpx.Response(
            201,
            content=b'{"client_id": "dcr_client_id", "redirect_uris": ["http://localhost:3030/callback"]}',
            request=registration_request,
        )
        # Mock authorization
        provider._perform_authorization_code_grant = mock.AsyncMock(
            return_value=("test_auth_code", "test_code_verifier")
        )
        token_request = await auth_flow.asend(registration_response)
        token_response = httpx.Response(
            200,
            content=b'{"access_token": "test_token", "token_type": "Bearer", "expires_in": 3600}',
            request=token_request,
        )
        final_request = await auth_flow.asend(token_response)
        final_response = httpx.Response(200, request=final_request)
        try:
            await auth_flow.asend(final_response)
        except StopAsyncIteration:
            pass
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_auth.py",
"license": "MIT License",
"lines": 1877,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/client/test_list_methods_cursor.py | from collections.abc import Callable
import pytest
from mcp import Client, types
from mcp.server import Server, ServerRequestContext
from mcp.server.mcpserver import MCPServer
from mcp.types import ListToolsResult
from .conftest import StreamSpyCollection
pytestmark = pytest.mark.anyio
@pytest.fixture
async def full_featured_server():
    """Create a server with tools, resources, prompts, and templates."""
    server = MCPServer("test")
    # pragma: no cover on handlers below - these exist only to register items with the
    # server so list_* methods return results. The handlers themselves are never called
    # because these tests only verify pagination/cursor behavior, not tool/resource invocation.
    # NOTE(review): the handler names and docstrings presumably surface as the
    # registered component names/descriptions - keep them stable; verify against
    # MCPServer's registration decorators before renaming.
    @server.tool()
    def greet(name: str) -> str: # pragma: no cover
        """Greet someone by name."""
        return f"Hello, {name}!"
    @server.resource("test://resource")
    def test_resource() -> str: # pragma: no cover
        """A test resource."""
        return "Test content"
    @server.resource("test://template/{id}")
    def test_template(id: str) -> str: # pragma: no cover
        """A test resource template."""
        return f"Template content for {id}"
    @server.prompt()
    def greeting_prompt(name: str) -> str: # pragma: no cover
        """A greeting prompt."""
        return f"Please greet {name}."
    return server
@pytest.mark.parametrize(
    "method_name,request_method",
    [
        ("list_tools", "tools/list"),
        ("list_resources", "resources/list"),
        ("list_prompts", "prompts/list"),
        ("list_resource_templates", "resources/templates/list"),
    ],
)
async def test_list_methods_params_parameter(
    stream_spy: Callable[[], StreamSpyCollection],
    full_featured_server: MCPServer,
    method_name: str,
    request_method: str,
):
    """Verify the cursor parameter is passed through to the wire request.

    Exercises list_tools, list_resources, list_prompts, and
    list_resource_templates against the pagination request format from
    https://modelcontextprotocol.io/specification/2025-03-26/server/utilities/pagination#request-format
    """
    async with Client(full_featured_server) as client:
        spies = stream_spy()
        list_call = getattr(client, method_name)

        def sole_request():
            # Exactly one wire request should have been issued per call.
            captured = spies.get_client_requests(method=request_method)
            assert len(captured) == 1
            return captured[0]

        # No cursor argument: the request carries no cursor at all.
        _ = await list_call()
        first = sole_request()
        assert first.params is None or "cursor" not in first.params
        spies.clear()
        # Explicit cursor: forwarded verbatim in the request params.
        _ = await list_call(cursor="from_params")
        second = sole_request()
        assert second.params is not None
        assert second.params["cursor"] == "from_params"
        spies.clear()
        # A subsequent call without a cursor again sends none.
        _ = await list_call()
        third = sole_request()
        assert third.params is None or "cursor" not in third.params
async def test_list_tools_with_strict_server_validation(
    full_featured_server: MCPServer,
):
    """Test pagination with a server that validates request format strictly."""
    async with Client(full_featured_server) as client:
        listing = await client.list_tools()
        # A well-formed request yields a typed, non-empty result.
        assert isinstance(listing, ListToolsResult)
        assert len(listing.tools) > 0
async def test_list_tools_with_lowlevel_server():
    """Test that list_tools works with a lowlevel Server using params."""
    async def echo_cursor_handler(
        ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
    ) -> ListToolsResult:
        # Surface the cursor the server received through the tool description,
        # so the client side can assert on exactly what arrived.
        received = params.cursor if params else None
        tool = types.Tool(name="test_tool", description=f"cursor={received}", input_schema={})
        return ListToolsResult(tools=[tool])
    server = Server("test-lowlevel", on_list_tools=echo_cursor_handler)
    async with Client(server) as client:
        without_cursor = await client.list_tools()
        assert without_cursor.tools[0].description == "cursor=None"
        with_cursor = await client.list_tools(cursor="page2")
        assert with_cursor.tools[0].description == "cursor=page2"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_list_methods_cursor.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/shared/test_progress_notifications.py | from typing import Any
from unittest.mock import patch
import anyio
import pytest
from mcp import Client, types
from mcp.client.session import ClientSession
from mcp.server import Server, ServerRequestContext
from mcp.server.lowlevel import NotificationOptions
from mcp.server.models import InitializationOptions
from mcp.server.session import ServerSession
from mcp.shared.message import SessionMessage
from mcp.shared.session import RequestResponder
@pytest.mark.anyio
async def test_bidirectional_progress_notifications():
    """Test that both client and server can send progress notifications."""
    # Create memory streams for client/server (buffer size 5 each direction)
    server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](5)
    client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](5)
    # Run a server session so we can send progress updates in tool
    async def run_server():
        # Create a server session
        async with ServerSession(
            client_to_server_receive,
            server_to_client_send,
            InitializationOptions(
                server_name="ProgressTestServer",
                server_version="0.1.0",
                capabilities=server.get_capabilities(NotificationOptions(), {}),
            ),
        ) as server_session:
            # Pump incoming messages until the task group is cancelled below.
            async for message in server_session.incoming_messages:
                try:
                    await server._handle_message(message, server_session, {})
                except Exception as e: # pragma: no cover
                    raise e
    # Track progress updates
    server_progress_updates: list[dict[str, Any]] = []
    client_progress_updates: list[dict[str, Any]] = []
    # Progress tokens: one per direction so each side can verify the token
    # it receives is the one the peer attached.
    server_progress_token = "server_token_123"
    client_progress_token = "client_token_456"
    # Register progress handler
    async def handle_progress(ctx: ServerRequestContext, params: types.ProgressNotificationParams) -> None:
        server_progress_updates.append(
            {
                "token": params.progress_token,
                "progress": params.progress,
                "total": params.total,
                "message": params.message,
            }
        )
    # Register list tool handler
    async def handle_list_tools(
        ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
    ) -> types.ListToolsResult:
        return types.ListToolsResult(
            tools=[
                types.Tool(
                    name="test_tool",
                    description="A tool that sends progress notifications <o/",
                    input_schema={},
                )
            ]
        )
    # Register tool handler
    async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
        # Make sure we received a progress token
        if params.name == "test_tool":
            assert params.meta is not None
            progress_token = params.meta.get("progress_token")
            assert progress_token is not None
            assert progress_token == client_progress_token
            # Send progress notifications using ctx.session
            await ctx.session.send_progress_notification(
                progress_token=progress_token,
                progress=0.25,
                total=1.0,
                message="Server progress 25%",
            )
            await ctx.session.send_progress_notification(
                progress_token=progress_token,
                progress=0.5,
                total=1.0,
                message="Server progress 50%",
            )
            await ctx.session.send_progress_notification(
                progress_token=progress_token,
                progress=1.0,
                total=1.0,
                message="Server progress 100%",
            )
            return types.CallToolResult(content=[types.TextContent(type="text", text="Tool executed successfully")])
        raise ValueError(f"Unknown tool: {params.name}") # pragma: no cover
    # Create a server with progress capability
    server = Server(
        name="ProgressTestServer",
        on_progress=handle_progress,
        on_list_tools=handle_list_tools,
        on_call_tool=handle_call_tool,
    )
    # Client message handler to store progress notifications
    async def handle_client_message(
        message: RequestResponder[types.ServerRequest, types.ClientResult] | types.ServerNotification | Exception,
    ) -> None:
        if isinstance(message, Exception): # pragma: no cover
            raise message
        if isinstance(message, types.ServerNotification): # pragma: no branch
            if isinstance(message, types.ProgressNotification): # pragma: no branch
                params = message.params
                client_progress_updates.append(
                    {
                        "token": params.progress_token,
                        "progress": params.progress,
                        "total": params.total,
                        "message": params.message,
                    }
                )
    # Test using client
    async with (
        ClientSession(
            server_to_client_receive,
            client_to_server_send,
            message_handler=handle_client_message,
        ) as client_session,
        anyio.create_task_group() as tg,
    ):
        # Start the server in a background task
        tg.start_soon(run_server)
        # Initialize the client connection
        await client_session.initialize()
        # Call list_tools with progress token
        await client_session.list_tools()
        # Call test_tool with progress token
        await client_session.call_tool("test_tool", meta={"progress_token": client_progress_token})
        # Send progress notifications from client to server
        await client_session.send_progress_notification(
            progress_token=server_progress_token,
            progress=0.33,
            total=1.0,
            message="Client progress 33%",
        )
        await client_session.send_progress_notification(
            progress_token=server_progress_token,
            progress=0.66,
            total=1.0,
            message="Client progress 66%",
        )
        await client_session.send_progress_notification(
            progress_token=server_progress_token,
            progress=1.0,
            total=1.0,
            message="Client progress 100%",
        )
        # Wait for notifications to be delivered, then exit; the cancel
        # stops run_server's message loop.
        await anyio.sleep(0.5)
        tg.cancel_scope.cancel()
    # Verify client received progress updates from server
    assert len(client_progress_updates) == 3
    assert client_progress_updates[0]["token"] == client_progress_token
    assert client_progress_updates[0]["progress"] == 0.25
    assert client_progress_updates[0]["message"] == "Server progress 25%"
    assert client_progress_updates[2]["progress"] == 1.0
    # Verify server received progress updates from client
    assert len(server_progress_updates) == 3
    assert server_progress_updates[0]["token"] == server_progress_token
    assert server_progress_updates[0]["progress"] == 0.33
    assert server_progress_updates[0]["message"] == "Client progress 33%"
    assert server_progress_updates[2]["progress"] == 1.0
@pytest.mark.anyio
async def test_progress_callback_exception_logging():
    """Exceptions raised by a progress callback are logged and do not crash the session."""
    # Track logged warnings
    logged_errors: list[str] = []
    def mock_log_exception(msg: str, *args: Any, **kwargs: Any) -> None:
        # Mimic logging's lazy %-formatting so the captured string matches
        # what the real logger would have emitted.
        logged_errors.append(msg % args if args else msg)
    # Create a progress callback that raises an exception
    async def failing_progress_callback(progress: float, total: float | None, message: str | None) -> None:
        raise ValueError("Progress callback failed!")
    # Create a server with a tool that sends progress notifications
    async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
        if params.name == "progress_tool":
            assert ctx.request_id is not None
            # Send a progress notification keyed to the in-flight request.
            await ctx.session.send_progress_notification(
                progress_token=ctx.request_id,
                progress=50.0,
                total=100.0,
                message="Halfway done",
            )
            return types.CallToolResult(content=[types.TextContent(type="text", text="progress_result")])
        raise ValueError(f"Unknown tool: {params.name}") # pragma: no cover
    async def handle_list_tools(
        ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
    ) -> types.ListToolsResult:
        return types.ListToolsResult(
            tools=[
                types.Tool(
                    name="progress_tool",
                    description="A tool that sends progress notifications",
                    input_schema={},
                )
            ]
        )
    server = Server(
        name="TestProgressServer",
        on_call_tool=handle_call_tool,
        on_list_tools=handle_list_tools,
    )
    # Test with mocked logging
    with patch("mcp.shared.session.logging.exception", side_effect=mock_log_exception):
        async with Client(server) as client:
            # Call tool with a failing progress callback
            result = await client.call_tool(
                "progress_tool",
                arguments={},
                progress_callback=failing_progress_callback,
            )
            # Verify the request completed successfully despite the callback failure
            assert len(result.content) == 1
            content = result.content[0]
            assert isinstance(content, types.TextContent)
            assert content.text == "progress_result"
    # Check that a warning was logged for the progress callback exception
    assert len(logged_errors) > 0
    assert any("Progress callback raised an exception" in warning for warning in logged_errors)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_progress_notifications.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/client/session_group.py | """SessionGroup concurrently manages multiple MCP session connections.
Tools, resources, and prompts are aggregated across servers. Servers may
be connected to or disconnected from at any point after initialization.
This abstraction can handle naming collisions using a custom user-provided hook.
"""
import contextlib
import logging
from collections.abc import Callable
from dataclasses import dataclass
from types import TracebackType
from typing import Any, TypeAlias
import anyio
import httpx
from pydantic import BaseModel, Field
from typing_extensions import Self
import mcp
from mcp import types
from mcp.client.session import ElicitationFnT, ListRootsFnT, LoggingFnT, MessageHandlerFnT, SamplingFnT
from mcp.client.sse import sse_client
from mcp.client.stdio import StdioServerParameters
from mcp.client.streamable_http import streamable_http_client
from mcp.shared._httpx_utils import create_mcp_http_client
from mcp.shared.exceptions import MCPError
from mcp.shared.session import ProgressFnT
class SseServerParameters(BaseModel):
    """Parameters for initializing an sse_client.

    NOTE(review): fields presumably mirror ``sse_client``'s keyword arguments
    and are forwarded when a connection is opened - verify against the caller.
    """
    # The endpoint URL.
    url: str
    # Optional headers to include in requests.
    headers: dict[str, Any] | None = None
    # HTTP timeout for regular operations (in seconds).
    timeout: float = 5.0
    # Timeout for SSE read operations (in seconds).
    sse_read_timeout: float = 300.0
class StreamableHttpParameters(BaseModel):
    """Parameters for initializing a streamable_http_client."""

    #: Endpoint URL of the streamable HTTP server.
    url: str
    #: Extra headers sent with every request, if any.
    headers: dict[str, Any] | None = None
    #: Timeout for regular HTTP operations, in seconds.
    timeout: float = 30.0
    #: Timeout for reads on the SSE stream, in seconds.
    sse_read_timeout: float = 300.0
    #: Whether to close the client session when the transport closes.
    terminate_on_close: bool = True
# Union of every supported transport's parameter model.
ServerParameters: TypeAlias = StdioServerParameters | SseServerParameters | StreamableHttpParameters


# A dataclass (rather than a Pydantic BaseModel) is required here:
# Pydantic cannot validate Protocol-typed fields such as the callbacks below.
@dataclass
class ClientSessionParameters:
    """Parameters for establishing a client session to an MCP server."""

    # Per-request read timeout applied to the session (seconds).
    read_timeout_seconds: float | None = None
    # Optional client-side callbacks forwarded to mcp.ClientSession.
    sampling_callback: SamplingFnT | None = None
    elicitation_callback: ElicitationFnT | None = None
    list_roots_callback: ListRootsFnT | None = None
    logging_callback: LoggingFnT | None = None
    message_handler: MessageHandlerFnT | None = None
    # Client identity reported to the server during initialization.
    client_info: types.Implementation | None = None
class ClientSessionGroup:
    """Client for managing connections to multiple MCP servers.

    This class encapsulates management of server connections and aggregates
    tools, resources, and prompts from every connected server. Auxiliary
    handlers, such as resource subscription, are delegated to the client and
    can be accessed via the session.

    Example:
        ```python
        name_fn = lambda name, server_info: f"{(server_info.name)}_{name}"
        async with ClientSessionGroup(component_name_hook=name_fn) as group:
            for server_param in server_params:
                await group.connect_to_server(server_param)
            ...
        ```
    """

    class _ComponentNames(BaseModel):
        """Reverse index: the component names contributed by one session."""

        prompts: set[str] = Field(default_factory=set)
        resources: set[str] = Field(default_factory=set)
        tools: set[str] = Field(default_factory=set)

    # Aggregated MCP components, keyed by (possibly hook-transformed) name.
    _prompts: dict[str, types.Prompt]
    _resources: dict[str, types.Resource]
    _tools: dict[str, types.Tool]

    # Connection bookkeeping.
    _sessions: dict[mcp.ClientSession, _ComponentNames]
    _tool_to_session: dict[str, mcp.ClientSession]
    _exit_stack: contextlib.AsyncExitStack
    _session_exit_stacks: dict[mcp.ClientSession, contextlib.AsyncExitStack]

    # Optional hook consuming (component_name, server_info) and returning the
    # name under which the component is registered. This mitigates naming
    # conflicts across servers, e.g. f"{server_info.name}.{tool_name}".
    _ComponentNameHook: TypeAlias = Callable[[str, types.Implementation], str]
    _component_name_hook: _ComponentNameHook | None

    def __init__(
        self,
        exit_stack: contextlib.AsyncExitStack | None = None,
        component_name_hook: _ComponentNameHook | None = None,
    ) -> None:
        """Initializes the MCP client.

        Args:
            exit_stack: Optional externally-managed stack. When omitted, the
                group creates (and therefore owns and closes) its own stack.
            component_name_hook: Optional naming hook (see class docs).
        """
        self._prompts = {}
        self._resources = {}
        self._tools = {}
        self._sessions = {}
        self._tool_to_session = {}
        self._session_exit_stacks = {}
        # Track ownership so __aenter__/__aexit__ only manage a stack we made.
        self._owns_exit_stack = exit_stack is None
        self._exit_stack = contextlib.AsyncExitStack() if exit_stack is None else exit_stack
        self._component_name_hook = component_name_hook

    async def __aenter__(self) -> Self:  # pragma: no cover
        # Enter the exit stack only if this group created it itself.
        if self._owns_exit_stack:
            await self._exit_stack.__aenter__()
        return self

    async def __aexit__(
        self,
        _exc_type: type[BaseException] | None,
        _exc_val: BaseException | None,
        _exc_tb: TracebackType | None,
    ) -> bool | None:  # pragma: no cover
        """Closes session exit stacks and main exit stack upon completion."""
        # Only close the main exit stack if this group owns it.
        if self._owns_exit_stack:
            await self._exit_stack.aclose()

        # Close any remaining per-session stacks concurrently.
        async with anyio.create_task_group() as tg:
            for stack in self._session_exit_stacks.values():
                tg.start_soon(stack.aclose)

    @property
    def sessions(self) -> list[mcp.ClientSession]:
        """Returns the list of sessions being managed."""
        return list(self._sessions.keys())  # pragma: no cover

    @property
    def prompts(self) -> dict[str, types.Prompt]:
        """Returns the prompts as a dictionary of names to prompts."""
        return self._prompts

    @property
    def resources(self) -> dict[str, types.Resource]:
        """Returns the resources as a dictionary of names to resources."""
        return self._resources

    @property
    def tools(self) -> dict[str, types.Tool]:
        """Returns the tools as a dictionary of names to tools."""
        return self._tools

    async def call_tool(
        self,
        name: str,
        arguments: dict[str, Any] | None = None,
        read_timeout_seconds: float | None = None,
        progress_callback: ProgressFnT | None = None,
        *,
        meta: types.RequestParamsMeta | None = None,
    ) -> types.CallToolResult:
        """Executes a tool given its group-level name and arguments.

        The group-level name may differ from the server-side tool name when a
        component name hook is installed; the call is forwarded using the
        server's original name.
        """
        session = self._tool_to_session[name]
        return await session.call_tool(
            self.tools[name].name,  # server-side (un-hooked) tool name
            arguments=arguments,
            read_timeout_seconds=read_timeout_seconds,
            progress_callback=progress_callback,
            meta=meta,
        )

    async def disconnect_from_server(self, session: mcp.ClientSession) -> None:
        """Disconnects from a single MCP server.

        Raises:
            MCPError: if the session is not managed by this group.
        """
        tracked_components = session in self._sessions
        tracked_stack = session in self._session_exit_stacks
        if not (tracked_components or tracked_stack):
            raise MCPError(
                code=types.INVALID_PARAMS,
                message="Provided session is not managed or already disconnected.",
            )

        if tracked_components:  # pragma: no branch
            component_names = self._sessions.pop(session)
            # Drop every component this session contributed; pop with a
            # default tolerates names already removed elsewhere.
            for name in component_names.prompts:
                self._prompts.pop(name, None)
            for name in component_names.resources:
                self._resources.pop(name, None)
            for name in component_names.tools:
                self._tools.pop(name, None)
                self._tool_to_session.pop(name, None)

        # Release the session's resources via its dedicated exit stack.
        if tracked_stack:
            stack = self._session_exit_stacks.pop(session)  # pragma: no cover
            await stack.aclose()  # pragma: no cover

    async def connect_with_session(
        self, server_info: types.Implementation, session: mcp.ClientSession
    ) -> mcp.ClientSession:
        """Registers an already-established session with the group."""
        await self._aggregate_components(server_info, session)
        return session

    async def connect_to_server(
        self,
        server_params: ServerParameters,
        session_params: ClientSessionParameters | None = None,
    ) -> mcp.ClientSession:
        """Connects to a single MCP server and aggregates its components."""
        server_info, session = await self._establish_session(server_params, session_params or ClientSessionParameters())
        return await self.connect_with_session(server_info, session)

    async def _establish_session(
        self,
        server_params: ServerParameters,
        session_params: ClientSessionParameters,
    ) -> tuple[types.Implementation, mcp.ClientSession]:
        """Establish a client session to an MCP server.

        Returns the server's reported Implementation info and the live session.
        """
        stack = contextlib.AsyncExitStack()
        try:
            # Build the read/write stream pair for the requested transport.
            if isinstance(server_params, StdioServerParameters):
                client = mcp.stdio_client(server_params)
                read, write = await stack.enter_async_context(client)
            elif isinstance(server_params, SseServerParameters):
                client = sse_client(
                    url=server_params.url,
                    headers=server_params.headers,
                    timeout=server_params.timeout,
                    sse_read_timeout=server_params.sse_read_timeout,
                )
                read, write = await stack.enter_async_context(client)
            else:
                # Streamable HTTP: the httpx client's lifetime is tied to the
                # per-session stack alongside the transport itself.
                httpx_client = create_mcp_http_client(
                    headers=server_params.headers,
                    timeout=httpx.Timeout(
                        server_params.timeout,
                        read=server_params.sse_read_timeout,
                    ),
                )
                await stack.enter_async_context(httpx_client)
                client = streamable_http_client(
                    url=server_params.url,
                    http_client=httpx_client,
                    terminate_on_close=server_params.terminate_on_close,
                )
                read, write = await stack.enter_async_context(client)

            session = await stack.enter_async_context(
                mcp.ClientSession(
                    read,
                    write,
                    read_timeout_seconds=session_params.read_timeout_seconds,
                    sampling_callback=session_params.sampling_callback,
                    elicitation_callback=session_params.elicitation_callback,
                    list_roots_callback=session_params.list_roots_callback,
                    logging_callback=session_params.logging_callback,
                    message_handler=session_params.message_handler,
                    client_info=session_params.client_info,
                )
            )
            result = await session.initialize()

            # Initialization succeeded: record the per-session stack and hand
            # it to the main group stack so it is closed on group shutdown.
            self._session_exit_stacks[session] = stack
            await self._exit_stack.enter_async_context(stack)
            return result.server_info, session
        except Exception:  # pragma: no cover
            # Tear down any partially-acquired resources before propagating.
            await stack.aclose()
            raise

    async def _aggregate_components(self, server_info: types.Implementation, session: mcp.ClientSession) -> None:
        """Aggregates prompts, resources, and tools from a given session."""
        # Reverse index so disconnect_from_server can later find every
        # component this session contributed.
        component_names = self._ComponentNames()

        # Stage components locally first; the shared dicts are only updated
        # once all queries and duplicate checks have succeeded.
        staged_prompts: dict[str, types.Prompt] = {}
        staged_resources: dict[str, types.Resource] = {}
        staged_tools: dict[str, types.Tool] = {}
        staged_tool_sessions: dict[str, mcp.ClientSession] = {}

        # Prompts: best-effort, servers may not implement the capability.
        try:
            for prompt in (await session.list_prompts()).prompts:
                name = self._component_name(prompt.name, server_info)
                staged_prompts[name] = prompt
                component_names.prompts.add(name)
        except MCPError as err:  # pragma: no cover
            logging.warning(f"Could not fetch prompts: {err}")

        # Resources: same best-effort policy as prompts.
        try:
            for resource in (await session.list_resources()).resources:
                name = self._component_name(resource.name, server_info)
                staged_resources[name] = resource
                component_names.resources.add(name)
        except MCPError as err:  # pragma: no cover
            logging.warning(f"Could not fetch resources: {err}")

        # Tools: also remember which session serves each tool.
        try:
            for tool in (await session.list_tools()).tools:
                name = self._component_name(tool.name, server_info)
                staged_tools[name] = tool
                staged_tool_sessions[name] = session
                component_names.tools.add(name)
        except MCPError as err:  # pragma: no cover
            logging.warning(f"Could not fetch tools: {err}")

        # Server exposed nothing at all: drop its exit-stack bookkeeping.
        if not (staged_prompts or staged_resources or staged_tools):
            del self._session_exit_stacks[session]  # pragma: no cover

        # Refuse to merge when any name collides with an existing component.
        matching_prompts = staged_prompts.keys() & self._prompts.keys()
        if matching_prompts:
            raise MCPError(  # pragma: no cover
                code=types.INVALID_PARAMS,
                message=f"{matching_prompts} already exist in group prompts.",
            )
        matching_resources = staged_resources.keys() & self._resources.keys()
        if matching_resources:
            raise MCPError(  # pragma: no cover
                code=types.INVALID_PARAMS,
                message=f"{matching_resources} already exist in group resources.",
            )
        matching_tools = staged_tools.keys() & self._tools.keys()
        if matching_tools:
            raise MCPError(code=types.INVALID_PARAMS, message=f"{matching_tools} already exist in group tools.")

        # All clear: merge staged components into the aggregate view.
        self._sessions[session] = component_names
        self._prompts.update(staged_prompts)
        self._resources.update(staged_resources)
        self._tools.update(staged_tools)
        self._tool_to_session.update(staged_tool_sessions)

    def _component_name(self, name: str, server_info: types.Implementation) -> str:
        """Apply the optional naming hook; fall back to the raw name."""
        hook = self._component_name_hook
        return hook(name, server_info) if hook else name
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/client/session_group.py",
"license": "MIT License",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/client/test_session_group.py | import contextlib
from unittest import mock
import httpx
import pytest
import mcp
from mcp import types
from mcp.client.session_group import (
ClientSessionGroup,
ClientSessionParameters,
SseServerParameters,
StreamableHttpParameters,
)
from mcp.client.stdio import StdioServerParameters
from mcp.shared.exceptions import MCPError
@pytest.fixture
def mock_exit_stack():
    """Provide a MagicMock standing in for an AsyncExitStack.

    A spec'd MagicMock is sufficient: tests only need attribute access and
    call recording, not real context-manager semantics.
    """
    return mock.MagicMock(spec=contextlib.AsyncExitStack)
def test_client_session_group_init():
    """A freshly-constructed group starts with no aggregated components."""
    group = ClientSessionGroup()
    assert not group._tools
    assert not group._resources
    assert not group._prompts
    assert not group._tool_to_session
def test_client_session_group_component_properties():
    """The prompts/resources/tools properties expose the internal dicts."""
    prompt = mock.Mock()
    resource = mock.Mock()
    tool = mock.Mock()

    group = ClientSessionGroup()
    group._prompts = {"my_prompt": prompt}
    group._resources = {"my_resource": resource}
    group._tools = {"my_tool": tool}

    assert group.prompts == {"my_prompt": prompt}
    assert group.resources == {"my_resource": resource}
    assert group.tools == {"my_tool": tool}
@pytest.mark.anyio
async def test_client_session_group_call_tool():
    """call_tool resolves the hooked name and forwards the server-side name."""
    session = mock.AsyncMock()

    def hook(name: str, server_info: types.Implementation) -> str:  # pragma: no cover
        return f"{(server_info.name)}-{name}"

    group = ClientSessionGroup(component_name_hook=hook)
    group._tools = {"server1-my_tool": types.Tool(name="my_tool", input_schema={})}
    group._tool_to_session = {"server1-my_tool": session}
    text_content = types.TextContent(type="text", text="OK")
    session.call_tool.return_value = types.CallToolResult(content=[text_content])

    result = await group.call_tool(
        name="server1-my_tool",
        arguments={"name": "value1", "args": {}},
    )

    assert result.content == [text_content]
    # The underlying session must receive the original (un-hooked) tool name.
    session.call_tool.assert_called_once_with(
        "my_tool",
        arguments={"name": "value1", "args": {}},
        read_timeout_seconds=None,
        progress_callback=None,
        meta=None,
    )
@pytest.mark.anyio
async def test_client_session_group_connect_to_server(mock_exit_stack: contextlib.AsyncExitStack):
    """Test connecting to a server and aggregating components."""
    server_info = mock.Mock(spec=types.Implementation)
    server_info.name = "TestServer1"
    session = mock.AsyncMock(spec=mcp.ClientSession)

    # The server advertises one tool, one resource, and one prompt.
    tool = mock.Mock(spec=types.Tool)
    tool.name = "tool_a"
    resource = mock.Mock(spec=types.Resource)
    resource.name = "resource_b"
    prompt = mock.Mock(spec=types.Prompt)
    prompt.name = "prompt_c"
    session.list_tools.return_value = mock.AsyncMock(tools=[tool])
    session.list_resources.return_value = mock.AsyncMock(resources=[resource])
    session.list_prompts.return_value = mock.AsyncMock(prompts=[prompt])

    group = ClientSessionGroup(exit_stack=mock_exit_stack)
    with mock.patch.object(group, "_establish_session", return_value=(server_info, session)):
        await group.connect_to_server(StdioServerParameters(command="test"))

    # Each component is registered once, under its original name.
    assert session in group._sessions
    assert len(group.tools) == 1
    assert "tool_a" in group.tools
    assert group.tools["tool_a"] == tool
    assert group._tool_to_session["tool_a"] == session
    assert len(group.resources) == 1
    assert "resource_b" in group.resources
    assert group.resources["resource_b"] == resource
    assert len(group.prompts) == 1
    assert "prompt_c" in group.prompts
    assert group.prompts["prompt_c"] == prompt
    session.list_tools.assert_awaited_once()
    session.list_resources.assert_awaited_once()
    session.list_prompts.assert_awaited_once()
@pytest.mark.anyio
async def test_client_session_group_connect_to_server_with_name_hook(mock_exit_stack: contextlib.AsyncExitStack):
    """Test connecting with a component name hook."""
    server_info = mock.Mock(spec=types.Implementation)
    server_info.name = "HookServer"
    session = mock.AsyncMock(spec=mcp.ClientSession)
    tool = mock.Mock(spec=types.Tool)
    tool.name = "base_tool"
    session.list_tools.return_value = mock.AsyncMock(tools=[tool])
    session.list_resources.return_value = mock.AsyncMock(resources=[])
    session.list_prompts.return_value = mock.AsyncMock(prompts=[])

    def name_hook(name: str, server_info: types.Implementation) -> str:
        return f"{server_info.name}.{name}"

    group = ClientSessionGroup(exit_stack=mock_exit_stack, component_name_hook=name_hook)
    with mock.patch.object(group, "_establish_session", return_value=(server_info, session)):
        await group.connect_to_server(StdioServerParameters(command="test"))

    # The tool is registered under its hook-qualified name.
    assert session in group._sessions
    assert len(group.tools) == 1
    qualified_name = "HookServer.base_tool"
    assert qualified_name in group.tools
    assert group.tools[qualified_name] == tool
    assert group._tool_to_session[qualified_name] == session
@pytest.mark.anyio
async def test_client_session_group_disconnect_from_server():
    """Test disconnecting from a server."""
    group = ClientSessionGroup()
    server_name = "ServerToDisconnect"

    # Manually seed group state with plain mocks.
    session_one = mock.MagicMock(spec=mcp.ClientSession)
    session_two = mock.MagicMock(spec=mcp.ClientSession)
    tool_one = mock.Mock(spec=types.Tool)
    tool_one.name = "tool1"
    resource_one = mock.Mock(spec=types.Resource)
    resource_one.name = "res1"
    prompt_one = mock.Mock(spec=types.Prompt)
    prompt_one.name = "prm1"
    tool_two = mock.Mock(spec=types.Tool)
    tool_two.name = "tool2"
    # Components sharing the server's name must survive the disconnect,
    # since they are not listed in the session's _ComponentNames.
    server_named_component = mock.Mock()
    target_session = mock.Mock(spec=mcp.ClientSession)

    group._tools = {
        "tool1": tool_one,
        "tool2": tool_two,
        server_name: server_named_component,
    }
    group._tool_to_session = {
        "tool1": session_one,
        "tool2": session_two,
        server_name: session_one,
    }
    group._resources = {
        "res1": resource_one,
        server_name: server_named_component,
    }
    group._prompts = {
        "prm1": prompt_one,
        server_name: server_named_component,
    }
    group._sessions = {
        target_session: ClientSessionGroup._ComponentNames(
            prompts={"prm1"},
            resources={"res1"},
            tools={"tool1", "tool2"},
        )
    }

    # Sanity-check the seeded state.
    assert target_session in group._sessions
    assert "tool1" in group._tools
    assert "tool2" in group._tools
    assert "res1" in group._resources
    assert "prm1" in group._prompts

    await group.disconnect_from_server(target_session)

    # Everything attributed to the session is gone.
    assert target_session not in group._sessions
    assert "tool1" not in group._tools
    assert "tool2" not in group._tools
    assert "res1" not in group._resources
    assert "prm1" not in group._prompts
@pytest.mark.anyio
async def test_client_session_group_connect_to_server_duplicate_tool_raises_error(
    mock_exit_stack: contextlib.AsyncExitStack,
):
    """Test MCPError raised when connecting a server with a dup name."""
    group = ClientSessionGroup(exit_stack=mock_exit_stack)
    existing_tool_name = "shared_tool"

    # Simulate a prior connection that already registered the tool.
    group._tools[existing_tool_name] = mock.Mock(spec=types.Tool)
    group._tools[existing_tool_name].name = existing_tool_name
    prior_session = mock.MagicMock(spec=mcp.ClientSession)
    group._tool_to_session[existing_tool_name] = prior_session
    group._session_exit_stacks[prior_session] = mock.Mock(spec=contextlib.AsyncExitStack)

    # The new server advertises a tool with the *same* name.
    new_server_info = mock.Mock(spec=types.Implementation)
    new_server_info.name = "ServerWithDuplicate"
    new_session = mock.AsyncMock(spec=mcp.ClientSession)
    duplicate_tool = mock.Mock(spec=types.Tool)
    duplicate_tool.name = existing_tool_name
    new_session.list_tools.return_value = mock.AsyncMock(tools=[duplicate_tool])
    # Keep other component lists empty for simplicity.
    new_session.list_resources.return_value = mock.AsyncMock(resources=[])
    new_session.list_prompts.return_value = mock.AsyncMock(prompts=[])

    with pytest.raises(MCPError) as excinfo:
        with mock.patch.object(
            group,
            "_establish_session",
            return_value=(new_server_info, new_session),
        ):
            await group.connect_to_server(StdioServerParameters(command="test"))

    # The error identifies the colliding name.
    assert excinfo.value.error.code == types.INVALID_PARAMS
    assert existing_tool_name in excinfo.value.error.message
    assert "already exist " in excinfo.value.error.message
    # State unchanged: the duplicate must not replace the original.
    assert len(group._tools) == 1
    assert group._tools[existing_tool_name] is not duplicate_tool
@pytest.mark.anyio
async def test_client_session_group_disconnect_non_existent_server():
    """Test disconnecting a server that isn't connected."""
    group = ClientSessionGroup()
    unknown_session = mock.Mock(spec=mcp.ClientSession)
    with pytest.raises(MCPError):
        await group.disconnect_from_server(unknown_session)
# TODO(Marcelo): This is horrible. We should drop this test.
@pytest.mark.anyio
@pytest.mark.parametrize(
    "server_params_instance, client_type_name, patch_target_for_client_func",
    [
        (
            StdioServerParameters(command="test_stdio_cmd"),
            "stdio",
            "mcp.client.session_group.mcp.stdio_client",
        ),
        (
            SseServerParameters(url="http://test.com/sse", timeout=10.0),
            "sse",
            "mcp.client.session_group.sse_client",
        ),
        (
            StreamableHttpParameters(url="http://test.com/stream", terminate_on_close=False),
            "streamablehttp",
            "mcp.client.session_group.streamable_http_client",
        ),
    ],
)
async def test_client_session_group_establish_session_parameterized(
    server_params_instance: StdioServerParameters | SseServerParameters | StreamableHttpParameters,
    client_type_name: str,
    patch_target_for_client_func: str,
):
    """_establish_session wires the right transport and a ClientSession."""
    with (
        mock.patch("mcp.client.session_group.mcp.ClientSession") as mock_client_session_cls,
        mock.patch(patch_target_for_client_func) as mock_transport_factory,
    ):
        # Transport context manager yields a (read, write) stream pair.
        transport_cm = mock.AsyncMock(name=f"{client_type_name}ClientCM")
        read_stream = mock.AsyncMock(name=f"{client_type_name}Read")
        write_stream = mock.AsyncMock(name=f"{client_type_name}Write")
        transport_cm.__aenter__.return_value = (read_stream, write_stream)
        transport_cm.__aexit__ = mock.AsyncMock(return_value=None)
        mock_transport_factory.return_value = transport_cm

        # ClientSession is used as an async context manager too.
        raw_session_cm = mock.AsyncMock(name="RawSessionCM")
        mock_client_session_cls.return_value = raw_session_cm
        entered_session = mock.AsyncMock(name="EnteredSessionInstance")
        raw_session_cm.__aenter__.return_value = entered_session
        raw_session_cm.__aexit__ = mock.AsyncMock(return_value=None)

        # session.initialize() reports the server's Implementation info.
        init_result = mock.AsyncMock(name="InitializeResult")
        init_result.server_info = types.Implementation(name="foo", version="1")
        entered_session.initialize.return_value = init_result

        group = ClientSessionGroup()
        returned_server_info = None
        returned_session = None
        async with contextlib.AsyncExitStack() as stack:
            group._exit_stack = stack
            (
                returned_server_info,
                returned_session,
            ) = await group._establish_session(server_params_instance, ClientSessionParameters())

        # 1. The correct transport factory was invoked with the right args.
        if client_type_name == "stdio":
            assert isinstance(server_params_instance, StdioServerParameters)
            mock_transport_factory.assert_called_once_with(server_params_instance)
        elif client_type_name == "sse":
            assert isinstance(server_params_instance, SseServerParameters)
            mock_transport_factory.assert_called_once_with(
                url=server_params_instance.url,
                headers=server_params_instance.headers,
                timeout=server_params_instance.timeout,
                sse_read_timeout=server_params_instance.sse_read_timeout,
            )
        elif client_type_name == "streamablehttp":  # pragma: no branch
            assert isinstance(server_params_instance, StreamableHttpParameters)
            # streamable_http_client receives url, a real httpx AsyncClient
            # (built by create_mcp_http_client), and terminate_on_close.
            call_args = mock_transport_factory.call_args
            assert call_args.kwargs["url"] == server_params_instance.url
            assert call_args.kwargs["terminate_on_close"] == server_params_instance.terminate_on_close
            assert isinstance(call_args.kwargs["http_client"], httpx.AsyncClient)
        transport_cm.__aenter__.assert_awaited_once()

        # 2. ClientSession was constructed with the stream pair and defaults.
        mock_client_session_cls.assert_called_once_with(
            read_stream,
            write_stream,
            read_timeout_seconds=None,
            sampling_callback=None,
            elicitation_callback=None,
            list_roots_callback=None,
            logging_callback=None,
            message_handler=None,
            client_info=None,
        )
        raw_session_cm.__aenter__.assert_awaited_once()
        entered_session.initialize.assert_awaited_once()

        # 3. Returned values come straight from initialize().
        assert returned_server_info is init_result.server_info
        assert returned_session is entered_session
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_session_group.py",
"license": "MIT License",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/shared/_httpx_utils.py | """Utilities for creating standardized httpx AsyncClient instances."""
from typing import Any, Protocol
import httpx
__all__ = ["create_mcp_http_client", "MCP_DEFAULT_TIMEOUT", "MCP_DEFAULT_SSE_READ_TIMEOUT"]

# MCP-wide default timeouts, in seconds.
MCP_DEFAULT_TIMEOUT = 30.0  # general HTTP operations
MCP_DEFAULT_SSE_READ_TIMEOUT = 300.0  # SSE stream reads: 5 minutes
class McpHttpClientFactory(Protocol):  # pragma: no branch
    """Structural type for factories that build MCP-configured AsyncClients.

    Any callable matching create_mcp_http_client's signature satisfies it.
    """

    def __call__(  # pragma: no branch
        self,
        headers: dict[str, str] | None = None,
        timeout: httpx.Timeout | None = None,
        auth: httpx.Auth | None = None,
    ) -> httpx.AsyncClient: ...
def create_mcp_http_client(
    headers: dict[str, str] | None = None,
    timeout: httpx.Timeout | None = None,
    auth: httpx.Auth | None = None,
) -> httpx.AsyncClient:
    """Create a standardized httpx AsyncClient with MCP defaults.

    Common defaults applied throughout the MCP codebase:
    - follow_redirects=True (always enabled)
    - 30s general / 300s read timeout when no timeout is given

    Args:
        headers: Optional headers to include with all requests.
        timeout: Request timeout as httpx.Timeout object.
            Defaults to 30 seconds (300s read) if not specified.
        auth: Optional authentication handler.

    Returns:
        Configured httpx.AsyncClient instance with MCP defaults.

    Note:
        The returned AsyncClient must be used as a context manager to ensure
        proper cleanup of connections.

    Example:
        Basic usage with MCP defaults:

        ```python
        async with create_mcp_http_client() as client:
            response = await client.get("https://api.example.com")
        ```

        With custom headers and timeout:

        ```python
        headers = {"Authorization": "Bearer token"}
        timeout = httpx.Timeout(60.0, read=300.0)
        async with create_mcp_http_client(headers, timeout) as client:
            response = await client.get("/long-request")
        ```

        With authentication:

        ```python
        from httpx import BasicAuth

        auth = BasicAuth(username="user", password="pass")
        async with create_mcp_http_client(headers, timeout, auth) as client:
            response = await client.get("/protected-endpoint")
        ```
    """
    # Mandatory MCP defaults, with the timeout falling back to the
    # module-level constants when the caller did not supply one.
    kwargs: dict[str, Any] = {
        "follow_redirects": True,
        "timeout": (
            timeout
            if timeout is not None
            else httpx.Timeout(MCP_DEFAULT_TIMEOUT, read=MCP_DEFAULT_SSE_READ_TIMEOUT)
        ),
    }

    # Optional settings are only forwarded when explicitly provided.
    if headers is not None:
        kwargs["headers"] = headers
    if auth is not None:  # pragma: no cover
        kwargs["auth"] = auth

    return httpx.AsyncClient(**kwargs)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/_httpx_utils.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:tests/shared/test_httpx_utils.py | """Tests for httpx utility functions."""
import httpx
from mcp.shared._httpx_utils import create_mcp_http_client
def test_default_settings():
    """Defaults: redirects enabled and a 30-second general timeout."""
    http_client = create_mcp_http_client()

    assert http_client.follow_redirects is True
    assert http_client.timeout.connect == 30.0
def test_custom_parameters():
    """Caller-supplied headers and timeout override the defaults."""
    custom_headers = {"Authorization": "Bearer token"}
    custom_timeout = httpx.Timeout(60.0)

    http_client = create_mcp_http_client(custom_headers, custom_timeout)

    assert http_client.headers["Authorization"] == "Bearer token"
    assert http_client.timeout.connect == 60.0
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/shared/test_httpx_utils.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:src/mcp/server/streamable_http_manager.py | """StreamableHTTP Session Manager for MCP servers."""
from __future__ import annotations
import contextlib
import logging
from collections.abc import AsyncIterator
from http import HTTPStatus
from typing import TYPE_CHECKING, Any
from uuid import uuid4
import anyio
from anyio.abc import TaskStatus
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import Receive, Scope, Send
from mcp.server.streamable_http import (
MCP_SESSION_ID_HEADER,
EventStore,
StreamableHTTPServerTransport,
)
from mcp.server.transport_security import TransportSecuritySettings
from mcp.types import INVALID_REQUEST, ErrorData, JSONRPCError
if TYPE_CHECKING:
from mcp.server.lowlevel.server import Server
logger = logging.getLogger(__name__)
class StreamableHTTPSessionManager:
"""Manages StreamableHTTP sessions with optional resumability via event store.
This class abstracts away the complexity of session management, event storage,
and request handling for StreamableHTTP transports. It handles:
1. Session tracking for clients
2. Resumability via an optional event store
3. Connection management and lifecycle
4. Request handling and transport setup
5. Idle session cleanup via optional timeout
Important: Only one StreamableHTTPSessionManager instance should be created
per application. The instance cannot be reused after its run() context has
completed. If you need to restart the manager, create a new instance.
Args:
app: The MCP server instance
event_store: Optional event store for resumability support. If provided, enables resumable connections
where clients can reconnect and receive missed events. If None, sessions are still tracked but not
resumable.
json_response: Whether to use JSON responses instead of SSE streams
stateless: If True, creates a completely fresh transport for each request with no session tracking or
state persistence between requests.
security_settings: Optional transport security settings.
retry_interval: Retry interval in milliseconds to suggest to clients in SSE retry field. Used for SSE
polling behavior.
session_idle_timeout: Optional idle timeout in seconds for stateful sessions. If set, sessions that
receive no HTTP requests for this duration will be automatically terminated and removed. When
retry_interval is also configured, ensure the idle timeout comfortably exceeds the retry interval to
avoid reaping sessions during normal SSE polling gaps. Default is None (no timeout). A value of 1800
(30 minutes) is recommended for most deployments.
"""
def __init__(
self,
app: Server[Any],
event_store: EventStore | None = None,
json_response: bool = False,
stateless: bool = False,
security_settings: TransportSecuritySettings | None = None,
retry_interval: int | None = None,
session_idle_timeout: float | None = None,
):
if session_idle_timeout is not None and session_idle_timeout <= 0:
raise ValueError("session_idle_timeout must be a positive number of seconds")
if stateless and session_idle_timeout is not None:
raise RuntimeError("session_idle_timeout is not supported in stateless mode")
self.app = app
self.event_store = event_store
self.json_response = json_response
self.stateless = stateless
self.security_settings = security_settings
self.retry_interval = retry_interval
self.session_idle_timeout = session_idle_timeout
# Session tracking (only used if not stateless)
self._session_creation_lock = anyio.Lock()
self._server_instances: dict[str, StreamableHTTPServerTransport] = {}
# The task group will be set during lifespan
self._task_group = None
# Thread-safe tracking of run() calls
self._run_lock = anyio.Lock()
self._has_started = False
@contextlib.asynccontextmanager
async def run(self) -> AsyncIterator[None]:
"""Run the session manager with proper lifecycle management.
This creates and manages the task group for all session operations.
Important: This method can only be called once per instance. The same
StreamableHTTPSessionManager instance cannot be reused after this
context manager exits. Create a new instance if you need to restart.
Use this in the lifespan context manager of your Starlette app:
@contextlib.asynccontextmanager
async def lifespan(app: Starlette) -> AsyncIterator[None]:
async with session_manager.run():
yield
"""
# Thread-safe check to ensure run() is only called once
async with self._run_lock:
if self._has_started:
raise RuntimeError(
"StreamableHTTPSessionManager .run() can only be called "
"once per instance. Create a new instance if you need to run again."
)
self._has_started = True
async with anyio.create_task_group() as tg:
# Store the task group for later use
self._task_group = tg
logger.info("StreamableHTTP session manager started")
try:
yield # Let the application run
finally:
logger.info("StreamableHTTP session manager shutting down")
# Cancel task group to stop all spawned tasks
tg.cancel_scope.cancel()
self._task_group = None
# Clear any remaining server instances
self._server_instances.clear()
async def handle_request(self, scope: Scope, receive: Receive, send: Send) -> None:
"""Process ASGI request with proper session handling and transport setup.
Dispatches to the appropriate handler based on stateless mode.
"""
if self._task_group is None:
raise RuntimeError("Task group is not initialized. Make sure to use run().")
# Dispatch to the appropriate handler
if self.stateless:
await self._handle_stateless_request(scope, receive, send)
else:
await self._handle_stateful_request(scope, receive, send)
async def _handle_stateless_request(self, scope: Scope, receive: Receive, send: Send) -> None:
"""Process request in stateless mode - creating a new transport for each request."""
logger.debug("Stateless mode: Creating new transport for this request")
# No session ID needed in stateless mode
http_transport = StreamableHTTPServerTransport(
mcp_session_id=None, # No session tracking in stateless mode
is_json_response_enabled=self.json_response,
event_store=None, # No event store in stateless mode
security_settings=self.security_settings,
)
# Start server in a new task
async def run_stateless_server(*, task_status: TaskStatus[None] = anyio.TASK_STATUS_IGNORED):
async with http_transport.connect() as streams:
read_stream, write_stream = streams
task_status.started()
try:
await self.app.run(
read_stream,
write_stream,
self.app.create_initialization_options(),
stateless=True,
)
except Exception: # pragma: no cover
logger.exception("Stateless session crashed")
# Assert task group is not None for type checking
assert self._task_group is not None
# Start the server task
await self._task_group.start(run_stateless_server)
# Handle the HTTP request and return the response
await http_transport.handle_request(scope, receive, send)
# Terminate the transport after the request is handled
await http_transport.terminate()
async def _handle_stateful_request(self, scope: Scope, receive: Receive, send: Send) -> None:
"""Process request in stateful mode - maintaining session state between requests."""
request = Request(scope, receive)
request_mcp_session_id = request.headers.get(MCP_SESSION_ID_HEADER)
# Existing session case
if request_mcp_session_id is not None and request_mcp_session_id in self._server_instances:
transport = self._server_instances[request_mcp_session_id]
logger.debug("Session already exists, handling request directly")
# Push back idle deadline on activity
if transport.idle_scope is not None and self.session_idle_timeout is not None:
transport.idle_scope.deadline = anyio.current_time() + self.session_idle_timeout # pragma: no cover
await transport.handle_request(scope, receive, send)
return
if request_mcp_session_id is None:
# New session case
logger.debug("Creating new transport")
async with self._session_creation_lock:
new_session_id = uuid4().hex
http_transport = StreamableHTTPServerTransport(
mcp_session_id=new_session_id,
is_json_response_enabled=self.json_response,
event_store=self.event_store, # May be None (no resumability)
security_settings=self.security_settings,
retry_interval=self.retry_interval,
)
assert http_transport.mcp_session_id is not None
self._server_instances[http_transport.mcp_session_id] = http_transport
logger.info(f"Created new transport with session ID: {new_session_id}")
# Define the server runner
async def run_server(*, task_status: TaskStatus[None] = anyio.TASK_STATUS_IGNORED) -> None:
async with http_transport.connect() as streams:
read_stream, write_stream = streams
task_status.started()
try:
# Use a cancel scope for idle timeout β when the
# deadline passes the scope cancels app.run() and
# execution continues after the ``with`` block.
# Incoming requests push the deadline forward.
idle_scope = anyio.CancelScope()
if self.session_idle_timeout is not None:
idle_scope.deadline = anyio.current_time() + self.session_idle_timeout
http_transport.idle_scope = idle_scope
with idle_scope:
await self.app.run(
read_stream,
write_stream,
self.app.create_initialization_options(),
stateless=False,
)
if idle_scope.cancelled_caught:
assert http_transport.mcp_session_id is not None
logger.info(f"Session {http_transport.mcp_session_id} idle timeout")
self._server_instances.pop(http_transport.mcp_session_id, None)
await http_transport.terminate()
except Exception:
logger.exception(f"Session {http_transport.mcp_session_id} crashed")
finally:
if ( # pragma: no branch
http_transport.mcp_session_id
and http_transport.mcp_session_id in self._server_instances
and not http_transport.is_terminated
):
logger.info(
"Cleaning up crashed session "
f"{http_transport.mcp_session_id} from active instances."
)
del self._server_instances[http_transport.mcp_session_id]
# Assert task group is not None for type checking
assert self._task_group is not None
# Start the server task
await self._task_group.start(run_server)
# Handle the HTTP request and return the response
await http_transport.handle_request(scope, receive, send)
else:
# Unknown or expired session ID - return 404 per MCP spec
# TODO: Align error code once spec clarifies
# See: https://github.com/modelcontextprotocol/python-sdk/issues/1821
logger.info(f"Rejected request with unknown or expired session ID: {request_mcp_session_id[:64]}")
error_response = JSONRPCError(
jsonrpc="2.0",
id=None,
error=ErrorData(code=INVALID_REQUEST, message="Session not found"),
)
response = Response(
content=error_response.model_dump_json(by_alias=True, exclude_unset=True),
status_code=HTTPStatus.NOT_FOUND,
media_type="application/json",
)
await response(scope, receive, send)
class StreamableHTTPASGIApp:
"""ASGI application for Streamable HTTP server transport."""
def __init__(self, session_manager: StreamableHTTPSessionManager):
self.session_manager = session_manager
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
await self.session_manager.handle_request(scope, receive, send)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/streamable_http_manager.py",
"license": "MIT License",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/server/test_streamable_http_manager.py | """Tests for StreamableHTTPSessionManager."""
import json
import logging
from typing import Any
from unittest.mock import AsyncMock, patch
import anyio
import httpx
import pytest
from starlette.types import Message
from mcp import Client
from mcp.client.streamable_http import streamable_http_client
from mcp.server import Server, ServerRequestContext, streamable_http_manager
from mcp.server.streamable_http import MCP_SESSION_ID_HEADER, StreamableHTTPServerTransport
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from mcp.types import INVALID_REQUEST, ListToolsResult, PaginatedRequestParams
@pytest.mark.anyio
async def test_run_can_only_be_called_once():
"""Test that run() can only be called once per instance."""
app = Server("test-server")
manager = StreamableHTTPSessionManager(app=app)
# First call should succeed
async with manager.run():
pass
# Second call should raise RuntimeError
with pytest.raises(RuntimeError) as excinfo:
async with manager.run():
pass # pragma: no cover
assert "StreamableHTTPSessionManager .run() can only be called once per instance" in str(excinfo.value)
@pytest.mark.anyio
async def test_run_prevents_concurrent_calls():
"""Test that concurrent calls to run() are prevented."""
app = Server("test-server")
manager = StreamableHTTPSessionManager(app=app)
errors: list[Exception] = []
async def try_run():
try:
async with manager.run():
# Simulate some work
await anyio.sleep(0.1)
except RuntimeError as e:
errors.append(e)
# Try to run concurrently
async with anyio.create_task_group() as tg:
tg.start_soon(try_run)
tg.start_soon(try_run)
# One should succeed, one should fail
assert len(errors) == 1
assert "StreamableHTTPSessionManager .run() can only be called once per instance" in str(errors[0])
@pytest.mark.anyio
async def test_handle_request_without_run_raises_error():
"""Test that handle_request raises error if run() hasn't been called."""
app = Server("test-server")
manager = StreamableHTTPSessionManager(app=app)
# Mock ASGI parameters
scope = {"type": "http", "method": "POST", "path": "/test"}
async def receive(): # pragma: no cover
return {"type": "http.request", "body": b""}
async def send(message: Message): # pragma: no cover
pass
# Should raise error because run() hasn't been called
with pytest.raises(RuntimeError) as excinfo:
await manager.handle_request(scope, receive, send)
assert "Task group is not initialized. Make sure to use run()." in str(excinfo.value)
class TestException(Exception):
__test__ = False # Prevent pytest from collecting this as a test class
pass
@pytest.fixture
async def running_manager():
app = Server("test-cleanup-server")
# It's important that the app instance used by the manager is the one we can patch
manager = StreamableHTTPSessionManager(app=app)
async with manager.run():
# Patch app.run here if it's simpler, or patch it within the test
yield manager, app
@pytest.mark.anyio
async def test_stateful_session_cleanup_on_graceful_exit(running_manager: tuple[StreamableHTTPSessionManager, Server]):
manager, app = running_manager
mock_mcp_run = AsyncMock(return_value=None)
# This will be called by StreamableHTTPSessionManager's run_server -> self.app.run
app.run = mock_mcp_run
sent_messages: list[Message] = []
async def mock_send(message: Message):
sent_messages.append(message)
scope = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [(b"content-type", b"application/json")],
}
async def mock_receive(): # pragma: no cover
return {"type": "http.request", "body": b"", "more_body": False}
# Trigger session creation
await manager.handle_request(scope, mock_receive, mock_send)
# Extract session ID from response headers
session_id = None
for msg in sent_messages: # pragma: no branch
if msg["type"] == "http.response.start": # pragma: no branch
for header_name, header_value in msg.get("headers", []): # pragma: no branch
if header_name.decode().lower() == MCP_SESSION_ID_HEADER.lower():
session_id = header_value.decode()
break
if session_id: # Break outer loop if session_id is found # pragma: no branch
break
assert session_id is not None, "Session ID not found in response headers"
# Ensure MCPServer.run was called
mock_mcp_run.assert_called_once()
# At this point, mock_mcp_run has completed, and the finally block in
# StreamableHTTPSessionManager's run_server should have executed.
# To ensure the task spawned by handle_request finishes and cleanup occurs:
# Give other tasks a chance to run. This is important for the finally block.
await anyio.sleep(0.01)
assert session_id not in manager._server_instances, (
"Session ID should be removed from _server_instances after graceful exit"
)
assert not manager._server_instances, "No sessions should be tracked after the only session exits gracefully"
@pytest.mark.anyio
async def test_stateful_session_cleanup_on_exception(running_manager: tuple[StreamableHTTPSessionManager, Server]):
manager, app = running_manager
mock_mcp_run = AsyncMock(side_effect=TestException("Simulated crash"))
app.run = mock_mcp_run
sent_messages: list[Message] = []
async def mock_send(message: Message):
sent_messages.append(message)
# If an exception occurs, the transport might try to send an error response
# For this test, we mostly care that the session is established enough
# to get an ID
if message["type"] == "http.response.start" and message["status"] >= 500: # pragma: no cover
pass # Expected if TestException propagates that far up the transport
scope = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [(b"content-type", b"application/json")],
}
async def mock_receive(): # pragma: no cover
return {"type": "http.request", "body": b"", "more_body": False}
# Trigger session creation
await manager.handle_request(scope, mock_receive, mock_send)
session_id = None
for msg in sent_messages: # pragma: no branch
if msg["type"] == "http.response.start": # pragma: no branch
for header_name, header_value in msg.get("headers", []): # pragma: no branch
if header_name.decode().lower() == MCP_SESSION_ID_HEADER.lower():
session_id = header_value.decode()
break
if session_id: # Break outer loop if session_id is found # pragma: no branch
break
assert session_id is not None, "Session ID not found in response headers"
mock_mcp_run.assert_called_once()
# Give other tasks a chance to run to ensure the finally block executes
await anyio.sleep(0.01)
assert session_id not in manager._server_instances, (
"Session ID should be removed from _server_instances after an exception"
)
assert not manager._server_instances, "No sessions should be tracked after the only session crashes"
@pytest.mark.anyio
async def test_stateless_requests_memory_cleanup():
"""Test that stateless requests actually clean up resources using real transports."""
app = Server("test-stateless-real-cleanup")
manager = StreamableHTTPSessionManager(app=app, stateless=True)
# Track created transport instances
created_transports: list[StreamableHTTPServerTransport] = []
# Patch StreamableHTTPServerTransport constructor to track instances
original_constructor = StreamableHTTPServerTransport
def track_transport(*args: Any, **kwargs: Any) -> StreamableHTTPServerTransport:
transport = original_constructor(*args, **kwargs)
created_transports.append(transport)
return transport
with patch.object(streamable_http_manager, "StreamableHTTPServerTransport", side_effect=track_transport):
async with manager.run():
# Mock app.run to complete immediately
app.run = AsyncMock(return_value=None)
# Send a simple request
sent_messages: list[Message] = []
async def mock_send(message: Message):
sent_messages.append(message)
scope = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [
(b"content-type", b"application/json"),
(b"accept", b"application/json, text/event-stream"),
],
}
# Empty body to trigger early return
async def mock_receive():
return {
"type": "http.request",
"body": b"",
"more_body": False,
}
# Send a request
await manager.handle_request(scope, mock_receive, mock_send)
# Verify transport was created
assert len(created_transports) == 1, "Should have created one transport"
transport = created_transports[0]
# The key assertion - transport should be terminated
assert transport._terminated, "Transport should be terminated after stateless request"
# Verify internal state is cleaned up
assert len(transport._request_streams) == 0, "Transport should have no active request streams"
@pytest.mark.anyio
async def test_unknown_session_id_returns_404(caplog: pytest.LogCaptureFixture):
"""Test that requests with unknown session IDs return HTTP 404 per MCP spec."""
app = Server("test-unknown-session")
manager = StreamableHTTPSessionManager(app=app)
async with manager.run():
sent_messages: list[Message] = []
response_body = b""
async def mock_send(message: Message):
nonlocal response_body
sent_messages.append(message)
if message["type"] == "http.response.body":
response_body += message.get("body", b"")
# Request with a non-existent session ID
scope = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [
(b"content-type", b"application/json"),
(b"accept", b"application/json, text/event-stream"),
(b"mcp-session-id", b"non-existent-session-id"),
],
}
async def mock_receive():
return {"type": "http.request", "body": b"{}", "more_body": False} # pragma: no cover
with caplog.at_level(logging.INFO):
await manager.handle_request(scope, mock_receive, mock_send)
# Find the response start message
response_start = next(
(msg for msg in sent_messages if msg["type"] == "http.response.start"),
None,
)
assert response_start is not None, "Should have sent a response"
assert response_start["status"] == 404, "Should return HTTP 404 for unknown session ID"
# Verify JSON-RPC error format
error_data = json.loads(response_body)
assert error_data["jsonrpc"] == "2.0"
assert error_data["id"] is None
assert error_data["error"]["code"] == INVALID_REQUEST
assert error_data["error"]["message"] == "Session not found"
assert "Rejected request with unknown or expired session ID: non-existent-session-id" in caplog.text
@pytest.mark.anyio
async def test_e2e_streamable_http_server_cleanup():
host = "testserver"
async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult:
return ListToolsResult(tools=[])
app = Server("test-server", on_list_tools=handle_list_tools)
mcp_app = app.streamable_http_app(host=host)
async with (
mcp_app.router.lifespan_context(mcp_app),
httpx.ASGITransport(mcp_app) as transport,
httpx.AsyncClient(transport=transport) as http_client,
Client(streamable_http_client(f"http://{host}/mcp", http_client=http_client)) as client,
):
await client.list_tools()
@pytest.mark.anyio
async def test_idle_session_is_reaped():
"""After idle timeout fires, the session returns 404."""
app = Server("test-idle-reap")
manager = StreamableHTTPSessionManager(app=app, session_idle_timeout=0.05)
async with manager.run():
sent_messages: list[Message] = []
async def mock_send(message: Message):
sent_messages.append(message)
scope = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [(b"content-type", b"application/json")],
}
async def mock_receive(): # pragma: no cover
return {"type": "http.request", "body": b"", "more_body": False}
await manager.handle_request(scope, mock_receive, mock_send)
session_id = None
for msg in sent_messages: # pragma: no branch
if msg["type"] == "http.response.start": # pragma: no branch
for header_name, header_value in msg.get("headers", []): # pragma: no branch
if header_name.decode().lower() == MCP_SESSION_ID_HEADER.lower():
session_id = header_value.decode()
break
if session_id: # pragma: no branch
break
assert session_id is not None, "Session ID not found in response headers"
# Wait for the 50ms idle timeout to fire and cleanup to complete
await anyio.sleep(0.1)
# Verify via public API: old session ID now returns 404
response_messages: list[Message] = []
async def capture_send(message: Message):
response_messages.append(message)
scope_with_session = {
"type": "http",
"method": "POST",
"path": "/mcp",
"headers": [
(b"content-type", b"application/json"),
(b"mcp-session-id", session_id.encode()),
],
}
await manager.handle_request(scope_with_session, mock_receive, capture_send)
response_start = next(
(msg for msg in response_messages if msg["type"] == "http.response.start"),
None,
)
assert response_start is not None
assert response_start["status"] == 404
def test_session_idle_timeout_rejects_non_positive():
with pytest.raises(ValueError, match="positive number"):
StreamableHTTPSessionManager(app=Server("test"), session_idle_timeout=-1)
with pytest.raises(ValueError, match="positive number"):
StreamableHTTPSessionManager(app=Server("test"), session_idle_timeout=0)
def test_session_idle_timeout_rejects_stateless():
with pytest.raises(RuntimeError, match="not supported in stateless"):
StreamableHTTPSessionManager(app=Server("test"), session_idle_timeout=30, stateless=True)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/test_streamable_http_manager.py",
"license": "MIT License",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:examples/servers/simple-auth/mcp_simple_auth/server.py | """MCP Resource Server with Token Introspection.
This server validates tokens via Authorization Server introspection and serves MCP resources.
Demonstrates RFC 9728 Protected Resource Metadata for AS/RS separation.
NOTE: this is a simplified example for demonstration purposes.
This is not a production-ready implementation.
"""
import datetime
import logging
from typing import Any, Literal
import click
from pydantic import AnyHttpUrl
from pydantic_settings import BaseSettings, SettingsConfigDict
from mcp.server.auth.settings import AuthSettings
from mcp.server.mcpserver.server import MCPServer
from .token_verifier import IntrospectionTokenVerifier
logger = logging.getLogger(__name__)
class ResourceServerSettings(BaseSettings):
"""Settings for the MCP Resource Server."""
model_config = SettingsConfigDict(env_prefix="MCP_RESOURCE_")
# Server settings
host: str = "localhost"
port: int = 8001
server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:8001/mcp")
# Authorization Server settings
auth_server_url: AnyHttpUrl = AnyHttpUrl("http://localhost:9000")
auth_server_introspection_endpoint: str = "http://localhost:9000/introspect"
# No user endpoint needed - we get user data from token introspection
# MCP settings
mcp_scope: str = "user"
# RFC 8707 resource validation
oauth_strict: bool = False
def create_resource_server(settings: ResourceServerSettings) -> MCPServer:
"""Create MCP Resource Server with token introspection.
This server:
1. Provides protected resource metadata (RFC 9728)
2. Validates tokens via Authorization Server introspection
3. Serves MCP tools and resources
"""
# Create token verifier for introspection with RFC 8707 resource validation
token_verifier = IntrospectionTokenVerifier(
introspection_endpoint=settings.auth_server_introspection_endpoint,
server_url=str(settings.server_url),
validate_resource=settings.oauth_strict, # Only validate when --oauth-strict is set
)
# Create MCPServer server as a Resource Server
app = MCPServer(
name="MCP Resource Server",
instructions="Resource Server that validates tokens via Authorization Server introspection",
debug=True,
# Auth configuration for RS mode
token_verifier=token_verifier,
auth=AuthSettings(
issuer_url=settings.auth_server_url,
required_scopes=[settings.mcp_scope],
resource_server_url=settings.server_url,
),
)
# Store settings for later use in run()
app._resource_server_settings = settings # type: ignore[attr-defined]
@app.tool()
async def get_time() -> dict[str, Any]:
"""Get the current server time.
This tool demonstrates that system information can be protected
by OAuth authentication. User must be authenticated to access it.
"""
now = datetime.datetime.now()
return {
"current_time": now.isoformat(),
"timezone": "UTC", # Simplified for demo
"timestamp": now.timestamp(),
"formatted": now.strftime("%Y-%m-%d %H:%M:%S"),
}
return app
@click.command()
@click.option("--port", default=8001, help="Port to listen on")
@click.option("--auth-server", default="http://localhost:9000", help="Authorization Server URL")
@click.option(
"--transport",
default="streamable-http",
type=click.Choice(["sse", "streamable-http"]),
help="Transport protocol to use ('sse' or 'streamable-http')",
)
@click.option(
"--oauth-strict",
is_flag=True,
help="Enable RFC 8707 resource validation",
)
def main(port: int, auth_server: str, transport: Literal["sse", "streamable-http"], oauth_strict: bool) -> int:
"""Run the MCP Resource Server.
This server:
- Provides RFC 9728 Protected Resource Metadata
- Validates tokens via Authorization Server introspection
- Serves MCP tools requiring authentication
Must be used with a running Authorization Server.
"""
logging.basicConfig(level=logging.INFO)
try:
# Parse auth server URL
auth_server_url = AnyHttpUrl(auth_server)
# Create settings
host = "localhost"
server_url = f"http://{host}:{port}/mcp"
settings = ResourceServerSettings(
host=host,
port=port,
server_url=AnyHttpUrl(server_url),
auth_server_url=auth_server_url,
auth_server_introspection_endpoint=f"{auth_server}/introspect",
oauth_strict=oauth_strict,
)
except ValueError as e:
logger.error(f"Configuration error: {e}")
logger.error("Make sure to provide a valid Authorization Server URL")
return 1
try:
mcp_server = create_resource_server(settings)
logger.info(f"π MCP Resource Server running on {settings.server_url}")
logger.info(f"π Using Authorization Server: {settings.auth_server_url}")
# Run the server - this should block and keep running
mcp_server.run(transport=transport, host=host, port=port)
logger.info("Server stopped")
return 0
except Exception:
logger.exception("Server error")
return 1
if __name__ == "__main__":
main() # type: ignore[call-arg]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-auth/mcp_simple_auth/server.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/shared/message.py | """Message wrapper with metadata support.
This module defines a wrapper type that combines JSONRPCMessage with metadata
to support transport-specific features like resumability.
"""
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import Any
from mcp.types import JSONRPCMessage, RequestId
ResumptionToken = str
ResumptionTokenUpdateCallback = Callable[[ResumptionToken], Awaitable[None]]
# Callback type for closing SSE streams without terminating
CloseSSEStreamCallback = Callable[[], Awaitable[None]]
@dataclass
class ClientMessageMetadata:
"""Metadata specific to client messages."""
resumption_token: ResumptionToken | None = None
on_resumption_token_update: Callable[[ResumptionToken], Awaitable[None]] | None = None
@dataclass
class ServerMessageMetadata:
"""Metadata specific to server messages."""
related_request_id: RequestId | None = None
# Transport-specific request context (e.g. starlette Request for HTTP
# transports, None for stdio). Typed as Any because the server layer is
# transport-agnostic.
request_context: Any = None
# Callback to close SSE stream for the current request without terminating
close_sse_stream: CloseSSEStreamCallback | None = None
# Callback to close the standalone GET SSE stream (for unsolicited notifications)
close_standalone_sse_stream: CloseSSEStreamCallback | None = None
MessageMetadata = ClientMessageMetadata | ServerMessageMetadata | None
@dataclass
class SessionMessage:
"""A message with specific metadata for transport-specific features."""
message: JSONRPCMessage
metadata: MessageMetadata = None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/message.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/event_store.py | """In-memory event store for demonstrating resumability functionality.
This is a simple implementation intended for examples and testing,
not for production use where a persistent storage solution would be more appropriate.
"""
import logging
from collections import deque
from dataclasses import dataclass
from uuid import uuid4
from mcp.server.streamable_http import EventCallback, EventId, EventMessage, EventStore, StreamId
from mcp.types import JSONRPCMessage
logger = logging.getLogger(__name__)
@dataclass
class EventEntry:
"""Represents an event entry in the event store."""
event_id: EventId
stream_id: StreamId
message: JSONRPCMessage | None
class InMemoryEventStore(EventStore):
    """In-memory EventStore implementation used to demonstrate resumability.

    Intended for examples and tests only; production deployments should use a
    persistent storage solution. Only the most recent N events are retained
    per stream.
    """

    def __init__(self, max_events_per_stream: int = 100):
        """Create the store.

        Args:
            max_events_per_stream: Maximum number of events retained per stream.
        """
        self.max_events_per_stream = max_events_per_stream
        # Bounded per-stream history; oldest entries are evicted automatically.
        self.streams: dict[StreamId, deque[EventEntry]] = {}
        # Flat event_id -> entry index for O(1) lookup during replay.
        self.event_index: dict[EventId, EventEntry] = {}

    async def store_event(self, stream_id: StreamId, message: JSONRPCMessage | None) -> EventId:
        """Stores an event with a generated event ID."""
        new_id = str(uuid4())
        entry = EventEntry(event_id=new_id, stream_id=stream_id, message=message)
        stream = self.streams.setdefault(stream_id, deque(maxlen=self.max_events_per_stream))
        # A full deque silently evicts its oldest entry on append, so drop the
        # matching index record first to keep event_index consistent.
        if len(stream) == self.max_events_per_stream:
            evicted = stream[0]
            self.event_index.pop(evicted.event_id, None)
        stream.append(entry)
        self.event_index[new_id] = entry
        return new_id

    async def replay_events_after(
        self,
        last_event_id: EventId,
        send_callback: EventCallback,
    ) -> StreamId | None:
        """Replays events that occurred after the specified event ID."""
        marker = self.event_index.get(last_event_id)
        if marker is None:
            logger.warning(f"Event ID {last_event_id} not found in store")
            return None
        # Deque order is chronological: emit everything after the marker entry,
        # skipping priming events (message is None).
        seen_marker = False
        for event in self.streams.get(marker.stream_id, deque()):
            if not seen_marker:
                seen_marker = event.event_id == last_event_id
            elif event.message is not None:
                await send_callback(EventMessage(event.message, event.event_id))
        return marker.stream_id
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/event_store.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/client/streamable_http.py | """Implements StreamableHTTP transport for MCP clients."""
from __future__ import annotations as _annotations
import contextlib
import logging
from collections.abc import AsyncGenerator, Awaitable, Callable
from contextlib import asynccontextmanager
from dataclasses import dataclass
import anyio
import httpx
from anyio.abc import TaskGroup
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from httpx_sse import EventSource, ServerSentEvent, aconnect_sse
from pydantic import ValidationError
from mcp.client._transport import TransportStreams
from mcp.shared._httpx_utils import create_mcp_http_client
from mcp.shared.message import ClientMessageMetadata, SessionMessage
from mcp.types import (
INTERNAL_ERROR,
INVALID_REQUEST,
PARSE_ERROR,
ErrorData,
InitializeResult,
JSONRPCError,
JSONRPCMessage,
JSONRPCNotification,
JSONRPCRequest,
JSONRPCResponse,
RequestId,
jsonrpc_message_adapter,
)
logger = logging.getLogger(__name__)
# TODO(Marcelo): Put the TransportStreams in a module under shared, so we can import here.
# Aliases for the in-memory channels between the transport and the session
# layer. The read side may also carry Exception objects so parse failures can
# be surfaced to the reader.
SessionMessageOrError = SessionMessage | Exception
StreamWriter = MemoryObjectSendStream[SessionMessageOrError]
StreamReader = MemoryObjectReceiveStream[SessionMessage]
# HTTP header names used by the StreamableHTTP transport.
MCP_SESSION_ID = "mcp-session-id"
MCP_PROTOCOL_VERSION = "mcp-protocol-version"
LAST_EVENT_ID = "last-event-id"
# Reconnection defaults
DEFAULT_RECONNECTION_DELAY_MS = 1000  # 1 second fallback when server doesn't provide retry
MAX_RECONNECTION_ATTEMPTS = 2  # Max retry attempts before giving up
class StreamableHTTPError(Exception):
    """Base exception for StreamableHTTP transport errors.

    Root of this module's exception hierarchy; more specific transport errors
    (e.g. ResumptionError) derive from it.
    """
class ResumptionError(StreamableHTTPError):
    """Raised when a resumption request is invalid (e.g. no resumption token)."""
@dataclass
class RequestContext:
    """Context for a request operation."""
    # HTTP client used to issue the request.
    client: httpx.AsyncClient
    # Session ID at the time the request was issued (None before initialization).
    session_id: str | None
    # The outgoing message plus its transport metadata.
    session_message: SessionMessage
    # Client-side metadata (resumption token, token-update callback), if any.
    metadata: ClientMessageMetadata | None
    # Where inbound messages/errors for this request are delivered.
    read_stream_writer: StreamWriter
class StreamableHTTPTransport:
    """StreamableHTTP client transport implementation.

    Tracks the negotiated session ID and protocol version, sends client
    messages via POST, and consumes server SSE streams (with resumption and
    reconnection support).
    """
    def __init__(self, url: str) -> None:
        """Initialize the StreamableHTTP transport.
        Args:
            url: The endpoint URL.
        """
        self.url = url
        self.session_id: str | None = None
        self.protocol_version: str | None = None
    def _prepare_headers(self) -> dict[str, str]:
        """Build MCP-specific request headers.
        These headers will be merged with the httpx.AsyncClient's default headers,
        with these MCP-specific headers taking precedence.
        """
        headers: dict[str, str] = {
            "accept": "application/json, text/event-stream",
            "content-type": "application/json",
        }
        # Add session headers if available
        if self.session_id:
            headers[MCP_SESSION_ID] = self.session_id
        if self.protocol_version:
            headers[MCP_PROTOCOL_VERSION] = self.protocol_version
        return headers
    def _is_initialization_request(self, message: JSONRPCMessage) -> bool:
        """Check if the message is an initialization request."""
        return isinstance(message, JSONRPCRequest) and message.method == "initialize"
    def _is_initialized_notification(self, message: JSONRPCMessage) -> bool:
        """Check if the message is an initialized notification."""
        return isinstance(message, JSONRPCNotification) and message.method == "notifications/initialized"
    def _maybe_extract_session_id_from_response(self, response: httpx.Response) -> None:
        """Extract and store session ID from response headers."""
        new_session_id = response.headers.get(MCP_SESSION_ID)
        if new_session_id:
            self.session_id = new_session_id
            logger.info(f"Received session ID: {self.session_id}")
    def _maybe_extract_protocol_version_from_message(self, message: JSONRPCMessage) -> None:
        """Extract protocol version from initialization response message."""
        if isinstance(message, JSONRPCResponse) and message.result:  # pragma: no branch
            try:
                # Parse the result as InitializeResult for type safety
                init_result = InitializeResult.model_validate(message.result, by_name=False)
                self.protocol_version = str(init_result.protocol_version)
                logger.info(f"Negotiated protocol version: {self.protocol_version}")
            except Exception:  # pragma: no cover
                logger.warning("Failed to parse initialization response as InitializeResult", exc_info=True)
                logger.warning(f"Raw result: {message.result}")
    async def _handle_sse_event(
        self,
        sse: ServerSentEvent,
        read_stream_writer: StreamWriter,
        original_request_id: RequestId | None = None,
        resumption_callback: Callable[[str], Awaitable[None]] | None = None,
        is_initialization: bool = False,
    ) -> bool:
        """Handle an SSE event, returning True if the response is complete.

        Args:
            sse: The incoming server-sent event.
            read_stream_writer: Where the parsed message (or parse error) is delivered.
            original_request_id: If set, rewrites response/error IDs to this value
                (used when replaying a resumed stream).
            resumption_callback: Invoked with each event ID so callers can persist
                a resumption token.
            is_initialization: Whether this stream answers an `initialize` request.
        """
        if sse.event == "message":
            # Handle priming events (empty data with ID) for resumability
            if not sse.data:
                # Call resumption callback for priming events that have an ID
                if sse.id and resumption_callback:
                    await resumption_callback(sse.id)
                return False
            try:
                message = jsonrpc_message_adapter.validate_json(sse.data, by_name=False)
                logger.debug(f"SSE message: {message}")
                # Extract protocol version from initialization response
                if is_initialization:
                    self._maybe_extract_protocol_version_from_message(message)
                # If this is a response and we have original_request_id, replace it
                if original_request_id is not None and isinstance(message, JSONRPCResponse | JSONRPCError):
                    message.id = original_request_id
                session_message = SessionMessage(message)
                await read_stream_writer.send(session_message)
                # Call resumption token callback if we have an ID
                if sse.id and resumption_callback:
                    await resumption_callback(sse.id)
                # If this is a response or error return True indicating completion
                # Otherwise, return False to continue listening
                return isinstance(message, JSONRPCResponse | JSONRPCError)
            except Exception as exc:  # pragma: no cover
                logger.exception("Error parsing SSE message")
                # With a known request ID, report a JSON-RPC parse error for that
                # request and treat the exchange as complete.
                if original_request_id is not None:
                    error_data = ErrorData(code=PARSE_ERROR, message=f"Failed to parse SSE message: {exc}")
                    error_msg = SessionMessage(JSONRPCError(jsonrpc="2.0", id=original_request_id, error=error_data))
                    await read_stream_writer.send(error_msg)
                    return True
                # Otherwise surface the raw exception to the reader.
                await read_stream_writer.send(exc)
                return False
        else:  # pragma: no cover
            logger.warning(f"Unknown SSE event: {sse.event}")
            return False
    async def handle_get_stream(self, client: httpx.AsyncClient, read_stream_writer: StreamWriter) -> None:
        """Handle GET stream for server-initiated messages with auto-reconnect."""
        last_event_id: str | None = None
        retry_interval_ms: int | None = None
        attempt: int = 0
        while attempt < MAX_RECONNECTION_ATTEMPTS:  # pragma: no branch
            try:
                if not self.session_id:
                    return
                headers = self._prepare_headers()
                if last_event_id:
                    headers[LAST_EVENT_ID] = last_event_id
                async with aconnect_sse(client, "GET", self.url, headers=headers) as event_source:
                    event_source.response.raise_for_status()
                    logger.debug("GET SSE connection established")
                    async for sse in event_source.aiter_sse():
                        # Track last event ID for reconnection
                        if sse.id:
                            last_event_id = sse.id
                        # Track retry interval from server
                        if sse.retry is not None:
                            retry_interval_ms = sse.retry
                        await self._handle_sse_event(sse, read_stream_writer)
                # Stream ended normally (server closed) - reset attempt counter
                # NOTE(review): after a normal close the loop reconnects without
                # a delay; only the error path below sleeps. Confirm intended.
                attempt = 0
            except Exception:  # pragma: lax no cover
                logger.debug("GET stream error", exc_info=True)
                attempt += 1
                if attempt >= MAX_RECONNECTION_ATTEMPTS:  # pragma: no cover
                    logger.debug(f"GET stream max reconnection attempts ({MAX_RECONNECTION_ATTEMPTS}) exceeded")
                    return
                # Wait before reconnecting
                delay_ms = retry_interval_ms if retry_interval_ms is not None else DEFAULT_RECONNECTION_DELAY_MS
                logger.info(f"GET stream disconnected, reconnecting in {delay_ms}ms...")
                await anyio.sleep(delay_ms / 1000.0)
    async def _handle_resumption_request(self, ctx: RequestContext) -> None:
        """Handle a resumption request using GET with SSE."""
        headers = self._prepare_headers()
        if ctx.metadata and ctx.metadata.resumption_token:
            headers[LAST_EVENT_ID] = ctx.metadata.resumption_token
        else:
            raise ResumptionError("Resumption request requires a resumption token")  # pragma: no cover
        # Extract original request ID to map responses
        original_request_id = None
        if isinstance(ctx.session_message.message, JSONRPCRequest):  # pragma: no branch
            original_request_id = ctx.session_message.message.id
        async with aconnect_sse(ctx.client, "GET", self.url, headers=headers) as event_source:
            event_source.response.raise_for_status()
            logger.debug("Resumption GET SSE connection established")
            async for sse in event_source.aiter_sse():  # pragma: no branch
                is_complete = await self._handle_sse_event(
                    sse,
                    ctx.read_stream_writer,
                    original_request_id,
                    ctx.metadata.on_resumption_token_update if ctx.metadata else None,
                )
                if is_complete:
                    await event_source.response.aclose()
                    break
    async def _handle_post_request(self, ctx: RequestContext) -> None:
        """Handle a POST request with response processing."""
        headers = self._prepare_headers()
        message = ctx.session_message.message
        is_initialization = self._is_initialization_request(message)
        async with ctx.client.stream(
            "POST",
            self.url,
            json=message.model_dump(by_alias=True, mode="json", exclude_unset=True),
            headers=headers,
        ) as response:
            # 202 Accepted: no body to process (e.g. for notifications).
            if response.status_code == 202:
                logger.debug("Received 202 Accepted")
                return
            # 404: the server no longer recognizes this session.
            if response.status_code == 404:  # pragma: no branch
                if isinstance(message, JSONRPCRequest):  # pragma: no branch
                    error_data = ErrorData(code=INVALID_REQUEST, message="Session terminated")
                    session_message = SessionMessage(JSONRPCError(jsonrpc="2.0", id=message.id, error=error_data))
                    await ctx.read_stream_writer.send(session_message)
                return
            if response.status_code >= 400:
                if isinstance(message, JSONRPCRequest):
                    error_data = ErrorData(code=INTERNAL_ERROR, message="Server returned an error response")
                    session_message = SessionMessage(JSONRPCError(jsonrpc="2.0", id=message.id, error=error_data))
                    await ctx.read_stream_writer.send(session_message)
                return
            if is_initialization:
                self._maybe_extract_session_id_from_response(response)
            # Per https://modelcontextprotocol.io/specification/2025-06-18/basic#notifications:
            # The server MUST NOT send a response to notifications.
            if isinstance(message, JSONRPCRequest):
                content_type = response.headers.get("content-type", "").lower()
                if content_type.startswith("application/json"):
                    await self._handle_json_response(
                        response, ctx.read_stream_writer, is_initialization, request_id=message.id
                    )
                elif content_type.startswith("text/event-stream"):
                    await self._handle_sse_response(response, ctx, is_initialization)
                else:
                    logger.error(f"Unexpected content type: {content_type}")
                    error_data = ErrorData(code=INVALID_REQUEST, message=f"Unexpected content type: {content_type}")
                    error_msg = SessionMessage(JSONRPCError(jsonrpc="2.0", id=message.id, error=error_data))
                    await ctx.read_stream_writer.send(error_msg)
    async def _handle_json_response(
        self,
        response: httpx.Response,
        read_stream_writer: StreamWriter,
        is_initialization: bool = False,
        *,
        request_id: RequestId,
    ) -> None:
        """Handle JSON response from the server.

        Parses the body as a JSON-RPC message and forwards it; on parse failure
        a PARSE_ERROR JSONRPCError for `request_id` is forwarded instead.
        """
        try:
            content = await response.aread()
            message = jsonrpc_message_adapter.validate_json(content, by_name=False)
            # Extract protocol version from initialization response
            if is_initialization:
                self._maybe_extract_protocol_version_from_message(message)
            session_message = SessionMessage(message)
            await read_stream_writer.send(session_message)
        except (httpx.StreamError, ValidationError) as exc:
            logger.exception("Error parsing JSON response")
            error_data = ErrorData(code=PARSE_ERROR, message=f"Failed to parse JSON response: {exc}")
            error_msg = SessionMessage(JSONRPCError(jsonrpc="2.0", id=request_id, error=error_data))
            await read_stream_writer.send(error_msg)
    async def _handle_sse_response(
        self,
        response: httpx.Response,
        ctx: RequestContext,
        is_initialization: bool = False,
    ) -> None:
        """Handle SSE response from the server."""
        last_event_id: str | None = None
        retry_interval_ms: int | None = None
        # The caller (_handle_post_request) only reaches here inside
        # isinstance(message, JSONRPCRequest), so this is always a JSONRPCRequest.
        assert isinstance(ctx.session_message.message, JSONRPCRequest)
        original_request_id = ctx.session_message.message.id
        try:
            event_source = EventSource(response)
            async for sse in event_source.aiter_sse():  # pragma: no branch
                # Track last event ID for potential reconnection
                if sse.id:
                    last_event_id = sse.id
                # Track retry interval from server
                if sse.retry is not None:
                    retry_interval_ms = sse.retry
                is_complete = await self._handle_sse_event(
                    sse,
                    ctx.read_stream_writer,
                    original_request_id=original_request_id,
                    resumption_callback=(ctx.metadata.on_resumption_token_update if ctx.metadata else None),
                    is_initialization=is_initialization,
                )
                # If the SSE event indicates completion, like returning response/error
                # break the loop
                if is_complete:
                    await response.aclose()
                    return  # Normal completion, no reconnect needed
        except Exception:
            logger.debug("SSE stream ended", exc_info=True)  # pragma: no cover
        # Stream ended without response - reconnect if we received an event with ID
        if last_event_id is not None:  # pragma: no branch
            logger.info("SSE stream disconnected, reconnecting...")
            await self._handle_reconnection(ctx, last_event_id, retry_interval_ms)
    async def _handle_reconnection(
        self,
        ctx: RequestContext,
        last_event_id: str,
        retry_interval_ms: int | None = None,
        attempt: int = 0,
    ) -> None:
        """Reconnect with Last-Event-ID to resume stream after server disconnect."""
        # Bail if max retries exceeded
        if attempt >= MAX_RECONNECTION_ATTEMPTS:  # pragma: no cover
            logger.debug(f"Max reconnection attempts ({MAX_RECONNECTION_ATTEMPTS}) exceeded")
            return
        # Always wait - use server value or default
        delay_ms = retry_interval_ms if retry_interval_ms is not None else DEFAULT_RECONNECTION_DELAY_MS
        await anyio.sleep(delay_ms / 1000.0)
        headers = self._prepare_headers()
        headers[LAST_EVENT_ID] = last_event_id
        # Extract original request ID to map responses
        original_request_id = None
        if isinstance(ctx.session_message.message, JSONRPCRequest):  # pragma: no branch
            original_request_id = ctx.session_message.message.id
        try:
            async with aconnect_sse(ctx.client, "GET", self.url, headers=headers) as event_source:
                event_source.response.raise_for_status()
                logger.info("Reconnected to SSE stream")
                # Track for potential further reconnection
                reconnect_last_event_id: str = last_event_id
                reconnect_retry_ms = retry_interval_ms
                async for sse in event_source.aiter_sse():
                    if sse.id:  # pragma: no branch
                        reconnect_last_event_id = sse.id
                    if sse.retry is not None:
                        reconnect_retry_ms = sse.retry
                    is_complete = await self._handle_sse_event(
                        sse,
                        ctx.read_stream_writer,
                        original_request_id,
                        ctx.metadata.on_resumption_token_update if ctx.metadata else None,
                    )
                    if is_complete:
                        await event_source.response.aclose()
                        return
                # Stream ended again without response - reconnect again (reset attempt counter)
                logger.info("SSE stream disconnected, reconnecting...")
                await self._handle_reconnection(ctx, reconnect_last_event_id, reconnect_retry_ms, 0)
        except Exception as e:  # pragma: no cover
            logger.debug(f"Reconnection failed: {e}")
            # Try to reconnect again if we still have an event ID
            await self._handle_reconnection(ctx, last_event_id, retry_interval_ms, attempt + 1)
    async def post_writer(
        self,
        client: httpx.AsyncClient,
        write_stream_reader: StreamReader,
        read_stream_writer: StreamWriter,
        write_stream: MemoryObjectSendStream[SessionMessage],
        start_get_stream: Callable[[], None],
        tg: TaskGroup,
    ) -> None:
        """Handle writing requests to the server.

        Args:
            client: HTTP client used for all outgoing requests.
            write_stream_reader: Source of outgoing session messages.
            read_stream_writer: Destination for inbound messages/errors.
            write_stream: Closed on exit so writers observe shutdown.
            start_get_stream: Starts the standalone GET stream (invoked once the
                initialized notification is seen).
            tg: Task group used to run request handling concurrently.
        """
        try:
            async with write_stream_reader:
                async for session_message in write_stream_reader:
                    message = session_message.message
                    metadata = (
                        session_message.metadata
                        if isinstance(session_message.metadata, ClientMessageMetadata)
                        else None
                    )
                    # Check if this is a resumption request
                    is_resumption = bool(metadata and metadata.resumption_token)
                    logger.debug(f"Sending client message: {message}")
                    # Handle initialized notification
                    if self._is_initialized_notification(message):
                        start_get_stream()
                    ctx = RequestContext(
                        client=client,
                        session_id=self.session_id,
                        session_message=session_message,
                        metadata=metadata,
                        read_stream_writer=read_stream_writer,
                    )
                    async def handle_request_async() -> None:
                        if is_resumption:
                            await self._handle_resumption_request(ctx)
                        else:
                            await self._handle_post_request(ctx)
                    # If this is a request, start a new task to handle it
                    if isinstance(message, JSONRPCRequest):
                        tg.start_soon(handle_request_async)
                    else:
                        await handle_request_async()
        except Exception:  # pragma: lax no cover
            logger.exception("Error in post_writer")
        finally:
            await read_stream_writer.aclose()
            await write_stream.aclose()
    async def terminate_session(self, client: httpx.AsyncClient) -> None:
        """Terminate the session by sending a DELETE request."""
        if not self.session_id:  # pragma: lax no cover
            return
        try:
            headers = self._prepare_headers()
            response = await client.delete(self.url, headers=headers)
            if response.status_code == 405:  # pragma: lax no cover
                logger.debug("Server does not allow session termination")
            elif response.status_code not in (200, 204):  # pragma: lax no cover
                logger.warning(f"Session termination failed: {response.status_code}")
        except Exception as exc:  # pragma: no cover
            # Best-effort: termination failures are logged, never raised.
            logger.warning(f"Session termination failed: {exc}")
    # TODO(Marcelo): Check the TODO below, and cover this with tests if necessary.
    def get_session_id(self) -> str | None:
        """Get the current session ID."""
        return self.session_id  # pragma: no cover
# TODO(Marcelo): I've dropped the `get_session_id` callback because it breaks the Transport protocol. Is that needed?
# It's a completely wrong abstraction, so removal is a good idea. But if we need the client to find the session ID,
# we should think about a better way to do it. I believe we can achieve it with other means.
@asynccontextmanager
async def streamable_http_client(
    url: str,
    *,
    http_client: httpx.AsyncClient | None = None,
    terminate_on_close: bool = True,
) -> AsyncGenerator[TransportStreams, None]:
    """Client transport for StreamableHTTP.
    Args:
        url: The MCP server endpoint URL.
        http_client: Optional pre-configured httpx.AsyncClient. If None, a default
            client with recommended MCP timeouts will be created. To configure headers,
            authentication, or other HTTP settings, create an httpx.AsyncClient and pass it here.
        terminate_on_close: If True, send a DELETE request to terminate the session when the context exits.
    Yields:
        Tuple containing:
        - read_stream: Stream for reading messages from the server
        - write_stream: Stream for sending messages to the server
    Example:
        See examples/snippets/clients/ for usage patterns.
    """
    # Zero-capacity streams: a send blocks until the other side receives.
    read_stream_writer, read_stream = anyio.create_memory_object_stream[SessionMessage | Exception](0)
    write_stream, write_stream_reader = anyio.create_memory_object_stream[SessionMessage](0)
    # Determine if we need to create and manage the client
    client_provided = http_client is not None
    client = http_client
    if client is None:
        # Create default client with recommended MCP timeouts
        client = create_mcp_http_client()
    transport = StreamableHTTPTransport(url)
    async with anyio.create_task_group() as tg:
        try:
            logger.debug(f"Connecting to StreamableHTTP endpoint: {url}")
            async with contextlib.AsyncExitStack() as stack:
                # Only manage client lifecycle if we created it
                if not client_provided:
                    await stack.enter_async_context(client)
                def start_get_stream() -> None:
                    # Deferred so post_writer can start the GET stream only after
                    # the initialized notification is sent.
                    tg.start_soon(transport.handle_get_stream, client, read_stream_writer)
                tg.start_soon(
                    transport.post_writer,
                    client,
                    write_stream_reader,
                    read_stream_writer,
                    write_stream,
                    start_get_stream,
                    tg,
                )
                try:
                    yield read_stream, write_stream
                finally:
                    # Best-effort session DELETE, then cancel all background tasks.
                    if transport.session_id and terminate_on_close:
                        await transport.terminate_session(client)
                    tg.cancel_scope.cancel()
        finally:
            await read_stream_writer.aclose()
            await write_stream.aclose()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/client/streamable_http.py",
"license": "MIT License",
"lines": 483,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__main__.py | from .server import main
if __name__ == "__main__":
# Click will handle CLI arguments
import sys
sys.exit(main()) # type: ignore[call-arg]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/__main__.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py | import contextlib
import logging
from collections.abc import AsyncIterator
import anyio
import click
import uvicorn
from mcp import types
from mcp.server import Server, ServerRequestContext
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.routing import Mount
from starlette.types import Receive, Scope, Send
logger = logging.getLogger(__name__)
async def handle_list_tools(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """Advertise the single notification-stream demo tool."""
    notification_tool_schema = {
        "type": "object",
        "required": ["interval", "count", "caller"],
        "properties": {
            "interval": {
                "type": "number",
                "description": "Interval between notifications in seconds",
            },
            "count": {
                "type": "number",
                "description": "Number of notifications to send",
            },
            "caller": {
                "type": "string",
                "description": "Identifier of the caller to include in notifications",
            },
        },
    }
    notification_tool = types.Tool(
        name="start-notification-stream",
        description="Sends a stream of notifications with configurable count and interval",
        input_schema=notification_tool_schema,
    )
    return types.ListToolsResult(tools=[notification_tool])
async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
    """Send `count` log notifications spaced `interval` seconds apart.

    Args:
        ctx: Request context; its session is used to emit the notifications.
        params: Tool-call params; reads `interval`, `count` and `caller` from
            the arguments, with defaults 1.0, 5 and "unknown".

    Returns:
        A CallToolResult summarizing what was sent.
    """
    arguments = params.arguments or {}
    interval = arguments.get("interval", 1.0)
    # The tool schema declares "count" as a JSON number, so a client may send a
    # float (e.g. 5.0); coerce to int because range() rejects floats.
    count = int(arguments.get("count", 5))
    caller = arguments.get("caller", "unknown")
    # Send the specified number of notifications with the given interval
    for i in range(count):
        await ctx.session.send_log_message(
            level="info",
            data=f"Notification {i + 1}/{count} from caller: {caller}",
            logger="notification_stream",
            related_request_id=ctx.request_id,
        )
        if i < count - 1:  # Don't wait after the last notification
            await anyio.sleep(interval)
    return types.CallToolResult(
        content=[
            types.TextContent(
                type="text",
                text=(f"Sent {count} notifications with {interval}s interval for caller: {caller}"),
            )
        ]
    )
@click.command()
@click.option("--port", default=3000, help="Port to listen on for HTTP")
@click.option(
    "--log-level",
    default="INFO",
    help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
@click.option(
    "--json-response",
    is_flag=True,
    default=False,
    help="Enable JSON responses instead of SSE streams",
)
def main(
    port: int,
    log_level: str,
    json_response: bool,
) -> None:
    # Entry point: wires the demo Server into a stateless StreamableHTTP session
    # manager and serves it with uvicorn. (Comment, not a docstring, so click's
    # --help output stays unchanged.)
    # Configure logging
    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    app = Server(
        "mcp-streamable-http-stateless-demo",
        on_list_tools=handle_list_tools,
        on_call_tool=handle_call_tool,
    )
    # Create the session manager with true stateless mode
    session_manager = StreamableHTTPSessionManager(
        app=app,
        event_store=None,
        json_response=json_response,
    )
    # ASGI adapter delegating every request to the session manager.
    async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None:
        await session_manager.handle_request(scope, receive, send)
    @contextlib.asynccontextmanager
    async def lifespan(app: Starlette) -> AsyncIterator[None]:
        """Context manager for session manager."""
        async with session_manager.run():
            logger.info("Application started with StreamableHTTP session manager!")
            try:
                yield
            finally:
                logger.info("Application shutting down...")
    # Create an ASGI application using the transport
    starlette_app = Starlette(
        debug=True,
        routes=[Mount("/mcp", app=handle_streamable_http)],
        lifespan=lifespan,
    )
    # Wrap ASGI application with CORS middleware to expose Mcp-Session-Id header
    # for browser-based clients (ensures 500 errors get proper CORS headers)
    starlette_app = CORSMiddleware(
        starlette_app,
        allow_origins=["*"],  # Allow all origins - adjust as needed for production
        allow_methods=["GET", "POST", "DELETE"],  # MCP streamable HTTP methods
        expose_headers=["Mcp-Session-Id"],
    )
    uvicorn.run(starlette_app, host="127.0.0.1", port=port)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-streamablehttp-stateless/mcp_simple_streamablehttp_stateless/server.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py | import contextlib
import logging
from collections.abc import AsyncIterator
import anyio
import click
from mcp import types
from mcp.server import Server, ServerRequestContext
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.routing import Mount
from starlette.types import Receive, Scope, Send
from .event_store import InMemoryEventStore
# Configure logging
logger = logging.getLogger(__name__)
async def handle_list_tools(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """Advertise the single notification-stream demo tool."""
    notification_tool_schema = {
        "type": "object",
        "required": ["interval", "count", "caller"],
        "properties": {
            "interval": {
                "type": "number",
                "description": "Interval between notifications in seconds",
            },
            "count": {
                "type": "number",
                "description": "Number of notifications to send",
            },
            "caller": {
                "type": "string",
                "description": "Identifier of the caller to include in notifications",
            },
        },
    }
    notification_tool = types.Tool(
        name="start-notification-stream",
        description="Sends a stream of notifications with configurable count and interval",
        input_schema=notification_tool_schema,
    )
    return types.ListToolsResult(tools=[notification_tool])
async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult:
    """Stream `count` log notifications, then emit a resource-updated notification.

    Demonstrates resumability: each log notification carries
    `related_request_id` so it rides the originating request's SSE stream and
    can be replayed via Last-Event-ID after a disconnect.

    Args:
        ctx: Request context; its session is used to emit the notifications.
        params: Tool-call params; reads `interval`, `count` and `caller` from
            the arguments, with defaults 1.0, 5 and "unknown".

    Returns:
        A CallToolResult summarizing what was sent.
    """
    arguments = params.arguments or {}
    interval = arguments.get("interval", 1.0)
    # The tool schema declares "count" as a JSON number, so a client may send a
    # float (e.g. 5.0); coerce to int because range() rejects floats.
    count = int(arguments.get("count", 5))
    caller = arguments.get("caller", "unknown")
    # Send the specified number of notifications with the given interval
    for i in range(count):
        # Include more detailed message for resumability demonstration
        notification_msg = f"[{i + 1}/{count}] Event from '{caller}' - Use Last-Event-ID to resume if disconnected"
        await ctx.session.send_log_message(
            level="info",
            data=notification_msg,
            logger="notification_stream",
            # Associates this notification with the original request
            # Ensures notifications are sent to the correct response stream
            # Without this, notifications will either go to:
            # - a standalone SSE stream (if GET request is supported)
            # - nowhere (if GET request isn't supported)
            related_request_id=ctx.request_id,
        )
        logger.debug(f"Sent notification {i + 1}/{count} for caller: {caller}")
        if i < count - 1:  # Don't wait after the last notification
            await anyio.sleep(interval)
    # This will send a resource notification through standalone SSE
    # established by GET request
    await ctx.session.send_resource_updated(uri="http:///test_resource")
    return types.CallToolResult(
        content=[
            types.TextContent(
                type="text",
                text=(f"Sent {count} notifications with {interval}s interval for caller: {caller}"),
            )
        ]
    )
@click.command()
@click.option("--port", default=3000, help="Port to listen on for HTTP")
@click.option(
    "--log-level",
    default="INFO",
    help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
@click.option(
    "--json-response",
    is_flag=True,
    default=False,
    help="Enable JSON responses instead of SSE streams",
)
def main(
    port: int,
    log_level: str,
    json_response: bool,
) -> int:
    # Entry point: wires the demo Server plus an in-memory event store into a
    # StreamableHTTP session manager (resumability enabled) and serves it with
    # uvicorn. (Comment, not a docstring, so click's --help output stays unchanged.)
    # Configure logging
    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    app = Server(
        "mcp-streamable-http-demo",
        on_list_tools=handle_list_tools,
        on_call_tool=handle_call_tool,
    )
    # Create event store for resumability
    # The InMemoryEventStore enables resumability support for StreamableHTTP transport.
    # It stores SSE events with unique IDs, allowing clients to:
    # 1. Receive event IDs for each SSE message
    # 2. Resume streams by sending Last-Event-ID in GET requests
    # 3. Replay missed events after reconnection
    # Note: This in-memory implementation is for demonstration ONLY.
    # For production, use a persistent storage solution.
    event_store = InMemoryEventStore()
    # Create the session manager with our app and event store
    session_manager = StreamableHTTPSessionManager(
        app=app,
        event_store=event_store,  # Enable resumability
        json_response=json_response,
    )
    # ASGI handler for streamable HTTP connections
    async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None:
        await session_manager.handle_request(scope, receive, send)
    @contextlib.asynccontextmanager
    async def lifespan(app: Starlette) -> AsyncIterator[None]:
        """Context manager for managing session manager lifecycle."""
        async with session_manager.run():
            logger.info("Application started with StreamableHTTP session manager!")
            try:
                yield
            finally:
                logger.info("Application shutting down...")
    # Create an ASGI application using the transport
    starlette_app = Starlette(
        debug=True,
        routes=[
            Mount("/mcp", app=handle_streamable_http),
        ],
        lifespan=lifespan,
    )
    # Wrap ASGI application with CORS middleware to expose Mcp-Session-Id header
    # for browser-based clients (ensures 500 errors get proper CORS headers)
    starlette_app = CORSMiddleware(
        starlette_app,
        allow_origins=["*"],  # Allow all origins - adjust as needed for production
        allow_methods=["GET", "POST", "DELETE"],  # MCP streamable HTTP methods
        expose_headers=["Mcp-Session-Id"],
    )
    import uvicorn
    uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    return 0
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "examples/servers/simple-streamablehttp/mcp_simple_streamablehttp/server.py",
"license": "MIT License",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/handlers/authorize.py | import logging
from dataclasses import dataclass
from typing import Any, Literal
# TODO(Marcelo): We should drop the `RootModel`.
from pydantic import AnyUrl, BaseModel, Field, RootModel, ValidationError # noqa: TID251
from starlette.datastructures import FormData, QueryParams
from starlette.requests import Request
from starlette.responses import RedirectResponse, Response
from mcp.server.auth.errors import stringify_pydantic_error
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.server.auth.provider import (
AuthorizationErrorCode,
AuthorizationParams,
AuthorizeError,
OAuthAuthorizationServerProvider,
construct_redirect_uri,
)
from mcp.shared.auth import InvalidRedirectUriError, InvalidScopeError
logger = logging.getLogger(__name__)
class AuthorizationRequest(BaseModel):
    """Parameters accepted by the /authorize endpoint (query string or form body).

    See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.1
    """

    # See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.1
    client_id: str = Field(..., description="The client ID")
    redirect_uri: AnyUrl | None = Field(None, description="URL to redirect to after authorization")

    # see OAuthClientMetadata; we only support `code`
    response_type: Literal["code"] = Field(..., description="Must be 'code' for authorization code flow")
    code_challenge: str = Field(..., description="PKCE code challenge")
    code_challenge_method: Literal["S256"] = Field("S256", description="PKCE code challenge method, must be S256")
    state: str | None = Field(None, description="Optional state parameter")
    scope: str | None = Field(
        None,
        description="Optional scope; if specified, should be a space-separated list of scope strings",
    )
    resource: str | None = Field(
        None,
        description="RFC 8707 resource indicator - the MCP server this token will be used with",
    )
class AuthorizationErrorResponse(BaseModel):
    """Error payload for failed authorization requests.

    See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1
    """

    error: AuthorizationErrorCode
    error_description: str | None
    error_uri: AnyUrl | None = None
    # must be set if provided in the request
    state: str | None = None
def best_effort_extract_string(key: str, params: None | FormData | QueryParams) -> str | None:
    """Look up *key* in *params*, returning the value only when it is a plain string."""
    if params is None:  # pragma: no cover
        return None
    candidate = params.get(key)
    return candidate if isinstance(candidate, str) else None
class AnyUrlModel(RootModel[AnyUrl]):
    # Thin RootModel wrapper so a bare string can be validated as an AnyUrl
    # (used below for best-effort redirect_uri recovery in error handling).
    root: AnyUrl
@dataclass
class AuthorizationHandler:
    """Starlette handler for the OAuth 2.0 authorization endpoint.

    Implements the authorization-code flow entry point described in
    https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.1.
    """

    # Supplies client lookup and the actual authorization decision.
    provider: OAuthAuthorizationServerProvider[Any, Any, Any]

    async def handle(self, request: Request) -> Response:
        """Validate the authorization request and redirect to the provider's URI.

        Accepts GET (query parameters) and POST (form data). On failure the
        response is either a 302 redirect carrying the error parameters (when a
        valid client and redirect_uri are known) or a direct 400 JSON error.
        """
        # implements authorization requests for grant_type=code;
        # see https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.1

        # Filled in incrementally as validation proceeds so error_response()
        # can use whatever has been recovered so far.
        state = None
        redirect_uri = None
        client = None
        params = None

        async def error_response(
            error: AuthorizationErrorCode,
            error_description: str | None,
            attempt_load_client: bool = True,
        ):
            # Error responses take two different formats:
            # 1. The request has a valid client ID & redirect_uri: we issue a redirect
            #    back to the redirect_uri with the error response fields as query
            #    parameters. This allows the client to be notified of the error.
            # 2. Otherwise, we return an error response directly to the end user;
            #    we choose to do so in JSON, but this is left undefined in the
            #    specification.
            # See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1
            #
            # This logic is a bit awkward to handle, because the error might be thrown
            # very early in request validation, before we've done the usual Pydantic
            # validation, loaded the client, etc. To handle this, error_response()
            # contains fallback logic which attempts to load the parameters directly
            # from the request.

            nonlocal client, redirect_uri, state

            if client is None and attempt_load_client:
                # make last-ditch attempt to load the client
                client_id = best_effort_extract_string("client_id", params)
                client = await self.provider.get_client(client_id) if client_id else None
            if redirect_uri is None and client:
                # make last-ditch effort to load the redirect uri
                try:
                    if params is not None and "redirect_uri" not in params:
                        raw_redirect_uri = None
                    else:
                        raw_redirect_uri = AnyUrlModel.model_validate(
                            best_effort_extract_string("redirect_uri", params)
                        ).root
                    redirect_uri = client.validate_redirect_uri(raw_redirect_uri)
                except (ValidationError, InvalidRedirectUriError):
                    # if the redirect URI is invalid, ignore it & just return the
                    # initial error
                    pass

            # the error response MUST contain the state specified by the client, if any
            if state is None:  # pragma: no cover
                # make last-ditch effort to load state
                state = best_effort_extract_string("state", params)

            error_resp = AuthorizationErrorResponse(
                error=error,
                error_description=error_description,
                state=state,
            )

            if redirect_uri and client:
                return RedirectResponse(
                    url=construct_redirect_uri(str(redirect_uri), **error_resp.model_dump(exclude_none=True)),
                    status_code=302,
                    headers={"Cache-Control": "no-store"},
                )
            else:
                return PydanticJSONResponse(
                    status_code=400,
                    content=error_resp,
                    headers={"Cache-Control": "no-store"},
                )

        try:
            # Parse request parameters
            if request.method == "GET":
                # Convert query_params to dict for pydantic validation
                params = request.query_params
            else:
                # Parse form data for POST requests
                params = await request.form()

            # Save state if it exists, even before validation
            state = best_effort_extract_string("state", params)

            try:
                auth_request = AuthorizationRequest.model_validate(params)
                state = auth_request.state  # Update with validated state
            except ValidationError as validation_error:
                error: AuthorizationErrorCode = "invalid_request"
                # A bad response_type gets the more specific RFC error code.
                for e in validation_error.errors():
                    if e["loc"] == ("response_type",) and e["type"] == "literal_error":
                        error = "unsupported_response_type"
                        break
                return await error_response(error, stringify_pydantic_error(validation_error))

            # Get client information
            client = await self.provider.get_client(
                auth_request.client_id,
            )
            if not client:
                # For client_id validation errors, return direct error (no redirect)
                return await error_response(
                    error="invalid_request",
                    error_description=f"Client ID '{auth_request.client_id}' not found",
                    attempt_load_client=False,
                )

            # Validate redirect_uri against client's registered URIs
            try:
                redirect_uri = client.validate_redirect_uri(auth_request.redirect_uri)
            except InvalidRedirectUriError as validation_error:
                # For redirect_uri validation errors, return direct error (no redirect)
                return await error_response(
                    error="invalid_request",
                    error_description=validation_error.message,
                )

            # Validate scope - for scope errors, we can redirect
            try:
                scopes = client.validate_scope(auth_request.scope)
            except InvalidScopeError as validation_error:
                # For scope errors, redirect with error parameters
                return await error_response(
                    error="invalid_scope",
                    error_description=validation_error.message,
                )

            # Setup authorization parameters
            auth_params = AuthorizationParams(
                state=state,
                scopes=scopes,
                code_challenge=auth_request.code_challenge,
                redirect_uri=redirect_uri,
                redirect_uri_provided_explicitly=auth_request.redirect_uri is not None,
                resource=auth_request.resource,  # RFC 8707
            )

            try:
                # Let the provider pick the next URI to redirect to
                return RedirectResponse(
                    url=await self.provider.authorize(
                        client,
                        auth_params,
                    ),
                    status_code=302,
                    headers={"Cache-Control": "no-store"},
                )
            except AuthorizeError as e:
                # Handle authorization errors as defined in RFC 6749 Section 4.1.2.1
                return await error_response(error=e.error, error_description=e.error_description)

        except Exception as validation_error:  # pragma: no cover
            # Catch-all for unexpected errors
            logger.exception("Unexpected error in authorization_handler", exc_info=validation_error)
            return await error_response(error="server_error", error_description="An unexpected error occurred")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/handlers/authorize.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/handlers/metadata.py | from dataclasses import dataclass
from starlette.requests import Request
from starlette.responses import Response
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.shared.auth import OAuthMetadata, ProtectedResourceMetadata
@dataclass
class MetadataHandler:
    """Serves the OAuth authorization server metadata document as JSON."""

    # Static metadata returned verbatim on every request.
    metadata: OAuthMetadata

    async def handle(self, request: Request) -> Response:
        """Return the metadata with a one-hour public cache header."""
        cache_headers = {"Cache-Control": "public, max-age=3600"}  # Cache for 1 hour
        return PydanticJSONResponse(content=self.metadata, headers=cache_headers)
@dataclass
class ProtectedResourceMetadataHandler:
    """Serves the OAuth protected resource metadata document as JSON."""

    # Static metadata returned verbatim on every request.
    metadata: ProtectedResourceMetadata

    async def handle(self, request: Request) -> Response:
        """Return the metadata with a one-hour public cache header."""
        cache_headers = {"Cache-Control": "public, max-age=3600"}  # Cache for 1 hour
        return PydanticJSONResponse(content=self.metadata, headers=cache_headers)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/handlers/metadata.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:src/mcp/server/auth/handlers/register.py | import secrets
import time
from dataclasses import dataclass
from typing import Any
from uuid import uuid4
from pydantic import BaseModel, ValidationError
from starlette.requests import Request
from starlette.responses import Response
from mcp.server.auth.errors import stringify_pydantic_error
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.server.auth.provider import OAuthAuthorizationServerProvider, RegistrationError, RegistrationErrorCode
from mcp.server.auth.settings import ClientRegistrationOptions
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata
# this alias is a no-op; it's just to separate out the types exposed to the
# provider from what we use in the HTTP handler
RegistrationRequest = OAuthClientMetadata
class RegistrationErrorResponse(BaseModel):
    """Error body for a failed client registration (RFC 7591 section 3.2.2)."""

    error: RegistrationErrorCode
    error_description: str | None
@dataclass
class RegistrationHandler:
    """Starlette handler for OAuth 2.0 Dynamic Client Registration (RFC 7591)."""

    # Persists registered clients.
    provider: OAuthAuthorizationServerProvider[Any, Any, Any]
    # Controls scope defaults/validation and client-secret lifetime.
    options: ClientRegistrationOptions

    async def handle(self, request: Request) -> Response:
        """Register a new OAuth client from the JSON metadata in the request body.

        Returns 201 with the full client information on success, or a 400 JSON
        error (per RFC 7591 section 3.2.2) for invalid metadata or provider
        registration failures.
        """
        # Implements dynamic client registration as defined in https://datatracker.ietf.org/doc/html/rfc7591#section-3.1
        try:
            # Parse and validate the client metadata document.
            body = await request.body()
            client_metadata = OAuthClientMetadata.model_validate_json(body)
            # Scope validation is handled below
        except ValidationError as validation_error:
            return PydanticJSONResponse(
                content=RegistrationErrorResponse(
                    error="invalid_client_metadata",
                    error_description=stringify_pydantic_error(validation_error),
                ),
                status_code=400,
            )

        # Server-assigned client identifier.
        client_id = str(uuid4())

        # If auth method is None, default to client_secret_post
        if client_metadata.token_endpoint_auth_method is None:
            client_metadata.token_endpoint_auth_method = "client_secret_post"

        # Public clients ("none") get no secret.
        client_secret = None
        if client_metadata.token_endpoint_auth_method != "none":  # pragma: no branch
            # cryptographically secure random 32-byte hex string
            client_secret = secrets.token_hex(32)

        # Apply default scopes when none requested; otherwise check the
        # requested scopes against the server's valid set (when configured).
        if client_metadata.scope is None and self.options.default_scopes is not None:
            client_metadata.scope = " ".join(self.options.default_scopes)
        elif client_metadata.scope is not None and self.options.valid_scopes is not None:
            requested_scopes = set(client_metadata.scope.split())
            valid_scopes = set(self.options.valid_scopes)
            if not requested_scopes.issubset(valid_scopes):  # pragma: no branch
                return PydanticJSONResponse(
                    content=RegistrationErrorResponse(
                        error="invalid_client_metadata",
                        error_description="Requested scopes are not valid: "
                        f"{', '.join(requested_scopes - valid_scopes)}",
                    ),
                    status_code=400,
                )
        if "authorization_code" not in client_metadata.grant_types:
            return PydanticJSONResponse(
                content=RegistrationErrorResponse(
                    error="invalid_client_metadata",
                    error_description="grant_types must include 'authorization_code'",
                ),
                status_code=400,
            )

        # The MCP spec requires servers to use the authorization `code` flow
        # with PKCE
        if "code" not in client_metadata.response_types:
            return PydanticJSONResponse(
                content=RegistrationErrorResponse(
                    error="invalid_client_metadata",
                    error_description="response_types must include 'code' for authorization_code grant",
                ),
                status_code=400,
            )

        client_id_issued_at = int(time.time())
        # Secret expiry is relative to issuance; None means it never expires.
        client_secret_expires_at = (
            client_id_issued_at + self.options.client_secret_expiry_seconds
            if self.options.client_secret_expiry_seconds is not None
            else None
        )
        client_info = OAuthClientInformationFull(
            client_id=client_id,
            client_id_issued_at=client_id_issued_at,
            client_secret=client_secret,
            client_secret_expires_at=client_secret_expires_at,
            # passthrough information from the client request
            redirect_uris=client_metadata.redirect_uris,
            token_endpoint_auth_method=client_metadata.token_endpoint_auth_method,
            grant_types=client_metadata.grant_types,
            response_types=client_metadata.response_types,
            client_name=client_metadata.client_name,
            client_uri=client_metadata.client_uri,
            logo_uri=client_metadata.logo_uri,
            scope=client_metadata.scope,
            contacts=client_metadata.contacts,
            tos_uri=client_metadata.tos_uri,
            policy_uri=client_metadata.policy_uri,
            jwks_uri=client_metadata.jwks_uri,
            jwks=client_metadata.jwks,
            software_id=client_metadata.software_id,
            software_version=client_metadata.software_version,
        )
        try:
            # Register client
            await self.provider.register_client(client_info)

            # Return client information
            return PydanticJSONResponse(content=client_info, status_code=201)
        except RegistrationError as e:
            # Handle registration errors as defined in RFC 7591 Section 3.2.2
            return PydanticJSONResponse(
                content=RegistrationErrorResponse(error=e.error, error_description=e.error_description),
                status_code=400,
            )
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/handlers/register.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/handlers/revoke.py | from dataclasses import dataclass
from functools import partial
from typing import Any, Literal
from pydantic import BaseModel, ValidationError
from starlette.requests import Request
from starlette.responses import Response
from mcp.server.auth.errors import (
stringify_pydantic_error,
)
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.server.auth.middleware.client_auth import AuthenticationError, ClientAuthenticator
from mcp.server.auth.provider import AccessToken, OAuthAuthorizationServerProvider, RefreshToken
class RevocationRequest(BaseModel):
    """See https://datatracker.ietf.org/doc/html/rfc7009#section-2.1"""

    # The token string to revoke (access or refresh token).
    token: str
    # Optional hint telling the server which token loader to try first.
    token_type_hint: Literal["access_token", "refresh_token"] | None = None
    client_id: str
    client_secret: str | None
class RevocationErrorResponse(BaseModel):
    """Error body returned by the revocation endpoint."""

    error: Literal["invalid_request", "unauthorized_client"]
    error_description: str | None = None
@dataclass
class RevocationHandler:
    """Starlette handler for OAuth 2.0 Token Revocation (RFC 7009)."""

    provider: OAuthAuthorizationServerProvider[Any, Any, Any]
    client_authenticator: ClientAuthenticator

    async def handle(self, request: Request) -> Response:
        """Handler for the OAuth 2.0 Token Revocation endpoint."""
        try:
            # The client must authenticate before it may revoke anything.
            client = await self.client_authenticator.authenticate_request(request)
        except AuthenticationError as e:  # pragma: no cover
            return PydanticJSONResponse(
                status_code=401,
                content=RevocationErrorResponse(
                    error="unauthorized_client",
                    error_description=e.message,
                ),
            )

        try:
            form_data = await request.form()
            revocation_request = RevocationRequest.model_validate(dict(form_data))
        except ValidationError as e:
            return PydanticJSONResponse(
                status_code=400,
                content=RevocationErrorResponse(
                    error="invalid_request",
                    error_description=stringify_pydantic_error(e),
                ),
            )

        # Try the access-token loader first, then the refresh-token loader;
        # reversed when the client hints the token is a refresh token.
        loaders = [
            self.provider.load_access_token,
            partial(self.provider.load_refresh_token, client),
        ]
        if revocation_request.token_type_hint == "refresh_token":  # pragma: no cover
            loaders = reversed(loaders)

        token: None | AccessToken | RefreshToken = None
        for loader in loaders:
            token = await loader(revocation_request.token)
            if token is not None:
                break

        # if token is not found, just return HTTP 200 per the RFC
        if token and token.client_id == client.client_id:
            # Revoke token; provider is not meant to be able to do validation
            # at this point that would result in an error
            await self.provider.revoke_token(token)

        # Return successful empty response
        return Response(
            status_code=200,
            headers={
                "Cache-Control": "no-store",
                "Pragma": "no-cache",
            },
        )
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/handlers/revoke.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:src/mcp/server/auth/handlers/token.py | import base64
import hashlib
import time
from dataclasses import dataclass
from typing import Annotated, Any, Literal
from pydantic import AnyHttpUrl, AnyUrl, BaseModel, Field, TypeAdapter, ValidationError
from starlette.requests import Request
from mcp.server.auth.errors import stringify_pydantic_error
from mcp.server.auth.json_response import PydanticJSONResponse
from mcp.server.auth.middleware.client_auth import AuthenticationError, ClientAuthenticator
from mcp.server.auth.provider import OAuthAuthorizationServerProvider, TokenError, TokenErrorCode
from mcp.shared.auth import OAuthToken
class AuthorizationCodeRequest(BaseModel):
    """Token request for grant_type=authorization_code.

    See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.3
    """

    # See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.3
    grant_type: Literal["authorization_code"]
    code: str = Field(..., description="The authorization code")
    redirect_uri: AnyUrl | None = Field(None, description="Must be the same as redirect URI provided in /authorize")
    client_id: str
    # we use the client_secret param, per https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1
    client_secret: str | None = None
    # See https://datatracker.ietf.org/doc/html/rfc7636#section-4.5
    code_verifier: str = Field(..., description="PKCE code verifier")
    # RFC 8707 resource indicator
    resource: str | None = Field(None, description="Resource indicator for the token")
class RefreshTokenRequest(BaseModel):
    """Token request for grant_type=refresh_token.

    See https://datatracker.ietf.org/doc/html/rfc6749#section-6
    """

    # See https://datatracker.ietf.org/doc/html/rfc6749#section-6
    grant_type: Literal["refresh_token"]
    refresh_token: str = Field(..., description="The refresh token")
    scope: str | None = Field(None, description="Optional scope parameter")
    client_id: str
    # we use the client_secret param, per https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1
    client_secret: str | None = None
    # RFC 8707 resource indicator
    resource: str | None = Field(None, description="Resource indicator for the token")
# Discriminated union over `grant_type`: form data is dispatched to the
# matching request model by the adapter below.
TokenRequest = Annotated[AuthorizationCodeRequest | RefreshTokenRequest, Field(discriminator="grant_type")]
token_request_adapter = TypeAdapter[TokenRequest](TokenRequest)
class TokenErrorResponse(BaseModel):
    """See https://datatracker.ietf.org/doc/html/rfc6749#section-5.2"""

    error: TokenErrorCode
    error_description: str | None = None
    error_uri: AnyHttpUrl | None = None
# this is just an alias over OAuthToken; the only reason we do this
# is to have some separation between the HTTP response type, and the
# type returned by the provider
TokenSuccessResponse = OAuthToken
@dataclass
class TokenHandler:
    """Starlette handler for the OAuth 2.0 token endpoint.

    Supports the authorization_code grant (with mandatory PKCE verification,
    RFC 7636) and the refresh_token grant.
    """

    provider: OAuthAuthorizationServerProvider[Any, Any, Any]
    client_authenticator: ClientAuthenticator

    def response(self, obj: TokenSuccessResponse | TokenErrorResponse):
        """Wrap a success (200) or error (400) payload in a no-store JSON response."""
        status_code = 200
        if isinstance(obj, TokenErrorResponse):
            status_code = 400

        return PydanticJSONResponse(
            content=obj,
            status_code=status_code,
            headers={
                "Cache-Control": "no-store",
                "Pragma": "no-cache",
            },
        )

    async def handle(self, request: Request):
        """Authenticate the client, validate the grant, and issue tokens."""
        try:
            client_info = await self.client_authenticator.authenticate_request(request)
        except AuthenticationError as e:
            # Authentication failures should return 401
            return PydanticJSONResponse(
                content=TokenErrorResponse(
                    error="invalid_client",
                    error_description=e.message,
                ),
                status_code=401,
                headers={
                    "Cache-Control": "no-store",
                    "Pragma": "no-cache",
                },
            )

        try:
            form_data = await request.form()
            # TODO(Marcelo): Can someone check if this `dict()` wrapper is necessary?
            token_request = token_request_adapter.validate_python(dict(form_data))
        except ValidationError as validation_error:  # pragma: no cover
            return self.response(
                TokenErrorResponse(
                    error="invalid_request",
                    error_description=stringify_pydantic_error(validation_error),
                )
            )

        # The requested grant type must be one the client registered for.
        if token_request.grant_type not in client_info.grant_types:  # pragma: no cover
            return self.response(
                TokenErrorResponse(
                    error="unsupported_grant_type",
                    error_description=(f"Unsupported grant type (supported grant types are {client_info.grant_types})"),
                )
            )

        tokens: OAuthToken

        match token_request:
            case AuthorizationCodeRequest():
                auth_code = await self.provider.load_authorization_code(client_info, token_request.code)
                if auth_code is None or auth_code.client_id != token_request.client_id:
                    # if code belongs to different client, pretend it doesn't exist
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_grant",
                            error_description="authorization code does not exist",
                        )
                    )

                # make auth codes expire after a deadline
                # see https://datatracker.ietf.org/doc/html/rfc6749#section-10.5
                if auth_code.expires_at < time.time():
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_grant",
                            error_description="authorization code has expired",
                        )
                    )

                # verify redirect_uri doesn't change between /authorize and /tokens
                # see https://datatracker.ietf.org/doc/html/rfc6749#section-10.6
                if auth_code.redirect_uri_provided_explicitly:
                    authorize_request_redirect_uri = auth_code.redirect_uri
                else:  # pragma: no cover
                    authorize_request_redirect_uri = None

                # Convert both sides to strings for comparison to handle AnyUrl vs string issues
                token_redirect_str = str(token_request.redirect_uri) if token_request.redirect_uri is not None else None
                auth_redirect_str = (
                    str(authorize_request_redirect_uri) if authorize_request_redirect_uri is not None else None
                )

                if token_redirect_str != auth_redirect_str:
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_request",
                            error_description=("redirect_uri did not match the one used when creating auth code"),
                        )
                    )

                # Verify PKCE code verifier: S256(code_verifier) must equal the
                # challenge stored with the auth code.
                sha256 = hashlib.sha256(token_request.code_verifier.encode()).digest()
                hashed_code_verifier = base64.urlsafe_b64encode(sha256).decode().rstrip("=")

                if hashed_code_verifier != auth_code.code_challenge:
                    # see https://datatracker.ietf.org/doc/html/rfc7636#section-4.6
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_grant",
                            error_description="incorrect code_verifier",
                        )
                    )

                try:
                    # Exchange authorization code for tokens
                    tokens = await self.provider.exchange_authorization_code(client_info, auth_code)
                except TokenError as e:
                    return self.response(TokenErrorResponse(error=e.error, error_description=e.error_description))

            case RefreshTokenRequest():  # pragma: no branch
                refresh_token = await self.provider.load_refresh_token(client_info, token_request.refresh_token)
                if refresh_token is None or refresh_token.client_id != token_request.client_id:
                    # if token belongs to different client, pretend it doesn't exist
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_grant",
                            error_description="refresh token does not exist",
                        )
                    )

                if refresh_token.expires_at and refresh_token.expires_at < time.time():
                    # if the refresh token has expired, pretend it doesn't exist
                    return self.response(
                        TokenErrorResponse(
                            error="invalid_grant",
                            error_description="refresh token has expired",
                        )
                    )

                # Parse scopes if provided
                scopes = token_request.scope.split(" ") if token_request.scope else refresh_token.scopes

                # Requested scopes must be a subset of those on the refresh token.
                for scope in scopes:
                    if scope not in refresh_token.scopes:
                        return self.response(
                            TokenErrorResponse(
                                error="invalid_scope",
                                error_description=(f"cannot request scope `{scope}` not provided by refresh token"),
                            )
                        )

                try:
                    # Exchange refresh token for new tokens
                    tokens = await self.provider.exchange_refresh_token(client_info, refresh_token, scopes)
                except TokenError as e:
                    return self.response(TokenErrorResponse(error=e.error, error_description=e.error_description))

        return self.response(tokens)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/handlers/token.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.